[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[xen master] x86/vPIT: account for "counter stopped" time



commit 14f42af3f52d56e769263dc414616be805bd6e2d
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Jun 21 13:45:36 2023 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Jun 21 13:45:36 2023 +0200

    x86/vPIT: account for "counter stopped" time
    
    For an approach like that used in "x86: detect PIT aliasing on ports
    other than 0x4[0-3]" [1] to work, channel 2 may not (appear to) continue
    counting when "gate" is low. Record the time when "gate" goes low, and
    adjust pit_get_{count,out}() accordingly. Additionally for most of the
    modes a rising edge of "gate" doesn't mean just "resume counting", but
    "initiate counting", i.e. specifically the reloading of the counter with
    its init value.
    
    No special handling for state save/load: See the comment near the end of
    pit_load().
    
    Along with introducing the get_count() helper to have the calculations
    (and the locking check) in a single place, switch pit_get_count()'s d,
    counter, and return type to unsigned int.
    
    [1] https://lists.xen.org/archives/html/xen-devel/2023-05/msg00898.html
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/emul-i8254.c          | 82 ++++++++++++++++++++++++--------------
 xen/arch/x86/include/asm/hvm/vpt.h |  8 +++-
 2 files changed, 59 insertions(+), 31 deletions(-)

diff --git a/xen/arch/x86/emul-i8254.c b/xen/arch/x86/emul-i8254.c
index 586ad7b331..a81232fc55 100644
--- a/xen/arch/x86/emul-i8254.c
+++ b/xen/arch/x86/emul-i8254.c
@@ -56,17 +56,24 @@ static int cf_check handle_speaker_io(
 #define get_guest_time(v) \
    (is_hvm_vcpu(v) ? hvm_get_guest_time(v) : (u64)get_s_time())
 
-static int pit_get_count(PITState *pit, int channel)
+static uint64_t get_count(PITState *pit, unsigned int channel)
 {
-    uint64_t d;
-    int  counter;
-    struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
-    struct vcpu *v = vpit_vcpu(pit);
+    const struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
+    uint64_t d = c->gate || (c->mode & 3) == 1
+                 ? get_guest_time(vpit_vcpu(pit))
+                 : pit->count_stop_time[channel];
 
     ASSERT(spin_is_locked(&pit->lock));
 
-    d = muldiv64(get_guest_time(v) - pit->count_load_time[channel],
-                 PIT_FREQ, SYSTEM_TIME_HZ);
+    return muldiv64((d - pit->count_load_time[channel] -
+                     pit->stopped_time[channel]),
+                    PIT_FREQ, SYSTEM_TIME_HZ);
+}
+
+static unsigned int pit_get_count(PITState *pit, int channel)
+{
+    unsigned int d = get_count(pit, channel), counter;
+    struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
 
     switch ( c->mode )
     {
@@ -110,6 +117,10 @@ static void pit_load_count(PITState *pit, int channel, int val)
         pit->count_load_time[channel] = 0;
     else
         pit->count_load_time[channel] = get_guest_time(v);
+
+    pit->count_stop_time[channel] = pit->count_load_time[channel];
+    pit->stopped_time[channel] = 0;
+
     s->count = val;
     period = DIV_ROUND(val * SYSTEM_TIME_HZ, PIT_FREQ);
 
@@ -142,14 +153,8 @@ static void pit_load_count(PITState *pit, int channel, int val)
 static int pit_get_out(PITState *pit, int channel)
 {
     struct hvm_hw_pit_channel *s = &pit->hw.channels[channel];
-    uint64_t d;
+    uint64_t d = get_count(pit, channel);
     int out;
-    struct vcpu *v = vpit_vcpu(pit);
-
-    ASSERT(spin_is_locked(&pit->lock));
-
-    d = muldiv64(get_guest_time(v) - pit->count_load_time[channel], 
-                 PIT_FREQ, SYSTEM_TIME_HZ);
 
     switch ( s->mode )
     {
@@ -182,22 +187,39 @@ static void pit_set_gate(PITState *pit, int channel, int val)
 
     ASSERT(spin_is_locked(&pit->lock));
 
-    switch ( s->mode )
-    {
-    default:
-    case 0:
-    case 4:
-        /* XXX: just disable/enable counting */
-        break;
-    case 1:
-    case 5:
-    case 2:
-    case 3:
-        /* Restart counting on rising edge. */
-        if ( s->gate < val )
-            pit->count_load_time[channel] = get_guest_time(v);
-        break;
-    }
+    if ( s->gate > val )
+        switch ( s->mode )
+        {
+        case 0:
+        case 2:
+        case 3:
+        case 4:
+            /* Disable counting. */
+            if ( !channel )
+                destroy_periodic_time(&pit->pt0);
+            pit->count_stop_time[channel] = get_guest_time(v);
+            break;
+        }
+
+    if ( s->gate < val )
+        switch ( s->mode )
+        {
+        default:
+        case 0:
+        case 4:
+            /* Enable counting. */
+            pit->stopped_time[channel] += get_guest_time(v) -
+                                          pit->count_stop_time[channel];
+            break;
+
+        case 1:
+        case 5:
+        case 2:
+        case 3:
+            /* Initiate counting on rising edge. */
+            pit_load_count(pit, channel, pit->hw.channels[channel].count);
+            break;
+        }
 
     s->gate = val;
 }
diff --git a/xen/arch/x86/include/asm/hvm/vpt.h b/xen/arch/x86/include/asm/hvm/vpt.h
index 935cbe333b..2af76ca8dc 100644
--- a/xen/arch/x86/include/asm/hvm/vpt.h
+++ b/xen/arch/x86/include/asm/hvm/vpt.h
@@ -48,8 +48,14 @@ struct periodic_time {
 typedef struct PITState {
     /* Hardware state */
     struct hvm_hw_pit hw;
-    /* Last time the counters read zero, for calcuating counter reads */
+
+    /* Last time the counters read zero, for calculating counter reads */
     int64_t count_load_time[3];
+    /* Last time the counters were stopped, for calculating counter reads */
+    int64_t count_stop_time[3];
+    /* Accumulate "stopped" time, since the last counter write/reload. */
+    uint64_t stopped_time[3];
+
     /* Channel 0 IRQ handling. */
     struct periodic_time pt0;
     spinlock_t lock;
--
generated by git-patchbot for /home/xen/git/xen.git#master



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.