
[Xen-devel] [PATCH 1/2] x86, hvm: allow another vcpu to receive PIT IRQ (support kexec/kdump on hvm)



This patch is needed for kexec/kdump on HVM, since kdump halts the BSP.

- allow a vcpu other than the BSP to receive the PIT IRQ
- deliver the PIT IRQ to the vcpu whose LINT0 is in ExtINT mode

When a vcpu programs its LINT0 into ExtINT delivery mode, vlapic_write() now calls the new pit_bind_vcpu(), which rebinds the PIT's platform timer to that vcpu (see the illustrative sketch below).
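
Purely for illustration (not part of the patch): a minimal guest-side sketch of what a kdump kernel running on a non-boot vcpu might do to claim the PIT IRQ. The register offsets follow the architectural local APIC layout; "apic" and "route_pic_to_this_cpu" are hypothetical names.

    #include <stdint.h>

    #define APIC_LVT0       0x350      /* LINT0 entry in the local vector table */
    #define APIC_MODE_MASK  0x700      /* delivery mode field */
    #define APIC_DM_EXTINT  0x700      /* delivery mode: ExtINT */
    #define APIC_LVT_MASKED (1u << 16) /* LVT mask bit */

    /* Hypothetical mapping of the local APIC MMIO page. */
    static volatile uint32_t *apic;

    static void route_pic_to_this_cpu(void)
    {
        uint32_t lvt0 = apic[APIC_LVT0 / 4];

        lvt0 &= ~(APIC_MODE_MASK | APIC_LVT_MASKED);
        lvt0 |= APIC_DM_EXTINT;        /* LINT0 now accepts ExtINT */

        /* This write traps to vlapic_write(), which with this patch
         * calls pit_bind_vcpu() for the current vcpu. */
        apic[APIC_LVT0 / 4] = lvt0;
    }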

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>

diff -r 7397608bce87 xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c  Mon Jun 29 15:50:32 2009 +0100
+++ b/xen/arch/x86/hvm/i8254.c  Tue Jun 30 13:15:31 2009 +0900
@@ -42,7 +42,7 @@
 #define vcpu_vpit(vcpu)  (domain_vpit((vcpu)->domain))
 #define vpit_domain(pit) (container_of((pit), struct domain, \
                                        arch.hvm_domain.pl_time.vpit))
-#define vpit_vcpu(pit)   (vpit_domain(pit)->vcpu[0])
+#define vpit_vcpu(pit)   (pit->pt0.vcpu)
 
 #define RW_STATE_LSB 1
 #define RW_STATE_MSB 2
@@ -174,14 +174,11 @@ static void pit_load_count(PITState *pit
     if ( val == 0 )
         val = 0x10000;
 
-    if ( v == NULL )
-        pit->count_load_time[channel] = 0;
-    else
-        pit->count_load_time[channel] = get_guest_time(v);
+    pit->count_load_time[channel] = get_guest_time(v);
     s->count = val;
     period = DIV_ROUND(val * SYSTEM_TIME_HZ, PIT_FREQ);
 
-    if ( (v == NULL) || !is_hvm_vcpu(v) || (channel != 0) )
+    if ( !is_hvm_vcpu(v) || (channel != 0) )
         return;
 
     switch ( s->mode )
@@ -397,6 +394,15 @@ static int pit_save(struct domain *d, hv
     return rc;
 }
 
+static struct vcpu *find_pic_intr_accept_vcpu(struct domain *d)
+{
+    struct vcpu *v;
+    for_each_vcpu(d, v)
+        if ( vlapic_accept_pic_intr(v) )
+            return v;
+    return d->vcpu[0];
+}
+
 static int pit_load(struct domain *d, hvm_domain_context_t *h)
 {
     PITState *pit = domain_vpit(d);
@@ -415,7 +421,8 @@ static int pit_load(struct domain *d, hv
      * time jitter here, but the wall-clock will have jumped massively, so 
      * we hope the guest can handle it.
      */
-    pit->pt0.last_plt_gtime = get_guest_time(d->vcpu[0]);
+    pit->pt0.vcpu = find_pic_intr_accept_vcpu(d);
+    pit->pt0.last_plt_gtime = get_guest_time(pit->pt0.vcpu);
     for ( i = 0; i < 3; i++ )
         pit_load_count(pit, i, pit->hw.channels[i].count);
 
@@ -432,7 +439,9 @@ void pit_reset(struct domain *d)
     struct hvm_hw_pit_channel *s;
     int i;
 
-    destroy_periodic_time(&pit->pt0);
+    if ( is_hvm_domain(d) )
+        destroy_periodic_time(&pit->pt0);
+
     pit->pt0.source = PTSRC_isa;
 
     spin_lock(&pit->lock);
@@ -452,14 +461,43 @@ void pit_init(struct vcpu *v, unsigned l
 {
     PITState *pit = vcpu_vpit(v);
 
+    ASSERT(v->vcpu_id == 0);
+
     spin_lock_init(&pit->lock);
 
     register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
     register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
 
+    pit->pt0.vcpu = v; /* VCPU0 */
+
     pit_reset(v->domain);
 }
 
+void pit_bind_vcpu(struct vcpu *v)
+{
+    PITState *pit = vcpu_vpit(v);
+    struct periodic_time *pt = &pit->pt0;
+
+    ASSERT(is_hvm_vcpu(v));
+
+    spin_lock(&pit->lock);
+    if ( pt->vcpu != v )
+    {
+        /*
+         * Recreate the platform timer only if it was previously created
+         * as a periodic timer.
+         * For a one-shot timer, the timer stays with the previous vcpu
+         * but kicks the new vcpu.
+         */
+        pt->vcpu = v;
+        if ( pt->cb != NULL && !pt->one_shot )
+            /* XXX: should delta be recalculated? */
+            create_periodic_time(v, pt, pt->period_cycles, pt->period_cycles,
+                                 pt->irq, pt->cb, pt->priv);
+    }
+    spin_unlock(&pit->lock);
+}
+
 void pit_deinit(struct domain *d)
 {
     PITState *pit = domain_vpit(d);
diff -r 7397608bce87 xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c    Mon Jun 29 15:50:32 2009 +0100
+++ b/xen/arch/x86/hvm/irq.c    Tue Jun 30 13:15:31 2009 +0900
@@ -348,7 +348,6 @@ struct hvm_intack hvm_vcpu_ack_pending_i
             intack = hvm_intack_none;
         break;
     case hvm_intsrc_pic:
-        ASSERT(v->vcpu_id == 0);
         if ( (vector = vpic_ack_pending_irq(v)) == -1 )
             intack = hvm_intack_none;
         else
diff -r 7397608bce87 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Mon Jun 29 15:50:32 2009 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Tue Jun 30 13:15:31 2009 +0900
@@ -697,6 +697,9 @@ static int vlapic_write(struct vcpu *v, 
             val |= APIC_LVT_MASKED;
         val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
         vlapic_set_reg(vlapic, offset, val);
+        if ( offset == APIC_LVT0 &&
+             (val & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT )
+            pit_bind_vcpu(vlapic_vcpu(vlapic));
         break;
 
     case APIC_TMICT:
@@ -782,12 +785,11 @@ int vlapic_accept_pic_intr(struct vcpu *
     uint32_t lvt0 = vlapic_get_reg(vlapic, APIC_LVT0);
 
     /*
-     * Only CPU0 is wired to the 8259A. INTA cycles occur if LINT0 is set up
-     * accept ExtInts, or if the LAPIC is disabled (so LINT0 behaves as INTR).
+     * INTA cycles occur if LINT0 is set up to accept ExtInts,
+     * or if the LAPIC is disabled (so LINT0 behaves as INTR).
      */
-    return ((v->vcpu_id == 0) &&
-            (((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) ||
-             vlapic_hw_disabled(vlapic)));
+    return (((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) ||
+             vlapic_hw_disabled(vlapic));
 }
 
 int vlapic_has_pending_irq(struct vcpu *v)
diff -r 7397608bce87 xen/include/asm-x86/hvm/vpt.h
--- a/xen/include/asm-x86/hvm/vpt.h     Mon Jun 29 15:50:32 2009 +0100
+++ b/xen/include/asm-x86/hvm/vpt.h     Tue Jun 30 13:15:31 2009 +0900
@@ -162,7 +162,7 @@ void destroy_periodic_time(struct period
 
 int pv_pit_handler(int port, int data, int write);
 void pit_reset(struct domain *d);
-
+void pit_bind_vcpu(struct vcpu *v);
 void pit_init(struct vcpu *v, unsigned long cpu_khz);
 void pit_stop_channel0_irq(PITState * pit);
 void pit_deinit(struct domain *d);