
[Xen-changelog] Manual merge.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 215d8b2f3d94e3ad623cd219d4371cd04a84fb70
# Parent  49a00af507771bee9451a01ae98deffdc575cc1f
# Parent  05b63285047c1bef1ec11158b14b9a5375e9645e
Manual merge.

diff -r 49a00af50777 -r 215d8b2f3d94 linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h Sat Jul  9 10:01:49 2005
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h Sat Jul  9 10:24:14 2005
@@ -126,8 +126,8 @@
 /* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
 extern int  bind_virq_to_irq(int virq);
 extern void unbind_virq_from_irq(int virq);
-extern int  bind_ipi_on_cpu_to_irq(int cpu, int ipi);
-extern void unbind_ipi_on_cpu_from_irq(int cpu, int ipi);
+extern int  bind_ipi_to_irq(int ipi);
+extern void unbind_ipi_from_irq(int ipi);
 extern int  bind_evtchn_to_irq(int evtchn);
 extern void unbind_evtchn_from_irq(int evtchn);
 
diff -r 49a00af50777 -r 215d8b2f3d94 xen/common/event_channel.c
--- a/xen/common/event_channel.c        Sat Jul  9 10:01:49 2005
+++ b/xen/common/event_channel.c        Sat Jul  9 10:24:14 2005
@@ -220,12 +220,10 @@
 
     chn1->u.interdomain.remote_dom  = d2;
     chn1->u.interdomain.remote_port = (u16)port2;
-    chn1->notify_vcpu_id            = 0;
     chn1->state                     = ECS_INTERDOMAIN;
     
     chn2->u.interdomain.remote_dom  = d1;
     chn2->u.interdomain.remote_port = (u16)port1;
-    chn2->notify_vcpu_id            = 0;
     chn2->state                     = ECS_INTERDOMAIN;
 
  out:
@@ -285,10 +283,7 @@
 {
     struct evtchn *chn;
     struct domain *d = current->domain;
-    int            port, ipi_vcpu = bind->ipi_vcpu;
-
-    if ( (ipi_vcpu >= MAX_VIRT_CPUS) || (d->vcpu[ipi_vcpu] == NULL) )
-        return -EINVAL;
+    int            port;
 
     spin_lock(&d->evtchn_lock);
 
@@ -296,7 +291,7 @@
     {
         chn = evtchn_from_port(d, port);
         chn->state          = ECS_IPI;
-        chn->notify_vcpu_id = ipi_vcpu;
+        chn->notify_vcpu_id = current->vcpu_id;
     }
 
     spin_unlock(&d->evtchn_lock);
@@ -325,8 +320,6 @@
         goto out;
 
     chn = evtchn_from_port(d, port);
-
-    chn->notify_vcpu_id = 0;
 
     d->pirq_to_evtchn[pirq] = port;
     rc = pirq_guest_bind(d->vcpu[0], pirq, 
@@ -441,7 +434,9 @@
         BUG();
     }
 
-    chn1->state = ECS_FREE;
+    /* Reset binding to vcpu0 when the channel is freed. */
+    chn1->state          = ECS_FREE;
+    chn1->notify_vcpu_id = 0;
 
  out:
     if ( d2 != NULL )
@@ -570,12 +565,13 @@
         status->u.virq = chn->u.virq;
         break;
     case ECS_IPI:
-        status->status     = EVTCHNSTAT_ipi;
-        status->u.ipi_vcpu = chn->notify_vcpu_id;
+        status->status = EVTCHNSTAT_ipi;
         break;
     default:
         BUG();
     }
+
+    status->vcpu = chn->notify_vcpu_id;
 
  out:
     spin_unlock(&d->evtchn_lock);
@@ -583,24 +579,41 @@
     return rc;
 }
 
-static long evtchn_rebind(evtchn_rebind_t *bind) 
+static long evtchn_bind_vcpu(evtchn_bind_vcpu_t *bind) 
 {
     struct domain *d    = current->domain;
     int            port = bind->port;
     int            vcpu = bind->vcpu;
     struct evtchn *chn;
-    long             rc = 0;
+    long           rc = 0;
+
+    if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] == NULL) ) {
+        printf("vcpu %d bad.\n", vcpu);
+        return -EINVAL;
+    }
 
     spin_lock(&d->evtchn_lock);
 
     if ( !port_is_valid(d, port) )
     {
+        printf("port %d bad.\n", port);
         rc = -EINVAL;
         goto out;
     }
 
     chn = evtchn_from_port(d, port);
-    chn->notify_vcpu_id = vcpu;
+    switch ( chn->state )
+    {
+    case ECS_UNBOUND:
+    case ECS_INTERDOMAIN:
+    case ECS_PIRQ:
+        chn->notify_vcpu_id = vcpu;
+        break;
+    default:
+        printf("evtchn type %d can't be rebound.\n", chn->state);
+        rc = -EINVAL;
+        break;
+    }
 
  out:
     spin_unlock(&d->evtchn_lock);
@@ -664,10 +677,8 @@
             rc = -EFAULT;
         break;
 
-    case EVTCHNOP_rebind:
-        rc = evtchn_rebind(&op.u.rebind);
-        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
-            rc = -EFAULT;
+    case EVTCHNOP_bind_vcpu:
+        rc = evtchn_bind_vcpu(&op.u.bind_vcpu);
         break;
 
     default:
diff -r 49a00af50777 -r 215d8b2f3d94 linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c  Sat Jul  9 10:01:49 2005
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c  Sat Jul  9 10:24:14 2005
@@ -86,7 +86,7 @@
      cpu_evtchn_mask[cpu][idx] &                \
      ~(sh)->evtchn_mask[idx])
 
-static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
     clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
     set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
@@ -99,8 +99,9 @@
     ((sh)->evtchn_pending[idx] &                \
      ~(sh)->evtchn_mask[idx])
 
-#define bind_evtchn_to_cpu(chn,cpu) ((void)0)
-
+void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+{
+}
 #endif
 
 /* Upcall to generic IRQ layer. */
@@ -228,6 +229,13 @@
         if ( HYPERVISOR_event_channel_op(&op) != 0 )
             panic("Failed to unbind virtual IRQ %d\n", virq);
 
+       /* This is a slight hack.  Interdomain ports can be allocated
+          directly by userspace, and at that point they get bound by
+          Xen to vcpu 0.  We therefore need to make sure that if we
+          get an event on an event channel we don't know about vcpu 0
+          handles it.  Binding channels to vcpu 0 when closing them
+          achieves this. */
+       bind_evtchn_to_cpu(evtchn, 0);
         evtchn_to_irq[evtchn] = -1;
         irq_to_evtchn[irq]    = -1;
         per_cpu(virq_to_irq, cpu)[virq]     = -1;
@@ -236,17 +244,17 @@
     spin_unlock(&irq_mapping_update_lock);
 }
 
-int bind_ipi_on_cpu_to_irq(int cpu, int ipi)
+int bind_ipi_on_cpu_to_irq(int ipi)
 {
     evtchn_op_t op;
     int evtchn, irq;
+    int cpu = smp_processor_id();
 
     spin_lock(&irq_mapping_update_lock);
 
     if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
     {
-        op.cmd                 = EVTCHNOP_bind_ipi;
-        op.u.bind_ipi.ipi_vcpu = cpu;
+        op.cmd = EVTCHNOP_bind_ipi;
         if ( HYPERVISOR_event_channel_op(&op) != 0 )
             panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
         evtchn = op.u.bind_ipi.port;
@@ -271,41 +279,10 @@
     return irq;
 }
 
-void rebind_evtchn_from_ipi(int cpu, int newcpu, int ipi)
-{
-    evtchn_op_t op;
-    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
-
-    spin_lock(&irq_mapping_update_lock);
-
-    op.cmd          = EVTCHNOP_rebind;
-    op.u.rebind.port = evtchn;
-    op.u.rebind.vcpu = newcpu;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-       printk(KERN_INFO "Failed to rebind IPI%d to CPU%d\n",ipi,newcpu);
-
-    spin_unlock(&irq_mapping_update_lock);
-}
-
-void rebind_evtchn_from_irq(int cpu, int newcpu, int irq)
-{
-    evtchn_op_t op;
-    int evtchn = irq_to_evtchn[irq];
-
-    spin_lock(&irq_mapping_update_lock);
-
-    op.cmd          = EVTCHNOP_rebind;
-    op.u.rebind.port = evtchn;
-    op.u.rebind.vcpu = newcpu;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-       printk(KERN_INFO "Failed to rebind IRQ%d to CPU%d\n",irq,newcpu);
-
-    spin_unlock(&irq_mapping_update_lock);
-}
-
-void unbind_ipi_on_cpu_from_irq(int cpu, int ipi)
-{
-    evtchn_op_t op;
+void unbind_ipi_from_irq(int ipi)
+{
+    evtchn_op_t op;
+    int cpu    = smp_processor_id();
     int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
     int irq    = irq_to_evtchn[evtchn];
 
@@ -319,6 +296,8 @@
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
 
+       /* See comments in unbind_virq_from_irq */
+       bind_evtchn_to_cpu(evtchn, 0);
         evtchn_to_irq[evtchn] = -1;
         irq_to_evtchn[irq]    = -1;
        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
@@ -362,6 +341,59 @@
     spin_unlock(&irq_mapping_update_lock);
 }
 
+static void do_nothing_function(void *ign)
+{
+}
+
+/* Rebind an evtchn so that it gets delivered to a specific cpu */
+static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+{
+    evtchn_op_t op;
+    int evtchn;
+
+    spin_lock(&irq_mapping_update_lock);
+    evtchn = irq_to_evtchn[irq];
+    if (!VALID_EVTCHN(evtchn)) {
+       spin_unlock(&irq_mapping_update_lock);
+       return;
+    }
+
+    /* Tell Xen to send future instances of this interrupt to the
+       other vcpu */
+    op.cmd = EVTCHNOP_bind_vcpu;
+    op.u.bind_vcpu.port = evtchn;
+    op.u.bind_vcpu.vcpu = tcpu;
+
+    /* If this fails, it usually just indicates that we're dealing
+       with a virq or IPI channel, which don't actually need to be
+       rebound.  Ignore it, but don't do the xenlinux-level rebind
+       in that case. */
+    if (HYPERVISOR_event_channel_op(&op) >= 0)
+       bind_evtchn_to_cpu(evtchn, tcpu);
+
+    spin_unlock(&irq_mapping_update_lock);
+
+    /* Now send the new target processor a NOP IPI.  When this
+       returns, it will check for any pending interrupts, and so
+       service any that got delivered to the wrong processor by
+       mistake. */
+    /* XXX: The only time this is called with interrupts disabled is
+       from the hotplug/hotunplug path.  In that case, all cpus are
+       stopped with interrupts disabled, and the missed interrupts
+       will be picked up when they start again.  This is kind of a
+       hack.
+    */
+    if (!irqs_disabled()) {
+       smp_call_function(do_nothing_function, NULL, 0, 0);
+    }
+}
+
+
+static void set_affinity_irq(unsigned irq, cpumask_t dest)
+{
+    unsigned tcpu = first_cpu(dest);
+    rebind_irq_to_cpu(irq, tcpu);
+}
 
 /*
  * Interface to generic handling in irq.c
@@ -424,7 +456,7 @@
     disable_dynirq,
     ack_dynirq,
     end_dynirq,
-    NULL
+    set_affinity_irq
 };
 
 static inline void pirq_unmask_notify(int pirq)
@@ -473,6 +505,7 @@
 
     pirq_query_unmask(irq_to_pirq(irq));
 
+    bind_evtchn_to_cpu(evtchn, 0);
     evtchn_to_irq[evtchn] = irq;
     irq_to_evtchn[irq]    = evtchn;
 
@@ -498,6 +531,7 @@
     if ( HYPERVISOR_event_channel_op(&op) != 0 )
         panic("Failed to unbind physical IRQ %d\n", irq);
 
+    bind_evtchn_to_cpu(evtchn, 0);
     evtchn_to_irq[evtchn] = -1;
     irq_to_evtchn[irq]    = -1;
 }
@@ -548,7 +582,7 @@
     disable_pirq,
     ack_pirq,
     end_pirq,
-    NULL
+    set_affinity_irq
 };
 
 void irq_suspend(void)
@@ -597,6 +631,7 @@
         evtchn = op.u.bind_virq.port;
         
         /* Record the new mapping. */
+       bind_evtchn_to_cpu(evtchn, 0);
         evtchn_to_irq[evtchn] = irq;
         irq_to_evtchn[irq]    = evtchn;
 
diff -r 49a00af50777 -r 215d8b2f3d94 xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h     Sat Jul  9 10:01:49 2005
+++ b/xen/include/public/dom0_ops.h     Sat Jul  9 10:24:14 2005
@@ -19,7 +19,7 @@
  * This makes sure that old versions of dom0 tools will stop working in a
  * well-defined way (rather than crashing the machine, for instance).
  */
-#define DOM0_INTERFACE_VERSION   0xAAAA100D
+#define DOM0_INTERFACE_VERSION   0xAAAA100E
 
 /************************************************************************/
 
diff -r 49a00af50777 -r 215d8b2f3d94 xen/include/public/event_channel.h
--- a/xen/include/public/event_channel.h        Sat Jul  9 10:01:49 2005
+++ b/xen/include/public/event_channel.h        Sat Jul  9 10:24:14 2005
@@ -89,8 +89,6 @@
  */
 #define EVTCHNOP_bind_ipi         7
 typedef struct evtchn_bind_ipi {
-    /* IN parameters. */
-    u32 ipi_vcpu;
     /* OUT parameters. */
     u32 port;
 } evtchn_bind_ipi_t;
@@ -144,6 +142,7 @@
 #define EVTCHNSTAT_virq         4  /* Channel is bound to a virtual IRQ line */
 #define EVTCHNSTAT_ipi          5  /* Channel is bound to a virtual IPI line */
     u32     status;
+    u32     vcpu;                  /* VCPU to which this channel is bound.   */
     union {
         struct {
             domid_t dom;
@@ -154,16 +153,25 @@
         } interdomain; /* EVTCHNSTAT_interdomain */
         u32 pirq;      /* EVTCHNSTAT_pirq        */
         u32 virq;      /* EVTCHNSTAT_virq        */
-        u32 ipi_vcpu;  /* EVTCHNSTAT_ipi         */
     } u;
 } evtchn_status_t;
 
-#define EVTCHNOP_rebind        8
-typedef struct {
+/*
+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
+ * event is pending.
+ * NOTES:
+ *  1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
+ *     the binding. This binding cannot be changed.
+ *  2. All other channels notify vcpu0 by default. This default is set when
+ *     the channel is allocated (a port that is freed and subsequently reused
+ *     has its binding reset to vcpu0).
+ */
+#define EVTCHNOP_bind_vcpu        8
+typedef struct evtchn_bind_vcpu {
     /* IN parameters. */
-    u32 port;                         /*  0 */
-    u32 vcpu;                         /*  4 */
-} evtchn_rebind_t; /* 8 bytes */
+    u32 port;
+    u32 vcpu;
+} evtchn_bind_vcpu_t;
 
 typedef struct evtchn_op {
     u32 cmd; /* EVTCHNOP_* */
@@ -176,7 +184,7 @@
         evtchn_close_t            close;
         evtchn_send_t             send;
         evtchn_status_t           status;
-        evtchn_rebind_t           rebind;
+        evtchn_bind_vcpu_t        bind_vcpu;
     } u;
 } evtchn_op_t;
 
diff -r 49a00af50777 -r 215d8b2f3d94 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h   Sat Jul  9 10:01:49 2005
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h   Sat Jul  9 10:24:14 2005
@@ -128,8 +128,8 @@
 /* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
 extern int  bind_virq_to_irq(int virq);
 extern void unbind_virq_from_irq(int virq);
-extern int  bind_ipi_on_cpu_to_irq(int cpu, int ipi);
-extern void unbind_ipi_on_cpu_from_irq(int cpu, int ipi);
+extern int  bind_ipi_to_irq(int ipi);
+extern void unbind_ipi_from_irq(int ipi);
 extern int  bind_evtchn_to_irq(int evtchn);
 extern void unbind_evtchn_from_irq(int evtchn);
 
diff -r 49a00af50777 -r 215d8b2f3d94 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c    Sat Jul  9 10:01:49 2005
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c    Sat Jul  9 10:24:14 2005
@@ -1312,7 +1312,7 @@
 
 /* hotplug down/up funtion pointer and target vcpu */
 struct vcpu_hotplug_handler_t {
-       void (*fn)();
+       void (*fn)(int vcpu);
        u32 vcpu;
 };
 static struct vcpu_hotplug_handler_t vcpu_hotplug_handler;
@@ -1333,11 +1333,8 @@
        while (!cpu_online(cpu))
                cpu_relax();
 
-   /* re-route bound IRQs 0 to cpu */
-   rebind_evtchn_from_irq(0, cpu,  per_cpu(resched_irq, cpu));
-   rebind_evtchn_from_irq(0, cpu, per_cpu(callfunc_irq, cpu));
-
        fixup_irqs(cpu_online_map);
+
        /* counter the disable in fixup_irqs() */
        local_irq_enable();
        return 0;
@@ -1359,17 +1356,8 @@
        if (cpu == 0)
                return -EBUSY;
 
-       /* Allow any queued timer interrupts to get serviced */
-       local_irq_enable();
-       mdelay(1);
-       local_irq_disable();
-
        cpu_clear(cpu, map);
        fixup_irqs(map);
-
-   /* re-route IRQs from dead vcpu to another */
-   rebind_evtchn_from_irq(cpu, 0,  per_cpu(resched_irq, cpu));
-   rebind_evtchn_from_irq(cpu, 0, per_cpu(callfunc_irq, cpu));
 
        /* It's now safe to remove this processor from the online map */
        cpu_clear(cpu, cpu_online_map);
@@ -1533,13 +1521,13 @@
        int cpu = smp_processor_id();
 
        per_cpu(resched_irq, cpu) =
-               bind_ipi_on_cpu_to_irq(cpu, RESCHEDULE_VECTOR);
+               bind_ipi_on_cpu_to_irq(RESCHEDULE_VECTOR);
        sprintf(resched_name[cpu], "resched%d", cpu);
        BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
                           SA_INTERRUPT, resched_name[cpu], NULL));
 
        per_cpu(callfunc_irq, cpu) =
-               bind_ipi_on_cpu_to_irq(cpu, CALL_FUNCTION_VECTOR);
+               bind_ipi_on_cpu_to_irq(CALL_FUNCTION_VECTOR);
        sprintf(callfunc_name[cpu], "callfunc%d", cpu);
        BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
                           smp_call_function_interrupt,
diff -r 49a00af50777 -r 215d8b2f3d94 linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c Sat Jul  9 10:01:49 2005
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c Sat Jul  9 10:24:14 2005
@@ -491,6 +491,8 @@
          * pick up its end of the event channel from 
          */
         evtchn_op_t op;
+       extern void bind_evtchn_to_cpu(unsigned port, unsigned cpu);
+
         op.cmd = EVTCHNOP_bind_interdomain;
         op.u.bind_interdomain.dom1 = DOMID_SELF;
         op.u.bind_interdomain.dom2 = DOMID_SELF;
@@ -500,6 +502,7 @@
             BUG();
         xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
         initdom_ctrlif_domcontroller_port   = op.u.bind_interdomain.port2;
+       bind_evtchn_to_cpu(op.u.bind_interdomain.port1, 0);
     }
 
     /* Sync up with shared indexes. */
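
For readers following the interface change: the sketch below shows how a guest kernel could use the new EVTCHNOP_bind_vcpu operation to re-target an already-bound event channel, mirroring what rebind_irq_to_cpu() does in the patched evtchn.c above. It is a minimal illustration, not part of the changeset: the helper name evtchn_retarget_port() is made up, the include paths assume the 2.6.11 xen-sparse layout, and error handling is reduced to returning the hypercall's result.

/* Illustrative sketch only -- not part of the changeset above. */
#include <asm-xen/hypervisor.h>               /* HYPERVISOR_event_channel_op() (assumed path) */
#include <asm-xen/xen-public/event_channel.h> /* evtchn_op_t, EVTCHNOP_bind_vcpu (assumed path) */

/* Ask Xen to deliver future events on 'port' to 'vcpu'. */
static int evtchn_retarget_port(u32 port, u32 vcpu)
{
    evtchn_op_t op;

    op.cmd              = EVTCHNOP_bind_vcpu;
    op.u.bind_vcpu.port = port;
    op.u.bind_vcpu.vcpu = vcpu;

    /*
     * Per the notes added to event_channel.h, only unbound, interdomain
     * and PIRQ channels may be re-bound; IPI- and VIRQ-bound channels
     * keep notifying the vcpu that created them and Xen rejects the op.
     */
    return HYPERVISOR_event_channel_op(&op);
}

On the xenlinux side the patch follows a successful hypercall with bind_evtchn_to_cpu(port, vcpu) so the per-cpu event masking tables agree with Xen's notion of the binding; a caller of a helper like this would want to do the same.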
