
[PATCH v2 05/12] viridian: use hypercall_vpmask in hvcall_ipi()



From: Paul Durrant <pdurrant@xxxxxxxxxx>

A subsequent patch will need to IPI a set of virtual processors specified
by a mask potentially wider than 64 bits. A previous patch introduced the
per-cpu hypercall_vpmask structure to allow hvcall_flush() to deal with
such wide masks. This patch modifies the implementation of hvcall_ipi()
to make use of the same mask structure, introducing a for_each_vp() macro
to facilitate traversing a mask.
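
For illustration, here is a minimal standalone sketch of the traversal
pattern such a macro provides (not the viridian code itself: MAX_VP,
set_vp() and next_bit() below are simplified stand-ins for HVM_MAX_VCPUS
and Xen's find_first_bit()/find_next_bit() bitmap helpers):

    /*
     * Minimal standalone sketch (not Xen code): iterate over the set bits
     * of a VP mask wider than 64 bits, in the style of for_each_vp().
     */
    #include <stdio.h>

    #define MAX_VP        128
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    struct vpmask {
        unsigned long mask[MAX_VP / BITS_PER_LONG];
    };

    static void set_vp(struct vpmask *vpmask, unsigned int vp)
    {
        vpmask->mask[vp / BITS_PER_LONG] |= 1UL << (vp % BITS_PER_LONG);
    }

    /* Return the lowest set bit at or after 'start', or MAX_VP if none. */
    static unsigned int next_bit(const struct vpmask *vpmask,
                                 unsigned int start)
    {
        unsigned int vp;

        for ( vp = start; vp < MAX_VP; vp++ )
            if ( vpmask->mask[vp / BITS_PER_LONG] &
                 (1UL << (vp % BITS_PER_LONG)) )
                break;

        return vp; /* MAX_VP terminates the loop below */
    }

    #define for_each_vp(vpmask, vp) \
        for ( (vp) = next_bit(vpmask, 0); \
              (vp) < MAX_VP; \
              (vp) = next_bit(vpmask, (vp) + 1) )

    int main(void)
    {
        struct vpmask vpmask = { { 0 } };
        unsigned int vp;

        set_vp(&vpmask, 3);
        set_vp(&vpmask, 65); /* beyond a single 64-bit word */

        for_each_vp ( &vpmask, vp )
            printf("would IPI VP %u\n", vp);

        return 0;
    }

Xen's actual helpers scan the bitmap a word at a time rather than bit by
bit; the sketch only illustrates the shape of the loop and its termination
at MAX_VP.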

Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
---
Cc: Wei Liu <wl@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>

v2:
 - Drop the 'vp' loop now that vpmask_set() will do it internally
---
 xen/arch/x86/hvm/viridian/viridian.c | 43 +++++++++++++++++++++-------
 1 file changed, 32 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 334d8527ff59..d8d8ecc89c80 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -551,6 +551,25 @@ static bool vpmask_test(const struct hypercall_vpmask *vpmask,
     return test_bit(vp, vpmask->mask);
 }
 
+static unsigned int vpmask_first(struct hypercall_vpmask *vpmask)
+{
+    return find_first_bit(vpmask->mask, HVM_MAX_VCPUS);
+}
+
+static unsigned int vpmask_next(struct hypercall_vpmask *vpmask, unsigned int vp)
+{
+    /*
+     * If vp + 1 >= HVM_MAX_VCPUS then find_next_bit() will return
+     * HVM_MAX_VCPUS, ensuring the for_each_vp ( ... ) loop terminates.
+     */
+    return find_next_bit(vpmask->mask, HVM_MAX_VCPUS, vp + 1);
+}
+
+#define for_each_vp(vpmask, vp) \
+       for ( (vp) = vpmask_first(vpmask); \
+             (vp) < HVM_MAX_VCPUS; \
+             (vp) = vpmask_next(vpmask, vp) )
+
 /*
  * Windows should not issue the hypercalls requiring this callback in the
  * case where vcpu_id would exceed the size of the mask.
@@ -631,13 +650,21 @@ static int hvcall_flush(union hypercall_input *input,
     return 0;
 }
 
+static void send_ipi(struct hypercall_vpmask *vpmask, uint8_t vector)
+{
+    struct domain *currd = current->domain;
+    unsigned int vp;
+
+    for_each_vp ( vpmask, vp )
+        vlapic_set_irq(vcpu_vlapic(currd->vcpu[vp]), vector, 0);
+}
+
 static int hvcall_ipi(union hypercall_input *input,
                       union hypercall_output *output,
                       unsigned long input_params_gpa,
                       unsigned long output_params_gpa)
 {
-    struct domain *currd = current->domain;
-    struct vcpu *v;
+    struct hypercall_vpmask *vpmask = &this_cpu(hypercall_vpmask);
     uint32_t vector;
     uint64_t vcpu_mask;
 
@@ -676,16 +703,10 @@ static int hvcall_ipi(union hypercall_input *input,
     if ( vector < 0x10 || vector > 0xff )
         return -EINVAL;
 
-    for_each_vcpu ( currd, v )
-    {
-        if ( v->vcpu_id >= (sizeof(vcpu_mask) * 8) )
-            return -EINVAL;
+    vpmask_empty(vpmask);
+    vpmask_set(vpmask, 0, vcpu_mask);
 
-        if ( !(vcpu_mask & (1ul << v->vcpu_id)) )
-            continue;
-
-        vlapic_set_irq(vcpu_vlapic(v), vector, 0);
-    }
+    send_ipi(vpmask, vector);
 
     return 0;
 }
-- 
2.20.1