[xen staging] viridian: introduce a per-cpu hypercall_vpmask and accessor functions...
commit 33c1a1c378e38d73d1b35b4bec6bdcd94bae41c2
Author:     Paul Durrant <pdurrant@xxxxxxxxxx>
AuthorDate: Fri Dec 4 13:14:03 2020 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Dec 4 13:14:03 2020 +0100

    viridian: introduce a per-cpu hypercall_vpmask and accessor functions...

    ... and make use of them in hvcall_flush()/need_flush().

    Subsequent patches will need to deal with virtual processor masks
    potentially wider than 64 bits. Thus, to avoid using too much stack,
    this patch introduces global per-cpu virtual processor masks and
    converts the implementation of hvcall_flush() to use them.

    Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
    Acked-by: Wei Liu <wl@xxxxxxx>
---
 xen/arch/x86/hvm/viridian/viridian.c | 58 +++++++++++++++++++++++++++++++++---
 1 file changed, 54 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 77e90b502c..0274c8f2ee 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -507,15 +507,59 @@ void viridian_domain_deinit(struct domain *d)
     XFREE(d->arch.hvm.viridian);
 }
 
+struct hypercall_vpmask {
+    DECLARE_BITMAP(mask, HVM_MAX_VCPUS);
+};
+
+static DEFINE_PER_CPU(struct hypercall_vpmask, hypercall_vpmask);
+
+static void vpmask_empty(struct hypercall_vpmask *vpmask)
+{
+    bitmap_zero(vpmask->mask, HVM_MAX_VCPUS);
+}
+
+static void vpmask_set(struct hypercall_vpmask *vpmask, unsigned int vp,
+                       uint64_t mask)
+{
+    unsigned int count = sizeof(mask) * 8;
+
+    while ( count-- )
+    {
+        if ( !mask )
+            break;
+
+        if ( mask & 1 )
+        {
+            ASSERT(vp < HVM_MAX_VCPUS);
+            __set_bit(vp, vpmask->mask);
+        }
+
+        mask >>= 1;
+        vp++;
+    }
+}
+
+static void vpmask_fill(struct hypercall_vpmask *vpmask)
+{
+    bitmap_fill(vpmask->mask, HVM_MAX_VCPUS);
+}
+
+static bool vpmask_test(const struct hypercall_vpmask *vpmask,
+                        unsigned int vp)
+{
+    ASSERT(vp < HVM_MAX_VCPUS);
+    return test_bit(vp, vpmask->mask);
+}
+
 /*
  * Windows should not issue the hypercalls requiring this callback in the
  * case where vcpu_id would exceed the size of the mask.
  */
 static bool need_flush(void *ctxt, struct vcpu *v)
 {
-    uint64_t vcpu_mask = *(uint64_t *)ctxt;
+    struct hypercall_vpmask *vpmask = ctxt;
 
-    return vcpu_mask & (1ul << v->vcpu_id);
+    return vpmask_test(vpmask, v->vcpu_id);
 }
 
 union hypercall_input {
@@ -546,6 +590,7 @@ static int hvcall_flush(const union hypercall_input *input,
                         paddr_t input_params_gpa,
                         paddr_t output_params_gpa)
 {
+    struct hypercall_vpmask *vpmask = &this_cpu(hypercall_vpmask);
     struct {
         uint64_t address_space;
         uint64_t flags;
@@ -567,13 +612,18 @@ static int hvcall_flush(const union hypercall_input *input,
      * so err on the safe side.
      */
     if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
-        input_params.vcpu_mask = ~0ul;
+        vpmask_fill(vpmask);
+    else
+    {
+        vpmask_empty(vpmask);
+        vpmask_set(vpmask, 0, input_params.vcpu_mask);
+    }
 
     /*
      * A false return means that another vcpu is currently trying
      * a similar operation, so back off.
      */
-    if ( !paging_flush_tlb(need_flush, &input_params.vcpu_mask) )
+    if ( !paging_flush_tlb(need_flush, vpmask) )
         return -ERESTART;
 
     output->rep_complete = input->rep_count;
--
generated by git-patchbot for /home/xen/git/xen.git#staging
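
For readers unfamiliar with the mask-widening scheme, the sketch below is a
minimal, standalone illustration of the bit-unpacking that vpmask_set()
performs: expanding a 64-bit guest-supplied VP mask into a wider bitmap
starting at a given VP index. It is not part of the patch; a plain bool array
and an assumed MAX_VPS constant stand in for Xen's DECLARE_BITMAP()/
__set_bit()/ASSERT() helpers and HVM_MAX_VCPUS.

/* Standalone sketch only: plain C stand-ins for Xen's bitmap helpers. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VPS 128          /* assumed stand-in for HVM_MAX_VCPUS */

static bool vpmask[MAX_VPS]; /* stand-in for DECLARE_BITMAP(mask, ...) */

static void vpmask_set(unsigned int vp, uint64_t mask)
{
    unsigned int count = sizeof(mask) * 8;

    /* Walk the 64-bit mask LSB-first, stopping once it is exhausted. */
    while ( count-- )
    {
        if ( !mask )
            break;

        if ( mask & 1 )
        {
            assert(vp < MAX_VPS);
            vpmask[vp] = true; /* __set_bit(vp, vpmask->mask) in the patch */
        }

        mask >>= 1;
        vp++;
    }
}

int main(void)
{
    /* Bits 0, 1 and 5 of the 64-bit mask, offset by a starting VP of 64. */
    vpmask_set(64, 0x23);

    for ( unsigned int vp = 0; vp < MAX_VPS; vp++ )
        if ( vpmask[vp] )
            printf("VP %u set\n", vp);

    return 0;
}

Compiled and run, this should report VPs 64, 65 and 69 as set, mirroring how
a later caller handling masks wider than 64 bits could pass a non-zero
starting VP for each 64-bit bank.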