[Xen-devel] [PATCH 2/2] x86/hvm/ioreq: allow ioreq servers to use HVM_PARAM_[BUF]IOREQ_PFN
Since commit 2c257bd6 "x86/hvm: remove default ioreq server (again)" the
GFNs allocated by the toolstack and set in HVM_PARAM_IOREQ_PFN and
HVM_PARAM_BUFIOREQ_PFN have been unused. This patch allows them to be used
by (non-default) ioreq servers.

NOTE: This fixes a compatibility issue. A guest created on a version of
      Xen that pre-dates the initial ioreq server implementation and then
      migrated in will currently fail to resume because its migration
      stream will lack values for HVM_PARAM_IOREQ_SERVER_PFN and
      HVM_PARAM_NR_IOREQ_SERVER_PAGES *unless* the system has an emulator
      domain that uses direct resource mapping (which depends on the
      version of privcmd it happens to have), in which case it will not
      require use of GFNs for the ioreq server shared pages.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>

A similar compatibility issue with migrated-in VMs exists with Xen 4.11
because the upstream QEMU fall-back to using the legacy ioreq server was
broken when direct resource mapping was introduced. This is because,
prior to the resource mapping patches, it was the creation of the
non-default ioreq server that failed if GFNs were not available whereas,
as of 4.11, it is retrieval of the info that fails, which does not
trigger the fall-back.
---
 xen/arch/x86/hvm/ioreq.c         | 50 ++++++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/hvm/domain.h |  3 ++-
 2 files changed, 50 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 3569beaad5..4bac0a100c 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -237,6 +237,26 @@ bool handle_hvm_io_completion(struct vcpu *v)
     return true;
 }
 
+static gfn_t hvm_alloc_legacy_ioreq_gfn(struct hvm_ioreq_server *s)
+{
+    struct domain *d = s->target;
+    unsigned int i;
+
+    BUILD_BUG_ON(HVM_PARAM_IOREQ_PFN >
+                 sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
+    BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN >
+                 sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
+    BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN != HVM_PARAM_IOREQ_PFN + 1);
+
+    for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
+    {
+        if ( !test_and_set_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask) )
+            return _gfn(d->arch.hvm.params[i]);
+    }
+
+    return INVALID_GFN;
+}
+
 static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->target;
@@ -248,7 +268,29 @@ static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
         return _gfn(d->arch.hvm.ioreq_gfn.base + i);
     }
 
-    return INVALID_GFN;
+    /*
+     * If we are out of 'normal' GFNs then we may still have a 'legacy'
+     * GFN available.
+     */
+    return hvm_alloc_legacy_ioreq_gfn(s);
+}
+
+static bool hvm_free_legacy_ioreq_gfn(struct hvm_ioreq_server *s,
+                                      gfn_t gfn)
+{
+    struct domain *d = s->target;
+    unsigned int i;
+
+    for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
+    {
+        if ( gfn_eq(gfn, _gfn(d->arch.hvm.params[i])) )
+            break;
+    }
+    if ( i > HVM_PARAM_BUFIOREQ_PFN )
+        return false;
+
+    clear_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask);
+    return true;
 }
 
 static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
@@ -258,7 +300,11 @@ static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
 
     ASSERT(!gfn_eq(gfn, INVALID_GFN));
 
-    set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
+    if ( !hvm_free_legacy_ioreq_gfn(s, gfn) )
+    {
+        ASSERT(i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8);
+        set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
+    }
 }
 
 static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 80b2ab041e..a9f68d9571 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -95,7 +95,8 @@ struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
    struct {
         unsigned long base;
-        unsigned long mask;
+        unsigned long mask;        /* clear to allocate */
+        unsigned long legacy_mask; /* set to allocate */
     } ioreq_gfn;
 
     /* Lock protects all other values in the sub-struct and the default */
-- 
2.11.0
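For context, the emulator-side fall-back this change caters for looks roughly
like the sketch below: try direct resource mapping first and, if that is not
available, retrieve the shared page GFN via the ioreq server info and map it
as a foreign page. This is only an illustrative sketch, not part of the patch:
the helper name map_ioreq_page() is invented for the example, error/cleanup
handling (including the leaked resource handle) is omitted, and it merely
mirrors the kind of logic an emulator such as QEMU carries.

/* Sketch only: emulator-side mapping fall-back (simplified). */
#include <stddef.h>
#include <sys/mman.h>

#include <xenctrl.h>           /* pulls in the Xen public headers (XENMEM_resource_*) */
#include <xendevicemodel.h>
#include <xenforeignmemory.h>

static void *map_ioreq_page(xenforeignmemory_handle *fmem,
                            xendevicemodel_handle *dmod,
                            domid_t domid, ioservid_t servid)
{
    void *addr = NULL;
    xen_pfn_t ioreq_gfn, bufioreq_gfn;
    evtchn_port_t bufioreq_port;

    /*
     * Preferred path: map the synchronous ioreq page directly as a
     * resource.  No GFN in the guest's P2M is needed for this.
     * (The returned resource handle is leaked here for brevity.)
     */
    if ( xenforeignmemory_map_resource(
             fmem, domid, XENMEM_resource_ioreq_server, servid,
             XENMEM_resource_ioreq_server_frame_ioreq(0), 1,
             &addr, PROT_READ | PROT_WRITE, 0) )
        return addr;

    /*
     * Legacy path: ask Xen where the shared page lives in the guest's
     * P2M and map that GFN as foreign memory.  With the patch above,
     * the GFN handed back here may be the one the toolstack stored in
     * HVM_PARAM_IOREQ_PFN, so this path keeps working even when the
     * 'normal' ioreq GFN range was never set up.
     */
    if ( xendevicemodel_get_ioreq_server_info(dmod, domid, servid,
                                              &ioreq_gfn, &bufioreq_gfn,
                                              &bufioreq_port) )
        return NULL;

    return xenforeignmemory_map(fmem, domid, PROT_READ | PROT_WRITE,
                                1, &ioreq_gfn, NULL);
}

The point of the hypervisor-side change is that the second path can still
return a usable GFN (the 'legacy' one from HVM_PARAM_IOREQ_PFN) when the
'normal' ioreq GFN range was never provisioned, e.g. for the migrated-in
guests described in the commit message.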