[Xen-devel] [PATCH v7 3/7] x86: add xen_iommu_op to query reserved ranges
This patch adds a xen_iommu_op to allow the virtual machine's reserved
IOMMU ranges to be queried by the guest.

NOTE: The number of reserved ranges is determined by system firmware, in
      conjunction with Xen command line options, and is expected to be
      small. Thus, to avoid over-complicating the code, there is no
      pre-emption check within the operation.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>

v7:
 - Use fixed-width types in the hypercall.
 - Keep avoiding use of XLAT_iommu_op() for copying back data in the
   compat case. A handle translation macro is still defined (in case
   there is ever a need to start using XLAT_iommu_op() in future) but
   it is directly evaluated if cmp.op == XEN_IOMMUOP_query_reserved.

v4:
 - Make xen_bfn_t strictly 64 bits wide and drop associated compat
   translation.

v3:
 - Avoid speculation beyond array bounds check.

v2:
 - Re-implemented for v2 based on new rangeset.
---
 xen/common/iommu_op.c         | 177 ++++++++++++++++++++++++++++++++++++++++--
 xen/include/public/iommu_op.h |  39 ++++++++++
 xen/include/xlat.lst          |   2 +
 3 files changed, 212 insertions(+), 6 deletions(-)
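For illustration, the intended calling pattern is two invocations: one
with nr_entries set to zero (and a NULL ranges handle) to learn how many
reserved ranges exist, then a second with a suitably sized buffer. A
minimal guest-side sketch of that pattern follows; the
HYPERVISOR_iommu_op() wrapper and the calloc()/free() calls stand in for
whatever hypercall plumbing and allocator the guest actually has, and
are not part of this patch:

/* Sketch only: query the hypervisor's reserved DFN ranges. */
static int query_reserved_ranges(void)
{
    xen_iommu_op_t op = {
        .op = XEN_IOMMUOP_query_reserved,
        /* nr_entries == 0 and a NULL handle: just count the ranges */
    };
    xen_iommu_op_buf_t buf = { .size = sizeof(op) };
    xen_iommu_reserved_range_t *ranges;
    int rc;

    set_xen_guest_handle(buf.h, &op);

    /* First call: learn how many reserved ranges there are. */
    rc = HYPERVISOR_iommu_op(&buf, 1); /* illustrative wrapper */
    if ( rc || op.status )
        return rc ?: op.status;

    if ( !op.u.query_reserved.nr_entries )
        return 0; /* nothing reserved */

    ranges = calloc(op.u.query_reserved.nr_entries, sizeof(*ranges));
    if ( !ranges )
        return -ENOMEM;

    /* Second call: nr_entries already holds the count reported above. */
    set_xen_guest_handle(op.u.query_reserved.ranges, ranges);
    rc = HYPERVISOR_iommu_op(&buf, 1);

    /*
     * A status of -ENOBUFS here means more ranges appeared between the
     * two calls; a caller could retry with the larger count now held
     * in nr_entries.
     */

    /* ... consume ranges[0 .. op.u.query_reserved.nr_entries - 1] ... */

    free(ranges);
    return rc ?: op.status;
}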
diff --git a/xen/common/iommu_op.c b/xen/common/iommu_op.c
index 6acc7b1888..9d914a67db 100644
--- a/xen/common/iommu_op.c
+++ b/xen/common/iommu_op.c
@@ -22,11 +22,70 @@
 #include <xen/event.h>
 #include <xen/guest_access.h>
 #include <xen/hypercall.h>
+#include <xen/nospec.h>
+
+struct get_reserved_ctxt {
+    unsigned int max_entries;
+    unsigned int nr_entries;
+    XEN_GUEST_HANDLE(xen_iommu_reserved_range_t) ranges;
+};
+
+static int get_reserved(unsigned long s, unsigned long e, void *arg)
+{
+    struct get_reserved_ctxt *ctxt = arg;
+
+    if ( ctxt->nr_entries < ctxt->max_entries )
+    {
+        xen_iommu_reserved_range_t range = {
+            .start_dfn = s,
+            .nr_frames = e - s,
+        };
+
+        if ( copy_to_guest_offset(ctxt->ranges, ctxt->nr_entries, &range,
+                                  1) )
+            return -EFAULT;
+    }
+
+    ctxt->nr_entries++;
+    return 0;
+}
+
+static int iommu_op_query_reserved(struct xen_iommu_op_query_reserved *op)
+{
+    struct domain *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    struct get_reserved_ctxt ctxt = {
+        .max_entries = op->nr_entries,
+        .ranges = op->ranges,
+    };
+    int rc;
+
+    if ( op->pad )
+        return -EINVAL;
+
+    rc = rangeset_report_ranges(iommu->reserved_ranges, 0, ~0ul,
+                                get_reserved, &ctxt);
+    if ( rc )
+        return rc;
+
+    /* Pass back the actual number of reserved ranges */
+    op->nr_entries = ctxt.nr_entries;
+
+    if ( !guest_handle_is_null(ctxt.ranges) &&
+         ctxt.nr_entries > ctxt.max_entries )
+        return -ENOBUFS;
+
+    return 0;
+}
 
 static void iommu_op(xen_iommu_op_t *op)
 {
     switch ( op->op )
     {
+    case XEN_IOMMUOP_query_reserved:
+        op->status = iommu_op_query_reserved(&op->u.query_reserved);
+        break;
+
     default:
         op->status = -EOPNOTSUPP;
         break;
@@ -38,12 +97,20 @@ int do_one_iommu_op(xen_iommu_op_buf_t *buf)
     const XEN_GUEST_HANDLE(xen_iommu_op_t) h =
         guest_handle_cast(buf->h, xen_iommu_op_t);
     xen_iommu_op_t op;
+    size_t offset;
+    static const size_t op_size[] = {
+        [XEN_IOMMUOP_query_reserved] =
+            sizeof(struct xen_iommu_op_query_reserved),
+    };
+    size_t size;
     int rc;
 
-    if ( buf->size < sizeof(op) )
+    offset = offsetof(struct xen_iommu_op, u);
+
+    if ( buf->size < offset )
         return -ENODATA;
 
-    if ( copy_from_guest(&op, h, 1) )
+    if ( copy_from_guest((void *)&op, buf->h, offset) )
         return -EFAULT;
 
     if ( op.pad )
@@ -53,8 +120,22 @@ int do_one_iommu_op(xen_iommu_op_buf_t *buf)
     if ( rc )
         return rc;
 
+    if ( op.op >= ARRAY_SIZE(op_size) )
+        return -EOPNOTSUPP;
+
+    size = op_size[array_index_nospec(op.op, ARRAY_SIZE(op_size))];
+    if ( buf->size < offset + size )
+        return -EFAULT;
+
+    if ( copy_from_guest_offset((void *)&op.u, buf->h, offset, size) )
+        return -EFAULT;
+
     iommu_op(&op);
 
+    if ( op.op == XEN_IOMMUOP_query_reserved &&
+         __copy_field_to_guest(h, &op, u.query_reserved.nr_entries) )
+        return -EFAULT;
+
     if ( __copy_field_to_guest(h, &op, status) )
         return -EFAULT;
 
@@ -103,18 +184,29 @@ long do_iommu_op(unsigned int nr_bufs,
 
 #ifdef CONFIG_COMPAT
 
+CHECK_iommu_reserved_range;
+
 int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
 {
     const COMPAT_HANDLE(compat_iommu_op_t) h =
         compat_handle_cast(buf->h, compat_iommu_op_t);
     compat_iommu_op_t cmp;
+    size_t offset;
+    static const size_t op_size[] = {
+        [XEN_IOMMUOP_query_reserved] =
+            sizeof(struct compat_iommu_op_query_reserved),
+    };
+    size_t size;
     xen_iommu_op_t nat;
+    unsigned int u;
     int rc;
 
-    if ( buf->size < sizeof(cmp) )
+    offset = offsetof(struct compat_iommu_op, u);
+
+    if ( buf->size < offset )
         return -ENODATA;
 
-    if ( copy_from_compat(&cmp, h, 1) )
+    if ( copy_from_compat((void *)&cmp, buf->h, offset) )
         return -EFAULT;
 
     if ( cmp.pad )
@@ -124,16 +216,89 @@ int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
     if ( rc )
         return rc;
 
+    if ( cmp.op >= ARRAY_SIZE(op_size) )
+        return -EOPNOTSUPP;
+
+    size = op_size[array_index_nospec(cmp.op, ARRAY_SIZE(op_size))];
+    if ( buf->size < offset + size )
+        return -EFAULT;
+
+    if ( copy_from_compat_offset((void *)&cmp.u, buf->h, offset, size) )
+        return -EFAULT;
+
+    /*
+     * The xlat magic doesn't quite know how to handle the union so
+     * we need to fix things up here.
+     */
+#define XLAT_iommu_op_u_query_reserved XEN_IOMMUOP_query_reserved
+    u = cmp.op;
+
+#define XLAT_iommu_op_query_reserved_HNDL_ranges(_d_, _s_)            \
+    do                                                                \
+    {                                                                 \
+        if ( !compat_handle_is_null((_s_)->ranges) )                  \
+        {                                                             \
+            typeof(cmp.u.query_reserved.nr_entries) *nr_entries =     \
+                COMPAT_ARG_XLAT_VIRT_BASE;                            \
+            xen_iommu_reserved_range_t *ranges =                      \
+                (void *)(nr_entries + 1);                             \
+                                                                      \
+            if ( sizeof(*nr_entries) +                                \
+                 (sizeof(*ranges) * (_s_)->nr_entries) >              \
+                 COMPAT_ARG_XLAT_SIZE )                               \
+                return -E2BIG;                                        \
+                                                                      \
+            *nr_entries = (_s_)->nr_entries;                          \
+            set_xen_guest_handle((_d_)->ranges, ranges);              \
+        }                                                             \
+        else                                                          \
+            set_xen_guest_handle((_d_)->ranges, NULL);                \
+    } while (false)
+
     XLAT_iommu_op(&nat, &cmp);
 
+#undef XLAT_iommu_op_query_reserved_HNDL_ranges
+#undef XLAT_iommu_op_u_query_reserved
+
     iommu_op(&nat);
 
+#define XLAT_iommu_op_query_reserved_HNDL_ranges(_d_, _s_)            \
+    do                                                                \
+    {                                                                 \
+        if ( !compat_handle_is_null((_d_)->ranges) )                  \
+        {                                                             \
+            typeof(cmp.u.query_reserved.nr_entries) *nr_entries =     \
+                COMPAT_ARG_XLAT_VIRT_BASE;                            \
+            compat_iommu_reserved_range_t *ranges =                   \
+                (void *)(nr_entries + 1);                             \
+            unsigned int nr =                                         \
+                min_t(unsigned int, (_d_)->nr_entries, *nr_entries);  \
+                                                                      \
+            if ( __copy_to_compat_offset((_d_)->ranges, 0, ranges, nr) ) \
+                cmp.status = -EFAULT;                                 \
+        }                                                             \
+    } while (false)
+
     /*
-     * Avoid the full (and lengthy) XLAT code as the only thing that
-     * needs copying back is the status field.
+     * Avoid the full (and lengthy) XLAT code as the only things that
+     * need copying back are the reserved ranges (in the case of the
+     * query op) and the status field (for all ops).
      */
     cmp.status = nat.status;
 
+    if ( cmp.op == XEN_IOMMUOP_query_reserved )
+    {
+        XLAT_iommu_op_query_reserved_HNDL_ranges(&cmp.u.query_reserved,
+                                                 &nat.u.query_reserved);
+
+        cmp.u.query_reserved.nr_entries = nat.u.query_reserved.nr_entries;
+
+        if ( __copy_field_to_compat(h, &cmp, u.query_reserved.nr_entries) )
+            return -EFAULT;
+    }
+
+#undef XLAT_iommu_op_query_reserved_HNDL_ranges
+
     if ( __copy_field_to_compat(h, &cmp, status) )
         return -EFAULT;
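One note on the compat plumbing above, since the open-coded pointer
arithmetic is easy to misread: CHECK_iommu_reserved_range (paired with
the '?' line added to xlat.lst below) makes the build fail unless the
native and 32-bit layouts of xen_iommu_reserved_range are identical,
which is what lets the copy-back macro hand the native bounce buffer
straight to __copy_to_compat_offset() without per-field translation.
Both HNDL_ranges macros assume the per-vCPU translation area is laid
out as in the sketch below (an illustrative helper, not part of the
patch):

/* Sketch only: the translation-area layout both macros above assume. */
static xen_iommu_reserved_range_t *xlat_area_ranges(uint32_t **nr_entries)
{
    /* The area starts with the guest's requested capacity... */
    *nr_entries = COMPAT_ARG_XLAT_VIRT_BASE;

    /* ...followed immediately by the native-format bounce buffer that
     * iommu_op() fills via get_reserved(). */
    return (void *)(*nr_entries + 1);
}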
diff --git a/xen/include/public/iommu_op.h b/xen/include/public/iommu_op.h
index c3b68f665a..001f515bb3 100644
--- a/xen/include/public/iommu_op.h
+++ b/xen/include/public/iommu_op.h
@@ -25,11 +25,50 @@
 
 #include "xen.h"
 
+typedef uint64_t xen_dfn_t;
+
+/* Structure describing a single range reserved in the IOMMU */
+struct xen_iommu_reserved_range {
+    xen_dfn_t start_dfn;
+    uint32_t nr_frames;
+    uint32_t pad;
+};
+typedef struct xen_iommu_reserved_range xen_iommu_reserved_range_t;
+DEFINE_XEN_GUEST_HANDLE(xen_iommu_reserved_range_t);
+
+/*
+ * XEN_IOMMUOP_query_reserved: Query ranges reserved in the IOMMU.
+ */
+#define XEN_IOMMUOP_query_reserved 1
+
+struct xen_iommu_op_query_reserved {
+    /*
+     * IN/OUT - On entry this is the number of entries available
+     *          in the ranges array below.
+     *          On exit this is the actual number of reserved ranges.
+     */
+    uint32_t nr_entries;
+    uint32_t pad;
+    /*
+     * OUT - This array is populated with reserved ranges. If it is
+     *       not sufficiently large then available entries are
+     *       populated, but the op status code will be set to -ENOBUFS.
+     *       It is permissible to set this to NULL if nr_entries is
+     *       also set to zero. In this case, on exit, nr_entries will
+     *       still be set to the actual number of reserved ranges but
+     *       the status code will be set to zero.
+     */
+    XEN_GUEST_HANDLE(xen_iommu_reserved_range_t) ranges;
+};
+
 struct xen_iommu_op {
     uint16_t op;    /* op type */
     uint16_t pad;
     int32_t status; /* op completion status: */
                     /* 0 for success otherwise, negative errno */
+    union {
+        struct xen_iommu_op_query_reserved query_reserved;
+    } u;
 };
 typedef struct xen_iommu_op xen_iommu_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_iommu_op_t);
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 3b15c18c4e..d2f9b1034b 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -79,6 +79,8 @@
 ?	vcpu_hvm_x86_64			hvm/hvm_vcpu.h
 !	iommu_op			iommu_op.h
 !	iommu_op_buf			iommu_op.h
+!	iommu_op_query_reserved		iommu_op.h
+?	iommu_reserved_range		iommu_op.h
 ?	kexec_exec			kexec.h
 !	kexec_image			kexec.h
 !	kexec_range			kexec.h
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel