[Xen-devel] [PATCH v4 14/15] x86: add iommu_ops to modify and flush IOMMU mappings
This patch adds iommu_ops to add (map) or remove (unmap) frames in the
domain's IOMMU mappings, and an iommu_op to synchronize (flush) those
manipulations with the hardware.

Mappings added by the map operation are tracked and only those mappings
may be removed by a subsequent unmap operation. Frames are specified by
the owning domain and GFN. It is, of course, permissible for a domain to
map its own frames using DOMID_SELF.

NOTE: The owning domain and GFN must also be specified in the unmap
      operation, as well as the BFN, so that they can be cross-checked
      against the existing mapping.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>

v4:
 - Fixed logic inversion when checking return of iommu_unmap_page().

v3:
 - Add type pinning.

v2:
 - Heavily re-worked in v2, including explicit tracking of mappings.
   This avoids the need to clear non-reserved mappings from IOMMU at
   start of day, which would be prohibitively slow on a large host.
---
 xen/arch/x86/iommu_op.c       | 151 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/iommu_op.h |  43 ++++++++++++
 xen/include/xlat.lst          |   2 +
 3 files changed, 196 insertions(+)

diff --git a/xen/arch/x86/iommu_op.c b/xen/arch/x86/iommu_op.c
index b29547bffd..35daeeed92 100644
--- a/xen/arch/x86/iommu_op.c
+++ b/xen/arch/x86/iommu_op.c
@@ -114,6 +114,131 @@ static int iommu_op_enable_modification(void)
     return 0;
 }
 
+static int iommuop_map(struct xen_iommu_op_map *op)
+{
+    struct domain *d, *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    bool readonly = op->flags & XEN_IOMMUOP_map_readonly;
+    bfn_t bfn = _bfn(op->bfn);
+    struct page_info *page;
+    unsigned int prot;
+    int rc, ignore;
+
+    if ( op->pad || (op->flags & ~XEN_IOMMUOP_map_readonly) )
+        return -EINVAL;
+
+    if ( !iommu->iommu_op_ranges )
+        return -EOPNOTSUPP;
+
+    /* Check whether the specified BFN falls in a reserved region */
+    if ( rangeset_contains_singleton(iommu->reserved_ranges, bfn_x(bfn)) )
+        return -EINVAL;
+
+    d = rcu_lock_domain_by_any_id(op->domid);
+    if ( !d )
+        return -ESRCH;
+
+    rc = get_paged_gfn(d, _gfn(op->gfn), readonly, NULL, &page);
+    if ( rc )
+        goto unlock;
+
+    rc = -EINVAL;
+    if ( !readonly && !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        goto unlock;
+    }
+
+    prot = IOMMUF_readable;
+    if ( !readonly )
+        prot |= IOMMUF_writable;
+
+    rc = -EIO;
+    if ( iommu_map_page(currd, bfn, page_to_mfn(page), prot) )
+        goto release;
+
+    rc = rangeset_add_singleton(iommu->iommu_op_ranges, bfn_x(bfn));
+    if ( rc )
+        goto unmap;
+
+    rc = 0;
+    goto unlock; /* retain mapping and references */
+
+ unmap:
+    ignore = iommu_unmap_page(currd, bfn);
+
+ release:
+    if ( !readonly )
+        put_page_type(page);
+    put_page(page);
+
+ unlock:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+static int iommuop_unmap(struct xen_iommu_op_unmap *op)
+{
+    struct domain *d, *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    bfn_t bfn = _bfn(op->bfn);
+    mfn_t mfn;
+    bool readonly;
+    unsigned int prot;
+    struct page_info *page;
+    int rc;
+
+    if ( op->pad0 || op->pad1 )
+        return -EINVAL;
+
+    if ( !iommu->iommu_op_ranges )
+        return -EOPNOTSUPP;
+
+    if ( !rangeset_contains_singleton(iommu->iommu_op_ranges, bfn_x(bfn)) ||
+         iommu_lookup_page(currd, bfn, &mfn, &prot) ||
+         !mfn_valid(mfn) )
+        return -ENOENT;
+
+    readonly = !(prot & IOMMUF_writable);
+
+    d = rcu_lock_domain_by_any_id(op->domid);
+    if ( !d )
+        return -ESRCH;
+
+    rc = get_paged_gfn(d, _gfn(op->gfn), !(prot & IOMMUF_writable), NULL,
+                       &page);
+    if ( rc )
+        goto unlock;
+
+    put_page(page); /* release extra reference just taken */
+
+    rc = -EINVAL;
+    if ( !mfn_eq(page_to_mfn(page), mfn) )
+        goto unlock;
+
+    /* release reference taken in map */
+    if ( !readonly )
+        put_page_type(page);
+    put_page(page);
+
+    rc = rangeset_remove_singleton(iommu->iommu_op_ranges, bfn_x(bfn));
+    if ( rc )
+        goto unlock;
+
+    if ( iommu_unmap_page(currd, bfn) )
+        rc = -EIO;
+
+ unlock:
+    rcu_unlock_domain(d);
+
+    return rc;
+}
+
+static int iommuop_flush(void)
+{
+    return !iommu_iotlb_flush_all(current->domain) ? 0 : -EIO;
+}
+
 static void iommu_op(xen_iommu_op_t *op)
 {
     switch ( op->op )
@@ -126,6 +251,22 @@ static void iommu_op(xen_iommu_op_t *op)
         op->status = iommu_op_enable_modification();
         break;
 
+    case XEN_IOMMUOP_map:
+        this_cpu(iommu_dont_flush_iotlb) = 1;
+        op->status = iommuop_map(&op->u.map);
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        break;
+
+    case XEN_IOMMUOP_unmap:
+        this_cpu(iommu_dont_flush_iotlb) = 1;
+        op->status = iommuop_unmap(&op->u.unmap);
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        break;
+
+    case XEN_IOMMUOP_flush:
+        op->status = iommuop_flush();
+        break;
+
     default:
         op->status = -EOPNOTSUPP;
         break;
@@ -139,6 +280,9 @@ int do_one_iommu_op(xen_iommu_op_buf_t *buf)
     static const size_t op_size[] = {
         [XEN_IOMMUOP_query_reserved] = sizeof(struct xen_iommu_op_query_reserved),
         [XEN_IOMMUOP_enable_modification] = 0,
+        [XEN_IOMMUOP_map] = sizeof(struct xen_iommu_op_map),
+        [XEN_IOMMUOP_unmap] = sizeof(struct xen_iommu_op_unmap),
+        [XEN_IOMMUOP_flush] = 0,
     };
     size_t size;
     int rc;
@@ -226,6 +370,9 @@ int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
     static const size_t op_size[] = {
         [XEN_IOMMUOP_query_reserved] = sizeof(struct compat_iommu_op_query_reserved),
         [XEN_IOMMUOP_enable_modification] = 0,
+        [XEN_IOMMUOP_map] = sizeof(struct compat_iommu_op_map),
+        [XEN_IOMMUOP_unmap] = sizeof(struct compat_iommu_op_unmap),
+        [XEN_IOMMUOP_flush] = 0,
     };
     size_t size;
     xen_iommu_op_t nat;
@@ -263,6 +410,8 @@ int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
      * we need to fix things up here.
      */
 #define XLAT_iommu_op_u_query_reserved XEN_IOMMUOP_query_reserved
+#define XLAT_iommu_op_u_map XEN_IOMMUOP_map
+#define XLAT_iommu_op_u_unmap XEN_IOMMUOP_unmap
 
     u = cmp.op;
 
 #define XLAT_iommu_op_query_reserved_HNDL_ranges(_d_, _s_) \
@@ -322,6 +471,8 @@ int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
                                  &cmp, status) )
         return -EFAULT;
 
+#undef XLAT_iommu_op_u_unmap
+#undef XLAT_iommu_op_u_map
 #undef XLAT_iommu_op_u_query_reserved
 
     return 0;
diff --git a/xen/include/public/iommu_op.h b/xen/include/public/iommu_op.h
index 9bf74bd007..c8dc531c83 100644
--- a/xen/include/public/iommu_op.h
+++ b/xen/include/public/iommu_op.h
@@ -67,6 +67,47 @@ struct xen_iommu_op_query_reserved {
  */
#define XEN_IOMMUOP_enable_modification 2
 
+/*
+ * XEN_IOMMUOP_map: Map a guest page in the IOMMU.
+ */
+#define XEN_IOMMUOP_map 3
+
+struct xen_iommu_op_map {
+    /* IN - The domid of the guest */
+    domid_t domid;
+    uint16_t flags;
+
+#define _XEN_IOMMUOP_map_readonly 0
+#define XEN_IOMMUOP_map_readonly (1 << (_XEN_IOMMUOP_map_readonly))
+
+    uint32_t pad;
+    /* IN - The IOMMU frame number which will hold the new mapping */
+    xen_bfn_t bfn;
+    /* IN - The guest frame number of the page to be mapped */
+    xen_pfn_t gfn;
+};
+
+/*
+ * XEN_IOMMUOP_unmap: Remove a mapping in the IOMMU.
+ */
+#define XEN_IOMMUOP_unmap 4
+
+struct xen_iommu_op_unmap {
+    /* IN - The domid of the guest */
+    domid_t domid;
+    uint16_t pad0;
+    uint32_t pad1;
+    /* IN - The IOMMU frame number which holds the mapping to be removed */
+    xen_bfn_t bfn;
+    /* IN - The guest frame number of the page that is mapped */
+    xen_pfn_t gfn;
+};
+
+/*
+ * XEN_IOMMUOP_flush: Flush the IOMMU TLB.
+ */
+#define XEN_IOMMUOP_flush 5
+
 struct xen_iommu_op {
     uint16_t op;    /* op type */
     uint16_t pad;
@@ -74,6 +115,8 @@ struct xen_iommu_op {
                     /* 0 for success otherwise, negative errno */
     union {
         struct xen_iommu_op_query_reserved query_reserved;
+        struct xen_iommu_op_map map;
+        struct xen_iommu_op_unmap unmap;
     } u;
 };
 typedef struct xen_iommu_op xen_iommu_op_t;
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index d2f9b1034b..3ad7eadb5a 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -79,7 +79,9 @@
 ?	vcpu_hvm_x86_64			hvm/hvm_vcpu.h
 !	iommu_op			iommu_op.h
 !	iommu_op_buf			iommu_op.h
+!	iommu_op_map			iommu_op.h
 !	iommu_op_query_reserved		iommu_op.h
+!	iommu_op_unmap			iommu_op.h
 ?	iommu_reserved_range		iommu_op.h
 ?	kexec_exec			kexec.h
 !	kexec_image			kexec.h
-- 
2.11.0
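[Illustrative sketch only, not part of the patch.] A caller (e.g. a backend
driver domain) might use the new interface roughly as follows. The
xen_iommu_op/xen_iommu_op_map/xen_iommu_op_unmap layouts and the
XEN_IOMMUOP_* constants are taken from the header changes above;
issue_iommu_op() is a hypothetical wrapper standing in for whatever code
packages the op into a xen_iommu_op_buf_t and issues the hypercall, which
is not shown here.

    /*
     * Sketch: map one of a frontend domain's pages for DMA at a chosen
     * bus frame number, then tear the mapping down again.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <xen/iommu_op.h>   /* assumed install location of the public header */

    /* Hypothetical transport: wraps the op in a buffer and issues the hypercall. */
    int issue_iommu_op(xen_iommu_op_t *op);

    static int map_and_flush(domid_t owner, xen_pfn_t gfn, xen_bfn_t bfn,
                             bool readonly)
    {
        xen_iommu_op_t op;

        memset(&op, 0, sizeof(op));
        op.op = XEN_IOMMUOP_map;
        op.u.map.domid = owner;  /* page owner; DOMID_SELF for the caller's own frame */
        op.u.map.flags = readonly ? XEN_IOMMUOP_map_readonly : 0;
        op.u.map.gfn = gfn;      /* frame in the owner's GFN space */
        op.u.map.bfn = bfn;      /* bus frame number the device will use */

        if ( issue_iommu_op(&op) || op.status )
            return -1;

        /* Map/unmap defer IOTLB maintenance; flush to synchronize with hardware. */
        memset(&op, 0, sizeof(op));
        op.op = XEN_IOMMUOP_flush;

        return (issue_iommu_op(&op) || op.status) ? -1 : 0;
    }

    static int unmap_and_flush(domid_t owner, xen_pfn_t gfn, xen_bfn_t bfn)
    {
        xen_iommu_op_t op;

        memset(&op, 0, sizeof(op));
        op.op = XEN_IOMMUOP_unmap;
        op.u.unmap.domid = owner; /* cross-checked against the tracked mapping */
        op.u.unmap.gfn = gfn;     /* likewise cross-checked */
        op.u.unmap.bfn = bfn;     /* mapping to remove */

        if ( issue_iommu_op(&op) || op.status )
            return -1;

        memset(&op, 0, sizeof(op));
        op.op = XEN_IOMMUOP_flush;

        return (issue_iommu_op(&op) || op.status) ? -1 : 0;
    }

Keeping the flush as a separate op allows a caller to batch a number of
map/unmap operations and synchronize with the hardware just once at the
end, which is presumably why the map and unmap handlers suppress the
per-operation IOTLB flush via iommu_dont_flush_iotlb.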