[Xen-devel] [PATCH for-4.5 v8 12/19] xen/arm: p2m changes for mem_access support
Add the necessary changes to the page table construction routines so that
they pass the default access information, and store the p2m_access_t
settings in a per-domain radix tree, as the PTE lacks enough software
programmable bits to hold them.

Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
---
v8: - Drop lock inputs as common mem_access_check is postponed.
    - Resurrect the radix tree with an extra boolean access_in_use flag
      to indicate if the tree is empty to avoid lookups.
v7: - Remove radix tree init/destroy and move p2m_access_t store to page_info.
    - Add p2m_gpfn_lock/unlock functions.
    - Add bool_t lock input to p2m_lookup and apply_p2m_changes so the caller
      can specify if locking should be performed. This is needed in order to
      support mem_access_check from common.
v6: - Move mem_event header include to first patch that needs it.
v5: - #include grouping style-fix.
v4: - Move p2m_get_hostp2m definition here.
---
 xen/arch/arm/p2m.c           | 49 +++++++++++++++++++---------
 xen/include/asm-arm/domain.h |  1 +
 xen/include/asm-arm/p2m.h    | 77 ++++++++++++++++++++++++++++++--------------
 3 files changed, 86 insertions(+), 41 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 4dccf7b..760d064 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -228,7 +228,7 @@ int p2m_pod_decrease_reservation(struct domain *d,
 }
 
 static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
-                               p2m_type_t t)
+                               p2m_type_t t, p2m_access_t a)
 {
     paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
     /* sh, xn and write bit will be defined in the following switches
@@ -346,7 +346,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
         for ( i=0 ; i < LPAE_ENTRIES; i++ )
         {
             pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
-                                   MATTR_MEM, t);
+                                   MATTR_MEM, t, p2m->default_access);
 
             /*
              * First and second level super pages set p2m.table = 0, but
@@ -366,7 +366,8 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
 
     unmap_domain_page(p);
 
-    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);
+    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid,
+                           p2m->default_access);
 
     p2m_write_pte(entry, pte, flush_cache);
 
@@ -469,7 +470,8 @@ static int apply_one_level(struct domain *d,
                            paddr_t *maddr,
                            bool_t *flush,
                            int mattr,
-                           p2m_type_t t)
+                           p2m_type_t t,
+                           p2m_access_t a)
 {
     const paddr_t level_size = level_sizes[level];
     const paddr_t level_mask = level_masks[level];
@@ -498,7 +500,7 @@ static int apply_one_level(struct domain *d,
         page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
         if ( page )
         {
-            pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);
+            pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
             if ( level < 3 )
                 pte.p2m.table = 0;
             p2m_write_pte(entry, pte, flush_cache);
@@ -533,7 +535,7 @@ static int apply_one_level(struct domain *d,
          (level == 3 || !p2m_table(orig_pte)) )
     {
         /* New mapping is superpage aligned, make it */
-        pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t);
+        pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
         if ( level < 3 )
             pte.p2m.table = 0; /* Superpage entry */
 
@@ -712,7 +714,9 @@ static int apply_p2m_changes(struct domain *d,
                      paddr_t end_gpaddr,
                      paddr_t maddr,
                      int mattr,
-                     p2m_type_t t)
+                     uint32_t mask,
+                     p2m_type_t t,
+                     p2m_access_t a)
 {
     int rc, ret;
     struct p2m_domain *p2m = &d->arch.p2m;
@@ -805,7 +809,7 @@ static int apply_p2m_changes(struct domain *d,
                                   level, flush_pt, op,
                                   start_gpaddr, end_gpaddr,
                                   &addr, &maddr, &flush,
-                                  mattr, t);
+                                  mattr, t, a);
             if ( ret < 0 ) { rc = ret ; goto out; }
             count += ret;
             /* L3 had better have done something! We cannot descend any further */
@@ -863,7 +867,7 @@ out:
          */
         apply_p2m_changes(d, REMOVE,
                           start_gpaddr, addr + level_sizes[level], orig_maddr,
-                          mattr, p2m_invalid);
+                          mattr, 0, p2m_invalid, d->arch.p2m.default_access);
     }
 
     for ( level = P2M_ROOT_LEVEL; level < 4; level ++ )
@@ -882,7 +886,8 @@ int p2m_populate_ram(struct domain *d,
                      paddr_t end)
 {
     return apply_p2m_changes(d, ALLOCATE, start, end,
-                             0, MATTR_MEM, p2m_ram_rw);
+                             0, MATTR_MEM, 0, p2m_ram_rw,
+                             d->arch.p2m.default_access);
 }
 
 int map_mmio_regions(struct domain *d,
@@ -894,7 +899,8 @@ int map_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_mmio_direct);
+                             MATTR_DEV, 0, p2m_mmio_direct,
+                             d->arch.p2m.default_access);
 }
 
 int unmap_mmio_regions(struct domain *d,
@@ -906,7 +912,8 @@ int unmap_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_invalid);
+                             MATTR_DEV, 0, p2m_invalid,
+                             d->arch.p2m.default_access);
 }
 
 int guest_physmap_add_entry(struct domain *d,
@@ -918,7 +925,8 @@ int guest_physmap_add_entry(struct domain *d,
     return apply_p2m_changes(d, INSERT,
                              pfn_to_paddr(gpfn),
                              pfn_to_paddr(gpfn + (1 << page_order)),
-                             pfn_to_paddr(mfn), MATTR_MEM, t);
+                             pfn_to_paddr(mfn), MATTR_MEM, 0, t,
+                             d->arch.p2m.default_access);
 }
 
 void guest_physmap_remove_page(struct domain *d,
@@ -928,7 +936,8 @@ void guest_physmap_remove_page(struct domain *d,
     apply_p2m_changes(d, REMOVE,
                       pfn_to_paddr(gpfn),
                       pfn_to_paddr(gpfn + (1<<page_order)),
-                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
+                      pfn_to_paddr(mfn), MATTR_MEM, 0, p2m_invalid,
+                      d->arch.p2m.default_access);
 }
 
 int arch_grant_map_page_identity(struct domain *d, unsigned long frame,
@@ -1058,6 +1067,8 @@ void p2m_teardown(struct domain *d)
 
     p2m_free_vmid(d);
 
+    radix_tree_destroy(&p2m->mem_access_settings, NULL);
+
     spin_unlock(&p2m->lock);
 }
 
@@ -1083,6 +1094,10 @@ int p2m_init(struct domain *d)
     p2m->max_mapped_gfn = 0;
     p2m->lowest_mapped_gfn = ULONG_MAX;
 
+    p2m->default_access = p2m_access_rwx;
+    p2m->access_in_use = false;
+    radix_tree_init(&p2m->mem_access_settings);
+
 err:
     spin_unlock(&p2m->lock);
 
@@ -1097,7 +1112,8 @@ int relinquish_p2m_mapping(struct domain *d)
                               pfn_to_paddr(p2m->lowest_mapped_gfn),
                               pfn_to_paddr(p2m->max_mapped_gfn),
                               pfn_to_paddr(INVALID_MFN),
-                              MATTR_MEM, p2m_invalid);
+                              MATTR_MEM, 0, p2m_invalid,
+                              d->arch.p2m.default_access);
 }
 
 int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
@@ -1111,7 +1127,8 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
                               pfn_to_paddr(start_mfn),
                               pfn_to_paddr(end_mfn),
                               pfn_to_paddr(INVALID_MFN),
-                              MATTR_MEM, p2m_invalid);
+                              MATTR_MEM, 0, p2m_invalid,
+                              d->arch.p2m.default_access);
 }
 
 unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 787e93c..3d69152 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -17,6 +17,7 @@ struct hvm_domain {
     uint64_t params[HVM_NR_PARAMS];
 
     struct hvm_iommu iommu;
+    bool_t introspection_enabled;
 } __cacheline_aligned;
 
 #ifdef CONFIG_ARM_64
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 27225c4..dba0df5 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -2,6 +2,8 @@
 #define _XEN_P2M_H
 
 #include <xen/mm.h>
+#include <xen/radix-tree.h>
+#include <public/mem_event.h> /* for mem_event_response_t */
 
 #include <xen/p2m-common.h>
 
@@ -11,6 +13,31 @@ struct domain;
 
 extern void memory_type_changed(struct domain *);
 
+/* List of possible type for each page in the p2m entry.
+ * The number of available bit per page in the pte for this purpose is 4 bits.
+ * So it's possible to only have 16 fields. If we run out of value in the
+ * future, it's possible to use higher value for pseudo-type and don't store
+ * them in the p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
+    p2m_map_foreign,    /* Ram pages from foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be store in the p2m */
+} p2m_type_t;
+
+/* Look up a GFN and take a reference count on the backing page. */
+typedef unsigned int p2m_query_t;
+#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
+#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -48,27 +75,20 @@ struct p2m_domain {
     /* If true, and an access fault comes in and there is no mem_event listener,
      * pause domain. Otherwise, remove access restrictions. */
     bool_t access_required;
-};
 
-/* List of possible type for each page in the p2m entry.
- * The number of available bit per page in the pte for this purpose is 4 bits.
- * So it's possible to only have 16 fields. If we run out of value in the
- * future, it's possible to use higher value for pseudo-type and don't store
- * them in the p2m entry.
- */
-typedef enum {
-    p2m_invalid = 0,    /* Nothing mapped here */
-    p2m_ram_rw,         /* Normal read/write guest RAM */
-    p2m_ram_ro,         /* Read-only; writes are silently dropped */
-    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
-    p2m_map_foreign,    /* Ram pages from foreign domain */
-    p2m_grant_map_rw,   /* Read/write grant mapping */
-    p2m_grant_map_ro,   /* Read-only grant mapping */
-    /* The types below are only used to decide the page attribute in the P2M */
-    p2m_iommu_map_rw,   /* Read/write iommu mapping */
-    p2m_iommu_map_ro,   /* Read-only iommu mapping */
-    p2m_max_real_type,  /* Types after this won't be store in the p2m */
-} p2m_type_t;
+    /* Defines if mem_access is in use for the domain to avoid uneccessary radix
+     * lookups. */
+    bool_t access_in_use;
+
+    /* Default P2M access type for each page in the the domain: new pages,
+     * swapped in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type. See definition of p2m_access_t. */
+    p2m_access_t default_access;
+
+    /* Radix tree to store the p2m_access_t settings as the pte's don't have
+     * enough available bits to store this information. */
+    struct radix_tree_root mem_access_settings;
+};
 
 static inline
 void p2m_mem_event_emulate_check(struct domain *d,
@@ -77,6 +97,7 @@ void p2m_mem_event_emulate_check(struct domain *d,
     /* Not supported on ARM. */
 };
 
+static inline
 void p2m_enable_msr_exit_interception(struct domain *d)
 {
     /* Not supported on ARM. */
@@ -157,11 +178,6 @@ p2m_pod_decrease_reservation(struct domain *d,
                             xen_pfn_t gpfn,
                             unsigned int order);
 
-/* Look up a GFN and take a reference count on the backing page. */
-typedef unsigned int p2m_query_t;
-#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
-#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
-
 static inline struct page_info *get_page_from_gfn(
     struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
 {
@@ -220,6 +236,17 @@ int arch_grant_unmap_page_identity(struct domain *d, unsigned long frame);
 /* get host p2m table */
 #define p2m_get_hostp2m(d) (&(d)->arch.p2m)
 
+/* mem_event and mem_access are supported on any ARM guest */
+static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
+{
+    return 1;
+}
+
+static inline bool_t p2m_mem_event_sanity_check(struct domain *d)
+{
+    return 1;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
-- 
2.1.0
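As a rough illustration of how the new p2m_domain fields are meant to be
consumed, the sketch below shows a lookup path that checks access_in_use
before touching the radix tree. This is not part of the patch: the helper
name is hypothetical, and it assumes the per-gfn settings are stored with
radix_tree_int_to_ptr() from xen/radix-tree.h (the actual consumer is
introduced later in the series).

/* Hypothetical helper, for illustration only; assumes xen/radix-tree.h
 * and asm/p2m.h are available. */
static p2m_access_t p2m_get_access(struct p2m_domain *p2m, unsigned long gfn)
{
    void *entry;

    /* Fast path: no listener has ever stored a per-page access type,
     * so the tree is empty and the domain default applies. */
    if ( !p2m->access_in_use )
        return p2m->default_access;

    entry = radix_tree_lookup(&p2m->mem_access_settings, gfn);
    if ( entry == NULL )
        return p2m->default_access;

    /* Decode a value assumed to have been stored via radix_tree_int_to_ptr(). */
    return (p2m_access_t)radix_tree_ptr_to_int(entry);
}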