Re: [Xen-devel] [PATCH 11/22] xen/arm: p2m: Find the memory attributes based on the p2m type
On Wed, 27 Jul 2016, Julien Grall wrote:
> Hi,
> 
> On 20/07/16 17:10, Julien Grall wrote:
> > Currently, mfn_to_p2m_entry relies on the caller to provide the
> > correct memory attribute and deduces the shareability from it.
> >
> > Some of the callers, such as p2m_create_table, use the same memory
> > attribute regardless of the underlying p2m type. For instance, this
> > will change the memory attribute from MATTR_DEV to MATTR_MEM when an
> > MMIO superpage is shattered.
> >
> > Furthermore, it makes it more difficult to support different
> > shareability with the same memory attribute.
> >
> > All the memory attributes can be deduced from the p2m type. This
> > simplifies the code by dropping one parameter.
> 
> I just noticed that I forgot to add my Signed-off-by. Stefano, can you
> add my Signed-off-by while committing?

I could, but given that you need to resend some of the patches anyway,
it might be easier for me to wait for the next version.

> > ---
> > I am not sure whether p2m_mmio_direct_c (cacheable MMIO) should use
> > the outer-shareability or inner-shareability. Any opinions?
> > ---
> >  xen/arch/arm/p2m.c | 55 ++++++++++++++++++++++++-------------------------------
> >  1 file changed, 24 insertions(+), 31 deletions(-)
> >
> > diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> > index 999de2b..2f50b4f 100644
> > --- a/xen/arch/arm/p2m.c
> > +++ b/xen/arch/arm/p2m.c
> > @@ -325,8 +325,7 @@ static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
> >      }
> >  }
> >  
> > -static lpae_t mfn_to_p2m_entry(mfn_t mfn, unsigned int mattr,
> > -                               p2m_type_t t, p2m_access_t a)
> > +static lpae_t mfn_to_p2m_entry(mfn_t mfn, p2m_type_t t, p2m_access_t a)
> >  {
> >      /*
> >       * sh, xn and write bit will be defined in the following switches
> > @@ -335,7 +334,6 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, unsigned int mattr,
> >      lpae_t e = (lpae_t) {
> >          .p2m.af = 1,
> >          .p2m.read = 1,
> > -        .p2m.mattr = mattr,
> >          .p2m.table = 1,
> >          .p2m.valid = 1,
> >          .p2m.type = t,
> > @@ -343,18 +341,21 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, unsigned int mattr,
> >  
> >      BUILD_BUG_ON(p2m_max_real_type > (1 << 4));
> >  
> > -    switch (mattr)
> > +    switch ( t )
> >      {
> > -    case MATTR_MEM:
> > -        e.p2m.sh = LPAE_SH_INNER;
> > +    case p2m_mmio_direct_nc:
> > +        e.p2m.mattr = MATTR_DEV;
> > +        e.p2m.sh = LPAE_SH_OUTER;
> >          break;
> >  
> > -    case MATTR_DEV:
> > +    case p2m_mmio_direct_c:
> > +        e.p2m.mattr = MATTR_MEM;
> >          e.p2m.sh = LPAE_SH_OUTER;
> >          break;
> > +
> >      default:
> > -        BUG();
> > -        break;
> > +        e.p2m.mattr = MATTR_MEM;
> > +        e.p2m.sh = LPAE_SH_INNER;
> >      }
> >  
> >      p2m_set_permission(&e, t, a);
> > @@ -421,7 +422,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
> >           */
> >          for ( i=0 ; i < LPAE_ENTRIES; i++ )
> >          {
> > -            pte = mfn_to_p2m_entry(mfn, MATTR_MEM, t, p2m->default_access);
> > +            pte = mfn_to_p2m_entry(mfn, t, p2m->default_access);
> >  
> >              mfn = mfn_add(mfn, 1UL << (level_shift - LPAE_SHIFT));
> >  
> > @@ -445,7 +446,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
> >  
> >      unmap_domain_page(p);
> >  
> > -    pte = mfn_to_p2m_entry(_mfn(page_to_mfn(page)), MATTR_MEM, p2m_invalid,
> > +    pte = mfn_to_p2m_entry(_mfn(page_to_mfn(page)), p2m_invalid,
> >                             p2m->default_access);
> >  
> >      p2m_write_pte(entry, pte, flush_cache);
> > @@ -666,7 +667,6 @@ static int apply_one_level(struct domain *d,
> >                             paddr_t *addr,
> >                             paddr_t *maddr,
> >                             bool_t *flush,
> > -                           int mattr,
> >                             p2m_type_t t,
> >                             p2m_access_t a)
> >  {
> > @@ -695,7 +695,7 @@ static int apply_one_level(struct domain *d,
> >              return rc;
> >  
> >          /* New mapping is superpage aligned, make it */
> > -        pte = mfn_to_p2m_entry(_mfn(*maddr >> PAGE_SHIFT), mattr, t, a);
> > +        pte = mfn_to_p2m_entry(_mfn(*maddr >> PAGE_SHIFT), t, a);
> >          if ( level < 3 )
> >              pte.p2m.table = 0; /* Superpage entry */
> >  
> > @@ -915,7 +915,6 @@ static int apply_p2m_changes(struct domain *d,
> >                               gfn_t sgfn,
> >                               unsigned long nr,
> >                               mfn_t smfn,
> > -                             int mattr,
> >                               uint32_t mask,
> >                               p2m_type_t t,
> >                               p2m_access_t a)
> > @@ -1054,7 +1053,7 @@ static int apply_p2m_changes(struct domain *d,
> >                                    level, flush_pt, op,
> >                                    start_gpaddr, end_gpaddr,
> >                                    &addr, &maddr, &flush,
> > -                                  mattr, t, a);
> > +                                  t, a);
> >              if ( ret < 0 ) { rc = ret ; goto out; }
> >              count += ret;
> >  
> > @@ -1163,7 +1162,7 @@ out:
> >           * mapping.
> >           */
> >          apply_p2m_changes(d, REMOVE, sgfn, gfn - gfn_x(sgfn), smfn,
> > -                          mattr, 0, p2m_invalid, d->arch.p2m.default_access);
> > +                          0, p2m_invalid, d->arch.p2m.default_access);
> >      }
> >  
> >      return rc;
> > @@ -1173,10 +1172,10 @@ static inline int p2m_insert_mapping(struct domain *d,
> >                                       gfn_t start_gfn,
> >                                       unsigned long nr,
> >                                       mfn_t mfn,
> > -                                     int mattr, p2m_type_t t)
> > +                                     p2m_type_t t)
> >  {
> >      return apply_p2m_changes(d, INSERT, start_gfn, nr, mfn,
> > -                             mattr, 0, t, d->arch.p2m.default_access);
> > +                             0, t, d->arch.p2m.default_access);
> >  }
> >  
> >  static inline int p2m_remove_mapping(struct domain *d,
> > @@ -1186,8 +1185,7 @@ static inline int p2m_remove_mapping(struct domain *d,
> >  {
> >      return apply_p2m_changes(d, REMOVE, start_gfn, nr, mfn,
> >                               /* arguments below not used when removing mapping */
> > -                             MATTR_MEM, 0, p2m_invalid,
> > -                             d->arch.p2m.default_access);
> > +                             0, p2m_invalid, d->arch.p2m.default_access);
> >  }
> >  
> >  int map_regions_rw_cache(struct domain *d,
> > @@ -1195,8 +1193,7 @@ int map_regions_rw_cache(struct domain *d,
> >                           unsigned long nr,
> >                           mfn_t mfn)
> >  {
> > -    return p2m_insert_mapping(d, gfn, nr, mfn,
> > -                              MATTR_MEM, p2m_mmio_direct_c);
> > +    return p2m_insert_mapping(d, gfn, nr, mfn, p2m_mmio_direct_c);
> >  }
> >  
> >  int unmap_regions_rw_cache(struct domain *d,
> > @@ -1212,8 +1209,7 @@ int map_mmio_regions(struct domain *d,
> >                       unsigned long nr,
> >                       mfn_t mfn)
> >  {
> > -    return p2m_insert_mapping(d, start_gfn, nr, mfn,
> > -                              MATTR_DEV, p2m_mmio_direct_nc);
> > +    return p2m_insert_mapping(d, start_gfn, nr, mfn, p2m_mmio_direct_nc);
> >  }
> >  
> >  int unmap_mmio_regions(struct domain *d,
> > @@ -1251,8 +1247,7 @@ int guest_physmap_add_entry(struct domain *d,
> >                              unsigned long page_order,
> >                              p2m_type_t t)
> >  {
> > -    return p2m_insert_mapping(d, gfn, (1 << page_order), mfn,
> > -                              MATTR_MEM, t);
> > +    return p2m_insert_mapping(d, gfn, (1 << page_order), mfn, t);
> >  }
> >  
> >  void guest_physmap_remove_page(struct domain *d,
> > @@ -1412,7 +1407,7 @@ int relinquish_p2m_mapping(struct domain *d)
> >      nr = gfn_x(p2m->max_mapped_gfn) - gfn_x(p2m->lowest_mapped_gfn);
> >  
> >      return apply_p2m_changes(d, RELINQUISH, p2m->lowest_mapped_gfn, nr,
> > -                             INVALID_MFN, MATTR_MEM, 0, p2m_invalid,
> > +                             INVALID_MFN, 0, p2m_invalid,
> >                               d->arch.p2m.default_access);
> >  }
> >  
> > @@ -1425,8 +1420,7 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
> >      end = gfn_min(end, p2m->max_mapped_gfn);
> >  
> >      return apply_p2m_changes(d, CACHEFLUSH, start, nr, INVALID_MFN,
> > -                             MATTR_MEM, 0, p2m_invalid,
> > -                             d->arch.p2m.default_access);
> > +                             0, p2m_invalid, d->arch.p2m.default_access);
> >  }
> >  
> >  mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
> > @@ -1827,8 +1821,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
> >      }
> >  
> >      rc = apply_p2m_changes(d, MEMACCESS, gfn_add(gfn, start),
> > -                           (nr - start), INVALID_MFN,
> > -                           MATTR_MEM, mask, 0, a);
> > +                           (nr - start), INVALID_MFN, mask, 0, a);
> >      if ( rc < 0 )
> >          return rc;
> >      else if ( rc > 0 )
> > 
> 
> -- 
> Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
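
[Editor's note] To see the policy introduced by the patch in isolation, here is a minimal standalone C sketch (not part of the patch or of this thread; the enum definitions below are simplified stand-ins for Xen's p2m_type_t, MATTR_* and LPAE_SH_* types, and only the three cases visible in the diff are modelled):

/*
 * Standalone sketch: deduce both the memory attribute and the
 * shareability from the p2m type, as the patch's new switch in
 * mfn_to_p2m_entry() does. Not Xen source code.
 */
#include <stdio.h>

typedef enum {
    p2m_ram_rw,          /* normal guest RAM (stand-in for the default case) */
    p2m_mmio_direct_nc,  /* device MMIO, non-cacheable */
    p2m_mmio_direct_c,   /* MMIO mapped cacheable */
} p2m_type_t;

enum mattr { MATTR_DEV, MATTR_MEM };
enum shareability { LPAE_SH_INNER, LPAE_SH_OUTER };

static void p2m_type_to_attr(p2m_type_t t, enum mattr *mattr,
                             enum shareability *sh)
{
    switch ( t )
    {
    case p2m_mmio_direct_nc:
        *mattr = MATTR_DEV;
        *sh = LPAE_SH_OUTER;
        break;

    case p2m_mmio_direct_c:
        /* Cacheable MMIO: outer-shareable in the patch, pending review. */
        *mattr = MATTR_MEM;
        *sh = LPAE_SH_OUTER;
        break;

    default:
        /* Everything else is treated as normal, inner-shareable memory. */
        *mattr = MATTR_MEM;
        *sh = LPAE_SH_INNER;
        break;
    }
}

int main(void)
{
    enum mattr m;
    enum shareability s;

    p2m_type_to_attr(p2m_mmio_direct_c, &m, &s);
    printf("p2m_mmio_direct_c -> mattr=%d sh=%d\n", m, s);
    return 0;
}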