[Xen-devel] [PATCH 04/22] xen/arm: p2m: Fix multi-lines coding style comments
The start and end markers should be on separate lines.

Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
---
 xen/arch/arm/p2m.c        | 35 ++++++++++++++++++++++------------
 xen/include/asm-arm/p2m.h | 48 +++++++++++++++++++++++++++++++----------------
 2 files changed, 55 insertions(+), 28 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 64d84cc..79095f1 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -33,9 +33,11 @@ static bool_t p2m_valid(lpae_t pte)
 {
     return pte.p2m.valid;
 }
-/* These two can only be used on L0..L2 ptes because L3 mappings set
+/*
+ * These two can only be used on L0..L2 ptes because L3 mappings set
  * the table bit and therefore these would return the opposite to what
- * you would expect. */
+ * you would expect.
+ */
 static bool_t p2m_table(lpae_t pte)
 {
     return p2m_valid(pte) && pte.p2m.table;
@@ -119,7 +121,8 @@ void flush_tlb_domain(struct domain *d)
 {
     unsigned long flags = 0;
 
-    /* Update the VTTBR if necessary with the domain d. In this case,
+    /*
+     * Update the VTTBR if necessary with the domain d. In this case,
      * it's only necessary to flush TLBs on every CPUs with the current VMID
      * (our domain).
      */
@@ -325,8 +328,10 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
                                p2m_type_t t, p2m_access_t a)
 {
     paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
-    /* sh, xn and write bit will be defined in the following switches
-     * based on mattr and t. */
+    /*
+     * sh, xn and write bit will be defined in the following switches
+     * based on mattr and t.
+     */
     lpae_t e = (lpae_t) {
         .p2m.af = 1,
         .p2m.read = 1,
@@ -552,15 +557,17 @@ enum p2m_operation {
     MEMACCESS,
 };
 
-/* Put any references on the single 4K page referenced by pte. TODO:
- * Handle superpages, for now we only take special references for leaf
+/*
+ * Put any references on the single 4K page referenced by pte.
+ * TODO: Handle superpages, for now we only take special references for leaf
  * pages (specifically foreign ones, which can't be super mapped today).
  */
 static void p2m_put_l3_page(const lpae_t pte)
 {
     ASSERT(p2m_valid(pte));
 
-    /* TODO: Handle other p2m types
+    /*
+     * TODO: Handle other p2m types
      *
      * It's safe to do the put_page here because page_alloc will
      * flush the TLBs if the page is reallocated before the end of
@@ -932,7 +939,8 @@ static int apply_p2m_changes(struct domain *d,
     PAGE_LIST_HEAD(free_pages);
     struct page_info *pg;
 
-    /* Some IOMMU don't support coherent PT walk. When the p2m is
+    /*
+     * Some IOMMU don't support coherent PT walk. When the p2m is
      * shared with the CPU, Xen has to make sure that the PT changes have
      * reached the memory
      */
@@ -1275,7 +1283,8 @@ int p2m_alloc_table(struct domain *d)
 
     d->arch.vttbr = page_to_maddr(p2m->root) | ((uint64_t)p2m->vmid&0xff)<<48;
 
-    /* Make sure that all TLBs corresponding to the new VMID are flushed
+    /*
+     * Make sure that all TLBs corresponding to the new VMID are flushed
      * before using it
      */
     flush_tlb_domain(d);
@@ -1290,8 +1299,10 @@ int p2m_alloc_table(struct domain *d)
 
 static spinlock_t vmid_alloc_lock = SPIN_LOCK_UNLOCKED;
 
-/* VTTBR_EL2 VMID field is 8 bits. Using a bitmap here limits us to
- * 256 concurrent domains. */
+/*
+ * VTTBR_EL2 VMID field is 8 bits. Using a bitmap here limits us to
+ * 256 concurrent domains.
+ */
 static DECLARE_BITMAP(vmid_mask, MAX_VMID);
 
 void p2m_vmid_allocator_init(void)
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 34096bc..8fe78c1 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -31,12 +31,14 @@ struct p2m_domain {
     /* Current VMID in use */
     uint8_t vmid;
 
-    /* Highest guest frame that's ever been mapped in the p2m
+    /*
+     * Highest guest frame that's ever been mapped in the p2m
      * Only takes into account ram and foreign mapping
      */
     gfn_t max_mapped_gfn;
 
-    /* Lowest mapped gfn in the p2m. When releasing mapped gfn's in a
+    /*
+     * Lowest mapped gfn in the p2m. When releasing mapped gfn's in a
      * preemptible manner this is update to track recall where to
      * resume the search. Apart from during teardown this can only
      * decrease. */
@@ -51,24 +53,31 @@ struct p2m_domain {
         unsigned long shattered[4];
     } stats;
 
-    /* If true, and an access fault comes in and there is no vm_event listener,
-     * pause domain. Otherwise, remove access restrictions. */
+    /*
+     * If true, and an access fault comes in and there is no vm_event listener,
+     * pause domain. Otherwise, remove access restrictions.
+     */
     bool_t access_required;
 
     /* Defines if mem_access is in use for the domain. */
     bool_t mem_access_enabled;
 
-    /* Default P2M access type for each page in the the domain: new pages,
+    /*
+     * Default P2M access type for each page in the the domain: new pages,
      * swapped in pages, cleared pages, and pages that are ambiguously
-     * retyped get this access type. See definition of p2m_access_t. */
+     * retyped get this access type. See definition of p2m_access_t.
+     */
     p2m_access_t default_access;
 
-    /* Radix tree to store the p2m_access_t settings as the pte's don't have
-     * enough available bits to store this information. */
+    /*
+     * Radix tree to store the p2m_access_t settings as the pte's don't have
+     * enough available bits to store this information.
+     */
     struct radix_tree_root mem_access_settings;
 };
 
-/* List of possible type for each page in the p2m entry.
+/*
+ * List of possible type for each page in the p2m entry.
  * The number of available bit per page in the pte for this purpose is 4 bits.
  * So it's possible to only have 16 fields. If we run out of value in the
  * future, it's possible to use higher value for pseudo-type and don't store
@@ -116,13 +125,15 @@ int p2m_init(struct domain *d);
 /* Return all the p2m resources to Xen. */
 void p2m_teardown(struct domain *d);
 
-/* Remove mapping refcount on each mapping page in the p2m
+/*
+ * Remove mapping refcount on each mapping page in the p2m
  *
  * TODO: For the moment only foreign mappings are handled
  */
 int relinquish_p2m_mapping(struct domain *d);
 
-/* Allocate a new p2m table for a domain.
+/*
+ * Allocate a new p2m table for a domain.
  *
  * Returns 0 for success or -errno.
  */
@@ -181,8 +192,10 @@ mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
  * Populate-on-demand
  */
 
-/* Call when decreasing memory reservation to handle PoD entries properly.
- * Will return '1' if all entries were handled and nothing more need be done.*/
+/*
+ * Call when decreasing memory reservation to handle PoD entries properly.
+ * Will return '1' if all entries were handled and nothing more need be done.
+ */
 int p2m_pod_decrease_reservation(struct domain *d,
                                  xen_pfn_t gpfn,
                                  unsigned int order);
@@ -210,7 +223,8 @@ static inline struct page_info *get_page_from_gfn(
         return NULL;
     page = mfn_to_page(mfn);
 
-    /* get_page won't work on foreign mapping because the page doesn't
+    /*
+     * get_page won't work on foreign mapping because the page doesn't
      * belong to the current domain.
      */
     if ( p2mt == p2m_map_foreign )
@@ -257,8 +271,10 @@ static inline bool_t p2m_vm_event_sanity_check(struct domain *d)
     return 1;
 }
 
-/* Send mem event based on the access. Boolean return value indicates if trap
- * needs to be injected into guest. */
+/*
+ * Send mem event based on the access. Boolean return value indicates if trap
+ * needs to be injected into guest.
+ */
 bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec);
 
 #endif /* _XEN_P2M_H */
-- 
1.9.1
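For readers unfamiliar with the rule the patch applies: this is the Xen
multi-line comment convention, in which the /* and */ markers each get
their own line and every text line starts with " * ". A minimal
before/after sketch in C (the declaration is a made-up example for
illustration only, not code taken from this patch):

    /* Bad: text shares a line with the start marker
     * and with the end marker. */
    static bool_t example_pte_check(lpae_t pte);

    /*
     * Good: the start and end markers sit on their own lines,
     * and each text line begins with " * ".
     */
    static bool_t example_pte_check(lpae_t pte);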