[Xen-devel] [PATCH 07/17] x86: Use new cache mode type in asm/pgtable.h
Instead of directly using the cache mode bits in the pte, switch to using
the cache mode type. This requires some callers of is_new_memtype_allowed()
to be changed as well.

Signed-off-by: Stefan Bader <stefan.bader@xxxxxxxxxxxxx>
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 arch/x86/include/asm/pgtable.h | 19 ++++++++++---------
 arch/x86/mm/ioremap.c          |  3 ++-
 arch/x86/mm/pat.c              |  8 ++++++--
 3 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index aa97a07..c112ea6 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -9,9 +9,10 @@
 /*
  * Macro to mark a page protection value as UC-
  */
-#define pgprot_noncached(prot)						\
-	((boot_cpu_data.x86 > 3)					\
-	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))		\
+#define pgprot_noncached(prot)						\
+	((boot_cpu_data.x86 > 3)					\
+	 ? (__pgprot(pgprot_val(prot) |					\
+		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
 	 : (prot))

 #ifndef __ASSEMBLY__
@@ -404,8 +405,8 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 #define canon_pgprot(p) __pgprot(massage_pgprot(p))

 static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
-					 unsigned long flags,
-					 unsigned long new_flags)
+					 enum page_cache_mode pcm,
+					 enum page_cache_mode new_pcm)
 {
 	/*
 	 * PAT type is always WB for untracked ranges, so no need to check.
@@ -419,10 +420,10 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
 	 * - request is uncached, return cannot be write-back
 	 * - request is write-combine, return cannot be write-back
 	 */
-	if ((flags == _PAGE_CACHE_UC_MINUS &&
-	     new_flags == _PAGE_CACHE_WB) ||
-	    (flags == _PAGE_CACHE_WC &&
-	     new_flags == _PAGE_CACHE_WB)) {
+	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
+	     new_pcm == _PAGE_CACHE_MODE_WB) ||
+	    (pcm == _PAGE_CACHE_MODE_WC &&
+	     new_pcm == _PAGE_CACHE_MODE_WB)) {
 		return 0;
 	}

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index af78e50..3a81eb9 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -142,7 +142,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,

 	if (prot_val != new_prot_val) {
 		if (!is_new_memtype_allowed(phys_addr, size,
-					    prot_val, new_prot_val)) {
+				pgprot2cachemode(__pgprot(prot_val)),
+				pgprot2cachemode(__pgprot(new_prot_val)))) {
 			printk(KERN_ERR
 		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
 				(unsigned long long)phys_addr,
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6574388..47282c2 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -455,7 +455,9 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
 	if (ret)
 		goto out_err;

-	if (!is_new_memtype_allowed(start, size, req_type, new_type))
+	if (!is_new_memtype_allowed(start, size,
+				    pgprot2cachemode(__pgprot(req_type)),
+				    pgprot2cachemode(__pgprot(new_type))))
 		goto out_free;

 	if (kernel_map_sync_memtype(start, size, new_type) < 0)
@@ -630,7 +632,9 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,

 	if (flags != want_flags) {
 		if (strict_prot ||
-		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
+		    !is_new_memtype_allowed(paddr, size,
+				pgprot2cachemode(__pgprot(want_flags)),
+				pgprot2cachemode(__pgprot(flags)))) {
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
 				" for [mem %#010Lx-%#010Lx], got %s\n",
--
1.8.4.5
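For context, the helpers used above (cachemode2protval() and pgprot2cachemode())
translate between the abstract cache mode type and the raw cache bits in a page
protection value. The standalone sketch below illustrates that translation in
principle only: the bit values, table contents, and plain unsigned long types
here are simplified assumptions for illustration, not the actual arch/x86
implementation (whose translation tables are kept in sync with the PAT MSR
programming).

/*
 * Minimal sketch, assuming simplified PTE cache bits and a fixed
 * cache-mode-to-bits table.  The real kernel uses pgprot_t and
 * per-boot tables; the names below only mirror the patch's helpers.
 */
#include <stdio.h>

/* Assumed PTE cache bits (x86 uses PWT, PCD, PAT). */
#define _PAGE_PWT 0x008UL
#define _PAGE_PCD 0x010UL
#define _PAGE_PAT 0x080UL
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)

enum page_cache_mode {
	_PAGE_CACHE_MODE_WB,
	_PAGE_CACHE_MODE_WC,
	_PAGE_CACHE_MODE_UC_MINUS,
	_PAGE_CACHE_MODE_UC,
	_PAGE_CACHE_MODE_NUM
};

/* Hypothetical fixed mapping from cache mode to PTE cache bits. */
static const unsigned long cachemode2pte[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB]       = 0,
	[_PAGE_CACHE_MODE_WC]       = _PAGE_PWT,
	[_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC]       = _PAGE_PCD | _PAGE_PWT,
};

/* Cache mode -> protection bits to OR into a pte value. */
static unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	return cachemode2pte[pcm];
}

/* Protection bits -> cache mode, by searching the table above. */
static enum page_cache_mode pgprot2cachemode(unsigned long prot)
{
	unsigned long bits = prot & _PAGE_CACHE_MASK;
	enum page_cache_mode pcm;

	for (pcm = 0; pcm < _PAGE_CACHE_MODE_NUM; pcm++)
		if (cachemode2pte[pcm] == bits)
			return pcm;
	return _PAGE_CACHE_MODE_UC_MINUS;	/* conservative fallback */
}

int main(void)
{
	/* Build a pte-like value with WC caching, then decode it again. */
	unsigned long prot = 0x1UL | cachemode2protval(_PAGE_CACHE_MODE_WC);

	printf("prot=%#lx decodes to cache mode %d\n",
	       prot, pgprot2cachemode(prot));
	return 0;
}

Keeping callers on the enum (as this patch does for is_new_memtype_allowed())
confines the bit-level encoding to the two conversion helpers at the
boundaries.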