
[Xen-changelog] [xen master] xen: arm: correct terminology for cache flush macros



commit 890f16edb44a7ea856cc02263a27d2ba10348127
Author:     Ian Campbell <ian.campbell@xxxxxxxxxx>
AuthorDate: Tue Feb 11 14:11:04 2014 +0000
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Wed Feb 12 12:49:10 2014 +0000

    xen: arm: correct terminology for cache flush macros
    
    The term "flush" is slightly ambiguous. The correct ARM term for this
    operation is clean, as opposed to clean+invalidate, for which we also now
    have a function.
    
    This is a pure rename, no functional change.
    
    Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    Acked-by: Julien Grall <julien.grall@xxxxxxxxxx>
---
 xen/arch/arm/guestcopy.c         |    2 +-
 xen/arch/arm/kernel.c            |    2 +-
 xen/arch/arm/mm.c                |   16 ++++++++--------
 xen/arch/arm/smpboot.c           |    2 +-
 xen/include/asm-arm/arm32/page.h |    2 +-
 xen/include/asm-arm/arm64/page.h |    2 +-
 xen/include/asm-arm/page.h       |   10 +++++-----
 7 files changed, 18 insertions(+), 18 deletions(-)
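
For context, the distinction the commit draws corresponds to two ARM cache
maintenance operations: clean (write dirty lines back to memory while keeping
them cached; DCCMVAC on arm32, "dc cvac" on arm64) and clean+invalidate
(write back and then discard the lines, so the next access refetches from
memory; "dc civac"). Below is a minimal standalone sketch of the two,
assuming GCC or Clang targeting aarch64 and a fixed 64-byte line size; the
fixed size and the function names are assumptions for illustration only
(Xen reads the real line size into cacheline_bytes).

    #include <stdint.h>

    #define CACHELINE_BYTES 64  /* assumed; Xen uses cacheline_bytes */

    /* Clean: push dirty lines in [p, p+size) out to memory but keep them
     * valid in the cache, e.g. so an observer with caches disabled sees
     * the data.  This is the operation clean_xen_dcache_va_range()
     * performs. */
    static inline void clean_dcache_range(void *p, unsigned long size)
    {
        char *va = p;
        char *end = va + size;

        asm volatile ("dsb sy" ::: "memory");  /* order earlier writes */
        for ( ; va < end; va += CACHELINE_BYTES )
            asm volatile ("dc cvac, %0" : : "r" (va) : "memory");
        asm volatile ("dsb sy" ::: "memory");  /* wait for completion */
    }

    /* Clean+invalidate: write dirty lines back and then drop them, so a
     * later read refetches from memory (e.g. after device DMA).  This is
     * the operation behind the clean+invalidate helper the commit
     * message mentions. */
    static inline void clean_and_invalidate_dcache_range(void *p,
                                                         unsigned long size)
    {
        char *va = p;
        char *end = va + size;

        asm volatile ("dsb sy" ::: "memory");
        for ( ; va < end; va += CACHELINE_BYTES )
            asm volatile ("dc civac, %0" : : "r" (va) : "memory");
        asm volatile ("dsb sy" ::: "memory");
    }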

diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index bd0a355..af0af6b 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -24,7 +24,7 @@ static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
         p += offset;
         memcpy(p, from, size);
         if ( flush_dcache )
-            flush_xen_dcache_va_range(p, size);
+            clean_xen_dcache_va_range(p, size);
 
         unmap_domain_page(p - offset);
         len -= size;
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index 6a5772b..1e3107d 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -58,7 +58,7 @@ void copy_from_paddr(void *dst, paddr_t paddr, unsigned long len, int attrindx)
 
         set_fixmap(FIXMAP_MISC, p, attrindx);
         memcpy(dst, src + s, l);
-        flush_xen_dcache_va_range(dst, l);
+        clean_xen_dcache_va_range(dst, l);
 
         paddr += l;
         dst += l;
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 146b2fb..308a798 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -484,13 +484,13 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     /* Clear the copy of the boot pagetables. Each secondary CPU
      * rebuilds these itself (see head.S) */
     memset(boot_pgtable, 0x0, PAGE_SIZE);
-    flush_xen_dcache(boot_pgtable);
+    clean_xen_dcache(boot_pgtable);
 #ifdef CONFIG_ARM_64
     memset(boot_first, 0x0, PAGE_SIZE);
-    flush_xen_dcache(boot_first);
+    clean_xen_dcache(boot_first);
 #endif
     memset(boot_second, 0x0, PAGE_SIZE);
-    flush_xen_dcache(boot_second);
+    clean_xen_dcache(boot_second);
 
     /* Break up the Xen mapping into 4k pages and protect them separately. */
     for ( i = 0; i < LPAE_ENTRIES; i++ )
@@ -528,7 +528,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
 
     /* Make sure it is clear */
     memset(this_cpu(xen_dommap), 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
-    flush_xen_dcache_va_range(this_cpu(xen_dommap),
+    clean_xen_dcache_va_range(this_cpu(xen_dommap),
                               DOMHEAP_SECOND_PAGES*PAGE_SIZE);
 #endif
 }
@@ -539,7 +539,7 @@ int init_secondary_pagetables(int cpu)
     /* Set init_ttbr for this CPU coming up. All CPUs share a single set of
      * pagetables, but rewrite it each time for consistency with 32 bit. */
     init_ttbr = (uintptr_t) xen_pgtable + phys_offset;
-    flush_xen_dcache(init_ttbr);
+    clean_xen_dcache(init_ttbr);
     return 0;
 }
 #else
@@ -574,15 +574,15 @@ int init_secondary_pagetables(int cpu)
         write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte);
     }
 
-    flush_xen_dcache_va_range(first, PAGE_SIZE);
-    flush_xen_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
+    clean_xen_dcache_va_range(first, PAGE_SIZE);
+    clean_xen_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
 
     per_cpu(xen_pgtable, cpu) = first;
     per_cpu(xen_dommap, cpu) = domheap;
 
     /* Set init_ttbr for this CPU coming up */
     init_ttbr = __pa(first);
-    flush_xen_dcache(init_ttbr);
+    clean_xen_dcache(init_ttbr);
 
     return 0;
 }
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index c53c765..a829957 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -378,7 +378,7 @@ int __cpu_up(unsigned int cpu)
 
     /* Open the gate for this CPU */
     smp_up_cpu = cpu_logical_map(cpu);
-    flush_xen_dcache(smp_up_cpu);
+    clean_xen_dcache(smp_up_cpu);
 
     rc = arch_cpu_up(cpu);
 
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index cb6add4..b8221ca 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -20,7 +20,7 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
 }
 
 /* Inline ASM to flush dcache on register R (may be an inline asm operand) */
-#define __flush_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
+#define __clean_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
 
 /* Inline ASM to clean and invalidate dcache on register R (may be an
  * inline asm operand) */
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index baf8903..3352821 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -15,7 +15,7 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
 }
 
 /* Inline ASM to flush dcache on register R (may be an inline asm operand) */
-#define __flush_xen_dcache_one(R) "dc cvac, %" #R ";"
+#define __clean_xen_dcache_one(R) "dc cvac, %" #R ";"
 
 /* Inline ASM to clean and invalidate dcache on register R (may be an
  * inline asm operand) */
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 5a371da..e00be9e 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -229,26 +229,26 @@ extern size_t cacheline_bytes;
 /* Function for flushing medium-sized areas.
  * if 'range' is large enough we might want to use model-specific
  * full-cache flushes. */
-static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
+static inline void clean_xen_dcache_va_range(void *p, unsigned long size)
 {
     void *end;
     dsb();           /* So the CPU issues all writes to the range */
     for ( end = p + size; p < end; p += cacheline_bytes )
-        asm volatile (__flush_xen_dcache_one(0) : : "r" (p));
+        asm volatile (__clean_xen_dcache_one(0) : : "r" (p));
     dsb();           /* So we know the flushes happen before continuing */
 }
 
 /* Macro for flushing a single small item.  The predicate is always
  * compile-time constant so this will compile down to 3 instructions in
  * the common case. */
-#define flush_xen_dcache(x) do {                                        \
+#define clean_xen_dcache(x) do {                                        \
     typeof(x) *_p = &(x);                                               \
     if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) )    \
-        flush_xen_dcache_va_range(_p, sizeof(x));                       \
+        clean_xen_dcache_va_range(_p, sizeof(x));                       \
     else                                                                \
         asm volatile (                                                  \
             "dsb sy;"   /* Finish all earlier writes */                 \
-            __flush_xen_dcache_one(0)                                   \
+            __clean_xen_dcache_one(0)                                   \
             "dsb sy;"   /* Finish flush before continuing */            \
             : : "r" (_p), "m" (*_p));                                   \
 } while (0)
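
As the comment above says, the size/alignment test in clean_xen_dcache()
is a compile-time constant, so the common case really does compile down to
three instructions (dsb; dc cvac; dsb). A small host-runnable sketch of just
that predicate follows; the MIN_CACHELINE_BYTES value and the variable names
are assumptions for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define MIN_CACHELINE_BYTES 32  /* assumed value for the sketch */

    /* Mirrors the macro's test: take the range loop if the object can
     * span more than one cache line, either because it is bigger than a
     * line or because its alignment is looser than its size (so it may
     * straddle a line boundary); otherwise one cache op is enough. */
    #define NEEDS_RANGE_LOOP(x) \
        (sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > __alignof__(x))

    int main(void)
    {
        uint64_t cpu_gate = 0;  /* like smp_up_cpu: 8 bytes, 8-aligned */
        char buf[256];          /* larger than any cache line */

        printf("cpu_gate -> %s\n",
               NEEDS_RANGE_LOOP(cpu_gate) ? "range loop" : "single op");
        printf("buf      -> %s\n",
               NEEDS_RANGE_LOOP(buf) ? "range loop" : "single op");
        return 0;
    }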
--
generated by git-patchbot for /home/xen/git/xen.git#master
