
[Xen-changelog] [xen-unstable] xen/arm: flush D-cache and I-cache when appropriate


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Wed, 21 Nov 2012 03:11:08 +0000
  • Delivery-date: Wed, 21 Nov 2012 03:11:18 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
# Date 1353329986 0
# Node ID ddb109120dbd67a166dc0fa512cb0ef54dc799ca
# Parent  9da9781cc3fb820b0aba0023c4cd29c8af223ea6
xen/arm: flush D-cache and I-cache when appropriate

- invalidate TLB after setting WXN;
- flush D-cache and I-cache after relocation;
- flush D-cache after writing to smp_up_cpu;
- flush I-cache after changing HTTBR;
- flush I-cache and branch predictor after writing Xen text PTEs.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Committed-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
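Each of the five bullets above is an instance of the same ARMv7 maintenance
recipe that the diff below applies: finish the store, clean the affected
D-cache line(s) to the point of coherency (PoC), and, where instructions or
translations changed, invalidate the I-cache/branch predictor/TLB and
resynchronize the pipeline with an ISB.  A minimal C sketch of that recipe
(publish_write() is a hypothetical name, not part of the patch; the CP15
encodings are the architectural ones behind Xen's CP32 macros):

    #include <stdint.h>

    /* Hedged sketch of the generic sequence; not part of the patch. */
    static inline void publish_write(volatile uint32_t *p, uint32_t val,
                                     int affects_code)
    {
        *p = val;
        asm volatile ("dsb" ::: "memory");          /* store fully issued  */
        asm volatile ("mcr p15, 0, %0, c7, c10, 1"  /* DCCMVAC: clean the
                                                       line to PoC         */
                      : : "r" (p) : "memory");
        asm volatile ("dsb" ::: "memory");          /* clean has completed */
        if ( affects_code )
        {
            asm volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); /* ICIALLU */
            asm volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0)); /* BPIALL  */
            asm volatile ("dsb; isb" ::: "memory"); /* refetch new state   */
        }
    }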


diff -r 9da9781cc3fb -r ddb109120dbd xen/arch/arm/head.S
--- a/xen/arch/arm/head.S       Mon Nov 19 09:48:22 2012 +0100
+++ b/xen/arch/arm/head.S       Mon Nov 19 12:59:46 2012 +0000
@@ -278,8 +278,12 @@ paging:
        ldr   r4, =boot_httbr        /* VA of HTTBR value stashed by CPU 0 */
        add   r4, r4, r10            /* PA of it */
        ldrd  r4, r5, [r4]           /* Actual value */
+       dsb
        mcrr  CP64(r4, r5, HTTBR)
+       dsb
+       isb
        mcr   CP32(r0, TLBIALLH)     /* Flush hypervisor TLB */
+       mcr   CP32(r0, ICIALLU)      /* Flush I-cache */
        mcr   CP32(r0, BPIALL)       /* Flush branch predictor */
        dsb                          /* Ensure completion of TLB+BP flush */
        isb
@@ -292,6 +296,8 @@ 1:  ldrex r1, [r0]               /*      
        teq   r2, #0
        bne   1b
        dsb
+       mcr   CP32(r0, DCCMVAC)      /* flush D-Cache */
+       dsb
 
        /* Here, the non-boot CPUs must wait again -- they're now running on
         * the boot CPU's pagetables so it's safe for the boot CPU to
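
The first hunk above brackets the HTTBR update: the DSB before the MCRR
guarantees the page tables themselves are written before the walker can use
them, the DSB/ISB pair afterwards makes the new translation base visible, and
the added ICIALLU discards instructions fetched through the old mapping.  The
second hunk cleans the line just written by the LDREX/STREX loop to PoC so
CPUs still running with caches off observe it.  A hedged C rendering of the
HTTBR sequence (switch_httbr() is a hypothetical name; the real code runs in
assembly before any C environment exists):

    #include <stdint.h>

    /* HTTBR is a 64-bit register, hence MCRR (low word, high word). */
    static void switch_httbr(uint64_t httbr)
    {
        asm volatile ("dsb" ::: "memory");       /* page-table writes done */
        asm volatile ("mcrr p15, 4, %0, %1, c2"  /* write HTTBR            */
                      : : "r" ((uint32_t) httbr),
                          "r" ((uint32_t) (httbr >> 32)));
        asm volatile ("dsb; isb" ::: "memory");  /* base switch visible    */
        asm volatile ("mcr p15, 4, %0, c8, c7, 0" : : "r" (0)); /* TLBIALLH */
        asm volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); /* ICIALLU  */
        asm volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0)); /* BPIALL   */
        asm volatile ("dsb; isb" ::: "memory");  /* flushes completed      */
    }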
diff -r 9da9781cc3fb -r ddb109120dbd xen/arch/arm/mm.c
--- a/xen/arch/arm/mm.c Mon Nov 19 09:48:22 2012 +0100
+++ b/xen/arch/arm/mm.c Mon Nov 19 12:59:46 2012 +0000
@@ -247,14 +247,13 @@ void __init setup_pagetables(unsigned lo
 
     /* Change pagetables to the copy in the relocated Xen */
     boot_httbr = (unsigned long) xen_pgtable + phys_offset;
-    asm volatile (
-        STORE_CP64(0, HTTBR)          /* Change translation base */
-        "dsb;"                        /* Ensure visibility of HTTBR update */
-        STORE_CP32(0, TLBIALLH)       /* Flush hypervisor TLB */
-        STORE_CP32(0, BPIALL)         /* Flush branch predictor */
-        "dsb;"                        /* Ensure completion of TLB+BP flush */
-        "isb;"
-        : : "r" (boot_httbr) : "memory");
+    flush_xen_dcache_va(&boot_httbr);
+    flush_xen_dcache_va_range((void*)dest_va, _end - _start);
+    flush_xen_text_tlb();
+
+    WRITE_CP64(boot_httbr, HTTBR); /* Change translation base */
+    dsb();                         /* Ensure visibility of HTTBR update */
+    flush_xen_text_tlb();
 
     /* Undo the temporary map */
     pte.bits = 0;
@@ -294,11 +293,12 @@ void __init setup_pagetables(unsigned lo
                            >> PAGE_SHIFT);
     pte.pt.table = 1;
     write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
-    /* Have changed a mapping used for .text. Flush everything for safety. */
-    flush_xen_text_tlb();
+    /* TLBFLUSH and ISB would be needed here, but wait until we set WXN */
 
     /* From now on, no mapping may be both writable and executable. */
     WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
+    /* Flush everything after setting WXN bit. */
+    flush_xen_text_tlb();
 }
 
 /* MMU setup for secondary CPUS (which already have paging enabled) */
@@ -306,6 +306,7 @@ void __cpuinit mmu_init_secondary_cpu(vo
 {
     /* From now on, no mapping may be both writable and executable. */
     WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
+    flush_xen_text_tlb();
 }
 
 /* Create Xen's mappings of memory.
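
Both mm.c hunks end the same way: once SCTLR_WXN is set in HSCTLR, TLB
entries formed before the change may still permit writable-and-executable
access, so the hypervisor TLB must be invalidated on every CPU afterwards
(which is why the flush moved from before the WXN write to after it).
Restated with the patch's own macros, the per-CPU sequence is:

    /* Per-CPU WXN sequence, condensed from setup_pagetables() and
     * mmu_init_secondary_cpu() above; no new names introduced. */
    WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
    flush_xen_text_tlb();   /* drop entries cached before WXN took effect;
                               the trailing ISB makes the policy immediate */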
diff -r 9da9781cc3fb -r ddb109120dbd xen/arch/arm/smpboot.c
--- a/xen/arch/arm/smpboot.c    Mon Nov 19 09:48:22 2012 +0100
+++ b/xen/arch/arm/smpboot.c    Mon Nov 19 12:59:46 2012 +0000
@@ -105,6 +105,7 @@ make_cpus_ready(unsigned int max_cpus, u
         /* Tell the next CPU to get ready */
         /* TODO: handle boards where CPUIDs are not contiguous */
         *gate = i;
+        flush_xen_dcache_va(gate);
         asm volatile("dsb; isb; sev");
         /* And wait for it to respond */
         while ( ready_cpus < i )
@@ -201,6 +202,9 @@ int __cpu_up(unsigned int cpu)
     /* Unblock the CPU.  It should be waiting in the loop in head.S
      * for an event to arrive when smp_up_cpu matches its cpuid. */
     smp_up_cpu = cpu;
+    /* We need to make sure that the change to smp_up_cpu is visible to
+     * secondary CPUs with the D-cache off. */
+    flush_xen_dcache_va(&smp_up_cpu);
     asm volatile("dsb; isb; sev");
 
     while ( !cpu_online(cpu) )
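
The secondary CPUs poll smp_up_cpu (and the gate in make_cpus_ready()) from
head.S with the MMU and caches still disabled, so their loads go straight to
memory and bypass the boot CPU's D-cache; without the clean to PoC, the SEV
could arrive while the new value still sits dirty in a cache line.  A hedged
sketch of the two sides (the consumer really runs in assembly; my_cpuid is a
hypothetical stand-in for the value compared in head.S):

    /* Producer: boot CPU, as in __cpu_up() above. */
    smp_up_cpu = cpu;                  /* tell CPU 'cpu' to boot          */
    flush_xen_dcache_va(&smp_up_cpu);  /* push the value out to PoC,
                                          where a cache-off CPU will look */
    asm volatile ("dsb; isb; sev");    /* then wake the WFE loop          */

    /* Consumer: C rendering of the head.S wait loop, which executes
     * with MMU and D-cache off. */
    while ( smp_up_cpu != my_cpuid )
        asm volatile ("wfe");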
diff -r 9da9781cc3fb -r ddb109120dbd xen/include/asm-arm/page.h
--- a/xen/include/asm-arm/page.h        Mon Nov 19 09:48:22 2012 +0100
+++ b/xen/include/asm-arm/page.h        Mon Nov 19 12:59:46 2012 +0000
@@ -228,27 +228,74 @@ static inline lpae_t mfn_to_p2m_entry(un
     return e;
 }
 
-/* Write a pagetable entry */
+/* Write a pagetable entry.
+ *
+ * If the table entry is changing a text mapping, it is the responsibility
+ * of the caller to issue an ISB after write_pte.
+ */
 static inline void write_pte(lpae_t *p, lpae_t pte)
 {
     asm volatile (
+        /* Ensure any writes have completed with the old mappings. */
+        "dsb;"
         /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
         "strd %0, %H0, [%1];"
+        "dsb;"
         /* Push this cacheline to the PoC so the rest of the system sees it. */
         STORE_CP32(1, DCCMVAC)
+        /* Ensure that the data flush is completed before proceeding */
+        "dsb;"
         : : "r" (pte.bits), "r" (p) : "memory");
 }
 
+
+/* Function for flushing medium-sized areas.
+ * If the range is large enough we might want to use model-specific
+ * full-cache flushes. */
+static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
+{
+    int cacheline_bytes  = READ_CP32(CCSIDR);
+    void *end;
+    dsb();           /* So the CPU issues all writes to the range */
+    for ( end = p + size; p < end; p += cacheline_bytes )
+        WRITE_CP32((uint32_t) p, DCCMVAC);
+    dsb();           /* So we know the flushes happen before continuing */
+}
+
+
+/* Macro for flushing a single small item.  The predicate is always
+ * compile-time constant so this will compile down to 3 instructions in
+ * the common case.  Make sure to call it with the correct type of
+ * pointer! */
+#define flush_xen_dcache_va(p) do {                                     \
+    int cacheline_bytes  = READ_CP32(CCSIDR);                           \
+    typeof(p) _p = (p);                                                 \
+    if ( ((unsigned long)_p & ~(cacheline_bytes - 1)) !=                \
+        (((unsigned long)_p + (sizeof *_p)) & ~(cacheline_bytes - 1)) ) \
+        flush_xen_dcache_va_range(_p, sizeof *_p);                      \
+    else                                                                \
+        asm volatile (                                                  \
+            "dsb;"   /* Finish all earlier writes */                    \
+            STORE_CP32(0, DCCMVAC)                                      \
+            "dsb;"   /* Finish flush before continuing */               \
+            : : "r" (_p), "m" (*_p));                                   \
+} while (0)
+
+
 /*
  * Flush all hypervisor mappings from the TLB and branch predictor.
- * This is needed after changing Xen code mappings. 
+ * This is needed after changing Xen code mappings.
+ *
+ * The caller needs to issue the necessary DSB and D-cache flushes
+ * before calling flush_xen_text_tlb.
  */
 static inline void flush_xen_text_tlb(void)
 {
     register unsigned long r0 asm ("r0");
     asm volatile (
-        "dsb;"                        /* Ensure visibility of PTE writes */
+        "isb;"                        /* Ensure synchronization with previous 
changes to text */
         STORE_CP32(0, TLBIALLH)       /* Flush hypervisor TLB */
+        STORE_CP32(0, ICIALLU)        /* Flush I-cache */
         STORE_CP32(0, BPIALL)         /* Flush branch predictor */
         "dsb;"                        /* Ensure completion of TLB+BP flush */
         "isb;"
