
[Xen-changelog] [IA64] cache flush



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 3b3a5588baca92e5518a5c890156fae7fba7b0b6
# Parent  ddc279c915023e2ec9489dd7014ba88062db904b
[IA64] cache flush

domain_cache_flush added.
SAL_CACHE_FLUSH implemented.

Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>

diff -r ddc279c91502 -r 3b3a5588baca xen/arch/ia64/linux-xen/setup.c
--- a/xen/arch/ia64/linux-xen/setup.c   Fri Mar 31 14:04:16 2006 -0700
+++ b/xen/arch/ia64/linux-xen/setup.c   Mon Apr 03 08:33:35 2006 -0600
@@ -104,6 +104,11 @@ extern void early_cmdline_parse(char **)
  */
 #define        I_CACHE_STRIDE_SHIFT    5       /* Safest way to go: 32 bytes by 32 bytes */
 unsigned long ia64_i_cache_stride_shift = ~0;
+
+#ifdef XEN
+#define D_CACHE_STRIDE_SHIFT   5       /* Safest.  */
+unsigned long ia64_d_cache_stride_shift = ~0;
+#endif
 
 /*
  * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
@@ -718,6 +723,9 @@ get_max_cacheline_size (void)
                 max = SMP_CACHE_BYTES;
                /* Safest setup for "flush_icache_range()" */
                ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
+#ifdef XEN
+               ia64_d_cache_stride_shift = D_CACHE_STRIDE_SHIFT;
+#endif
                goto out;
         }
 
@@ -733,6 +741,10 @@ get_max_cacheline_size (void)
                        cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        cci.pcci_unified = 1;
                }
+#ifdef XEN
+               if (cci.pcci_stride < ia64_d_cache_stride_shift)
+                       ia64_d_cache_stride_shift = cci.pcci_stride;
+#endif
                line_size = 1 << cci.pcci_line_size;
                if (line_size > max)
                        max = line_size;
@@ -754,6 +766,11 @@ get_max_cacheline_size (void)
   out:
        if (max > ia64_max_cacheline_size)
                ia64_max_cacheline_size = max;
+#ifdef XEN
+       if (ia64_d_cache_stride_shift > ia64_i_cache_stride_shift)
+               ia64_d_cache_stride_shift = ia64_i_cache_stride_shift;
+#endif
+
 }
 
 /*
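
The setup.c hunks above derive ia64_d_cache_stride_shift by keeping the
smallest pcci_stride that PAL reports over all cache levels, then clamping
it so it never exceeds the i-cache stride.  A minimal sketch of that
selection logic, with hypothetical per-level strides standing in for the
PAL_CACHE_CONFIG_INFO results:

#include <stdio.h>

/* Hypothetical stand-ins: in the patch these values come from
 * ia64_pal_cache_config_info() for each cache level. */
static unsigned long pcci_stride[] = { 6, 5, 7 };  /* log2 of flush stride */
static unsigned long i_cache_stride_shift = 5;     /* from the i-cache path */

int main(void)
{
	unsigned long d_shift = ~0UL;  /* "no information yet", as in the patch */
	unsigned int i;

	/* Keep the smallest stride seen over all cache levels. */
	for (i = 0; i < sizeof(pcci_stride) / sizeof(pcci_stride[0]); i++)
		if (pcci_stride[i] < d_shift)
			d_shift = pcci_stride[i];

	/* Never flush with a larger stride than the i-cache routine uses;
	 * a smaller stride is always safe, only slower. */
	if (d_shift > i_cache_stride_shift)
		d_shift = i_cache_stride_shift;

	printf("d-cache stride shift: %lu (%lu-byte lines)\n",
	       d_shift, 1UL << d_shift);
	return 0;
}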
diff -r ddc279c91502 -r 3b3a5588baca xen/arch/ia64/xen/Makefile
--- a/xen/arch/ia64/xen/Makefile        Fri Mar 31 14:04:16 2006 -0700
+++ b/xen/arch/ia64/xen/Makefile        Mon Apr 03 08:33:35 2006 -0600
@@ -24,6 +24,7 @@ obj-y += xenmisc.o
 obj-y += xenmisc.o
 obj-y += xensetup.o
 obj-y += xentime.o
+obj-y += flushd.o
 
 obj-$(crash_debug) += gdbstub.o
 
diff -r ddc279c91502 -r 3b3a5588baca xen/arch/ia64/xen/dom_fw.c
--- a/xen/arch/ia64/xen/dom_fw.c        Fri Mar 31 14:04:16 2006 -0700
+++ b/xen/arch/ia64/xen/dom_fw.c        Mon Apr 03 08:33:35 2006 -0600
@@ -176,7 +176,9 @@ sal_emulator (long index, unsigned long 
                printf("*** CALLED SAL_MC_SET_PARAMS.  IGNORED...\n");
                break;
            case SAL_CACHE_FLUSH:
-               printf("*** CALLED SAL_CACHE_FLUSH.  IGNORED...\n");
+               /*  The best we can do is to flush the whole domain with fc.  */
+               domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
+               status = 0;
                break;
            case SAL_CACHE_INIT:
                printf("*** CALLED SAL_CACHE_INIT.  IGNORED...\n");
diff -r ddc279c91502 -r 3b3a5588baca xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Fri Mar 31 14:04:16 2006 -0700
+++ b/xen/arch/ia64/xen/domain.c        Mon Apr 03 08:33:35 2006 -0600
@@ -674,6 +674,61 @@ tryagain:
        return 0;
 }
 
+/* Flush the caches of domain d.  */
+void domain_cache_flush (struct domain *d, int sync_only)
+{
+       struct mm_struct *mm = d->arch.mm;
+       pgd_t *pgd = mm->pgd;
+       unsigned long maddr;
+       int i, j, k, l;
+       int nbr_page = 0;
+       void (*flush_func)(unsigned long start, unsigned long end);
+       extern void flush_dcache_range (unsigned long, unsigned long);
+
+       if (sync_only)
+               flush_func = &flush_icache_range;
+       else
+               flush_func = &flush_dcache_range;
+
+#ifdef CONFIG_DOMAIN0_CONTIGUOUS
+       if (d == dom0) {
+               /* This is not fully correct (because of holes), but it should
+                  be enough for now.  */
+               (*flush_func)(__va_ul (dom0_start),
+                             __va_ul (dom0_start + dom0_size));
+               return;
+       }
+#endif
+       for (i = 0; i < PTRS_PER_PGD; pgd++, i++) {
+               pud_t *pud;
+               if (!pgd_present(*pgd))
+                       continue;
+               pud = pud_offset(pgd, 0);
+               for (j = 0; j < PTRS_PER_PUD; pud++, j++) {
+                       pmd_t *pmd;
+                       if (!pud_present(*pud))
+                               continue;
+                       pmd = pmd_offset(pud, 0);
+                       for (k = 0; k < PTRS_PER_PMD; pmd++, k++) {
+                               pte_t *pte;
+                               if (!pmd_present(*pmd))
+                                       continue;
+                               pte = pte_offset_map(pmd, 0);
+                               for (l = 0; l < PTRS_PER_PTE; pte++, l++) {
+                                       if (!pte_present(*pte))
+                                               continue;
+                                       /* Machine address of the page, as a Xen VA.  */
+                                       maddr = __va_ul (pte_val(*pte)
+                                                        & _PAGE_PPN_MASK);
+                                       (*flush_func)(maddr, maddr + PAGE_SIZE);
+                                       nbr_page++;
+                               }
+                       }
+               }
+       }
+       //printf ("domain_cache_flush: %d %d pages\n", d->domain_id, nbr_page);
+}
+
 // FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
 #if 1
 unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
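
domain_cache_flush() walks all four page-table levels and flushes every
present page through its Xen mapping, one fc per cache line, so a full
(non-sync) flush is expensive.  A back-of-the-envelope sketch of the
cost, using hypothetical figures:

#include <stdio.h>

int main(void)
{
	/* Hypothetical figures, for illustration only. */
	unsigned long page_size    = 16UL * 1024; /* 16 KB pages          */
	unsigned long stride_shift = 5;           /* 32-byte flush stride */
	unsigned long nbr_page     = 65536;       /* e.g. a 1 GB domain   */

	unsigned long fc_per_page = page_size >> stride_shift;
	printf("%lu fc instructions per page, %lu in total\n",
	       fc_per_page, fc_per_page * nbr_page);
	return 0;
}

With these numbers that is 512 fc instructions per page and over 33
million for the whole domain, consistent with the "best we can do"
comment in the SAL emulation.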
diff -r ddc279c91502 -r 3b3a5588baca xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Fri Mar 31 14:04:16 2006 -0700
+++ b/xen/include/asm-ia64/domain.h     Mon Apr 03 08:33:35 2006 -0600
@@ -12,6 +12,11 @@
 #include <xen/cpumask.h>
 
 extern void domain_relinquish_resources(struct domain *);
+
+/* Flush the caches of domain d.
+   If sync_only is true, only synchronize the I & D caches;
+   otherwise, flush and invalidate the caches.  */
+extern void domain_cache_flush (struct domain *d, int sync_only);
 
 struct arch_domain {
     struct mm_struct *mm;
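
A hypothetical usage sketch of the new interface (assumes a struct
domain *d in scope; in this patch the only caller is the SAL_CACHE_FLUSH
emulation):

	/* Synchronize I & D caches only, e.g. after writing code pages: */
	domain_cache_flush(d, 1);   /* flush_icache_range path */

	/* Flush and invalidate, as for SAL_CACHE_FLUSH types 1-3: */
	domain_cache_flush(d, 0);   /* flush_dcache_range path */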
diff -r ddc279c91502 -r 3b3a5588baca xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h       Fri Mar 31 14:04:16 2006 -0700
+++ b/xen/include/asm-ia64/vhpt.h       Mon Apr 03 08:33:35 2006 -0600
@@ -127,6 +127,7 @@ extern void vhpt_insert (unsigned long v
 extern void vhpt_insert (unsigned long vadr, unsigned long ptr,
                         unsigned logps);
 extern void vhpt_flush(void);
+extern void smp_vhpt_flush_all(void);
 
 /* Currently the VHPT is allocated per CPU.  */
 DECLARE_PER_CPU (unsigned long, vhpt_paddr);
diff -r ddc279c91502 -r 3b3a5588baca xen/include/asm-ia64/xenpage.h
--- a/xen/include/asm-ia64/xenpage.h    Fri Mar 31 14:04:16 2006 -0700
+++ b/xen/include/asm-ia64/xenpage.h    Mon Apr 03 08:33:35 2006 -0600
@@ -55,6 +55,9 @@ static inline int get_order_from_pages(u
 #define __pa(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
 #define __va(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
 
+/* It is sometimes useful to get the result as an unsigned long.  */
+#define __va_ul(x)     ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;})
+
 #undef PAGE_OFFSET
 #define PAGE_OFFSET    __IA64_UL_CONST(0xf000000000000000)
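
A small illustration of where the unsigned long form helps (the machine
address below is made up): range interfaces such as flush_dcache_range()
take unsigned long arguments, so __va_ul() saves the cast that __va()'s
pointer result would need:

	unsigned long maddr = 0x4000000UL;        /* hypothetical address */

	void *p          = __va(maddr);           /* pointer result */
	unsigned long va = __va_ul(maddr);        /* same value as an integer */

	flush_dcache_range(va, va + PAGE_SIZE);   /* no cast needed */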
 
diff -r ddc279c91502 -r 3b3a5588baca xen/arch/ia64/xen/flushd.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/xen/flushd.S        Mon Apr 03 08:33:35 2006 -0600
@@ -0,0 +1,62 @@
+/*
+ * Cache flushing routines.
+ *
+ * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * 05/28/05 Zoltan Menyhart    Dynamic stride size
+ * 03/31/06 Tristan Gingold     Copied and modified for the d-cache.
+ */
+
+#include <asm/asmmacro.h>
+
+
+       /*
+        * flush_dcache_range(start,end)
+        *
+        *      Flush the data cache for the given range.
+        *
+        *      Must deal with range from start to end-1 but nothing else (need to
+        *      be careful not to touch addresses that may be unmapped).
+        *
+        *      Note: "in0" and "in1" are preserved for debugging purposes.
+        */
+GLOBAL_ENTRY(flush_dcache_range)
+
+       .prologue
+       alloc   r2=ar.pfs,2,0,0,0
+       movl    r3=ia64_d_cache_stride_shift
+       mov     r21=1
+       ;;
+       ld8     r20=[r3]                // r20: stride shift
+       sub     r22=in1,r0,1            // last byte address
+       ;;
+       shr.u   r23=in0,r20             // start / (stride size)
+       shr.u   r22=r22,r20             // (last byte address) / (stride size)
+       shl     r21=r21,r20             // r21: stride size of the d-cache(s)
+       ;;
+       sub     r8=r22,r23              // number of strides - 1
+       shl     r24=r23,r20             // r24: addresses for "fc" =
+                                       //      "start" rounded down to stride 
boundary
+       .save   ar.lc,r3
+       mov     r3=ar.lc                // save ar.lc
+       ;;
+
+       .body
+       mov     ar.lc=r8
+       ;;
+       /*
+        * 32 byte aligned loop, even number of (actually 2) bundles
+        */
+.Loop: fc      r24                     // issuable on M0 only
+       add     r24=r21,r24             // we flush "stride size" bytes per iteration
+       nop.i   0
+       br.cloop.sptk.few .Loop
+       ;;
+       sync.i
+       ;;
+       srlz.i
+       ;;
+       mov     ar.lc=r3                // restore ar.lc
+       br.ret.sptk.many rp
+END(flush_dcache_range)
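
For reference, a rough C equivalent of the assembly loop above (a sketch
only: the real routine must issue fc from assembly and finish with
sync.i/srlz.i; ia64_fc(), ia64_sync_i() and ia64_srlz_i() are assumed to
be the usual Linux ia64 intrinsics):

extern unsigned long ia64_d_cache_stride_shift;

void flush_dcache_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long stride = 1UL << ia64_d_cache_stride_shift;
	unsigned long addr = start & ~(stride - 1);  /* round down to a line */

	/* One fc per stride-sized line, covering start .. end-1. */
	for (; addr < end; addr += stride)
		ia64_fc((void *)addr);

	ia64_sync_i();  /* make the flushes visible to instruction fetch */
	ia64_srlz_i();  /* serialize the instruction stream */
}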
