
[Xen-changelog] [IA64] vhpt clean-up



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 77ccce98ddef972ef187eea09583f8545b5858c3
# Parent  06e5c5599147cd34caab554ca71ab03e52c85e6f
[IA64] vhpt clean-up

Create a clean interface for vhpt/tlb.
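
Flushes are either local (current vcpu/cpu only) or domain-wide, and either
all-flush or range-only; flushing the vTLB means flushing the VCPU virtual
TLB, the machine TLB and the machine VHPT.  The new interface, declared in
xen/include/asm-ia64/tlbflush.h, boils down to the entry points below
(prototypes taken from the new header; the one-line comments summarize the
header's own documentation):

    /* Local all-flush of the vTLB: the VCPU's dtlb/itlb TR entries,
       the per-CPU VHPT and the machine TLB.  */
    void vcpu_flush_vtlb_all (void);

    /* Local range flush of the machine TLB and VHPT only
       (the VCPU virtual TLB is left alone).  */
    void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range);

    /* Domain-wide all-flush of the vTLB.  */
    void domain_flush_vtlb_all (void);

    /* Domain-wide range flush of the vTLB.  */
    void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range);

    /* Final vTLB flush on every dirty cpu, for domain destruction.  */
    void domain_flush_destroy (struct domain *d);

    /* Flush the vTLB on the cpus set in mask for the current domain.  */
    void flush_tlb_mask(cpumask_t mask);

    /* Flush the local machine TLB only.  */
    void local_flush_tlb_all (void);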

Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
---
 xen/include/asm-ia64/flushtlb.h               |    9 -
 xen/include/asm-ia64/linux-xen/asm/tlbflush.h |  119 --------------
 xen/arch/ia64/linux-xen/smp.c                 |   32 ---
 xen/arch/ia64/xen/domain.c                    |   20 --
 xen/arch/ia64/xen/hyperprivop.S               |    2 
 xen/arch/ia64/xen/process.c                   |    6 
 xen/arch/ia64/xen/vcpu.c                      |   46 -----
 xen/arch/ia64/xen/vhpt.c                      |  214 +++++++++++++++++---------
 xen/arch/ia64/xen/xenmisc.c                   |    3 
 xen/include/asm-ia64/mm.h                     |    2 
 xen/include/asm-ia64/tlbflush.h               |   37 ++++
 xen/include/asm-ia64/vcpu.h                   |    5 
 xen/include/asm-ia64/vhpt.h                   |   11 -
 13 files changed, 193 insertions(+), 313 deletions(-)

diff -r 06e5c5599147 -r 77ccce98ddef xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c     Tue May 16 09:05:36 2006 -0600
+++ b/xen/arch/ia64/linux-xen/smp.c     Tue May 16 10:35:58 2006 -0600
@@ -53,28 +53,6 @@
 #endif
 
 #ifdef XEN
-// FIXME: MOVE ELSEWHERE
-//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-void flush_tlb_mask(cpumask_t mask)
-{
-    int cpu;
-
-    cpu = smp_processor_id();
-    if (cpu_isset (cpu, mask)) {
-        cpu_clear(cpu, mask);
-       local_flush_tlb_all ();
-    }
-
-#ifdef CONFIG_SMP
-    if (cpus_empty(mask))
-        return;
-
-    for (cpu = 0; cpu < NR_CPUS; ++cpu)
-        if (cpu_isset(cpu, mask))
-          smp_call_function_single
-            (cpu, (void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
-#endif
-}
 //#if CONFIG_SMP || IA64
 #if CONFIG_SMP
 //Huh? This seems to be used on ia64 even if !CONFIG_SMP
@@ -276,7 +254,6 @@ smp_send_reschedule (int cpu)
 {
        platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
-#endif
 
 void
 smp_flush_tlb_all (void)
@@ -284,15 +261,6 @@ smp_flush_tlb_all (void)
        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
 }
 
-#ifdef XEN
-void
-smp_vhpt_flush_all(void)
-{
-       on_each_cpu((void (*)(void *))vhpt_flush, NULL, 1, 1);
-}
-#endif
-
-#ifndef XEN
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
diff -r 06e5c5599147 -r 77ccce98ddef xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Tue May 16 09:05:36 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c        Tue May 16 10:35:58 2006 -0600
@@ -89,13 +89,9 @@ void arch_domain_destroy(struct domain *
        if (d->shared_info != NULL)
                free_xenheap_page(d->shared_info);
 
+       domain_flush_destroy (d);
+
        deallocate_rid_range(d);
-
-       /* It is really good in this? */
-       flush_tlb_all();
-
-       /* It is really good in this? */
-       vhpt_flush_all();
 }
 
 static void default_idle(void)
@@ -873,17 +869,7 @@ domain_page_flush(struct domain* d, unsi
 domain_page_flush(struct domain* d, unsigned long mpaddr,
                   unsigned long old_mfn, unsigned long new_mfn)
 {
-    struct vcpu* v;
-    //XXX SMP
-    for_each_vcpu(d, v) {
-        vcpu_purge_tr_entry(&v->arch.dtlb);
-        vcpu_purge_tr_entry(&v->arch.itlb);
-    }
-
-    // flush vhpt
-    vhpt_flush();
-    // flush tlb
-    flush_tlb_all();
+    domain_flush_vtlb_all (d);
 }
 #endif
 
diff -r 06e5c5599147 -r 77ccce98ddef xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S   Tue May 16 09:05:36 2006 -0600
+++ b/xen/arch/ia64/xen/hyperprivop.S   Tue May 16 10:35:58 2006 -0600
@@ -47,7 +47,7 @@
 #endif
 
 #ifdef CONFIG_SMP
-#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
+//#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
 #undef FAST_PTC_GA
 #endif
 
diff -r 06e5c5599147 -r 77ccce98ddef xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Tue May 16 09:05:36 2006 -0600
+++ b/xen/arch/ia64/xen/process.c       Tue May 16 10:35:58 2006 -0600
@@ -307,11 +307,7 @@ void ia64_do_page_fault (unsigned long a
                if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
                        /* dtlb has been purged in-between.  This dtlb was
                           matching.  Undo the work.  */
-#ifdef VHPT_GLOBAL
-                       vhpt_flush_address (address, 1);
-#endif
-                       ia64_ptcl(address, 1<<2);
-                       ia64_srlz_i();
+                       vcpu_flush_tlb_vhpt_range (address, 1);
                        goto again;
                }
                return;
diff -r 06e5c5599147 -r 77ccce98ddef xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Tue May 16 09:05:36 2006 -0600
+++ b/xen/arch/ia64/xen/vcpu.c  Tue May 16 10:35:58 2006 -0600
@@ -28,8 +28,6 @@ extern void setfpreg (unsigned long regn
 
 extern void panic_domain(struct pt_regs *, const char *, ...);
 extern unsigned long translate_domain_mpaddr(unsigned long);
-extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
-
 
 typedef        union {
        struct ia64_psr ia64_psr;
@@ -1702,11 +1700,6 @@ IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT6
  VCPU translation register access routines
 **************************************************************************/
 
-void vcpu_purge_tr_entry(TR_ENTRY *trp)
-{
-       trp->pte.val = 0;
-}
-
 static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
 {
        UINT64 ps;
@@ -1867,21 +1860,13 @@ IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vad
        return fault;
 }
 
-int ptce_count = 0;
 IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
 {
        // Note that this only needs to be called once, i.e. the
        // architected loop to purge the entire TLB, should use
        //  base = stride1 = stride2 = 0, count0 = count 1 = 1
 
-       // just invalidate the "whole" tlb
-       vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
-       vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
-
-#ifdef VHPT_GLOBAL
-       vhpt_flush();   // FIXME: This is overdoing it
-#endif
-       local_flush_tlb_all();
+       vcpu_flush_vtlb_all ();
 
        return IA64_NO_FAULT;
 }
@@ -1899,33 +1884,8 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 
        // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
 //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
 
-#ifdef CONFIG_XEN_SMP
-       struct domain *d = vcpu->domain;
-       struct vcpu *v;
-
-       for_each_vcpu (d, v) {
-               if (v == vcpu)
-                       continue;
-
-               /* Purge TC entries.
-                  FIXME: clear only if match.  */
-               vcpu_purge_tr_entry(&PSCBX(v,dtlb));
-               vcpu_purge_tr_entry(&PSCBX(v,itlb));
-
-#ifdef VHPT_GLOBAL
-               /* Invalidate VHPT entries.  */
-               vhpt_flush_address_remote (v->processor, vadr, addr_range);
-#endif
-       }
-#endif
-
-#ifdef VHPT_GLOBAL
-       vhpt_flush_address(vadr,addr_range);
-#endif
-       ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
-       /* Purge tc.  */
-       vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
-       vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+       domain_flush_vtlb_range (vcpu->domain, vadr, addr_range);
+
        return IA64_NO_FAULT;
 }
 
diff -r 06e5c5599147 -r 77ccce98ddef xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Tue May 16 09:05:36 2006 -0600
+++ b/xen/arch/ia64/xen/vhpt.c  Tue May 16 10:35:58 2006 -0600
@@ -12,32 +12,31 @@
 #include <asm/system.h>
 #include <asm/pgalloc.h>
 #include <asm/page.h>
-#include <asm/dma.h>
 #include <asm/vhpt.h>
+#include <asm/vcpu.h>
+
+/* Defined in tlb.c  */
+extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
 
 extern long running_on_sim;
 
 DEFINE_PER_CPU (unsigned long, vhpt_paddr);
 DEFINE_PER_CPU (unsigned long, vhpt_pend);
 
-void vhpt_flush(void)
-{
-       struct vhpt_lf_entry *v =__va(__ia64_per_cpu_var(vhpt_paddr));
-       int i;
-#if 0
-static int firsttime = 2;
-
-if (firsttime) firsttime--;
-else {
-printf("vhpt_flush: *********************************************\n");
-printf("vhpt_flush: *********************************************\n");
-printf("vhpt_flush: *********************************************\n");
-printf("vhpt_flush: flushing vhpt (seems to crash at rid wrap?)...\n");
-printf("vhpt_flush: *********************************************\n");
-printf("vhpt_flush: *********************************************\n");
-printf("vhpt_flush: *********************************************\n");
-}
-#endif
+static void vhpt_flush(void)
+{
+       struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)VHPT_ADDR;
+       int i;
+
+       for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
+               v->ti_tag = INVALID_TI_TAG;
+}
+
+static void vhpt_erase(void)
+{
+       struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)VHPT_ADDR;
+       int i;
+
        for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) {
                v->itir = 0;
                v->CChain = 0;
@@ -47,51 +46,6 @@ printf("vhpt_flush: ********************
        // initialize cache too???
 }
 
-#ifdef VHPT_GLOBAL
-void vhpt_flush_address(unsigned long vadr, unsigned long addr_range)
-{
-       struct vhpt_lf_entry *vlfe;
-
-       if ((vadr >> 61) == 7) {
-               // no vhpt for region 7 yet, see vcpu_itc_no_srlz
-               printf("vhpt_flush_address: region 7, spinning...\n");
-               while(1);
-       }
-#if 0
-       // this only seems to occur at shutdown, but it does occur
-       if ((!addr_range) || addr_range & (addr_range - 1)) {
-               printf("vhpt_flush_address: weird range, spinning...\n");
-               while(1);
-       }
-//printf("************** vhpt_flush_address(%p,%p)\n",vadr,addr_range);
-#endif
-       while ((long)addr_range > 0) {
-               vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
-               // FIXME: for now, just blow it away even if it belongs to
-               // another domain.  Later, use ttag to check for match
-//if (!(vlfe->ti_tag & INVALID_TI_TAG)) {
-//printf("vhpt_flush_address: blowing away valid tag for vadr=%p\n",vadr);
-//}
-               vlfe->ti_tag |= INVALID_TI_TAG;
-               addr_range -= PAGE_SIZE;
-               vadr += PAGE_SIZE;
-       }
-}
-
-void vhpt_flush_address_remote(int cpu,
-                              unsigned long vadr, unsigned long addr_range)
-{
-       while ((long)addr_range > 0) {
-               /* Get the VHPT entry.  */
-               unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
-               volatile struct vhpt_lf_entry *v;
-               v =__va(per_cpu(vhpt_paddr, cpu) + off);
-               v->ti_tag = INVALID_TI_TAG;
-               addr_range -= PAGE_SIZE;
-               vadr += PAGE_SIZE;
-       }
-}
-#endif
 
 static void vhpt_map(unsigned long pte)
 {
@@ -147,17 +101,11 @@ void vhpt_multiple_insert(unsigned long 
 
 void vhpt_init(void)
 {
-       unsigned long vhpt_total_size, vhpt_alignment;
        unsigned long paddr, pte;
        struct page_info *page;
 #if !VHPT_ENABLED
        return;
 #endif
-       // allocate a huge chunk of physical memory.... how???
-       vhpt_total_size = 1 << VHPT_SIZE_LOG2;  // 4MB, 16MB, 64MB, or 256MB
-       vhpt_alignment = 1 << VHPT_SIZE_LOG2;   // 4MB, 16MB, 64MB, or 256MB
-       printf("vhpt_init: vhpt size=0x%lx, align=0x%lx\n",
-               vhpt_total_size, vhpt_alignment);
        /* This allocation only holds true if vhpt table is unique for
         * all domains. Or else later new vhpt table should be allocated
         * from domain heap when each domain is created. Assume xen buddy
@@ -167,17 +115,135 @@ void vhpt_init(void)
        if (!page)
                panic("vhpt_init: can't allocate VHPT!\n");
        paddr = page_to_maddr(page);
+       if (paddr & ((1 << VHPT_SIZE_LOG2) - 1))
+               panic("vhpt_init: bad VHPT alignment!\n");
        __get_cpu_var(vhpt_paddr) = paddr;
-       __get_cpu_var(vhpt_pend) = paddr + vhpt_total_size - 1;
+       __get_cpu_var(vhpt_pend) = paddr + (1 << VHPT_SIZE_LOG2) - 1;
        printf("vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
                paddr, __get_cpu_var(vhpt_pend));
        pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
        vhpt_map(pte);
        ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                VHPT_ENABLED);
-       vhpt_flush();
-}
-
+       vhpt_erase();
+}
+
+
+void vcpu_flush_vtlb_all (void)
+{
+       struct vcpu *v = current;
+
+       /* First VCPU tlb.  */
+       vcpu_purge_tr_entry(&PSCBX(v,dtlb));
+       vcpu_purge_tr_entry(&PSCBX(v,itlb));
+
+       /* Then VHPT.  */
+       vhpt_flush ();
+
+       /* Then mTLB.  */
+       local_flush_tlb_all ();
+
+	/* We could clear bit in d->domain_dirty_cpumask only if domain d is
+          not running on this processor.  There is currently no easy way to
+          check this.  */
+}
+
+void domain_flush_vtlb_all (void)
+{
+       int cpu = smp_processor_id ();
+       struct vcpu *v;
+
+       for_each_vcpu (current->domain, v)
+               if (v->processor == cpu)
+                       vcpu_flush_vtlb_all ();
+               else
+                       smp_call_function_single
+                               (v->processor,
+                                (void(*)(void *))vcpu_flush_vtlb_all,
+                                NULL,1,1);
+}
+
+static void cpu_flush_vhpt_range (int cpu, u64 vadr, u64 addr_range)
+{
+       void *vhpt_base = __va(per_cpu(vhpt_paddr, cpu));
+
+       while ((long)addr_range > 0) {
+               /* Get the VHPT entry.  */
+               unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
+               volatile struct vhpt_lf_entry *v;
+               v = vhpt_base + off;
+               v->ti_tag = INVALID_TI_TAG;
+               addr_range -= PAGE_SIZE;
+               vadr += PAGE_SIZE;
+       }
+}
+
+void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range)
+{
+       cpu_flush_vhpt_range (current->processor, vadr, 1UL << log_range);
+       ia64_ptcl(vadr, log_range << 2);
+       ia64_srlz_i();
+}
+
+void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
+{
+       struct vcpu *v;
+
+#if 0
+       // this only seems to occur at shutdown, but it does occur
+       if ((!addr_range) || addr_range & (addr_range - 1)) {
+               printf("vhpt_flush_address: weird range, spinning...\n");
+               while(1);
+       }
+#endif
+
+       for_each_vcpu (d, v) {
+               /* Purge TC entries.
+                  FIXME: clear only if match.  */
+               vcpu_purge_tr_entry(&PSCBX(v,dtlb));
+               vcpu_purge_tr_entry(&PSCBX(v,itlb));
+
+               /* Invalidate VHPT entries.  */
+               cpu_flush_vhpt_range (v->processor, vadr, addr_range);
+       }
+
+       /* ptc.ga  */
+       ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
+}
+
+static void flush_tlb_vhpt_all (struct domain *d)
+{
+       /* First VHPT.  */
+       vhpt_flush ();
+
+       /* Then mTLB.  */
+       local_flush_tlb_all ();
+}
+
+void domain_flush_destroy (struct domain *d)
+{
+       /* Very heavy...  */
+       on_each_cpu ((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
+       cpus_clear (d->domain_dirty_cpumask);
+}
+
+void flush_tlb_mask(cpumask_t mask)
+{
+    int cpu;
+
+    cpu = smp_processor_id();
+    if (cpu_isset (cpu, mask)) {
+        cpu_clear(cpu, mask);
+        flush_tlb_vhpt_all (NULL);
+    }
+
+    if (cpus_empty(mask))
+        return;
+
+    for_each_cpu_mask (cpu, mask)
+        smp_call_function_single
+            (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
+}
 
 void zero_vhpt_stats(void)
 {
diff -r 06e5c5599147 -r 77ccce98ddef xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c       Tue May 16 09:05:36 2006 -0600
+++ b/xen/arch/ia64/xen/xenmisc.c       Tue May 16 10:35:58 2006 -0600
@@ -267,6 +267,9 @@ void context_switch(struct vcpu *prev, s
            vmx_load_state(next);
     /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
     prev = ia64_switch_to(next);
+
+    //cpu_set(smp_processor_id(), current->domain->domain_dirty_cpumask);
+
     if (!VMX_DOMAIN(current)){
            vcpu_set_next_timer(current);
     }
diff -r 06e5c5599147 -r 77ccce98ddef xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Tue May 16 09:05:36 2006 -0600
+++ b/xen/include/asm-ia64/mm.h Tue May 16 10:35:58 2006 -0600
@@ -12,7 +12,7 @@
 
 #include <asm/processor.h>
 #include <asm/atomic.h>
-#include <asm/flushtlb.h>
+#include <asm/tlbflush.h>
 #include <asm/io.h>
 
 #include <public/xen.h>
diff -r 06e5c5599147 -r 77ccce98ddef xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Tue May 16 09:05:36 2006 -0600
+++ b/xen/include/asm-ia64/vcpu.h       Tue May 16 10:35:58 2006 -0600
@@ -135,7 +135,10 @@ extern IA64FAULT vcpu_set_pkr(VCPU *vcpu
 extern IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
 extern IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
 /* TLB */
-extern void vcpu_purge_tr_entry(TR_ENTRY *trp);
+static inline void vcpu_purge_tr_entry(TR_ENTRY *trp)
+{
+       trp->pte.val = 0;
+}
 extern IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 padr,
                UINT64 itir, UINT64 ifa);
 extern IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 padr,
diff -r 06e5c5599147 -r 77ccce98ddef xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h       Tue May 16 09:05:36 2006 -0600
+++ b/xen/include/asm-ia64/vhpt.h       Tue May 16 10:35:58 2006 -0600
@@ -14,13 +14,7 @@
 /* Number of entries in the VHPT.  The size of an entry is 4*8B == 32B */
 #define        VHPT_NUM_ENTRIES                (1 << (VHPT_SIZE_LOG2 - 5))
 
-#ifdef CONFIG_SMP
-# define vhpt_flush_all()      smp_vhpt_flush_all()
-#else
-# define vhpt_flush_all()      vhpt_flush()
-#endif
 // FIXME: These should be automatically generated
-
 #define        VLE_PGFLAGS_OFFSET              0
 #define        VLE_ITIR_OFFSET                 8
 #define        VLE_TITAG_OFFSET                16
@@ -42,15 +36,10 @@ extern void vhpt_init (void);
 extern void vhpt_init (void);
 extern void zero_vhpt_stats(void);
 extern int dump_vhpt_stats(char *buf);
-extern void vhpt_flush_address(unsigned long vadr, unsigned long addr_range);
-extern void vhpt_flush_address_remote(int cpu, unsigned long vadr,
-                                     unsigned long addr_range);
 extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
                                 unsigned long logps);
 extern void vhpt_insert (unsigned long vadr, unsigned long pte,
                         unsigned long logps);
-extern void vhpt_flush(void);
-extern void smp_vhpt_flush_all(void);
 
 /* Currently the VHPT is allocated per CPU.  */
 DECLARE_PER_CPU (unsigned long, vhpt_paddr);
diff -r 06e5c5599147 -r 77ccce98ddef xen/include/asm-ia64/tlbflush.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/tlbflush.h   Tue May 16 10:35:58 2006 -0600
@@ -0,0 +1,37 @@
+#ifndef __FLUSHTLB_H__
+#define __FLUSHTLB_H__
+
+#include <xen/sched.h>
+
+/* TLB flushes can be either local (current vcpu only) or domain wide (on
+   all vcpus).
+   TLB flushes can be either all-flush or range only.
+
+   vTLB flushing means flushing VCPU virtual TLB + machine TLB + machine VHPT.
+*/
+
+/* Local all flush of vTLB.  */
+void vcpu_flush_vtlb_all (void);
+
+/* Local range flush of machine TLB only (not full VCPU virtual TLB!!!)  */
+void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range);
+
+/* Global all flush of vTLB  */
+void domain_flush_vtlb_all (void);
+
+/* Global range-flush of vTLB.  */
+void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range);
+
+/* Final vTLB flush on every dirty cpu.  */
+void domain_flush_destroy (struct domain *d);
+
+/* Flush v-tlb on cpus set in mask for current domain.  */
+void flush_tlb_mask(cpumask_t mask);
+
+/* Flush local machine TLB.  */
+void local_flush_tlb_all (void);
+
+#define tlbflush_current_time() 0
+#define tlbflush_filter(x,y) ((void)0)
+
+#endif
diff -r 06e5c5599147 -r 77ccce98ddef xen/include/asm-ia64/flushtlb.h
--- a/xen/include/asm-ia64/flushtlb.h   Tue May 16 09:05:36 2006 -0600
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-#ifndef __FLUSHTLB_H__
-#define __FLUSHTLB_H__
-
-#include <asm/tlbflush.h>
-
-#define tlbflush_current_time() 0
-#define tlbflush_filter(x,y) ((void)0)
-
-#endif
diff -r 06e5c5599147 -r 77ccce98ddef xen/include/asm-ia64/linux-xen/asm/tlbflush.h
--- a/xen/include/asm-ia64/linux-xen/asm/tlbflush.h     Tue May 16 09:05:36 2006 -0600
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,119 +0,0 @@
-#ifndef _ASM_IA64_TLBFLUSH_H
-#define _ASM_IA64_TLBFLUSH_H
-
-/*
- * Copyright (C) 2002 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- */
-
-#include <linux/config.h>
-
-#include <linux/mm.h>
-
-#include <asm/intrinsics.h>
-#include <asm/mmu_context.h>
-#include <asm/page.h>
-
-/*
- * Now for some TLB flushing routines.  This is the kind of stuff that
- * can be very expensive, so try to avoid them whenever possible.
- */
-
-/*
- * Flush everything (kernel mapping may also have changed due to
- * vmalloc/vfree).
- */
-extern void local_flush_tlb_all (void);
-
-#ifdef CONFIG_SMP
-  extern void smp_flush_tlb_all (void);
-  extern void smp_flush_tlb_mm (struct mm_struct *mm);
-# define flush_tlb_all()       smp_flush_tlb_all()
-#else
-# define flush_tlb_all()       local_flush_tlb_all()
-#endif
-
-#ifndef XEN
-static inline void
-local_finish_flush_tlb_mm (struct mm_struct *mm)
-{
-#ifndef XEN
-// FIXME SMP?
-       if (mm == current->active_mm)
-               activate_context(mm);
-#endif
-}
-
-/*
- * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
- * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
- * the PTEs of the parent task.
- */
-static inline void
-flush_tlb_mm (struct mm_struct *mm)
-{
-       if (!mm)
-               return;
-
-#ifndef XEN
-// FIXME SMP?
-       mm->context = 0;
-#endif
-
-       if (atomic_read(&mm->mm_users) == 0)
-               return;         /* happens as a result of exit_mmap() */
-
-#ifdef CONFIG_SMP
-       smp_flush_tlb_mm(mm);
-#else
-       local_finish_flush_tlb_mm(mm);
-#endif
-}
-
-extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
-
-/*
- * Page-granular tlb flush.
- */
-static inline void
-flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
-{
-#ifdef CONFIG_SMP
-	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
-#else
-#ifdef XEN
-       if (vma->vm_mm == current->domain->arch.mm)
-#else
-       if (vma->vm_mm == current->active_mm)
-#endif
-               ia64_ptcl(addr, (PAGE_SHIFT << 2));
-#ifndef XEN
-// FIXME SMP?
-       else
-               vma->vm_mm->context = 0;
-#endif
-#endif
-}
-
-/*
- * Flush the TLB entries mapping the virtually mapped linear page
- * table corresponding to address range [START-END).
- */
-static inline void
-flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-       /*
-	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
-        * interface (see tlb.h).
-        */
-}
-
-
-#define flush_tlb_kernel_range(start, end)     flush_tlb_all() /* XXX fix me */
-#endif /* XEN */
-
-#ifdef XEN
-extern void flush_tlb_mask(cpumask_t mask);
-#endif
-
-#endif /* _ASM_IA64_TLBFLUSH_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

