
[Xen-changelog] Manual cleanup after merge



ChangeSet 1.1268, 2005/04/05 08:38:03+01:00, mafetter@xxxxxxxxxxxxxxxx

        Manual cleanup after merge
        
        Signed-off-by: michael.fetterman@xxxxxxxxxxxx



 arch/x86/audit.c         |    2 +-
 arch/x86/mm.c            |   45 +++++++++++++++++++++++++++++++++++----------
 arch/x86/shadow.c        |    8 +++++---
 include/asm-x86/mm.h     |    3 ++-
 include/asm-x86/shadow.h |   16 ++++++++--------
 5 files changed, 51 insertions(+), 23 deletions(-)


diff -Nru a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c      2005-04-05 12:21:56 -04:00
+++ b/xen/arch/x86/audit.c      2005-04-05 12:21:56 -04:00
@@ -683,7 +683,7 @@
 
     if ( d != current->domain )
         domain_pause(d);
-    synchronise_pagetables(~0UL);
+    sync_lazy_execstate_all();
 
     // Maybe we should just be using BIGLOCK?
     //
diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-04-05 12:21:56 -04:00
+++ b/xen/arch/x86/mm.c 2005-04-05 12:21:56 -04:00
@@ -199,8 +199,7 @@
     write_cr3(pagetable_val(ed->arch.monitor_table));
 }
 
-
-static inline void invalidate_shadow_ldt(struct exec_domain *d)
+void invalidate_shadow_ldt(struct exec_domain *d)
 {
     int i;
     unsigned long pfn;
@@ -1306,6 +1305,7 @@
 static void process_deferred_ops(unsigned int cpu)
 {
     unsigned int deferred_ops;
+    struct domain *d = current->domain;
 
     deferred_ops = percpu_info[cpu].deferred_ops;
     percpu_info[cpu].deferred_ops = 0;
@@ -1462,6 +1462,9 @@
             type = PGT_l1_page_table | PGT_va_mutable;
 
         pin_page:
+            if ( shadow_mode_enabled(FOREIGNDOM) )
+                type = PGT_writable_page;
+
             okay = get_page_and_type_from_pagenr(op.mfn, type, FOREIGNDOM);
             if ( unlikely(!okay) )
             {
@@ -1516,6 +1519,7 @@
 
         case MMUEXT_NEW_BASEPTR:
             okay = new_guest_cr3(op.mfn);
+            percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
             break;
         
 #ifdef __x86_64__
@@ -1542,6 +1546,8 @@
             break;
     
         case MMUEXT_INVLPG_LOCAL:
+            if ( shadow_mode_enabled(d) )
+                shadow_invlpg(ed, op.linear_addr);
             local_flush_tlb_one(op.linear_addr);
             break;
 
@@ -1556,17 +1562,25 @@
             }
             pset = vcpuset_to_pcpuset(d, vset);
             if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
+            {
+                BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
                 flush_tlb_mask(pset & d->cpuset);
+            }
             else
+            {
+                BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
                 flush_tlb_one_mask(pset & d->cpuset, op.linear_addr);
+            }
             break;
         }
 
         case MMUEXT_TLB_FLUSH_ALL:
+            BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
             flush_tlb_mask(d->cpuset);
             break;
     
         case MMUEXT_INVLPG_ALL:
+            BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
             flush_tlb_one_mask(d->cpuset, op.linear_addr);
             break;
 
@@ -1584,6 +1598,15 @@
 
         case MMUEXT_SET_LDT:
         {
+            if ( shadow_mode_external(d) )
+            {
+                // ignore this request from an external domain...
+                MEM_LOG("ignoring SET_LDT hypercall from external "
+                        "domain %u\n", d->id);
+                okay = 0;
+                break;
+            }
+
             unsigned long ptr  = op.linear_addr;
             unsigned long ents = op.nr_ents;
             if ( ((ptr & (PAGE_SIZE-1)) != 0) || 
@@ -1732,7 +1755,7 @@
     unsigned int foreigndom)
 {
     mmu_update_t req;
-    unsigned long va = 0, pfn, prev_pfn = 0;
+    unsigned long va = 0, mfn, prev_mfn = 0, gpfn;
     struct pfn_info *page;
     int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
     unsigned int cmd, done = 0;
@@ -1747,9 +1770,6 @@
     if ( unlikely(shadow_mode_enabled(d)) )
         check_pagetable(ed, "pre-mmu"); /* debug */
 
-    if ( unlikely(shadow_mode_translate(d)) )
-        domain_crash_synchronous();
-
     if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
     {
         count &= ~MMU_UPDATE_PREEMPTED;
@@ -1875,7 +1895,8 @@
                             __mark_dirty(d, mfn);
 
                         gpfn = __mfn_to_gpfn(d, mfn);
-                        ASSERT(gpfn);
+                        ASSERT(VALID_M2P(gpfn));
+
                         if ( page_is_page_table(page) )
                             shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
                     }
@@ -2012,7 +2033,10 @@
 
     if ( unlikely(__put_user(val, &l1_pgentry_val(
                                  linear_pg_table[l1_linear_offset(va)]))) )
-        return -EINVAL;
+    {
+        rc = -EINVAL;
+        goto out;
+    }
 
     // also need to update the shadow
 
@@ -2027,6 +2051,7 @@
     if ( shadow_mode_log_dirty(d) )
         mark_dirty(d, va_to_l1mfn(ed, va));
 
+ out:
     shadow_unlock(d);
     check_pagetable(ed, "post-va"); /* debug */
 
@@ -2658,8 +2683,8 @@
     u32                 l2_idx;
     struct exec_domain *ed = current;
 
-    // not supported in combination with various shadow modes!
-    ASSERT( !shadow_mode_enabled(ed->domain) );
+    if ( unlikely(shadow_mode_enabled(ed->domain)) )
+        return 0;
 
     /*
      * Attempt to read the PTE that maps the VA being accessed. By checking for
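
Aside, not part of the changeset: the "rc = -EINVAL; goto out;" change above routes the failure through the new "out:" label, where shadow_unlock(d) is called, instead of returning early. A minimal stand-alone sketch of that lock/unlock error-path pattern, using stub lock routines rather than Xen's real ones, looks like this:

    #include <stdio.h>

    static void shadow_lock_stub(void)   { puts("shadow lock taken");    }
    static void shadow_unlock_stub(void) { puts("shadow lock released"); }

    /* Returns 0 on success, -1 on failure; the lock is released either way. */
    static int update_entry(int fail)
    {
        int rc = 0;

        shadow_lock_stub();

        if ( fail )
        {
            rc = -1;        /* an early "return" here would skip the unlock */
            goto out;
        }

        /* ... the actual entry and shadow updates would go here ... */

     out:
        shadow_unlock_stub();
        return rc;
    }

    int main(void)
    {
        return (update_entry(1) == -1) ? 0 : 1;  /* exercise the failure path */
    }
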
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     2005-04-05 12:21:56 -04:00
+++ b/xen/arch/x86/shadow.c     2005-04-05 12:21:56 -04:00
@@ -590,10 +590,10 @@
     struct pfn_info *mmfn_info;
     struct domain *d = ed->domain;
 
-    ASSERT(!pagetable_val(ed->arch.monitor_table)); /* we should only get called once */
+    ASSERT(pagetable_val(ed->arch.monitor_table) == 0);
 
     mmfn_info = alloc_domheap_page(NULL);
-    ASSERT( mmfn_info ); 
+    ASSERT(mmfn_info != NULL);
 
     mmfn = (unsigned long) (mmfn_info - frame_table);
     mpl2e = (l2_pgentry_t *) map_domain_mem(mmfn << PAGE_SHIFT);
@@ -2756,7 +2756,7 @@
     shadow_lock(d);
 
     sh_check_name = s;
-    SH_VVLOG("%s-PT Audit", s);
+    //SH_VVLOG("%s-PT Audit", s);
     sh_l2_present = sh_l1_present = 0;
     perfc_incrc(check_pagetable);
 
@@ -2802,8 +2802,10 @@
     unmap_domain_mem(spl2e);
     unmap_domain_mem(gpl2e);
 
+#if 0
     SH_VVLOG("PT verified : l2_present = %d, l1_present = %d",
              sh_l2_present, sh_l1_present);
+#endif
 
  out:
     if ( errors )
diff -Nru a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  2005-04-05 12:21:56 -04:00
+++ b/xen/include/asm-x86/mm.h  2005-04-05 12:21:56 -04:00
@@ -246,7 +246,8 @@
 #undef  machine_to_phys_mapping
 #define machine_to_phys_mapping ((u32 *)RDWR_MPT_VIRT_START)
 #define INVALID_M2P_ENTRY        (~0U)
-#define IS_INVALID_M2P_ENTRY(_e) (!!((_e) & (1U<<31)))
+#define VALID_M2P(_e)            (!((_e) & (1U<<31)))
+#define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
 
 /*
  * The phys_to_machine_mapping is the reversed mapping of MPT for full
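
For context, here is a tiny self-contained illustration of the M2P validity macros introduced in the hunk above. This snippet is not part of the changeset; the macro bodies are copied from the patch, and the sample gpfn value is arbitrary.

    #include <assert.h>

    #define INVALID_M2P_ENTRY        (~0U)
    #define VALID_M2P(_e)            (!((_e) & (1U<<31)))
    #define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))

    int main(void)
    {
        unsigned int gpfn = 0x1234;   /* ordinary guest pfn: bit 31 clear */

        assert(VALID_M2P(gpfn));
        assert(IS_INVALID_M2P_ENTRY(INVALID_M2P_ENTRY));  /* ~0U has bit 31 set */
        return 0;
    }
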
diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      2005-04-05 12:21:56 -04:00
+++ b/xen/include/asm-x86/shadow.h      2005-04-05 12:21:56 -04:00
@@ -222,11 +222,11 @@
 #define SHADOW_SNAPSHOT_ELSEWHERE (-1L)
 
 /************************************************************************/
-#define SHADOW_DEBUG 0
-#define SHADOW_VERBOSE_DEBUG 0
-#define SHADOW_VVERBOSE_DEBUG 0
-#define SHADOW_HASH_DEBUG 0
-#define FULLSHADOW_DEBUG 0
+#define SHADOW_DEBUG 1
+#define SHADOW_VERBOSE_DEBUG 1
+#define SHADOW_VVERBOSE_DEBUG 1
+#define SHADOW_HASH_DEBUG 1
+#define FULLSHADOW_DEBUG 1
 
 #if SHADOW_DEBUG
 extern int shadow_status_noswap;
@@ -373,7 +373,7 @@
     if ( need_flush )
     {
         perfc_incrc(update_hl2e_invlpg);
-        __flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
+        local_flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
     }
 }
 
@@ -959,7 +959,7 @@
                 perfc_incrc(shadow_status_hit_head);
             }
 
-            SH_VVLOG("lookup gpfn=%p => status=%p", key, head->smfn);
+            //SH_VVLOG("lookup gpfn=%p => status=%p", key, head->smfn);
             return head->smfn;
         }
 
@@ -968,7 +968,7 @@

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

