[Xen-changelog] [xen-unstable] x86/mm: merge the shadow, hap and log-dirty locks into a single paging lock.



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307017012 -3600
# Node ID 2bbed46eb10ce80e920506714f7e328193a23b52
# Parent  64398d14dcd6e720ac6908ee5ae284b03832e8bc
x86/mm: merge the shadow, hap and log-dirty locks into a single paging lock.

This will allow us to simplify the locking around calls between
hap/shadow and log-dirty code.  Many log-dirty paths already need the
shadow or HAP lock, so this shouldn't increase contention much.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
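
For readers skimming the diff below: the change is mechanical. Every
shadow_lock()/hap_lock()/log_dirty_lock() call site now takes the single
per-domain paging lock, and the p2m page allocators keep using the
recursive form because they can be entered either with or without that
lock already held.  The following is a small, self-contained sketch of
those lock semantics in plain C and pthreads.  It is a toy model, not
Xen's mm_lock implementation; struct toy_domain and the helper bodies
are illustrative, and only the caller-facing names (paging_lock,
paging_lock_recursive, paging_unlock, paging_locked_by_me) come from
the patch.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct toy_domain {
    pthread_mutex_t mu;     /* stands in for mm_lock_t's inner lock */
    pthread_t owner;        /* valid only while 'owned' is set */
    int owned;
    int recurse_count;      /* acquisitions by the owning thread */
};

/* "Is the lock held by me?"  The unsynchronised read is only meaningful
 * to the owning thread (the one thread that could have stored its own
 * id), which mirrors how mm_locked_by_me() is used. */
static int paging_locked_by_me(struct toy_domain *d)
{
    return d->owned && pthread_equal(d->owner, pthread_self());
}

static void take_lock(struct toy_domain *d)
{
    pthread_mutex_lock(&d->mu);
    d->owner = pthread_self();
    d->owned = 1;
    d->recurse_count = 1;
}

/* Non-recursive form: the caller must not already hold the lock. */
static void paging_lock(struct toy_domain *d)
{
    assert(!paging_locked_by_me(d));
    take_lock(d);
}

/* Recursive form, for paths reachable both from p2m code (lock not
 * held) and from log-dirty code (lock held). */
static void paging_lock_recursive(struct toy_domain *d)
{
    if ( paging_locked_by_me(d) )
        d->recurse_count++;
    else
        take_lock(d);
}

static void paging_unlock(struct toy_domain *d)
{
    assert(paging_locked_by_me(d));
    if ( --d->recurse_count == 0 )
    {
        d->owned = 0;
        pthread_mutex_unlock(&d->mu);
    }
}

/* Shaped like the p2m page allocators changed in the diff
 * (e.g. shadow_alloc_p2m_page()). */
static void alloc_p2m_page(struct toy_domain *d)
{
    paging_lock_recursive(d);
    puts("allocating a p2m page under the paging lock");
    paging_unlock(d);
}

int main(void)
{
    struct toy_domain d = { .mu = PTHREAD_MUTEX_INITIALIZER };

    alloc_p2m_page(&d);   /* p2m-style caller: lock not held */

    paging_lock(&d);      /* log-dirty-style caller: lock already held */
    alloc_p2m_page(&d);
    paging_unlock(&d);

    return 0;
}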


diff -r 64398d14dcd6 -r 2bbed46eb10c xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Thu Jun 02 13:16:52 2011 +0100
@@ -65,9 +65,9 @@
         return -EINVAL;
 
     /* turn on PG_log_dirty bit in paging mode */
-    hap_lock(d);
+    paging_lock(d);
     d->arch.paging.mode |= PG_log_dirty;
-    hap_unlock(d);
+    paging_unlock(d);
 
     /* set l1e entries of P2M table to be read-only. */
     for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
@@ -85,9 +85,9 @@
     if ( !dirty_vram )
         return -EINVAL;
 
-    hap_lock(d);
+    paging_lock(d);
     d->arch.paging.mode &= ~PG_log_dirty;
-    hap_unlock(d);
+    paging_unlock(d);
 
     /* set l1e entries of P2M table with normal mode */
     for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
@@ -196,9 +196,9 @@
 static int hap_enable_log_dirty(struct domain *d)
 {
     /* turn on PG_log_dirty bit in paging mode */
-    hap_lock(d);
+    paging_lock(d);
     d->arch.paging.mode |= PG_log_dirty;
-    hap_unlock(d);
+    paging_unlock(d);
 
     /* set l1e entries of P2M table to be read-only. */
     p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
@@ -208,9 +208,9 @@
 
 static int hap_disable_log_dirty(struct domain *d)
 {
-    hap_lock(d);
+    paging_lock(d);
     d->arch.paging.mode &= ~PG_log_dirty;
-    hap_unlock(d);
+    paging_unlock(d);
 
     /* set l1e entries of P2M table with normal mode */
     p2m_change_entry_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
@@ -248,7 +248,7 @@
     struct page_info *pg = NULL;
     void *p;
 
-    ASSERT(hap_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
 
     pg = page_list_remove_head(&d->arch.paging.hap.freelist);
     if ( unlikely(!pg) )
@@ -268,7 +268,7 @@
 {
     struct page_info *pg = mfn_to_page(mfn);
 
-    ASSERT(hap_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
 
     d->arch.paging.hap.free_pages++;
     page_list_add_tail(pg, &d->arch.paging.hap.freelist);
@@ -279,8 +279,8 @@
     struct page_info *pg;
 
     /* This is called both from the p2m code (which never holds the 
-     * hap lock) and the log-dirty code (which sometimes does). */
-    hap_lock_recursive(d);
+     * paging lock) and the log-dirty code (which sometimes does). */
+    paging_lock_recursive(d);
     pg = hap_alloc(d);
 
 #if CONFIG_PAGING_LEVELS == 3
@@ -311,15 +311,15 @@
         pg->count_info |= 1;
     }
 
-    hap_unlock(d);
+    paging_unlock(d);
     return pg;
 }
 
 static void hap_free_p2m_page(struct domain *d, struct page_info *pg)
 {
     /* This is called both from the p2m code (which never holds the 
-     * hap lock) and the log-dirty code (which sometimes does). */
-    hap_lock_recursive(d);
+     * paging lock) and the log-dirty code (which sometimes does). */
+    paging_lock_recursive(d);
 
     ASSERT(page_get_owner(pg) == d);
     /* Should have just the one ref we gave it in alloc_p2m_page() */
@@ -337,7 +337,7 @@
     hap_free(d, page_to_mfn(pg));
     ASSERT(d->arch.paging.hap.p2m_pages >= 0);
 
-    hap_unlock(d);
+    paging_unlock(d);
 }
 
 /* Return the size of the pool, rounded up to the nearest MB */
@@ -358,7 +358,7 @@
 {
     struct page_info *pg;
 
-    ASSERT(hap_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
 
     if ( pages < d->arch.paging.hap.p2m_pages )
         pages = 0;
@@ -563,7 +563,6 @@
 /************************************************/
 void hap_domain_init(struct domain *d)
 {
-    mm_lock_init(&d->arch.paging.hap.lock);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
 }
 
@@ -587,9 +586,9 @@
     if ( old_pages == 0 )
     {
         unsigned int r;
-        hap_lock(d);
+        paging_lock(d);
         r = hap_set_allocation(d, 256, NULL);
-        hap_unlock(d);
+        paging_unlock(d);
         if ( r != 0 )
         {
             hap_set_allocation(d, 0, NULL);
@@ -638,10 +637,10 @@
 
     p2m_teardown(p2m_get_hostp2m(d));
     /* Free any memory that the p2m teardown released */
-    hap_lock(d);
+    paging_lock(d);
     hap_set_allocation(d, 0, NULL);
     ASSERT(d->arch.paging.hap.p2m_pages == 0);
-    hap_unlock(d);
+    paging_unlock(d);
 }
 
 void hap_teardown(struct domain *d)
@@ -652,8 +651,8 @@
     ASSERT(d->is_dying);
     ASSERT(d != current->domain);
 
-    if ( !hap_locked_by_me(d) )
-        hap_lock(d); /* Keep various asserts happy */
+    if ( !paging_locked_by_me(d) )
+        paging_lock(d); /* Keep various asserts happy */
 
     if ( paging_mode_enabled(d) )
     {
@@ -689,7 +688,7 @@
 
     d->arch.paging.mode &= ~PG_log_dirty;
 
-    hap_unlock(d);
+    paging_unlock(d);
 }
 
 int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
@@ -700,9 +699,9 @@
     switch ( sc->op )
     {
     case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
-        hap_lock(d);
+        paging_lock(d);
         rc = hap_set_allocation(d, sc->mb << (20 - PAGE_SHIFT), &preempted);
-        hap_unlock(d);
+        paging_unlock(d);
         if ( preempted )
             /* Not finished.  Set up to re-run the call. */
             rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
@@ -789,7 +788,7 @@
 {
     struct domain *d = v->domain;
 
-    hap_lock(d);
+    paging_lock(d);
 
     v->arch.paging.mode = hap_paging_get_mode(v);
 
@@ -804,7 +803,7 @@
     /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
     hap_update_cr3(v, 0);
 
-    hap_unlock(d);
+    paging_unlock(d);
 }
 
 #if CONFIG_PAGING_LEVELS == 3
@@ -861,7 +860,7 @@
      * a hypercall which passes a domain and chooses mostly the first
      * vcpu. */
 
-    hap_lock(d);
+    paging_lock(d);
     old_flags = l1e_get_flags(*p);
 
     if ( nestedhvm_enabled(d) && (old_flags & _PAGE_PRESENT) ) {
@@ -886,7 +885,7 @@
         p2m_install_entry_in_monitors(d, (l3_pgentry_t *)p);
 #endif
 
-    hap_unlock(d);
+    paging_unlock(d);
 
     if ( flush_nestedp2m )
         p2m_flush_nestedp2m(d);
diff -r 64398d14dcd6 -r 2bbed46eb10c xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/hap/nested_hap.c  Thu Jun 02 13:16:52 2011 +0100
@@ -82,14 +82,14 @@
     struct domain *d = p2m->domain;
     uint32_t old_flags;
 
-    hap_lock(d);
+    paging_lock(d);
 
     old_flags = l1e_get_flags(*p);
     safe_write_pte(p, new);
     if (old_flags & _PAGE_PRESENT)
         nestedhvm_vmcx_flushtlb(p2m);
     
-    hap_unlock(d);
+    paging_unlock(d);
 }
 
 /********************************************/
diff -r 64398d14dcd6 -r 2bbed46eb10c xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h        Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/mm-locks.h        Thu Jun 02 13:16:52 2011 +0100
@@ -114,55 +114,26 @@
 #define p2m_unlock(p)         mm_unlock(&(p)->lock)
 #define p2m_locked_by_me(p)   mm_locked_by_me(&(p)->lock)
 
-/* Shadow lock (per-domain)
+/* Paging lock (per-domain)
  *
- * This lock is intended to allow us to make atomic updates to the
- * software TLB that the shadow pagetables provide.
- *
- * Specifically, it protects:
+ * For shadow pagetables, this lock protects
  *   - all changes to shadow page table pages
  *   - the shadow hash table
  *   - the shadow page allocator 
  *   - all changes to guest page table pages
  *   - all changes to the page_info->tlbflush_timestamp
- *   - the page_info->count fields on shadow pages */
+ *   - the page_info->count fields on shadow pages 
+ * 
+ * For HAP, it protects the NPT/EPT tables and mode changes. 
+ * 
+ * It also protects the log-dirty bitmap from concurrent accesses (and
+ * teardowns, etc). */
 
-declare_mm_lock(shadow)
-#define shadow_lock(d)         mm_lock(shadow, &(d)->arch.paging.shadow.lock)
-#define shadow_lock_recursive(d) \
-                     mm_lock_recursive(shadow, &(d)->arch.paging.shadow.lock)
-#define shadow_unlock(d)       mm_unlock(&(d)->arch.paging.shadow.lock)
-#define shadow_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.shadow.lock)
-
-/* HAP lock (per-domain)
- * 
- * Equivalent of the shadow lock for HAP.  Protects updates to the
- * NPT and EPT tables, and the HAP page allocator. */
-
-declare_mm_lock(hap)
-#define hap_lock(d)         mm_lock(hap, &(d)->arch.paging.hap.lock)
-#define hap_lock_recursive(d) \
-                  mm_lock_recursive(hap, &(d)->arch.paging.hap.lock)
-#define hap_unlock(d)       mm_unlock(&(d)->arch.paging.hap.lock)
-#define hap_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.hap.lock)
-
-/* Log-dirty lock (per-domain) 
- * 
- * Protects the log-dirty bitmap from concurrent accesses (and teardowns, etc).
- *
- * Because mark_dirty is called from a lot of places, the log-dirty lock
- * may be acquired with the shadow or HAP locks already held.  When the
- * log-dirty code makes callbacks into HAP or shadow code to reset
- * various traps that will trigger the mark_dirty calls, it must *not*
- * have the log-dirty lock held, or it risks deadlock.  Because the only
- * purpose of those calls is to make sure that *guest* actions will
- * cause mark_dirty to be called (hypervisor actions explictly call it
- * anyway), it is safe to release the log-dirty lock before the callback
- * as long as the domain is paused for the entire operation. */
-
-declare_mm_lock(log_dirty)
-#define log_dirty_lock(d) mm_lock(log_dirty, &(d)->arch.paging.log_dirty.lock)
-#define log_dirty_unlock(d) mm_unlock(&(d)->arch.paging.log_dirty.lock)
-
+declare_mm_lock(paging)
+#define paging_lock(d)         mm_lock(paging, &(d)->arch.paging.lock)
+#define paging_lock_recursive(d) \
+                    mm_lock_recursive(paging, &(d)->arch.paging.lock)
+#define paging_unlock(d)       mm_unlock(&(d)->arch.paging.lock)
+#define paging_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.lock)
 
 #endif /* _MM_LOCKS_H */
diff -r 64398d14dcd6 -r 2bbed46eb10c xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/paging.c  Thu Jun 02 13:16:52 2011 +0100
@@ -126,7 +126,7 @@
 
     INIT_PAGE_LIST_HEAD(&to_free);
 
-    log_dirty_lock(d);
+    paging_lock(d);
 
     l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
 
@@ -163,7 +163,7 @@
     ASSERT(d->arch.paging.log_dirty.allocs == 0);
     d->arch.paging.log_dirty.failed_allocs = 0;
 
-    log_dirty_unlock(d);
+    paging_unlock(d);
     
     /* Return the memory now that we're not holding the log-dirty lock */
     page_list_for_each_safe(pg, tmp, &to_free)
@@ -239,7 +239,8 @@
     new_mfn = _mfn(INVALID_MFN);
 
 again:
-    log_dirty_lock(d);
+    /* Recursive: this is called from inside the shadow code */
+    paging_lock_recursive(d);
 
     l4 = paging_map_log_dirty_bitmap(d);
     if ( unlikely(!l4) )
@@ -300,13 +301,13 @@
         d->arch.paging.log_dirty.dirty_count++;
     }
 
-    log_dirty_unlock(d);
+    paging_unlock(d);
     if ( mfn_valid(new_mfn) )
         paging_free_log_dirty_page(d, new_mfn);
     return;
 
 oom:
-    log_dirty_unlock(d);
+    paging_unlock(d);
     new_mfn = paging_new_log_dirty_page(d);
     if ( !mfn_valid(new_mfn) )
         /* we've already recorded the failed allocation */
@@ -323,7 +324,8 @@
     unsigned long *l1;
     int rv = 0;
 
-    log_dirty_lock(d);
+    /* Recursive: this is called from inside the shadow code */
+    paging_lock_recursive(d);
     ASSERT(paging_mode_log_dirty(d));
 
     /* We /really/ mean PFN here, even for non-translated guests. */
@@ -359,7 +361,7 @@
     unmap_domain_page(l1);
 
 out:
-    log_dirty_unlock(d);
+    paging_unlock(d);
     return rv;
 }
 
@@ -375,7 +377,7 @@
     int i4, i3, i2;
 
     domain_pause(d);
-    log_dirty_lock(d);
+    paging_lock(d);
 
     clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
 
@@ -456,7 +458,7 @@
     if ( pages < sc->pages )
         sc->pages = pages;
 
-    log_dirty_unlock(d);
+    paging_unlock(d);
 
     if ( clean )
     {
@@ -468,7 +470,7 @@
     return rv;
 
  out:
-    log_dirty_unlock(d);
+    paging_unlock(d);
     domain_unpause(d);
     return rv;
 }
@@ -486,7 +488,7 @@
     int i2, i3, i4;
 
     d->arch.paging.log_dirty.clean_dirty_bitmap(d);
-    log_dirty_lock(d);
+    paging_lock(d);
 
     PAGING_DEBUG(LOGDIRTY, "log-dirty-range: dom %u faults=%u dirty=%u\n",
                  d->domain_id,
@@ -611,12 +613,12 @@
     if ( l4 )
         unmap_domain_page(l4);
 
-    log_dirty_unlock(d);
+    paging_unlock(d);
 
     return rv;
 
  out:
-    log_dirty_unlock(d);
+    paging_unlock(d);
     return rv;
 }
 
@@ -632,9 +634,6 @@
                            int    (*disable_log_dirty)(struct domain *d),
                            void   (*clean_dirty_bitmap)(struct domain *d))
 {
-    /* We initialize log dirty lock first */
-    mm_lock_init(&d->arch.paging.log_dirty.lock);
-
     d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
     d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
     d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
@@ -658,6 +657,8 @@
     if ( (rc = p2m_init(d)) != 0 )
         return rc;
 
+    mm_lock_init(&d->arch.paging.lock);
+
     /* The order of the *_init calls below is important, as the later
      * ones may rewrite some common fields.  Shadow pagetables are the
      * default... */
diff -r 64398d14dcd6 -r 2bbed46eb10c xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Jun 02 13:16:52 2011 +0100
@@ -45,7 +45,6 @@
  * Called for every domain from arch_domain_create() */
 void shadow_domain_init(struct domain *d, unsigned int domcr_flags)
 {
-    mm_lock_init(&d->arch.paging.shadow.lock);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
 
@@ -469,7 +468,7 @@
  * 
  * 2. All shadow operations on a guest page require the page to be brought
  *    back into sync before proceeding.  This must be done under the
- *    shadow lock so that the page is guaranteed to remain synced until
+ *    paging lock so that the page is guaranteed to remain synced until
  *    the operation completes.
  *
  *    Exceptions to this rule: the pagefault and invlpg handlers may 
@@ -478,7 +477,7 @@
  * 3. Operations on shadows that do not start from a guest page need to
  *    be aware that they may be handling an out-of-sync shadow.
  *
- * 4. Operations that do not normally take the shadow lock (fast-path 
+ * 4. Operations that do not normally take the paging lock (fast-path 
  *    #PF handler, INVLPG) must fall back to a locking, syncing version 
  *    if they see an out-of-sync table. 
  *
@@ -725,7 +724,7 @@
 {
     struct page_info *pg = mfn_to_page(gmfn);
 
-    ASSERT(shadow_locked_by_me(v->domain));
+    ASSERT(paging_locked_by_me(v->domain));
     ASSERT(mfn_is_out_of_sync(gmfn));
     /* Guest page must be shadowed *only* as L1 when out of sync. */
     ASSERT(!(mfn_to_page(gmfn)->shadow_flags & SHF_page_type_mask 
@@ -916,7 +915,7 @@
 
     SHADOW_PRINTK("d=%d, v=%d\n", v->domain->domain_id, v->vcpu_id);
 
-    ASSERT(shadow_locked_by_me(v->domain));
+    ASSERT(paging_locked_by_me(v->domain));
 
     if ( !this )
         goto resync_others;
@@ -973,7 +972,7 @@
 {
     struct page_info *pg;
     
-    ASSERT(shadow_locked_by_me(v->domain));
+    ASSERT(paging_locked_by_me(v->domain));
 
     SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
                   v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
@@ -1137,7 +1136,7 @@
     struct domain *d = v->domain;
     int rc;
 
-    ASSERT(shadow_locked_by_me(v->domain));
+    ASSERT(paging_locked_by_me(v->domain));
     rc = sh_validate_guest_entry(v, gmfn, entry, size);
     if ( rc & SHADOW_SET_FLUSH )
         /* Need to flush TLBs to pick up shadow PT changes */
@@ -1159,11 +1158,11 @@
  * appropriately.  Returns 0 if we page-faulted, 1 for success. */
 {
     int failed;
-    shadow_lock(v->domain);
+    paging_lock(v->domain);
     failed = __copy_to_user(p, &new, sizeof(new));
     if ( failed != sizeof(new) )
         sh_validate_guest_entry(v, gmfn, p, sizeof(new));
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
     return (failed == 0);
 }
 
@@ -1176,12 +1175,12 @@
 {
     int failed;
     intpte_t t = *old;
-    shadow_lock(v->domain);
+    paging_lock(v->domain);
     failed = cmpxchg_user(p, t, new);
     if ( t == *old )
         sh_validate_guest_entry(v, gmfn, p, sizeof(new));
     *old = t;
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
     return (failed == 0);
 }
 
@@ -1416,9 +1415,9 @@
 void shadow_blow_tables_per_domain(struct domain *d)
 {
     if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL ) {
-        shadow_lock(d);
+        paging_lock(d);
         shadow_blow_tables(d);
-        shadow_unlock(d);
+        paging_unlock(d);
     }
 }
 
@@ -1435,9 +1434,9 @@
     {
         if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL )
         {
-            shadow_lock(d);
+            paging_lock(d);
             shadow_blow_tables(d);
-            shadow_unlock(d);
+            paging_unlock(d);
         }
     }
     rcu_read_unlock(&domlist_read_lock);
@@ -1484,7 +1483,7 @@
     void *p;
     int i;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
     ASSERT(shadow_type != SH_type_none);
     perfc_incr(shadow_alloc);
 
@@ -1560,7 +1559,7 @@
     u32 shadow_type;
     int i;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
     perfc_incr(shadow_free);
 
     shadow_type = sp->u.sh.type;
@@ -1613,13 +1612,13 @@
     struct page_info *pg;
 
     /* This is called both from the p2m code (which never holds the 
-     * shadow lock) and the log-dirty code (which sometimes does). */
-    shadow_lock_recursive(d);
+     * paging lock) and the log-dirty code (which sometimes does). */
+    paging_lock_recursive(d);
 
     if ( d->arch.paging.shadow.total_pages 
          < shadow_min_acceptable_pages(d) + 1 )
     {
-        shadow_unlock(d);
+        paging_unlock(d);
         return NULL;
     }
  
@@ -1628,7 +1627,7 @@
     d->arch.paging.shadow.p2m_pages++;
     d->arch.paging.shadow.total_pages--;
 
-    shadow_unlock(d);
+    paging_unlock(d);
 
     /* Unlike shadow pages, mark p2m pages as owned by the domain.
      * Marking the domain as the owner would normally allow the guest to
@@ -1655,14 +1654,14 @@
     page_set_owner(pg, NULL); 
 
     /* This is called both from the p2m code (which never holds the 
-     * shadow lock) and the log-dirty code (which sometimes does). */
-    shadow_lock_recursive(d);
+     * paging lock) and the log-dirty code (which sometimes does). */
+    paging_lock_recursive(d);
 
     shadow_free(d, page_to_mfn(pg));
     d->arch.paging.shadow.p2m_pages--;
     d->arch.paging.shadow.total_pages++;
 
-    shadow_unlock(d);
+    paging_unlock(d);
 }
 
 #if CONFIG_PAGING_LEVELS == 3
@@ -1721,7 +1720,7 @@
     struct page_info *sp;
     unsigned int lower_bound;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
 
     if ( pages > 0 )
     {
@@ -1920,7 +1919,7 @@
 {
     struct page_info **table;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
     ASSERT(!d->arch.paging.shadow.hash_table);
 
     table = xmalloc_array(struct page_info *, SHADOW_HASH_BUCKETS);
@@ -1935,7 +1934,7 @@
  * This function does not care whether the table is populated. */
 static void shadow_hash_teardown(struct domain *d)
 {
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
     ASSERT(d->arch.paging.shadow.hash_table);
 
     xfree(d->arch.paging.shadow.hash_table);
@@ -1951,7 +1950,7 @@
     struct page_info *sp, *prev;
     key_t key;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
     ASSERT(d->arch.paging.shadow.hash_table);
     ASSERT(t);
 
@@ -2005,7 +2004,7 @@
     struct page_info *sp;
     key_t key;
     
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
     ASSERT(d->arch.paging.shadow.hash_table);
     ASSERT(t);
 
@@ -2031,7 +2030,7 @@
     struct page_info *sp, *x;
     key_t key;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
     ASSERT(d->arch.paging.shadow.hash_table);
     ASSERT(t);
 
@@ -2085,7 +2084,7 @@
     struct domain *d = v->domain;
     struct page_info *x;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
 
     /* Can be called via p2m code &c after shadow teardown. */
     if ( unlikely(!d->arch.paging.shadow.hash_table) )
@@ -2242,7 +2241,7 @@
         ;
     struct page_info *pg = mfn_to_page(gmfn);
 
-    ASSERT(shadow_locked_by_me(v->domain));
+    ASSERT(paging_locked_by_me(v->domain));
 
     /* Only remove writable mappings if we are doing shadow refcounts.
      * In guest refcounting, we trust Xen to already be restricting
@@ -2508,9 +2507,9 @@
         return 0;
 
     /* Although this is an externally visible function, we do not know
-     * whether the shadow lock will be held when it is called (since it
+     * whether the paging lock will be held when it is called (since it
      * can be called via put_page_type when we clear a shadow l1e).*/
-    shadow_lock_recursive(v->domain);
+    paging_lock_recursive(v->domain);
 
     /* XXX TODO: 
      * Heuristics for finding the (probably) single mapping of this gmfn */
@@ -2536,7 +2535,7 @@
         }
     }
 
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
 
     /* We killed at least one mapping, so must flush TLBs. */
     return 1;
@@ -2670,9 +2669,9 @@
     ASSERT(mfn_valid(gmfn));
 
     /* Although this is an externally visible function, we do not know
-     * whether the shadow lock will be held when it is called (since it
+     * whether the paging lock will be held when it is called (since it
      * can be called via put_page_type when we clear a shadow l1e).*/
-    shadow_lock_recursive(v->domain);
+    paging_lock_recursive(v->domain);
 
     SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
                    v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
@@ -2680,7 +2679,7 @@
     /* Bail out now if the page is not shadowed */
     if ( (pg->count_info & PGC_page_table) == 0 )
     {
-        shadow_unlock(v->domain);
+        paging_unlock(v->domain);
         return;
     }
 
@@ -2742,7 +2741,7 @@
      * take a fault. */
     flush_tlb_mask(v->domain->domain_dirty_cpumask);
 
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
 }
 
 static void
@@ -2811,7 +2810,7 @@
     struct domain *d = v->domain;
     const struct paging_mode *old_mode = v->arch.paging.mode;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) 
     /* Make sure this vcpu has a virtual TLB array allocated */
@@ -3004,9 +3003,9 @@
 
 void shadow_update_paging_modes(struct vcpu *v)
 {
-    shadow_lock(v->domain);
+    paging_lock(v->domain);
     sh_update_paging_modes(v);
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
 }
 
 /**************************************************************************/
@@ -3017,7 +3016,7 @@
 {
     struct vcpu *v;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
     ASSERT(d != current->domain);
 
     d->arch.paging.mode = new_mode;
@@ -3056,7 +3055,7 @@
     if ( old_pages == 0 )
     {
         unsigned int r;
-        shadow_lock(d);                
+        paging_lock(d);                
         r = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */
         if ( r != 0 )
         {
@@ -3064,14 +3063,14 @@
             rv = -ENOMEM;
             goto out_locked;
         }        
-        shadow_unlock(d);
+        paging_unlock(d);
     }
 
     /* Allow p2m and log-dirty code to borrow shadow memory */
     d->arch.paging.alloc_page = shadow_alloc_p2m_page;
     d->arch.paging.free_page = shadow_free_p2m_page;
 
-    /* Init the P2M table.  Must be done before we take the shadow lock 
+    /* Init the P2M table.  Must be done before we take the paging lock 
      * to avoid possible deadlock. */
     if ( mode & PG_translate )
     {
@@ -3103,7 +3102,7 @@
         pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
     }
 
-    shadow_lock(d);
+    paging_lock(d);
 
     /* Sanity check again with the lock held */
     if ( shadow_mode_enabled(d) )
@@ -3133,7 +3132,7 @@
     sh_new_mode(d, mode);
 
  out_locked:
-    shadow_unlock(d);
+    paging_unlock(d);
  out_unlocked:
     if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
         p2m_teardown(p2m);
@@ -3154,7 +3153,7 @@
     ASSERT(d->is_dying);
     ASSERT(d != current->domain);
 
-    shadow_lock(d);
+    paging_lock(d);
 
     if ( shadow_mode_enabled(d) )
     {
@@ -3251,7 +3250,7 @@
         d->arch.hvm_domain.dirty_vram = NULL;
     }
 
-    shadow_unlock(d);
+    paging_unlock(d);
 
     /* Must be called outside the lock */
     if ( unpaged_pagetable ) 
@@ -3277,7 +3276,7 @@
     /* It is now safe to pull down the p2m map. */
     p2m_teardown(p2m_get_hostp2m(d));
     /* Free any shadow memory that the p2m teardown released */
-    shadow_lock(d);
+    paging_lock(d);
     sh_set_allocation(d, 0, NULL);
     SHADOW_PRINTK("dom %u final teardown done."
                    "  Shadow pages total = %u, free = %u, p2m=%u\n",
@@ -3285,13 +3284,13 @@
                    d->arch.paging.shadow.total_pages, 
                    d->arch.paging.shadow.free_pages, 
                    d->arch.paging.shadow.p2m_pages);
-    shadow_unlock(d);
+    paging_unlock(d);
 }
 
 static int shadow_one_bit_enable(struct domain *d, u32 mode)
 /* Turn on a single shadow mode feature */
 {
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
 
     /* Sanity check the call */
     if ( d == current->domain || (d->arch.paging.mode & mode) == mode )
@@ -3332,7 +3331,7 @@
 /* Turn off a single shadow mode feature */
 {
     struct vcpu *v;
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
 
     /* Sanity check the call */
     if ( d == current->domain || !((d->arch.paging.mode & mode) == mode) )
@@ -3397,9 +3396,9 @@
     int ret;
 
     domain_pause(d);
-    shadow_lock(d);
+    paging_lock(d);
     ret = shadow_one_bit_enable(d, PG_SH_enable);
-    shadow_unlock(d);
+    paging_unlock(d);
     domain_unpause(d);
 
     return ret;
@@ -3410,9 +3409,9 @@
     int ret;
 
     domain_pause(d);
-    shadow_lock(d);
+    paging_lock(d);
     ret = shadow_one_bit_disable(d, PG_SH_enable);
-    shadow_unlock(d);
+    paging_unlock(d);
     domain_unpause(d);
 
     return ret;
@@ -3501,7 +3500,7 @@
 {
     struct domain *d = v->domain;
     
-    shadow_lock(d);
+    paging_lock(d);
 
     /* If there are any shadows, update them.  But if shadow_teardown()
      * has already been called then it's not safe to try. */ 
@@ -3533,7 +3532,7 @@
     }
 #endif
 
-    shadow_unlock(d);
+    paging_unlock(d);
 }
 
 /**************************************************************************/
@@ -3546,8 +3545,7 @@
 {
     int ret;
 
-    /* shadow lock is required here */
-    shadow_lock(d);
+    paging_lock(d);
     if ( shadow_mode_enabled(d) )
     {
         /* This domain already has some shadows: need to clear them out 
@@ -3565,7 +3563,7 @@
 #endif
     
     ret = shadow_one_bit_enable(d, PG_log_dirty);
-    shadow_unlock(d);
+    paging_unlock(d);
 
     return ret;
 }
@@ -3575,10 +3573,9 @@
 {
     int ret;
 
-    /* shadow lock is required here */    
-    shadow_lock(d);
+    paging_lock(d);
     ret = shadow_one_bit_disable(d, PG_log_dirty);
-    shadow_unlock(d);
+    paging_unlock(d);
     
     return ret;
 }
@@ -3588,12 +3585,12 @@
  */
 void shadow_clean_dirty_bitmap(struct domain *d)
 {
-    shadow_lock(d);
+    paging_lock(d);
     /* Need to revoke write access to the domain's pages again.
      * In future, we'll have a less heavy-handed approach to this,
      * but for now, we just unshadow everything except Xen. */
     shadow_blow_tables(d);
-    shadow_unlock(d);
+    paging_unlock(d);
 }
 
 
@@ -3618,7 +3615,7 @@
             || end_pfn >= p2m->max_mapped_pfn)
         return -EINVAL;
 
-    shadow_lock(d);
+    paging_lock(d);
 
     if ( dirty_vram && (!nr ||
              ( begin_pfn != dirty_vram->begin_pfn
@@ -3789,7 +3786,7 @@
     dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
 
 out:
-    shadow_unlock(d);
+    paging_unlock(d);
     return rc;
 }
 
@@ -3824,18 +3821,18 @@
         return 0;
 
     case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
-        shadow_lock(d);
+        paging_lock(d);
         if ( sc->mb == 0 && shadow_mode_enabled(d) )
         {            
             /* Can't set the allocation to zero unless the domain stops using
              * shadow pagetables first */
             SHADOW_ERROR("Can't set shadow allocation to zero, domain %u"
                          " is still using shadows.\n", d->domain_id);
-            shadow_unlock(d);
+            paging_unlock(d);
             return -EINVAL;
         }
         rc = sh_set_allocation(d, sc->mb << (20 - PAGE_SHIFT), &preempted);
-        shadow_unlock(d);
+        paging_unlock(d);
         if ( preempted )
             /* Not finished.  Set up to re-run the call. */
             rc = hypercall_create_continuation(
diff -r 64398d14dcd6 -r 2bbed46eb10c xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Jun 02 13:16:52 2011 +0100
@@ -203,7 +203,7 @@
 #endif
     int mismatch = 0;
 
-    ASSERT(shadow_locked_by_me(d));
+    ASSERT(paging_locked_by_me(d));
 
     if ( version == atomic_read(&d->arch.paging.shadow.gtable_dirty_version) )
          return 1;
@@ -781,7 +781,7 @@
 static inline void safe_write_entry(void *dst, void *src) 
 /* Copy one PTE safely when processors might be running on the
  * destination pagetable.   This does *not* give safety against
- * concurrent writes (that's what the shadow lock is for), just 
+ * concurrent writes (that's what the paging lock is for), just 
  * stops the hardware picking up partially written entries. */
 {
     volatile unsigned long *d = dst;
@@ -3133,17 +3133,17 @@
      * do is let Xen's normal fault handlers try to fix it.  In any case, 
      * a diagnostic trace of the fault will be more useful than 
      * a BUG() when we try to take the lock again. */
-    if ( unlikely(shadow_locked_by_me(d)) )
+    if ( unlikely(paging_locked_by_me(d)) )
     {
         SHADOW_ERROR("Recursive shadow fault: lock was taken by %s\n",
-                     d->arch.paging.shadow.lock.locker_function);
+                     d->arch.paging.lock.locker_function);
         return 0;
     }
 
  rewalk:
 
     /* The walk is done in a lock-free style, with some sanity check
-     * postponed after grabbing shadow lock later. Those delayed checks
+     * postponed after grabbing paging lock later. Those delayed checks
      * will make sure no inconsistent mapping being translated into
      * shadow page table. */ 
     version = atomic_read(&d->arch.paging.shadow.gtable_dirty_version);
@@ -3201,7 +3201,7 @@
                 regs->error_code | PFEC_page_present);
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
-    shadow_lock(d);
+    paging_lock(d);
 
     TRACE_CLEAR_PATH_FLAGS;
 
@@ -3235,7 +3235,7 @@
     /* Second bit set: Resynced a page. Re-walk needed. */
     if ( rc & GW_RMWR_REWALK )
     {
-        shadow_unlock(d);
+        paging_unlock(d);
         goto rewalk;
     }
 #endif /* OOS */
@@ -3243,7 +3243,7 @@
     if ( !shadow_check_gwalk(v, va, &gw, version) )
     {
         perfc_incr(shadow_inconsistent_gwalk);
-        shadow_unlock(d);
+        paging_unlock(d);
         goto rewalk;
     }
 
@@ -3269,7 +3269,7 @@
 #else
         ASSERT(d->is_shutting_down);
 #endif
-        shadow_unlock(d);
+        paging_unlock(d);
         trace_shadow_gen(TRC_SHADOW_DOMF_DYING, va);
         return 0;
     }
@@ -3286,7 +3286,7 @@
          * sh_remove_shadows() in a previous sh_resync() call has
          * failed. We cannot safely continue since some page is still
          * OOS but not in the hash table anymore. */
-        shadow_unlock(d);
+        paging_unlock(d);
         return 0;
     }
 
@@ -3295,7 +3295,7 @@
     if ( shadow_check_gl1e(v, &gw)  )
     {
         perfc_incr(shadow_inconsistent_gwalk);
-        shadow_unlock(d);
+        paging_unlock(d);
         goto rewalk;
     }
 #endif /* OOS */
@@ -3388,7 +3388,7 @@
     sh_audit_gw(v, &gw);
     SHADOW_PRINTK("fixed\n");
     shadow_audit_tables(v);
-    shadow_unlock(d);
+    paging_unlock(d);
     return EXCRET_fault_fixed;
 
  emulate:
@@ -3456,7 +3456,7 @@
      */
     sh_audit_gw(v, &gw);
     shadow_audit_tables(v);
-    shadow_unlock(d);
+    paging_unlock(d);
 
     this_cpu(trace_emulate_write_val) = 0;
 
@@ -3594,7 +3594,7 @@
     SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
     shadow_audit_tables(v);
     reset_early_unshadow(v);
-    shadow_unlock(d);
+    paging_unlock(d);
     trace_shadow_gen(TRC_SHADOW_MMIO, va);
     return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
             ? EXCRET_fault_fixed : 0);
@@ -3604,7 +3604,7 @@
     SHADOW_PRINTK("not a shadow fault\n");
     shadow_audit_tables(v);
     reset_early_unshadow(v);
-    shadow_unlock(d);
+    paging_unlock(d);
 
 propagate:
     trace_not_shadow_fault(gw.l1e, va);
@@ -3644,7 +3644,7 @@
                & _PAGE_PRESENT) )
             return 0;
         /* This must still be a copy-from-user because we don't have the
-         * shadow lock, and the higher-level shadows might disappear
+         * paging lock, and the higher-level shadows might disappear
          * under our feet. */
         if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v) 
                                       + shadow_l3_linear_offset(va)),
@@ -3700,11 +3700,11 @@
              && page_is_out_of_sync(pg) )
         {
             /* The test above may give false positives, since we don't
-             * hold the shadow lock yet.  Check again with the lock held. */
-            shadow_lock(v->domain);
+             * hold the paging lock yet.  Check again with the lock held. */
+            paging_lock(v->domain);
 
             /* This must still be a copy-from-user because we didn't
-             * have the shadow lock last time we checked, and the
+             * have the paging lock last time we checked, and the
              * higher-level shadows might have disappeared under our
              * feet. */
             if ( __copy_from_user(&sl2e, 
@@ -3713,13 +3713,13 @@
                                   sizeof (sl2e)) != 0 )
             {
                 perfc_incr(shadow_invlpg_fault);
-                shadow_unlock(v->domain);
+                paging_unlock(v->domain);
                 return 0;
             }
 
             if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
             {
-                shadow_unlock(v->domain);
+                paging_unlock(v->domain);
                 return 0;
             }
 
@@ -3736,7 +3736,7 @@
                 (void) shadow_set_l1e(v, sl1, shadow_l1e_empty(),
                                       p2m_invalid, sl1mfn);
             }
-            shadow_unlock(v->domain);
+            paging_unlock(v->domain);
             /* Need the invlpg, to pick up the disappeareance of the sl1e */
             return 1;
         }
@@ -4153,7 +4153,7 @@
  * this function will call hvm_update_guest_cr(v, 3) to tell them where the 
  * shadow tables are.
  * If do_locking != 0, assume we are being called from outside the 
- * shadow code, and must take and release the shadow lock; otherwise 
+ * shadow code, and must take and release the paging lock; otherwise 
  * that is the caller's responsibility.
  */
 {
@@ -4172,7 +4172,7 @@
         return;
     }
 
-    if ( do_locking ) shadow_lock(v->domain);
+    if ( do_locking ) paging_lock(v->domain);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Need to resync all the shadow entries on a TLB flush.  Resync
@@ -4181,7 +4181,7 @@
     shadow_resync_current_vcpu(v);
 #endif
 
-    ASSERT(shadow_locked_by_me(v->domain));
+    ASSERT(paging_locked_by_me(v->domain));
     ASSERT(v->arch.paging.mode);
 
     ////
@@ -4415,7 +4415,7 @@
 #endif
 
     /* Release the lock, if we took it (otherwise it's the caller's problem) */
-    if ( do_locking ) shadow_unlock(v->domain);
+    if ( do_locking ) paging_unlock(v->domain);
 }
 
 
@@ -4695,7 +4695,7 @@
     guest_l3e_t *gl3e = NULL;
     paddr_t gl2a = 0;
 
-    shadow_lock(v->domain);
+    paging_lock(v->domain);
 
     gcr3 = (v->arch.hvm_vcpu.guest_cr[3]);
     /* fast path: the pagetable belongs to the current context */
@@ -4747,7 +4747,7 @@
 out:
     if ( !fast_path )
         unmap_domain_page(gl3pa);
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
 }
 #else
 static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
@@ -4755,7 +4755,7 @@
     mfn_t smfn, gmfn;
     p2m_type_t p2mt;
 
-    shadow_lock(v->domain);
+    paging_lock(v->domain);
 
     gmfn = gfn_to_mfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
 #if GUEST_PAGING_LEVELS == 2
@@ -4778,7 +4778,7 @@
 
     v->arch.paging.shadow.pagetable_dying = 1;
 
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
 }
 #endif
 
@@ -4810,8 +4810,8 @@
     }
 
     /* Translate the GFN to an MFN */
-    /* PoD: query only if shadow lock is held (to avoid deadlock) */
-    if ( shadow_locked_by_me(v->domain) )
+    /* PoD: query only if paging lock is held (to avoid deadlock) */
+    if ( paging_locked_by_me(v->domain) )
         mfn = gfn_to_mfn_query(v->domain, _gfn(gfn), &p2mt);
     else
         mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
@@ -5000,7 +5000,7 @@
     if ( emulate_map_dest_failed(addr) )
         return (long)addr;
 
-    shadow_lock(v->domain);
+    paging_lock(v->domain);
     memcpy(addr, src, bytes);
 
     if ( tb_init_done )
@@ -5021,7 +5021,7 @@
 
     emulate_unmap_dest(v, addr, bytes, sh_ctxt);
     shadow_audit_tables(v);
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
     return X86EMUL_OKAY;
 }
 
@@ -5042,7 +5042,7 @@
     if ( emulate_map_dest_failed(addr) )
         return (long)addr;
 
-    shadow_lock(v->domain);
+    paging_lock(v->domain);
     switch ( bytes )
     {
     case 1: prev = cmpxchg(((u8 *)addr), old, new);  break;
@@ -5063,7 +5063,7 @@
 
     emulate_unmap_dest(v, addr, bytes, sh_ctxt);
     shadow_audit_tables(v);
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
     return rv;
 }
 
@@ -5089,7 +5089,7 @@
     old = (((u64) old_hi) << 32) | (u64) old_lo;
     new = (((u64) new_hi) << 32) | (u64) new_lo;
 
-    shadow_lock(v->domain);
+    paging_lock(v->domain);
     prev = cmpxchg(((u64 *)addr), old, new);
 
     if ( prev != old )
@@ -5097,7 +5097,7 @@
 
     emulate_unmap_dest(v, addr, 8, sh_ctxt);
     shadow_audit_tables(v);
-    shadow_unlock(v->domain);
+    paging_unlock(v->domain);
     return rv;
 }
 #endif
diff -r 64398d14dcd6 -r 2bbed46eb10c xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/private.h  Thu Jun 02 13:16:52 2011 +0100
@@ -533,7 +533,7 @@
         return 0;
     }
     
-    /* Guarded by the shadow lock, so no need for atomic update */
+    /* Guarded by the paging lock, so no need for atomic update */
     sp->u.sh.count = nx;
 
     /* We remember the first shadow entry that points to each shadow. */
@@ -573,7 +573,7 @@
         BUG();
     }
 
-    /* Guarded by the shadow lock, so no need for atomic update */
+    /* Guarded by the paging lock, so no need for atomic update */
     sp->u.sh.count = nx;
 
     if ( unlikely(nx == 0) ) 
diff -r 64398d14dcd6 -r 2bbed46eb10c xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/types.h    Thu Jun 02 13:16:52 2011 +0100
@@ -292,7 +292,7 @@
  * MMIO emulation, and faults where the guest PTE is not present.  We
  * record these as shadow l1 entries that have reserved bits set in
  * them, so we can spot them immediately in the fault handler and handle
- * them without needing to hold the shadow lock or walk the guest
+ * them without needing to hold the paging lock or walk the guest
  * pagetables.
  *
  * This is only feasible for PAE and 64bit Xen: 32-bit non-PAE PTEs don't
diff -r 64398d14dcd6 -r 2bbed46eb10c xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/domain.h      Thu Jun 02 13:16:52 2011 +0100
@@ -91,8 +91,6 @@
 /*          shadow paging extension             */
 /************************************************/
 struct shadow_domain {
-    mm_lock_t         lock;  /* shadow domain lock */
-
     unsigned int      opt_flags;    /* runtime tunable optimizations on/off */
     struct page_list_head pinned_shadows;
 
@@ -158,8 +156,6 @@
 /*            hardware assisted paging          */
 /************************************************/
 struct hap_domain {
-    mm_lock_t         lock;
-
     struct page_list_head freelist;
     unsigned int      total_pages;  /* number of pages allocated */
     unsigned int      free_pages;   /* number of pages on freelists */
@@ -170,9 +166,6 @@
 /*       common paging data structure           */
 /************************************************/
 struct log_dirty_domain {
-    /* log-dirty lock */
-    mm_lock_t     lock;
-
     /* log-dirty radix tree to record dirty pages */
     mfn_t          top;
     unsigned int   allocs;
@@ -189,6 +182,9 @@
 };
 
 struct paging_domain {
+    /* paging lock */
+    mm_lock_t lock;
+
     /* flags to control paging operation */
     u32                     mode;
     /* extension for shadow paging support */
diff -r 64398d14dcd6 -r 2bbed46eb10c xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
@@ -337,8 +337,8 @@
 
 /* Syntactic sugar: most callers will use one of these. 
  * N.B. gfn_to_mfn_query() is the _only_ one guaranteed not to take the
- * p2m lock; none of the others can be called with the p2m, hap or
- * shadow lock held. */
+ * p2m lock; none of the others can be called with the p2m or paging
+ * lock held. */
 #define gfn_to_mfn(d, g, t)         gfn_to_mfn_type((d), (g), (t), p2m_alloc)
 #define gfn_to_mfn_query(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_query)
 #define gfn_to_mfn_guest(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_guest)
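
One more toy illustration, again not Xen code, this time of the
invariant that shadow_write_guest_entry()/sh_validate_guest_entry()
maintain under the lock in the hunks above: a guest pagetable entry and
its shadow are updated as a single step, so anyone else holding the
paging lock never sees them disagree.  The two one-entry "tables", the
"| 1" shadowing rule and the thread layout are all illustrative.

#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t paging_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t guest_l1e;   /* stands in for a guest PTE */
static uint64_t shadow_l1e;  /* stands in for the shadow of that PTE */

/* Rebuild the shadow entry from the guest entry (a stand-in for
 * sh_validate_guest_entry()). */
static void validate_guest_entry(void)
{
    shadow_l1e = guest_l1e | 1;   /* toy rule: copy plus present bit */
}

/* Stand-in for shadow_write_guest_entry(): write the guest entry and
 * bring its shadow back in sync, all under the one paging lock. */
static void write_guest_entry(uint64_t new)
{
    pthread_mutex_lock(&paging_lock);
    guest_l1e = new;
    validate_guest_entry();
    pthread_mutex_unlock(&paging_lock);
}

static void *writer(void *arg)
{
    for ( uint64_t i = 0; i < 100000; i++ )
        write_guest_entry(i << 12);
    return arg;
}

int main(void)
{
    pthread_t t;

    write_guest_entry(0);   /* establish the invariant before racing */
    pthread_create(&t, NULL, writer, NULL);

    /* Reader: under the same lock, guest and shadow never disagree. */
    for ( int i = 0; i < 100000; i++ )
    {
        pthread_mutex_lock(&paging_lock);
        assert(shadow_l1e == (guest_l1e | 1));
        pthread_mutex_unlock(&paging_lock);
    }

    pthread_join(t, NULL);
    puts("guest and shadow entries stayed consistent");
    return 0;
}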
