
[Xen-changelog] [xen-unstable] x86/mm: clean use of p2m unlocked queries



# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1327584087 0
# Node ID 496a9874d08ec9234693452e527ad1454d113676
# Parent  7a28b8b2d3ea01bb7c1a9e50b0af3c1709e36f2d
x86/mm: clean use of p2m unlocked queries

Limit such queries to p2m_query types only. This is more consistent with
the name and the intended semantics: perform only a lookup, and do so
explicitly without taking the lock.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
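
To summarize the rule this change enforces: the get_gfn_*_unlocked()
variants are for pure lookups where a stale snapshot is acceptable, while
anything that acts on the result must take a real reference with get_gfn()
and drop it with put_gfn() on every exit path. A minimal sketch of both
patterns, using only accessors that appear in the hunks below (the helpers
themselves, their names and their error handling are illustrative, not
code from the tree):

    /* Lookup-only: an unlocked query snapshot of the p2m entry.  Fine
     * for logging and diagnostics, where a stale answer is harmless. */
    static void log_gfn_type(struct domain *d, unsigned long gfn)
    {
        p2m_type_t t;
        mfn_t mfn = get_gfn_query_unlocked(d, gfn, &t);

        gdprintk(XENLOG_DEBUG, "gfn %#lx -> mfn %#lx, type %d\n",
                 gfn, mfn_x(mfn), t);
    }

    /* Acting on the result: pair get_gfn() with put_gfn() on every
     * return path, as the emulate.c and stdvga.c hunks below do. */
    static int use_gfn(struct domain *d, unsigned long gfn)
    {
        p2m_type_t t;
        int rc = 0;

        (void)get_gfn(d, gfn, &t);
        if ( !p2m_is_ram(t) )
            rc = -EINVAL;   /* fall through so put_gfn() still runs */
        else
        {
            /* ... operate on the page while the entry is held ... */
        }
        put_gfn(d, gfn);
        return rc;
    }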


diff -r 7a28b8b2d3ea -r 496a9874d08e xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Thu Jan 26 13:21:27 2012 +0000
+++ b/xen/arch/x86/hvm/emulate.c        Thu Jan 26 13:21:27 2012 +0000
@@ -660,7 +660,7 @@
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    unsigned long saddr, daddr, bytes;
+    unsigned long saddr, daddr, bytes, sgfn, dgfn;
     paddr_t sgpa, dgpa;
     uint32_t pfec = PFEC_page_present;
     p2m_type_t p2mt;
@@ -693,17 +693,28 @@
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    /* Unlocked works here because we get_gfn for real in whatever
-     * we call later. */
-    (void)get_gfn_unlocked(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
+    /* XXX In a fine-grained p2m locking scenario, we need to sort these
+     * get_gfn calls, or else we might deadlock */
+    sgfn = sgpa >> PAGE_SHIFT;
+    (void)get_gfn(current->domain, sgfn, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
-        return hvmemul_do_mmio(
+    {
+        rc = hvmemul_do_mmio(
             sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
+        put_gfn(current->domain, sgfn);
+        return rc;
+    }
 
-    (void)get_gfn_unlocked(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
+    dgfn = dgpa >> PAGE_SHIFT;
+    (void)get_gfn(current->domain, dgfn, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
-        return hvmemul_do_mmio(
+    {
+        rc = hvmemul_do_mmio(
             dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
+        put_gfn(current->domain, sgfn);
+        put_gfn(current->domain, dgfn);
+        return rc;
+    }
 
     /* RAM-to-RAM copy: emulate as equivalent of memmove(dgpa, sgpa, bytes). */
     bytes = *reps * bytes_per_rep;
@@ -718,7 +729,11 @@
      * can be emulated by a source-to-buffer-to-destination block copy.
      */
     if ( ((dgpa + bytes_per_rep) > sgpa) && (dgpa < (sgpa + bytes)) )
+    {
+        put_gfn(current->domain, sgfn);
+        put_gfn(current->domain, dgfn);
         return X86EMUL_UNHANDLEABLE;
+    }
 
     /* Adjust destination address for reverse copy. */
     if ( df )
@@ -727,7 +742,11 @@
     /* Allocate temporary buffer. Fall back to slow emulation if this fails. */
     buf = xmalloc_bytes(bytes);
     if ( buf == NULL )
+    {
+        put_gfn(current->domain, sgfn);
+        put_gfn(current->domain, dgfn);
         return X86EMUL_UNHANDLEABLE;
+    }
 
     /*
      * We do a modicum of checking here, just for paranoia's sake and to
@@ -738,6 +757,8 @@
         rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
 
     xfree(buf);
+    put_gfn(current->domain, sgfn);
+    put_gfn(current->domain, dgfn);
 
     if ( rc == HVMCOPY_gfn_paged_out )
         return X86EMUL_RETRY;
diff -r 7a28b8b2d3ea -r 496a9874d08e xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Jan 26 13:21:27 2012 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jan 26 13:21:27 2012 +0000
@@ -3975,7 +3975,10 @@
         rc = -EINVAL;
         if ( is_hvm_domain(d) )
         {
-            get_gfn_unshare_unlocked(d, a.pfn, &t);
+            /* Use a get_gfn query as we are only interested in the
+             * current type, not in allocating or unsharing. That will
+             * happen on access. */
+            get_gfn_query_unlocked(d, a.pfn, &t);
             if ( p2m_is_mmio(t) )
                 a.mem_type =  HVMMEM_mmio_dm;
             else if ( p2m_is_readonly(t) )
diff -r 7a28b8b2d3ea -r 496a9874d08e xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Thu Jan 26 13:21:27 2012 +0000
+++ b/xen/arch/x86/hvm/stdvga.c Thu Jan 26 13:21:27 2012 +0000
@@ -482,15 +482,19 @@
                 if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
+                    (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
                     /*
                      * The only case we handle is vga_mem <-> vga_mem.
                      * Anything else disables caching and leaves it to qemu-dm.
                      */
                     if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
                          ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+                    {
+                        put_gfn(d, data >> PAGE_SHIFT);
                         return 0;
+                    }
                     stdvga_mem_write(data, tmp, p->size);
+                    put_gfn(d, data >> PAGE_SHIFT);
                 }
                 data += sign * p->size;
                 addr += sign * p->size;
@@ -504,11 +508,15 @@
                 if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
+                    (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
                     if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
                          ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+                    {
+                        put_gfn(d, data >> PAGE_SHIFT);
                         return 0;
+                    }
                     tmp = stdvga_mem_read(data, p->size);
+                    put_gfn(d, data >> PAGE_SHIFT);
                 }
                 stdvga_mem_write(addr, tmp, p->size);
                 data += sign * p->size;
diff -r 7a28b8b2d3ea -r 496a9874d08e xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jan 26 13:21:27 2012 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jan 26 13:21:27 2012 +0000
@@ -2112,7 +2112,7 @@
         return;
 
     /* Everything else is an error. */
-    mfn = get_gfn_guest_unlocked(d, gfn, &p2mt);
+    mfn = get_gfn_query_unlocked(d, gfn, &p2mt);
     gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
              "gpa %#"PRIpaddr", mfn %#lx, type %i.\n", 
              qualification, 
diff -r 7a28b8b2d3ea -r 496a9874d08e xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Jan 26 13:21:27 2012 +0000
+++ b/xen/arch/x86/mm/mem_sharing.c     Thu Jan 26 13:21:27 2012 +0000
@@ -421,7 +421,7 @@
     p2m_type_t p2mt;
     mfn_t mfn;
 
-    mfn = get_gfn_unlocked(d, gfn, &p2mt);
+    mfn = get_gfn_query_unlocked(d, gfn, &p2mt);
 
     gdprintk(XENLOG_DEBUG, "Debug for domain=%d, gfn=%lx, ", 
                d->domain_id, 
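
The XXX comment added to emulate.c alludes to lock ordering: once the p2m
moves to fine-grained locking, the source and destination lookups in the
rep-MOVS emulation will have to be acquired in a consistent order. One
possible shape, sketched purely as an illustration (sp2mt/dp2mt are
hypothetical, the hunk above reuses a single p2mt, and the sgfn == dgfn
case is ignored for brevity):

    /* Hypothetical ordering discipline: always take the lower-numbered
     * gfn first, so that two vcpus copying in opposite directions
     * between the same pair of pages cannot deadlock each other. */
    if ( sgfn <= dgfn )
    {
        (void)get_gfn(current->domain, sgfn, &sp2mt);
        (void)get_gfn(current->domain, dgfn, &dp2mt);
    }
    else
    {
        (void)get_gfn(current->domain, dgfn, &dp2mt);
        (void)get_gfn(current->domain, sgfn, &sp2mt);
    }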
