[Xen-devel] [PATCH V10 11/15] Introduce qemu_put_ram_ptr



From: Anthony PERARD <anthony.perard@xxxxxxxxxx>

This function allows the caller to unlock a ram_ptr given by
qemu_get_ram_ptr. After a call to qemu_put_ram_ptr, the pointer may be
unmapped from QEMU when used with Xen.
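
For illustration, the intended pairing looks roughly like this (a sketch
of the pattern used by cpu_physical_memory_rw() below; the local
variable names are only illustrative):

    void *ptr;

    ptr = qemu_get_ram_ptr(addr1);   /* map (or look up) the guest page */
    memcpy(buf, ptr, l);             /* use the host mapping */
    qemu_put_ram_ptr(ptr);           /* drop the lock; with Xen enabled the
                                      * mapcache may now unmap this bucket */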

Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
Acked-by: Alexander Graf <agraf@xxxxxxx>
---
 cpu-common.h   |    1 +
 exec.c         |   10 ++++++++++
 xen-mapcache.c |   33 +++++++++++++++++++++++++++++++++
 3 files changed, 44 insertions(+), 0 deletions(-)

diff --git a/cpu-common.h b/cpu-common.h
index 6d4a898..6f935cc 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -55,6 +55,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr);
 /* Same but slower, to use for migration, where the order of
  * RAMBlocks must not change. */
 void *qemu_safe_ram_ptr(ram_addr_t addr);
+void qemu_put_ram_ptr(void *addr);
 /* This should not be used by devices.  */
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
diff --git a/exec.c b/exec.c
index 3b137dc..2f8773f 100644
--- a/exec.c
+++ b/exec.c
@@ -2977,6 +2977,13 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
     return NULL;
 }
 
+void qemu_put_ram_ptr(void *addr)
+{
+    if (xen_mapcache_enabled()) {
+        qemu_map_cache_unlock(addr);
+    }
+}
+
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
 {
     RAMBlock *block;
@@ -3692,6 +3699,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                     cpu_physical_memory_set_dirty_flags(
                         addr1, (0xff & ~CODE_DIRTY_FLAG));
                 }
+                qemu_put_ram_ptr(ptr);
             }
         } else {
             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
@@ -3722,6 +3730,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                     (addr & ~TARGET_PAGE_MASK);
                 memcpy(buf, ptr, l);
+                qemu_put_ram_ptr(ptr);
             }
         }
         len -= l;
@@ -3762,6 +3771,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
             /* ROM/RAM case */
             ptr = qemu_get_ram_ptr(addr1);
             memcpy(ptr, buf, l);
+            qemu_put_ram_ptr(ptr);
         }
         len -= l;
         buf += l;
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 43a5ed9..abf9bb2 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -195,6 +195,39 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
     return mapcache->last_address_vaddr + address_offset;
 }
 
+void qemu_map_cache_unlock(void *buffer)
+{
+    MapCacheEntry *entry = NULL, *pentry = NULL;
+    MapCacheRev *reventry;
+    target_phys_addr_t paddr_index;
+    int found = 0;
+
+    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        if (reventry->vaddr_req == buffer) {
+            paddr_index = reventry->paddr_index;
+            found = 1;
+            break;
+        }
+    }
+    if (!found) {
+        return;
+    }
+    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
+    qemu_free(reventry);
+
+    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    while (entry && entry->paddr_index != paddr_index) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        return;
+    }
+    if (entry->lock > 0) {
+        entry->lock--;
+    }
+}
+
 ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
 {
     MapCacheRev *reventry;
-- 
1.7.1
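
For readers unfamiliar with the Xen mapcache, below is a minimal,
self-contained model of what qemu_map_cache_unlock() does: look the host
pointer up in a reverse-mapping list to recover the guest frame index,
unlink that reverse entry, then drop one reference on the matching cache
entry so its bucket becomes eligible for unmapping.  All struct and
function names here are illustrative stand-ins, not QEMU's own; the
sketch only models the reference counting, not the hashing or the actual
(un)mapping.

    #include <stdio.h>
    #include <stdlib.h>

    struct rev_entry {                 /* host pointer -> guest index */
        void *vaddr_req;
        unsigned long paddr_index;
        struct rev_entry *next;
    };

    struct cache_entry {               /* one mapped bucket of guest RAM */
        unsigned long paddr_index;
        int lock;                      /* references held by callers */
    };

    static struct rev_entry *locked_entries;
    static struct cache_entry bucket = { 42, 1 };

    static void model_unlock(void *buffer)
    {
        struct rev_entry **pp, *found = NULL;

        /* 1. find and unlink the reverse entry recorded when the
         *    pointer was handed out locked */
        for (pp = &locked_entries; *pp != NULL; pp = &(*pp)->next) {
            if ((*pp)->vaddr_req == buffer) {
                found = *pp;
                *pp = found->next;
                break;
            }
        }
        if (found == NULL) {
            return;                    /* not a locked mapping */
        }

        /* 2. drop one reference on the cache entry it pointed into;
         *    once it reaches zero the bucket may be unmapped later */
        if (bucket.paddr_index == found->paddr_index && bucket.lock > 0) {
            bucket.lock--;
        }
        free(found);
    }

    int main(void)
    {
        static char fake_mapping[64];  /* stands in for a mapped bucket */
        struct rev_entry *rev = malloc(sizeof(*rev));

        rev->vaddr_req = fake_mapping; /* recorded at map-and-lock time */
        rev->paddr_index = 42;
        rev->next = locked_entries;
        locked_entries = rev;

        model_unlock(fake_mapping);
        printf("lock count after unlock: %d\n", bucket.lock);   /* 0 */
        return 0;
    }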

