
[Xen-devel] [PATCH 12 of 18] libxc: make xc_memory_op library private



# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1286892402 -3600
# Node ID 400adff91720efab6413ad73bba8329c715f58ba
# Parent  ec389a7aa0d6a4215d95fe3ed167ed1049bb0dc9
libxc: make xc_memory_op library private

Now that all XENMEM_* callers go via an op-specific function, make
xc_memory_op private to libxc (and rename it to do_memory_op for
consistency with other private functions).

Also change the interface to take a size parameter so that
do_memory_op knows how much memory to lock for the top-level argument,
removing some of the command-specific introspection.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
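
(Not part of the patch, just an illustrative recap of the call pattern for
reviewers: the do_memory_op() signature and the XENMEM_maximum_gpfn call are
taken from the hunks below; xc_fictional_op() is a made-up wrapper name used
purely for illustration, and the snippet assumes libxc's internal headers.)

    /* New library-private helper, declared in tools/libxc/xc_private.h:
     *   int do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len);
     */
    static int xc_fictional_op(xc_interface *xch, domid_t domid)
    {
        /* Before: xc_memory_op(xch, XENMEM_maximum_gpfn, &domid);
         * the old code switched on cmd to work out how much of arg to
         * lock around the hypercall. */

        /* After: the caller states the size of the top-level argument and
         * do_memory_op locks/unlocks exactly that range (len == 0 means
         * nothing to lock, as for XENMEM_maximum_ram_page). */
        return do_memory_op(xch, XENMEM_maximum_gpfn, &domid, sizeof(domid));
    }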

diff -r ec389a7aa0d6 -r 400adff91720 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xc_domain.c   Tue Oct 12 15:06:42 2010 +0100
@@ -488,17 +488,16 @@ int xc_domain_set_memmap_limit(xc_interf
 
     set_xen_guest_handle(fmap.map.buffer, &e820);
 
-    if ( lock_pages(xch, &fmap, sizeof(fmap)) || lock_pages(xch, &e820, sizeof(e820)) )
+    if ( lock_pages(xch, &e820, sizeof(e820)) )
     {
         PERROR("Could not lock memory for Xen hypercall");
         rc = -1;
         goto out;
     }
 
-    rc = xc_memory_op(xch, XENMEM_set_memory_map, &fmap);
+    rc = do_memory_op(xch, XENMEM_set_memory_map, &fmap, sizeof(fmap));
 
  out:
-    unlock_pages(xch, &fmap, sizeof(fmap));
     unlock_pages(xch, &e820, sizeof(e820));
     return rc;
 }
@@ -581,7 +580,7 @@ int xc_domain_get_tsc_info(xc_interface 
 
 int xc_domain_maximum_gpfn(xc_interface *xch, domid_t domid)
 {
-    return xc_memory_op(xch, XENMEM_maximum_gpfn, &domid);
+    return do_memory_op(xch, XENMEM_maximum_gpfn, &domid, sizeof(domid));
 }
 
 int xc_domain_increase_reservation(xc_interface *xch,
@@ -602,7 +601,7 @@ int xc_domain_increase_reservation(xc_in
     /* may be NULL */
     set_xen_guest_handle(reservation.extent_start, extent_start);
 
-    err = xc_memory_op(xch, XENMEM_increase_reservation, &reservation);
+    err = do_memory_op(xch, XENMEM_increase_reservation, &reservation, sizeof(reservation));
 
     return err;
 }
@@ -657,7 +656,7 @@ int xc_domain_decrease_reservation(xc_in
         return -1;
     }
 
-    err = xc_memory_op(xch, XENMEM_decrease_reservation, &reservation);
+    err = do_memory_op(xch, XENMEM_decrease_reservation, &reservation, sizeof(reservation));
 
     return err;
 }
@@ -699,7 +698,7 @@ int xc_domain_add_to_physmap(xc_interfac
         .idx = idx,
         .gpfn = gpfn,
     };
-    return xc_memory_op(xch, XENMEM_add_to_physmap, &xatp);
+    return do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
 }
 
 int xc_domain_populate_physmap(xc_interface *xch,
@@ -718,7 +717,7 @@ int xc_domain_populate_physmap(xc_interf
     };
     set_xen_guest_handle(reservation.extent_start, extent_start);
 
-    err = xc_memory_op(xch, XENMEM_populate_physmap, &reservation);
+    err = do_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));
 
     return err;
 }
@@ -774,7 +773,7 @@ int xc_domain_memory_exchange_pages(xc_i
     set_xen_guest_handle(exchange.in.extent_start, in_extents);
     set_xen_guest_handle(exchange.out.extent_start, out_extents);
 
-    rc = xc_memory_op(xch, XENMEM_exchange, &exchange);
+    rc = do_memory_op(xch, XENMEM_exchange, &exchange, sizeof(exchange));
 
     return rc;
 }
@@ -794,7 +793,7 @@ static int xc_domain_pod_target(xc_inter
         .target_pages = target_pages
     };
 
-    err = xc_memory_op(xch, op, &pod_target);
+    err = do_memory_op(xch, op, &pod_target, sizeof(pod_target));
 
     if ( err < 0 )
     {
diff -r ec389a7aa0d6 -r 400adff91720 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xc_private.c  Tue Oct 12 15:06:42 2010 +0100
@@ -421,9 +421,7 @@ int xc_flush_mmu_updates(xc_interface *x
     return flush_mmu_updates(xch, mmu);
 }
 
-int xc_memory_op(xc_interface *xch,
-                 int cmd,
-                 void *arg)
+int do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
 {
     DECLARE_HYPERCALL;
     struct xen_memory_reservation *reservation = arg;
@@ -435,16 +433,17 @@ int xc_memory_op(xc_interface *xch,
     hypercall.arg[0] = (unsigned long)cmd;
     hypercall.arg[1] = (unsigned long)arg;
 
+    if ( len && lock_pages(xch, arg, len) != 0 )
+    {
+        PERROR("Could not lock memory for XENMEM hypercall");
+        goto out1;
+    }
+
     switch ( cmd )
     {
     case XENMEM_increase_reservation:
     case XENMEM_decrease_reservation:
     case XENMEM_populate_physmap:
-        if ( lock_pages(xch, reservation, sizeof(*reservation)) != 0 )
-        {
-            PERROR("Could not lock");
-            goto out1;
-        }
         get_xen_guest_handle(extent_start, reservation->extent_start);
         if ( (extent_start != NULL) &&
              (lock_pages(xch, extent_start,
@@ -456,11 +455,6 @@ int xc_memory_op(xc_interface *xch,
         }
         break;
     case XENMEM_machphys_mfn_list:
-        if ( lock_pages(xch, xmml, sizeof(*xmml)) != 0 )
-        {
-            PERROR("Could not lock");
-            goto out1;
-        }
         get_xen_guest_handle(extent_start, xmml->extent_start);
         if ( lock_pages(xch, extent_start,
                    xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
@@ -471,61 +465,40 @@ int xc_memory_op(xc_interface *xch,
         }
         break;
     case XENMEM_add_to_physmap:
-        if ( lock_pages(xch, arg, sizeof(struct xen_add_to_physmap)) )
-        {
-            PERROR("Could not lock");
-            goto out1;
-        }
-        break;
     case XENMEM_current_reservation:
     case XENMEM_maximum_reservation:
     case XENMEM_maximum_gpfn:
-        if ( lock_pages(xch, arg, sizeof(domid_t)) )
-        {
-            PERROR("Could not lock");
-            goto out1;
-        }
-        break;
     case XENMEM_set_pod_target:
     case XENMEM_get_pod_target:
-        if ( lock_pages(xch, arg, sizeof(struct xen_pod_target)) )
-        {
-            PERROR("Could not lock");
-            goto out1;
-        }
         break;
     }
 
     ret = do_xen_hypercall(xch, &hypercall);
+
+    if ( len )
+        unlock_pages(xch, arg, len);
 
     switch ( cmd )
     {
     case XENMEM_increase_reservation:
     case XENMEM_decrease_reservation:
     case XENMEM_populate_physmap:
-        unlock_pages(xch, reservation, sizeof(*reservation));
         get_xen_guest_handle(extent_start, reservation->extent_start);
         if ( extent_start != NULL )
             unlock_pages(xch, extent_start,
                          reservation->nr_extents * sizeof(xen_pfn_t));
         break;
     case XENMEM_machphys_mfn_list:
-        unlock_pages(xch, xmml, sizeof(*xmml));
         get_xen_guest_handle(extent_start, xmml->extent_start);
         unlock_pages(xch, extent_start,
                      xmml->max_extents * sizeof(xen_pfn_t));
         break;
     case XENMEM_add_to_physmap:
-        unlock_pages(xch, arg, sizeof(struct xen_add_to_physmap));
-        break;
     case XENMEM_current_reservation:
     case XENMEM_maximum_reservation:
     case XENMEM_maximum_gpfn:
-        unlock_pages(xch, arg, sizeof(domid_t));
-        break;
     case XENMEM_set_pod_target:
     case XENMEM_get_pod_target:
-        unlock_pages(xch, arg, sizeof(struct xen_pod_target));
         break;
     }
 
@@ -535,7 +508,7 @@ int xc_memory_op(xc_interface *xch,
 
 long xc_maximum_ram_page(xc_interface *xch)
 {
-    return xc_memory_op(xch, XENMEM_maximum_ram_page, NULL);
+    return do_memory_op(xch, XENMEM_maximum_ram_page, NULL, 0);
 }
 
 long long xc_domain_get_cpu_usage( xc_interface *xch, domid_t domid, int vcpu )
@@ -562,7 +535,7 @@ int xc_machphys_mfn_list(xc_interface *x
         .max_extents = max_extents,
     };
     set_xen_guest_handle(xmml.extent_start, extent_start);
-    rc = xc_memory_op(xch, XENMEM_machphys_mfn_list, &xmml);
+    rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
     if (rc || xmml.nr_extents != max_extents)
         return -1;
     return 0;
diff -r ec389a7aa0d6 -r 400adff91720 tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xc_private.h  Tue Oct 12 15:06:42 2010 +0100
@@ -206,6 +206,8 @@ static inline int do_sysctl(xc_interface
     return ret;
 }
 
+int do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len);
+
 int xc_interface_open_core(xc_interface *xch); /* returns fd, logs errors */
 int xc_interface_close_core(xc_interface *xch, int fd); /* no logging */
 
diff -r ec389a7aa0d6 -r 400adff91720 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xenctrl.h     Tue Oct 12 15:06:42 2010 +0100
@@ -981,9 +981,6 @@ int xc_mmuext_op(xc_interface *xch, stru
 /* System wide memory properties */
 long xc_maximum_ram_page(xc_interface *xch);
 
-int xc_memory_op(xc_interface *xch, int cmd, void *arg);
-
-
 /* Get current total pages allocated to a domain. */
 long xc_get_tot_pages(xc_interface *xch, uint32_t domid);
 



 

