
[Xen-devel] [PATCH v6 2/6] libxl: make libxl__need_xenpv_qemu() operate on domain config



libxl__need_xenpv_qemu() is today called with configuration data for
the console, vfbs, disks and channels in order to evaluate whether a
device model needs to be started for a pv domain.

The console data is local to the caller and set up in a way that never
requires a device model. All other data is taken from the domain config
structure.

In order to support other device backends via qemu, change the
interface of libxl__need_xenpv_qemu() to take the domain config
structure as input instead of the individual device arrays.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V6: Split patch into 2 as requested by Wei Liu

V4: Return a (negative) error value in case of failure, 0 or 1 otherwise

V2: Return false if libxl__get_domid() fails, as requested by George Dunlap
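
For reviewers, a minimal sketch of the resulting call-site change in
domcreate_launch_dm(), condensed from the hunks below (error paths
abbreviated):

    /* Before: each device array was passed individually, including the
     * caller's local console data. */
    ret = libxl__need_xenpv_qemu(gc, 1, &console,
            d_config->num_vfbs, d_config->vfbs,
            d_config->num_disks, &d_config->disks[0],
            d_config->num_channels, &d_config->channels[0]);

    /* After: the whole domain config is handed over, so adding another
     * qemu-backed device type no longer changes this interface. */
    ret = libxl__need_xenpv_qemu(gc, d_config);
    if (ret < 0)
        goto error_out;          /* ret < 0: error */
    if (ret)                     /* ret == 1: device model needed */
        libxl__spawn_local_dm(egc, &dcs->dmss.dm);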
---
 tools/libxl/libxl_create.c   | 13 +++-------
 tools/libxl/libxl_dm.c       | 60 +++++++++++++-------------------------------
 tools/libxl/libxl_internal.h |  5 +---
 3 files changed, 23 insertions(+), 55 deletions(-)

diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index 763996c..0681103 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -1313,18 +1313,13 @@ static void domcreate_launch_dm(libxl__egc *egc, libxl__multidev *multidev,
         }
 
         init_console_info(gc, &console, 0);
+        console.backend_domid = state->console_domid;
+        libxl__device_console_add(gc, domid, &console, state, &device);
+        libxl__device_console_dispose(&console);
 
-        ret = libxl__need_xenpv_qemu(gc, 1, &console,
-                d_config->num_vfbs, d_config->vfbs,
-                d_config->num_disks, &d_config->disks[0],
-                d_config->num_channels, &d_config->channels[0]);
+        ret = libxl__need_xenpv_qemu(gc, d_config);
         if (ret < 0)
             goto error_out;
-
-        console.backend_domid = state->console_domid;
-        libxl__device_console_add(gc, domid, &console, state, &device);
-        libxl__device_console_dispose(&console);
-
         if (ret) {
             dcs->dmss.dm.guest_domid = domid;
             libxl__spawn_local_dm(egc, &dcs->dmss.dm);
diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
index eb8961d..027d32b 100644
--- a/tools/libxl/libxl_dm.c
+++ b/tools/libxl/libxl_dm.c
@@ -2114,61 +2114,37 @@ int libxl__destroy_device_model(libxl__gc *gc, uint32_t domid)
 }
 
 /* Return 0 if no dm needed, 1 if needed and <0 if error. */
-int libxl__need_xenpv_qemu(libxl__gc *gc,
-        int nr_consoles, libxl__device_console *consoles,
-        int nr_vfbs, libxl_device_vfb *vfbs,
-        int nr_disks, libxl_device_disk *disks,
-        int nr_channels, libxl_device_channel *channels)
+int libxl__need_xenpv_qemu(libxl__gc *gc, libxl_domain_config *d_config)
 {
-    int i, ret = 0;
+    int i, ret;
     uint32_t domid;
 
-    /*
-     * qemu is required in order to support 2 or more consoles. So switch all
-     * backends to qemu if this is the case
-     */
-    if (nr_consoles > 1) {
-        for (i = 0; i < nr_consoles; i++)
-            consoles[i].consback = LIBXL__CONSOLE_BACKEND_IOEMU;
-        ret = 1;
+    ret = libxl__get_domid(gc, &domid);
+    if (ret) {
+        LOG(ERROR, "unable to get domain id");
         goto out;
     }
 
-    for (i = 0; i < nr_consoles; i++) {
-        if (consoles[i].consback == LIBXL__CONSOLE_BACKEND_IOEMU) {
-            ret = 1;
-            goto out;
-        }
-    }
-
-    if (nr_vfbs > 0) {
+    if (d_config->num_vfbs > 0) {
         ret = 1;
         goto out;
     }
 
-    if (nr_disks > 0) {
-        ret = libxl__get_domid(gc, &domid);
-        if (ret) goto out;
-        for (i = 0; i < nr_disks; i++) {
-            if (disks[i].backend == LIBXL_DISK_BACKEND_QDISK &&
-                disks[i].backend_domid == domid) {
-                ret = 1;
-                goto out;
-            }
+    for (i = 0; i < d_config->num_disks; i++) {
+        if (d_config->disks[i].backend == LIBXL_DISK_BACKEND_QDISK &&
+            d_config->disks[i].backend_domid == domid) {
+            ret = 1;
+            goto out;
         }
     }
 
-    if (nr_channels > 0) {
-        ret = libxl__get_domid(gc, &domid);
-        if (ret) goto out;
-        for (i = 0; i < nr_channels; i++) {
-            if (channels[i].backend_domid == domid) {
-                /* xenconsoled is limited to the first console only.
-                   Until this restriction is removed we must use qemu for
-                   secondary consoles which includes all channels. */
-                ret = 1;
-                goto out;
-            }
+    for (i = 0; i < d_config->num_channels; i++) {
+        if (d_config->channels[i].backend_domid == domid) {
+            /* xenconsoled is limited to the first console only.
+               Until this restriction is removed we must use qemu for
+               secondary consoles which includes all channels. */
+            ret = 1;
+            goto out;
         }
     }
 
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 345a764..fc7bdab 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -1616,10 +1616,7 @@ _hidden int libxl__domain_build(libxl__gc *gc,
 _hidden const char *libxl__domain_device_model(libxl__gc *gc,
                                         const libxl_domain_build_info *info);
 _hidden int libxl__need_xenpv_qemu(libxl__gc *gc,
-        int nr_consoles, libxl__device_console *consoles,
-        int nr_vfbs, libxl_device_vfb *vfbs,
-        int nr_disks, libxl_device_disk *disks,
-        int nr_channels, libxl_device_channel *channels);
+                                   libxl_domain_config *d_config);
 
 /*
  * This function will fix reserved device memory conflict
-- 
2.6.2

