
Re: [Xen-devel] [PATCH] xen: reset creation_finished flag on soft reset



Paul Durrant <Paul.Durrant@xxxxxxxxxx> writes:

>> Paul Durrant <Paul.Durrant@xxxxxxxxxx> writes:
>> 
>> >
>> > I wonder whether the easiest thing to do would be to modify qemu trad
>> > to do explicit ioreq server creation? It's really not that much
>> > code-change... 20-30 lines or so.
>> 
>> I was thinking about this too, I'll try. It will hopefully allow us to
>> get rid of the 'side effect' which creates the default ioreq server
>> when the HVM parameters are read.
>
> Yes indeed. At that point I'd actually propose getting rid of those params 
> altogether since nothing will use them anymore.
>

And in addition to that, we would no longer need the concept of a
'default_ioreq_server' and the special paths for it all over the code.
That would be ideal, but:

I tried switching qemu-traditional to the new API and even succeeded;
everything, including PCI pass-through, seems to work. However, I'm
nowhere near '20-30 lines' -- it's an order of magnitude more :-)

Anyway, here is the patch (attached). If everyone agrees the change is
appropriate for qemu-traditional I can send it out. No additional
changes to the hypervisor are required.
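
For reference, the call sequence qemu-trad ends up doing with the new API
boils down to roughly the following. This is only a minimal sketch mirroring
the xen_*() wrappers the patch adds to hw/xen_common.h -- the xch handle,
domid and the example ranges are placeholders, and error handling is
trimmed:

#include <xenctrl.h>        /* Xen >= 4.5: xc_hvm_*_ioreq_server() calls */

static int example_ioreq_server_setup(xc_interface *xch, domid_t domid)
{
    uint16_t ioservid;
    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    uint32_t bufioreq_evtchn;

    /* 1) Create the ioreq server explicitly (1 = handle buffered ioreqs). */
    if (xc_hvm_create_ioreq_server(xch, domid, 1, &ioservid) < 0)
        return -1;

    /* 2) Get the ioreq/bufioreq pages and the buffered-ioreq event channel
     *    from the server instead of reading HVM_PARAM_IOREQ_PFN & friends. */
    if (xc_hvm_get_ioreq_server_info(xch, domid, ioservid, &ioreq_pfn,
                                     &bufioreq_pfn, &bufioreq_evtchn) < 0)
        return -1;
    /* ... map both pfns with xc_map_foreign_range() and bind
     *     bufioreq_evtchn, as xen_init_fv()/cpu_x86_init() do ... */

    /* 3) Register what this device model emulates: port I/O ranges,
     *    MMIO ranges and PCI devices (values below are just examples). */
    xc_hvm_map_io_range_to_ioreq_server(xch, domid, ioservid, 0, 0x70, 0x71);
    xc_hvm_map_io_range_to_ioreq_server(xch, domid, ioservid, 1,
                                        0xf0000000, 0xf0000fff);
    xc_hvm_map_pcidev_to_ioreq_server(xch, domid, ioservid, 0, 0, 3, 0);

    /* 4) Only then start accepting ioreqs. */
    return xc_hvm_set_ioreq_server_state(xch, domid, ioservid, 1);
}

The important difference from the old flow is that the shared pages and the
buffered-ioreq event channel come from xc_hvm_get_ioreq_server_info() rather
than from the HVM_PARAM_* reads that create the default ioreq server as a
side effect.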

-- 
  Vitaly

From 030c73f4f0361752dad57a2a90179876ad697bfd Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
Date: Tue, 5 Sep 2017 18:16:03 +0200
Subject: [PATCH] switch to the new IOREQ server API

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 hw/pci.c            |   5 ++
 hw/xen_common.h     | 169 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 hw/xen_machine_fv.c |  31 ++++++++--
 i386-dm/exec-dm.c   |   7 +++
 i386-dm/helper2.c   |  32 +++++++---
 vl.c                |   9 +++
 xen-vl-extra.c      |   3 +
 7 files changed, 243 insertions(+), 13 deletions(-)

diff --git a/hw/pci.c b/hw/pci.c
index c4232856..d6cafb3e 100644
--- a/hw/pci.c
+++ b/hw/pci.c
@@ -34,6 +34,7 @@
 #ifdef CONFIG_PASSTHROUGH
 #include "hw/pass-through.h"
 #endif
+#include "hw/xen_common.h"
 
 extern int igd_passthru;
 
@@ -248,6 +249,10 @@ PCIDevice *pci_register_device(PCIBus *bus, const char *name,
         return NULL;
     found: ;
     }
+
+    xen_map_pcidev(xc_handle, domid, ioservid, 0,
+                  PCI_SLOT(devfn), PCI_FUNC(devfn));
+
     pci_dev = qemu_mallocz(instance_size);
     pci_dev->bus = bus;
     pci_dev->devfn = devfn;
diff --git a/hw/xen_common.h b/hw/xen_common.h
index cc48892f..cde814dc 100644
--- a/hw/xen_common.h
+++ b/hw/xen_common.h
@@ -33,4 +33,173 @@
 # define xen_wmb() wmb()
 #endif
 
+extern uint16_t ioservid;
+
+#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040500
+static inline int xen_create_ioreq_server(xc_interface *xc, domid_t dom,
+                                          uint16_t *ioservid)
+{
+    return 0;
+}
+
+static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
+                                            uint16_t ioservid,
+                                            xen_pfn_t *ioreq_pfn,
+                                            xen_pfn_t *bufioreq_pfn,
+                                            uint32_t *bufioreq_evtchn)
+{
+    unsigned long param;
+    int rc;
+
+    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
+    if (rc < 0) {
+        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
+        return -1;
+    }
+
+    *ioreq_pfn = param;
+
+    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
+    if (rc < 0) {
+        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
+        return -1;
+    }
+
+    *bufioreq_pfn = param;
+
+    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
+                          &param);
+    if (rc < 0) {
+        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
+        return -1;
+    }
+
+    *bufioreq_evtchn = param;
+
+    return 0;
+}
+
+static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
+                                             uint16_t ioservid,
+                                             bool enable)
+{
+    return 0;
+}
+
+static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
+                                          uint16_t ioservid,
+                                         uint64_t start, uint64_t end)
+{
+}
+
+static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
+                                            uint16_t ioservid,
+                                           uint64_t start, uint64_t end)
+{
+}
+
+static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
+                                      uint16_t ioservid,
+                                     uint64_t start, uint64_t end)
+{
+}
+
+static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
+                                        uint16_t ioservid,
+                                       uint64_t start, uint64_t end)
+{
+}
+
+static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
+                                  uint16_t ioservid,
+                                 uint8_t bus, uint8_t device,
+                                 uint8_t function)
+{
+}
+
+static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
+                                    uint16_t ioservid,
+                                   uint8_t bus, uint8_t device,
+                                   uint8_t function)
+{
+}
+#else
+static inline int xen_create_ioreq_server(xc_interface *xc, domid_t dom,
+                                          uint16_t *ioservid)
+{
+    int rc = xc_hvm_create_ioreq_server(xc, dom, 1, ioservid);
+
+    return rc;
+}
+
+static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
+                                            uint16_t ioservid,
+                                            xen_pfn_t *ioreq_pfn,
+                                            xen_pfn_t *bufioreq_pfn,
+                                            uint32_t *bufioreq_evtchn)
+{
+    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
+                                        ioreq_pfn, bufioreq_pfn,
+                                        bufioreq_evtchn);
+}
+
+static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
+                                             uint16_t ioservid,
+                                             bool enable)
+{
+    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
+}
+
+static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
+                                          uint16_t ioservid,
+                                         uint64_t start, uint64_t end)
+{
+    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
+                                        start, end);
+}
+
+static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
+                                            uint16_t ioservid,
+                                           uint64_t start, uint64_t end)
+{
+    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
+                                            start, end);
+}
+
+static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
+                                      uint16_t ioservid,
+                                     uint64_t start, uint64_t end)
+{
+    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
+                                        start, end);
+}
+
+static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
+                                        uint16_t ioservid,
+                                       uint64_t start, uint64_t end)
+{
+    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
+                                            start, end);
+}
+
+static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
+                                  uint16_t ioservid,
+                                 uint8_t bus, uint8_t device,
+                                 uint8_t function)
+{
+    xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
+                                      0, bus, device, function);
+}
+
+static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
+                                    uint16_t ioservid,
+                                   uint8_t bus, uint8_t device,
+                                   uint8_t function)
+{
+    xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
+                                          0, bus, device, function);
+}
+
+#endif
+
 #endif /* QEMU_HW_XEN_COMMON_H */
diff --git a/hw/xen_machine_fv.c b/hw/xen_machine_fv.c
index b385d6a5..edb8167d 100644
--- a/hw/xen_machine_fv.c
+++ b/hw/xen_machine_fv.c
@@ -277,7 +277,8 @@ static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
                         const char *initrd_filename, const char *cpu_model,
                         const char *direct_pci)
 {
-    unsigned long ioreq_pfn;
+    unsigned long ioreq_pfn, bufioreq_pfn;
+    extern uint32_t bufioreq_evtchn;
     extern void *shared_page;
     extern void *buffered_io_page;
 #ifdef __ia64__
@@ -286,6 +287,7 @@ static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
     extern void *buffered_pio_page;
     int i;
 #endif
+    int rc;
 
 #if defined(__i386__) || defined(__x86_64__)
 
@@ -298,7 +300,21 @@ static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
 #ifdef CONFIG_STUBDOM /* the hvmop is not supported on older hypervisors */
     xc_set_hvm_param(xc_handle, domid, HVM_PARAM_DM_DOMAIN, DOMID_SELF);
 #endif
-    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
+    rc = xen_create_ioreq_server(xc_handle, domid, &ioservid);
+    if (rc < 0) {
+        fprintf(logfile, "failed to create ioreq server: error %d\n", errno);
+        exit(-1);
+    }
+
+    fprintf(logfile, "created ioreq server %d\n", ioservid);
+
+    rc = xen_get_ioreq_server_info(xc_handle, domid, ioservid, &ioreq_pfn,
+                                   &bufioreq_pfn, &bufioreq_evtchn);
+    if (rc < 0) {
+        fprintf(logfile, "failed to get ioreq server info: error %d\n", errno);
+        exit(-1);
+    }
+
     fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
     shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                        PROT_READ|PROT_WRITE, ioreq_pfn);
@@ -307,15 +323,20 @@ static void xen_init_fv(ram_addr_t ram_size, int vga_ram_size,
         exit(-1);
     }
 
-    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
-    fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
+    fprintf(logfile, "buffered io page at pfn %lx\n", bufioreq_pfn);
     buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
-                                            PROT_READ|PROT_WRITE, ioreq_pfn);
+                                            PROT_READ|PROT_WRITE, bufioreq_pfn);
     if (buffered_io_page == NULL) {
         fprintf(logfile, "map buffered IO page returned error %d\n", errno);
         exit(-1);
     }
 
+    rc = xen_set_ioreq_server_state(xc_handle, domid, ioservid, true);
+    if (rc < 0) {
+        fprintf(logfile, "failed to enable ioreq server: error %d\n", errno);
+        exit(-1);
+    }
+
 #if defined(__ia64__)
     xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFPIOREQ_PFN, &ioreq_pfn);
     fprintf(logfile, "buffered pio page at pfn %lx\n", ioreq_pfn);
diff --git a/i386-dm/exec-dm.c b/i386-dm/exec-dm.c
index 96274d9d..ec45e393 100644
--- a/i386-dm/exec-dm.c
+++ b/i386-dm/exec-dm.c
@@ -40,6 +40,7 @@
 #include "pc.h"
 #include "disas.h"
 #include "qemu-xen.h"
+#include "xen_common.h"
 
 //#define DEBUG_TB_INVALIDATE
 //#define DEBUG_FLUSH
@@ -308,6 +309,9 @@ void cpu_register_physical_memory(target_phys_addr_t start_addr,
     mmio[mmio_cnt].io_index = phys_offset;
     mmio[mmio_cnt].start = start_addr;
     mmio[mmio_cnt++].size = size;
+
+    xen_map_memory_section(xc_handle, domid, ioservid, start_addr,
+                           start_addr + size - 1);
 }
 
 static int get_free_io_mem_idx(void)
@@ -476,6 +480,9 @@ void unregister_iomem(target_phys_addr_t start)
                (unsigned long)(mmio[index].start),
                 (unsigned long)(mmio[index].start + mmio[index].size));
+        xen_unmap_memory_section(xc_handle, domid, ioservid,
+                                 mmio[index].start,
+                                 mmio[index].start + mmio[index].size - 1);
         mmio[index].size = 0;
     }
 }
 
diff --git a/i386-dm/helper2.c b/i386-dm/helper2.c
index 78093fef..e0825f9e 100644
--- a/i386-dm/helper2.c
+++ b/i386-dm/helper2.c
@@ -100,6 +100,9 @@ long time_offset = 0;
 
 shared_iopage_t *shared_page = NULL;
 
+uint16_t ioservid;
+
+uint32_t bufioreq_evtchn;
 #define BUFFER_IO_MAX_DELAY  100
 buffered_iopage_t *buffered_io_page = NULL;
 QEMUTimer *buffered_io_timer;
@@ -120,7 +123,6 @@ CPUX86State *cpu_x86_init(const char *cpu_model)
     CPUX86State *env;
     static int inited;
     int i, rc;
-    unsigned long bufioreq_evtchn;
 
     env = qemu_mallocz(sizeof(CPUX86State));
     if (!env)
@@ -158,13 +160,6 @@ CPUX86State *cpu_x86_init(const char *cpu_model)
             }
             ioreq_local_port[i] = rc;
         }
-        rc = xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_EVTCHN,
-                &bufioreq_evtchn);
-        if (rc < 0) {
-            fprintf(logfile, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN error=%d\n",
-                    errno);
-            return NULL;
-        }
         rc = xenevtchn_bind_interdomain(xce_handle, domid, (uint32_t)bufioreq_evtchn);
         if (rc == -1) {
             fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
@@ -472,11 +467,32 @@ static void cpu_ioreq_timeoffset(CPUState *env, ioreq_t *req)
 
 static void __handle_ioreq(CPUState *env, ioreq_t *req)
 {
+    uint32_t sbdf, val;
+
     if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
         (req->size < sizeof(target_ulong)))
         req->data &= ((target_ulong)1 << (8 * req->size)) - 1;
 
     switch (req->type) {
+    case IOREQ_TYPE_PCI_CONFIG:
+        sbdf = req->addr >> 32;
+
+        /* Fake a write to port 0xCF8 so that
+         * the config space access will target the
+         * correct device model.
+         */
+        val = (1u << 31) |
+              ((req->addr & 0x0f00) << 16) |
+              ((sbdf & 0xffff) << 8) |
+              (req->addr & 0xfc);
+        do_outp(env, 0xcf8, 4, val);
+
+        /* Now issue the config space access via
+         * port 0xCFC.
+         */
+        req->addr = 0xcfc | (req->addr & 0x03);
+        cpu_ioreq_pio(env, req);
+        break;
     case IOREQ_TYPE_PIO:
         cpu_ioreq_pio(env, req);
         break;
diff --git a/vl.c b/vl.c
index c3c5d630..c8d9896d 100644
--- a/vl.c
+++ b/vl.c
@@ -46,6 +46,7 @@
 
 #include "hw/pci.h"
 #include "hw/xen.h"
+#include "hw/xen_common.h"
 #include <stdlib.h>
 
 #include "qemu-xen.h"
@@ -390,6 +391,9 @@ int register_ioport_read(int start, int length, int size,
             hw_error("register_ioport_read: invalid opaque");
         ioport_opaque[i] = opaque;
     }
+
+    xen_map_io_section(xc_handle, domid, ioservid, start, start + length - 1);
+
     return 0;
 }
 
@@ -415,6 +419,9 @@ int register_ioport_write(int start, int length, int size,
             hw_error("register_ioport_write: invalid opaque");
         ioport_opaque[i] = opaque;
     }
+
+    xen_map_io_section(xc_handle, domid, ioservid, start, start + length - 1);
+
     return 0;
 }
 
@@ -433,6 +440,8 @@ void isa_unassign_ioport(int start, int length)
 
         ioport_opaque[i] = NULL;
     }
+
+    xen_unmap_io_section(xc_handle, domid, ioservid, start, start + length - 1);
 }
 
 /***********************************************************/
diff --git a/xen-vl-extra.c b/xen-vl-extra.c
index 206ac658..c346e779 100644
--- a/xen-vl-extra.c
+++ b/xen-vl-extra.c
@@ -117,6 +117,9 @@ void do_pci_del(char *devname)
     else
         acpi_php_del(devfn);
 
+    xen_unmap_pcidev(xc_handle, domid, ioservid, 0,
+                    PCI_SLOT(devfn), PCI_FUNC(devfn));
+
     free(devname_cpy);
 }
 
-- 
2.13.5

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

