
Re: [Xen-devel] [QEMU][RFC PATCH 6/6] xen: handle qemu disaggregation



On Thu, 22 Mar 2012, Julien Grall wrote:
> * Register QEMU in Xen as server
> * Retrieve its own shared pages
> * Check if the page is already mapped before populating it
> 
> Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
> ---
>  xen-all.c |   62 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
>  1 files changed, 59 insertions(+), 3 deletions(-)
> 
> diff --git a/xen-all.c b/xen-all.c
> index 2d001b8..6b7acd7 100644
> --- a/xen-all.c
> +++ b/xen-all.c
> @@ -61,6 +61,45 @@ static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
>  }
>  #  define FMT_ioreq_size "u"
>  #endif
> +#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200

At this point, given that we are close to Xen 4.2, I would make this:

#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040300

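To spell out which trees end up on which path, a quick compile check of
the guard (not meant for the patch itself; it assumes xenctrl.h pulls in
xen-compat.h for __XEN_LATEST_INTERFACE_VERSION__, which it does today):

#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040300
    /* a 4.2 tree defines 0x00040200, so it keeps the old,
     * single device model path */
    printf("single server path, interface %#x\n",
           (unsigned) __XEN_LATEST_INTERFACE_VERSION__);
#else
    /* only 4.3 or later, where the ioreq server interface is expected
     * to land, takes the new multi-server path */
    printf("multiple ioreq server path, interface %#x\n",
           (unsigned) __XEN_LATEST_INTERFACE_VERSION__);
#endif
    return 0;
}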

> +static inline unsigned long xen_buffered_iopage(void)
> +{
> +    unsigned long pfn;
> +
> +    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &pfn);
> +
> +    return pfn;
> +}
> +
> +static inline unsigned long xen_iopage(void)
> +{
> +    unsigned long pfn;
> +
> +    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &pfn);
> +
> +    return pfn;
> +}
> +#else
> +static inline unsigned long xen_buffered_iopage(void)
> +{
> +    unsigned long pfn;
> +
> +    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IO_PFN_FIRST, &pfn);
> +    pfn += (serverid - 1) * 2 + 2;
> +    return pfn;
> +}
> +
> +static inline unsigned long xen_iopage(void)
> +{
> +    unsigned long pfn;
> +
> +    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IO_PFN_FIRST, &pfn);
> +    pfn += (serverid - 1) * 2 + 1;
> +
> +    return pfn;

Shouldn't these be

pfn += serverid * 2;

and

pfn += serverid * 2 + 1;

Are you numbering serverid starting from 1?
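In other words, with a 0-based serverid I would have expected something
along these lines (an untested sketch, and it assumes the layout stays
"synchronous page first, buffered page second" starting at
HVM_PARAM_IO_PFN_FIRST):

static inline unsigned long xen_iopage(void)
{
    unsigned long pfn;

    /* two consecutive pages per server, starting at HVM_PARAM_IO_PFN_FIRST:
     * the synchronous ioreq page, then the buffered ioreq page */
    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IO_PFN_FIRST, &pfn);
    return pfn + serverid * 2;
}

static inline unsigned long xen_buffered_iopage(void)
{
    unsigned long pfn;

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IO_PFN_FIRST, &pfn);
    return pfn + serverid * 2 + 1;
}

If the hypervisor really does hand out server ids starting from 1, a
comment above the arithmetic explaining the numbering would help.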


> +#endif
>  
>  #define BUFFER_IO_MAX_DELAY  100
>  
> @@ -349,6 +388,10 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
>          return;
>      }
>  
> +    if (xen_map_cache(ram_addr, size, 0)) {
> +        return;
> +    }

why?


>      trace_xen_ram_alloc(ram_addr, size);
>  
>      nr_pfn = size >> TARGET_PAGE_BITS;
> @@ -1046,7 +1089,14 @@ static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
>          exit(1);
>      }
>  
> -    snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
> +    if (!xen_dmid) {
> +        snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
> +    }
> +    else {
> +        snprintf(path, sizeof (path), "/local/domain/0/dms/%u/%u/state",
> +                 xen_domid, xen_dmid);
> +    }
> +
>      if (!xs_write(xs, XBT_NULL, path, state, strlen(state))) {
>          fprintf(stderr, "error recording dm state\n");
>          exit(1);
> @@ -1077,6 +1127,7 @@ static void xen_change_state_handler(void *opaque, int running,
>                                       RunState state)
>  {
>      if (running) {
> +        is_running = 1;
>          /* record state running */
>          xenstore_record_dm_state(xenstore, "running");
>      }
> @@ -1137,7 +1188,12 @@ int xen_hvm_init(void)
>      state->suspend.notify = xen_suspend_notifier;
>      qemu_register_suspend_notifier(&state->suspend);
>  
> -    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
> +    rc = xc_hvm_register_ioreq_server(xen_xc, xen_domid, &serverid);
> +
> +    if (rc)
> +        hw_error("registered server returned error %d", rc);
> +
> +    ioreq_pfn = xen_iopage();
>      DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
>      state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
>                                                PROT_READ|PROT_WRITE, ioreq_pfn);
> @@ -1146,7 +1202,7 @@ int xen_hvm_init(void)
>                   errno, xen_xc);
>      }
>  
> -    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
> +    ioreq_pfn = xen_buffered_iopage();
>      DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
>      state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
>                                                     PROT_READ|PROT_WRITE, ioreq_pfn);
> -- 
> Julien Grall
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

