[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 01/12] xenbus: Support HVM backends



On Mon, Nov 28, 2011 at 11:49:00AM -0500, Daniel De Graaf wrote:
> Add HVM implementations of xenbus_(map,unmap)_ring_v(alloc,free) so
> that ring mappings can be done without using GNTMAP_contains_pte which
> is not supported on HVM.


So what else besides these patches should I do to load the blkback/netback
drivers in an HVM domain? There are some xen toolstack patches I presume?
Can you tell me what the c/s are (if any)?

Thanks!
> 
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> ---
>  drivers/xen/xenbus/xenbus_client.c |  155 
> +++++++++++++++++++++++++++++-------
>  1 files changed, 125 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/xen/xenbus/xenbus_client.c 
> b/drivers/xen/xenbus/xenbus_client.c
> index 1906125..688c4b4 100644
> --- a/drivers/xen/xenbus/xenbus_client.c
> +++ b/drivers/xen/xenbus/xenbus_client.c
> @@ -32,16 +32,27 @@
>  
>  #include <linux/slab.h>
>  #include <linux/types.h>
> +#include <linux/spinlock.h>
>  #include <linux/vmalloc.h>
>  #include <linux/export.h>
>  #include <asm/xen/hypervisor.h>
>  #include <asm/xen/page.h>
>  #include <xen/interface/xen.h>
>  #include <xen/interface/event_channel.h>
> +#include <xen/balloon.h>
>  #include <xen/events.h>
>  #include <xen/grant_table.h>
>  #include <xen/xenbus.h>
>  
> +struct xenbus_map_node {
> +     struct list_head next;
> +     struct page *page;
> +     grant_handle_t handle;
> +};
> +
> +static DEFINE_SPINLOCK(xenbus_valloc_lock);
> +static LIST_HEAD(xenbus_valloc_pages);
> +
>  const char *xenbus_strstate(enum xenbus_state state)
>  {
>       static const char *const name[] = {
> @@ -420,21 +431,8 @@ int xenbus_free_evtchn(struct xenbus_device *dev, int 
> port)
>  EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
>  
>  
> -/**
> - * xenbus_map_ring_valloc
> - * @dev: xenbus device
> - * @gnt_ref: grant reference
> - * @vaddr: pointer to address to be filled out by mapping
> - *
> - * Based on Rusty Russell's skeleton driver's map_page.
> - * Map a page of memory into this domain from another domain's grant table.
> - * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
> - * page to that address, and sets *vaddr to that address.
> - * Returns 0 on success, and GNTST_* (see 
> xen/include/interface/grant_table.h)
> - * or -ENOMEM on error. If an error is returned, device will switch to
> - * XenbusStateClosing and the error message will be saved in XenStore.
> - */
> -int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void 
> **vaddr)
> +static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
> +                                     int gnt_ref, void **vaddr)
>  {
>       struct gnttab_map_grant_ref op = {
>               .flags = GNTMAP_host_map | GNTMAP_contains_pte,
> @@ -469,6 +467,64 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, 
> int gnt_ref, void **vaddr)
>       *vaddr = area->addr;
>       return 0;
>  }
> +
> +static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
> +                                     int gnt_ref, void **vaddr)
> +{
> +     struct xenbus_map_node *node;
> +     int err;
> +     void *addr;
> +
> +     *vaddr = NULL;
> +
> +     node = kzalloc(sizeof(*node), GFP_KERNEL);
> +     if (!node)
> +             return -ENOMEM;
> +
> +     err = alloc_xenballooned_pages(1, &node->page, false);
> +     if (err)
> +             goto out_err;
> +
> +     addr = pfn_to_kaddr(page_to_pfn(node->page));
> +
> +     err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
> +     if (err)
> +             goto out_err;
> +
> +     spin_lock(&xenbus_valloc_lock);
> +     list_add(&node->next, &xenbus_valloc_pages);
> +     spin_unlock(&xenbus_valloc_lock);
> +
> +     *vaddr = addr;
> +     return 0;
> +
> + out_err:
> +     free_xenballooned_pages(1, &node->page);
> +     kfree(node);
> +     return err;
> +}
> +
> +/**
> + * xenbus_map_ring_valloc
> + * @dev: xenbus device
> + * @gnt_ref: grant reference
> + * @vaddr: pointer to address to be filled out by mapping
> + *
> + * Based on Rusty Russell's skeleton driver's map_page.
> + * Map a page of memory into this domain from another domain's grant table.
> + * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
> + * page to that address, and sets *vaddr to that address.
> + * Returns 0 on success, and GNTST_* (see 
> xen/include/interface/grant_table.h)
> + * or -ENOMEM on error. If an error is returned, device will switch to
> + * XenbusStateClosing and the error message will be saved in XenStore.
> + */
> +int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void 
> **vaddr)
> +{
> +     if (xen_pv_domain())
> +             return xenbus_map_ring_valloc_pv(dev, gnt_ref, vaddr);
> +     else
> +             return xenbus_map_ring_valloc_hvm(dev, gnt_ref, vaddr);
> +}
>  EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
>  
>  
> @@ -510,20 +566,7 @@ int xenbus_map_ring(struct xenbus_device *dev, int 
> gnt_ref,
>  }
>  EXPORT_SYMBOL_GPL(xenbus_map_ring);
>  
> -
> -/**
> - * xenbus_unmap_ring_vfree
> - * @dev: xenbus device
> - * @vaddr: addr to unmap
> - *
> - * Based on Rusty Russell's skeleton driver's unmap_page.
> - * Unmap a page of memory in this domain that was imported from another 
> domain.
> - * Use xenbus_unmap_ring_vfree if you mapped in your memory with
> - * xenbus_map_ring_valloc (it will free the virtual address space).
> - * Returns 0 on success and returns GNTST_* on error
> - * (see xen/include/interface/grant_table.h).
> - */
> -int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
> +static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
>  {
>       struct vm_struct *area;
>       struct gnttab_unmap_grant_ref op = {
> @@ -566,8 +609,60 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, 
> void *vaddr)
>  
>       return op.status;
>  }
> -EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
>  
> +static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void 
> *vaddr)
> +{
> +     int rv;
> +     struct xenbus_map_node *node;
> +     void *addr;
> +
> +     spin_lock(&xenbus_valloc_lock);
> +     list_for_each_entry(node, &xenbus_valloc_pages, next) {
> +             addr = pfn_to_kaddr(page_to_pfn(node->page));
> +             if (addr == vaddr) {
> +                     list_del(&node->next);
> +                     goto found;
> +             }
> +     }
> +     node = NULL;
> + found:
> +     spin_unlock(&xenbus_valloc_lock);
> +
> +     if (!node) {
> +             xenbus_dev_error(dev, -ENOENT,
> +                              "can't find mapped virtual address %p", vaddr);
> +             return -ENOENT;
> +     }
> +
> +     rv = xenbus_unmap_ring(dev, node->handle, addr);
> +
> +     if (!rv)
> +             free_xenballooned_pages(1, &node->page);
> +
> +     kfree(node);
> +     return rv;
> +}
> +
> +/**
> + * xenbus_unmap_ring_vfree
> + * @dev: xenbus device
> + * @vaddr: addr to unmap
> + *
> + * Based on Rusty Russell's skeleton driver's unmap_page.
> + * Unmap a page of memory in this domain that was imported from another 
> domain.
> + * Use xenbus_unmap_ring_vfree if you mapped in your memory with
> + * xenbus_map_ring_valloc (it will free the virtual address space).
> + * Returns 0 on success and returns GNTST_* on error
> + * (see xen/include/interface/grant_table.h).
> + */
> +int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
> +{
> +     if (xen_pv_domain())
> +             return xenbus_unmap_ring_vfree_pv(dev, vaddr);
> +     else
> +             return xenbus_unmap_ring_vfree_hvm(dev, vaddr);
> +}
> +EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
>  
>  /**
>   * xenbus_unmap_ring
> -- 
> 1.7.7.3

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.