
[Xen-devel] Re: [PATCH 3/3] xen-gntdev: Use ballooned pages for grant mappings



On Mon, 2011-03-07 at 18:06 +0000, Daniel De Graaf wrote:
> Grant mappings cause the PFN<->MFN mapping to be lost on the pages used
> for the mapping. Instead of leaking memory, use pages that have already
> been ballooned out and so have no valid mapping. This removes the need
> for the bad-page leak workaround as pages are repopulated by the balloon
> driver.
> 
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>

Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
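
(As a side note for anyone following along: the allocation pattern the patch
switches to looks roughly like the standalone sketch below. The helper name
and the error handling are mine, not part of the patch, and I'm assuming the
get_ballooned_pages()/put_ballooned_pages() interface from earlier in this
series, with get_ballooned_pages() returning the number of pages it actually
handed out, as the error path in gntdev_alloc_map() suggests.)

#include <linux/mm.h>
#include <linux/slab.h>
#include <xen/balloon.h>

/* Illustrative only: grab 'count' already-ballooned pages, all or nothing. */
static struct page **grab_ballooned_pages(int count)
{
	struct page **pages;
	int got;

	pages = kcalloc(count, sizeof(pages[0]), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* Same arguments as the call added to gntdev_alloc_map() below. */
	got = get_ballooned_pages(count, pages, 1);
	if (got != count) {
		/* Partial allocation: hand back whatever we did get. */
		put_ballooned_pages(got, pages);
		kfree(pages);
		return NULL;
	}

	/* These pages already have no valid PFN<->MFN mapping, so mapping
	 * foreign grant frames over them cannot leak a usable mapping. */
	return pages;
}

(The upshot is that gntdev no longer needs to special-case pages whose
mapping was clobbered; they all simply go back to the balloon on teardown.)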

> ---
>  drivers/xen/gntdev.c |   41 ++++++++---------------------------------
>  1 files changed, 8 insertions(+), 33 deletions(-)
> 
> diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
> index d43ff30..2faf797 100644
> --- a/drivers/xen/gntdev.c
> +++ b/drivers/xen/gntdev.c
> @@ -36,6 +36,7 @@
>  
>  #include <xen/xen.h>
>  #include <xen/grant_table.h>
> +#include <xen/balloon.h>
>  #include <xen/gntdev.h>
>  #include <xen/events.h>
>  #include <asm/xen/hypervisor.h>
> @@ -122,10 +123,12 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
>           NULL == add->pages)
>               goto err;
>  
> +     i = get_ballooned_pages(count, add->pages, 1);
> +     if (i != count) {
> +             put_ballooned_pages(i, add->pages);
> +             goto err;
> +     }
>       for (i = 0; i < count; i++) {
> -             add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
> -             if (add->pages[i] == NULL)
> -                     goto err;
>               add->map_ops[i].handle = -1;
>               add->unmap_ops[i].handle = -1;
>       }
> @@ -137,11 +140,6 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
>       return add;
>  
>  err:
> -     if (add->pages)
> -             for (i = 0; i < count; i++) {
> -                     if (add->pages[i])
> -                             __free_page(add->pages[i]);
> -             }
>       kfree(add->pages);
>       kfree(add->grants);
>       kfree(add->map_ops);
> @@ -184,8 +182,6 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
>  
>  static void gntdev_put_map(struct grant_map *map)
>  {
> -     int i;
> -
>       if (!map)
>               return;
>  
> @@ -202,29 +198,7 @@ static void gntdev_put_map(struct grant_map *map)
>               if (!use_ptemod)
>                       unmap_grant_pages(map, 0, map->count);
>  
> -             for (i = 0; i < map->count; i++) {
> -                     uint32_t check, *tmp;
> -                     if (!map->pages[i])
> -                             continue;
> -                     /* XXX When unmapping in an HVM domain, Xen will
> -                      * sometimes end up mapping the GFN to an invalid MFN.
> -                      * In this case, writes will be discarded and reads will
> -                      * return all 0xFF bytes.  Leak these unusable GFNs
> -                      * until Xen supports fixing their p2m mapping.
> -                      *
> -                      * Confirmed present in Xen 4.1-RC3 with HVM source
> -                      */
> -                     tmp = kmap(map->pages[i]);
> -                     *tmp = 0xdeaddead;
> -                     mb();
> -                     check = *tmp;
> -                     kunmap(map->pages[i]);
> -                     if (check == 0xdeaddead)
> -                             __free_page(map->pages[i]);
> -                     else
> -                             pr_debug("Discard page %d=%ld\n", i,
> -                                     page_to_pfn(map->pages[i]));
> -             }
> +             put_ballooned_pages(map->count, map->pages);
>       }
>       kfree(map->pages);
>       kfree(map->grants);
> @@ -324,6 +298,7 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
>                       map->unmap_ops[offset+i].status);
>               map->unmap_ops[offset+i].handle = -1;
>       }
> +
>       return err;
>  }
>  
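
As an aside, for anyone wondering what the dropped workaround above actually
did: after an unmap in an HVM guest, Xen can leave the GFN pointing at an
invalid MFN, where writes are discarded and reads return all 0xFF bytes. The
old loop probed each page and only freed the ones that still read back
correctly, leaking the rest. Boiled down to a helper (the function name is
mine; the original was open-coded in gntdev_put_map()), the probe was
roughly:

#include <linux/highmem.h>
#include <linux/types.h>

/* Return true if the page is still backed by a valid, writable frame. */
static bool page_probe_ok(struct page *page)
{
	uint32_t check, *tmp;

	tmp = kmap(page);
	*tmp = 0xdeaddead;	/* scribble a marker into the page      */
	mb();			/* force the write out before reading   */
	check = *tmp;		/* ... and read it back                 */
	kunmap(page);

	/* An invalid mapping discards the write and reads as 0xFF bytes. */
	return check == 0xdeaddead;
}

With ballooned pages there is no such guesswork: everything goes back via
put_ballooned_pages() and the balloon driver repopulates the PFNs later.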



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel