
Re: [Xen-devel] [PATCH 5/6] xen-gntdev: Support mapping in HVM domains

On Fri, 14 Jan 2011, Daniel De Graaf wrote:
> HVM does not allow direct PTE modification, so instead we request
> that Xen change its internal p2m mappings on the allocated pages and
> map the memory into userspace normally.
> 
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> ---
>  drivers/xen/gntdev.c      |  117 +++++++++++++++++++++++++++++++--------------
>  drivers/xen/grant-table.c |    6 ++
>  2 files changed, 87 insertions(+), 36 deletions(-)
> 
> diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
> index 8a12857..1931657 100644
> --- a/drivers/xen/gntdev.c
> +++ b/drivers/xen/gntdev.c
> @@ -32,6 +32,7 @@
>  #include <linux/sched.h>
>  #include <linux/spinlock.h>
>  #include <linux/slab.h>
> +#include <linux/highmem.h>
>  
>  #include <xen/xen.h>
>  #include <xen/grant_table.h>
> @@ -52,6 +53,8 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
>  
>  static atomic_t pages_mapped = ATOMIC_INIT(0);
>  
> +static int use_ptemod = 0;
> +
>  struct gntdev_priv {
>       struct list_head maps;
>       /* lock protects maps from concurrent changes */
> @@ -184,6 +187,9 @@ static void gntdev_put_map(struct grant_map *map)
>  
>       atomic_sub(map->count, &pages_mapped);
>  
> +     if (!use_ptemod)
> +             unmap_grant_pages(map, 0, map->count);
> +
>       for (i = 0; i < map->count; i++) {
>               if (map->pages[i])
>                       __free_page(map->pages[i]);
> @@ -212,9 +218,12 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
>  static int map_grant_pages(struct grant_map *map)
>  {
>       int i, flags, err = 0;
> +     phys_addr_t addr;
>       struct gnttab_map_grant_ref* map_ops = NULL;
>  
> -     flags = GNTMAP_host_map | GNTMAP_application_map | GNTMAP_contains_pte;
> +     flags = GNTMAP_host_map;
> +     if (use_ptemod)
> +             flags |= GNTMAP_application_map | GNTMAP_contains_pte;
>       if (map->is_ro)
>               flags |= GNTMAP_readonly;
>  
> @@ -224,7 +233,11 @@ static int map_grant_pages(struct grant_map *map)
>               goto out;
>  
>       for(i=0; i < map->count; i++) {
> -             gnttab_set_map_op(&map_ops[i], map->pginfo[i].pte_maddr, flags,
> +             if (use_ptemod)
> +                     addr = map->pginfo[i].pte_maddr;
> +             else
> +                     addr = (phys_addr_t)pfn_to_kaddr(page_to_pfn(map->pages[i]));
> +             gnttab_set_map_op(&map_ops[i], addr, flags,
>                                 map->pginfo[i].target.ref,
>                                 map->pginfo[i].target.domid);
>       }
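(For context: the hunk where the !use_ptemod case actually reaches userspace
is not quoted above. The commit message says the memory is "mapped into
userspace normally", which presumably means gntdev_mmap() hands the already
grant-mapped pages to the core VM instead of rewriting PTEs, roughly along
these lines; this is a sketch, not the patch's actual code, and the error
label is only illustrative:)

	if (!use_ptemod) {
		/* pages were grant-mapped by map_grant_pages(); just insert
		 * each backing page into the userspace vma */
		for (i = 0; i < map->count; i++) {
			err = vm_insert_page(vma,
					     vma->vm_start + i * PAGE_SIZE,
					     map->pages[i]);
			if (err)
				goto out_unmap;	/* hypothetical label */
		}
	}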

If I am not mistaken, you are asking Xen to modify the kernel virtual
mappings of map->pages, but that alone is not enough to get correctly
working userspace mappings of map->pages: gnttab_map_refs also needs to
fix up the p2m and add an m2p_override entry for each page.
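(As a concrete illustration of what that buys the kernel, here is a rough,
simplified sketch, not taken from either patch; 'mfn' is the foreign machine
frame just mapped by gnttab_map_refs() and 'page' is the local page backing
it:)

	/* hypothetical helper, for illustration only */
	static int record_grant_override(unsigned long mfn, struct page *page)
	{
		int ret;

		ret = m2p_add_override(mfn, page);	/* remember mfn -> page */
		if (ret)
			return ret;

		/*
		 * From now on, machine-to-physical conversions on this mfn
		 * (e.g. mfn_to_pfn(mfn)) resolve back to page_to_pfn(page)
		 * rather than to whatever frame the granting domain owns.
		 */
		return 0;
	}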
However, in the last gntdev patch series I sent, requests with
!GNTMAP_contains_pte are not added to the m2p_override, and the p2m
entries of map->pages are not updated either.

Therefore you probably need this (untested) patch to make it work:

---

Add support for !GNTMAP_contains_pte mappings to gnttab_map_refs

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 9ef54eb..7d6d2ba 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -459,12 +459,38 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                return ret;
 
        for (i = 0; i < count; i++) {
-               /* m2p override only supported for GNTMAP_contains_pte mappings */
-               if (!(map_ops[i].flags & GNTMAP_contains_pte))
-                       continue;
-               pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
-                               (map_ops[i].host_addr & ~PAGE_MASK));
-               mfn = pte_mfn(*pte);
+               if ((map_ops[i].flags & GNTMAP_contains_pte)) {
+                       pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
+                                       (map_ops[i].host_addr & ~PAGE_MASK));
+                       mfn = pte_mfn(*pte);
+               } else {
+                       pgd_t *pgd;
+                       pud_t *pud;
+                       pmd_t *pmd;
+                       unsigned long addr = map_ops[i].host_addr;
+                       pgd = pgd_offset(current->active_mm, addr);
+                       if (pgd_none(*pgd)) {
+                               printk(KERN_WARNING "invalid pgd found, skip address=%lx\n", addr);
+                               continue;
+                       }
+                       pud = pud_offset(pgd, addr);
+                       if (pud_none(*pud)) {
+                               printk(KERN_WARNING "invalid pud found, skip address=%lx\n", addr);
+                               continue;
+                       }
+                       pmd = pmd_offset(pud, addr);
+                       if (pmd_none(*pmd)) {
+                               printk(KERN_WARNING "invalid pmd found, skip address=%lx\n", addr);
+                               continue;
+                       }
+                       pte = pte_offset_map(pmd, addr);
+                       if (pte_none(*pte)) {
+                               printk(KERN_WARNING "invalid pte found, skip address=%lx\n", addr);
+                               continue;
+                       }
+                       mfn = pte_mfn(*pte);
+                       pte_unmap(pte);
+               }
                ret = m2p_add_override(mfn, pages[i]);
                if (ret)
                        return ret;
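(For symmetry, the unmap side should not need an equivalent special case,
assuming the gnttab_unmap_refs() in this series already drops the override
for every page unconditionally and that m2p_remove_override() still takes
just the page at this point; roughly:)

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* drop whatever override gnttab_map_refs() recorded */
		ret = m2p_remove_override(pages[i]);
		if (ret)
			return ret;
	}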

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel