
[Xen-devel] [PATCH v3 1/4] xen/arm: implement page reference and grant table functions needed by grant_table.c



The implementations of these page reference and grant table functions are
strongly "inspired" by their x86 counterparts, except that on ARM we can
always assume paging_mode_external and paging_mode_translate.

TODO: read-only mappings and gnttab_mark_dirty.
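
(Not part of the patch: a minimal sketch of how the common grant table code
is expected to drive the two new hooks on a translated guest.
map_and_unmap_granted_frame, gpaddr and mfn are illustrative names only, and
the usual Xen grant table definitions of GNTMAP_host_map and the GNTST_*
status codes are assumed.)

    static int map_and_unmap_granted_frame(unsigned long gpaddr,
                                           unsigned long mfn)
    {
        int rc;

        /* Writable host mapping: with a translated physmap this reduces to
         * inserting the frame at the requested guest physical address. */
        rc = create_grant_host_mapping(gpaddr, mfn, GNTMAP_host_map, 0);
        if ( rc != GNTST_okay )
            return rc;

        /* Tear-down is symmetric; new_addr == 0 because atomic replacement
         * is not supported, so the p2m entry is simply removed again. */
        return replace_grant_host_mapping(gpaddr, mfn, 0, GNTMAP_host_map);
    }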

Changes in v3:

- replace printk with gdprintk in create_grant_host_mapping;
- print a warning once in gnttab_mark_dirty.

Changes in v2:

- create_grant_host_mapping returns error for read-only mappings;
- remove get_page_light reference.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/arm/dummy.S |    9 ----
 xen/arch/arm/mm.c    |  115 ++++++++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/p2m.c   |   77 +++++++++++++++++++++++----------
 3 files changed, 168 insertions(+), 33 deletions(-)
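
(Reviewer note, not part of the patch: the single comparison in
page_get_owner_and_reference() below rejects both an unallocated page
(count == 0) and a counter that is about to wrap (count == PGC_count_mask).
A standalone illustration of that check, using a hypothetical 8-bit count
mask instead of the real PGC_count_mask:)

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical 8-bit count field; the real PGC_count_mask is wider. */
    #define COUNT_MASK 0xffUL

    /* Same predicate as in page_get_owner_and_reference(): true when
     * taking another reference must be refused. */
    static int ref_not_allowed(unsigned long count_info)
    {
        return ((count_info + 1) & COUNT_MASK) <= 1;
    }

    int main(void)
    {
        assert(ref_not_allowed(0x00));   /* count == 0: page not allocated */
        assert(ref_not_allowed(0xff));   /* count == mask: would wrap to 0 */
        assert(!ref_not_allowed(0x01));  /* ordinary reference: allowed    */
        assert(!ref_not_allowed(0xfe));  /* just below the limit: allowed  */
        printf("count check behaves as described\n");
        return 0;
    }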

diff --git a/xen/arch/arm/dummy.S b/xen/arch/arm/dummy.S
index cab9522..baced25 100644
--- a/xen/arch/arm/dummy.S
+++ b/xen/arch/arm/dummy.S
@@ -23,18 +23,10 @@ DUMMY(arch_vcpu_reset);
 NOP(update_vcpu_system_time);
 
 /* Page Reference & Type Maintenance */
-DUMMY(get_page);
 DUMMY(get_page_type);
-DUMMY(page_get_owner_and_reference);
-DUMMY(put_page);
 DUMMY(put_page_type);
 
 /* Grant Tables */
-DUMMY(create_grant_host_mapping);
-DUMMY(gnttab_clear_flag);
-DUMMY(gnttab_mark_dirty);
-DUMMY(is_iomem_page);
-DUMMY(replace_grant_host_mapping);
 DUMMY(steal_page);
 
 /* Page Offlining */
@@ -45,7 +37,6 @@ DUMMY(domain_get_maximum_gpfn);
 DUMMY(domain_relinquish_resources);
 DUMMY(domain_set_time_offset);
 DUMMY(dom_cow);
-DUMMY(gmfn_to_mfn);
 DUMMY(hypercall_create_continuation);
 DUMMY(send_timer_event);
 DUMMY(share_xen_page_with_privileged_guests);
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d369ee3..e963af9 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -545,6 +545,121 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
 
     return 0;
 }
+
+struct domain *page_get_owner_and_reference(struct page_info *page)
+{
+    unsigned long x, y = page->count_info;
+
+    do {
+        x = y;
+        /*
+         * Count ==  0: Page is not allocated, so we cannot take a reference.
+         * Count == -1: Reference count would wrap, which is invalid.
+         */
+        if ( unlikely(((x + 1) & PGC_count_mask) <= 1) )
+            return NULL;
+    }
+    while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
+
+    return page_get_owner(page);
+}
+
+void put_page(struct page_info *page)
+{
+    unsigned long nx, x, y = page->count_info;
+
+    do {
+        ASSERT((y & PGC_count_mask) != 0);
+        x  = y;
+        nx = x - 1;
+    }
+    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
+
+    if ( unlikely((nx & PGC_count_mask) == 0) )
+    {
+        free_domheap_page(page);
+    }
+}
+
+int get_page(struct page_info *page, struct domain *domain)
+{
+    struct domain *owner = page_get_owner_and_reference(page);
+
+    if ( likely(owner == domain) )
+        return 1;
+
+    if ( owner != NULL )
+        put_page(page);
+
+    return 0;
+}
+
+void gnttab_clear_flag(unsigned long nr, uint16_t *addr)
+{
+    /*
+     * Note that this cannot be clear_bit(), as the access must be
+     * confined to the specified 2 bytes.
+     */
+    uint16_t mask = ~(1 << nr), old;
+
+    do {
+        old = *addr;
+    } while (cmpxchg(addr, old, old & mask) != old);
+}
+
+void gnttab_mark_dirty(struct domain *d, unsigned long l)
+{
+    /* XXX: mark dirty */
+    static int warning;
+    if (!warning) {
+        gdprintk(XENLOG_WARNING, "gnttab_mark_dirty not implemented yet\n");
+        warning = 1;
+    }
+}
+
+int create_grant_host_mapping(unsigned long addr, unsigned long frame,
+                              unsigned int flags, unsigned int cache_flags)
+{
+    int rc;
+
+    if ( cache_flags  || (flags & ~GNTMAP_readonly) != GNTMAP_host_map )
+        return GNTST_general_error;
+
+    /* XXX: read only mappings */
+    if ( flags & GNTMAP_readonly )
+    {
+        gdprintk(XENLOG_WARNING, "read only mappings not implemented yet\n");
+        return GNTST_general_error;
+    }
+
+    rc = guest_physmap_add_page(current->domain,
+                                 addr >> PAGE_SHIFT, frame, 0);
+    if ( rc )
+        return GNTST_general_error;
+    else
+        return GNTST_okay;
+}
+
+int replace_grant_host_mapping(unsigned long addr, unsigned long mfn,
+        unsigned long new_addr, unsigned int flags)
+{
+    unsigned long gfn = (unsigned long)(addr >> PAGE_SHIFT);
+    struct domain *d = current->domain;
+
+    if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
+        return GNTST_general_error;
+
+    guest_physmap_remove_page(d, gfn, mfn, 0);
+
+    return GNTST_okay;
+}
+
+int is_iomem_page(unsigned long mfn)
+{
+    if ( !mfn_valid(mfn) )
+        return 1;
+    return 0;
+}
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 073216b..7c23b7d 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -120,8 +120,14 @@ static int p2m_create_table(struct domain *d,
     return 0;
 }
 
+enum p2m_operation {
+    INSERT,
+    ALLOCATE,
+    REMOVE
+};
+
 static int create_p2m_entries(struct domain *d,
-                     int alloc,
+                     enum p2m_operation op,
                      paddr_t start_gpaddr,
                      paddr_t end_gpaddr,
                      paddr_t maddr,
@@ -191,25 +197,39 @@ static int create_p2m_entries(struct domain *d,
         }
 
         /* Allocate a new RAM page and attach */
-        if (alloc)
-        {
-            struct page_info *page;
-            lpae_t pte;
-
-            rc = -ENOMEM;
-            page = alloc_domheap_page(d, 0);
-            if ( page == NULL ) {
-                printk("p2m_populate_ram: failed to allocate page\n");
-                goto out;
-            }
-
-            pte = mfn_to_p2m_entry(page_to_mfn(page), mattr);
-
-            write_pte(&third[third_table_offset(addr)], pte);
-        } else {
-            lpae_t pte = mfn_to_p2m_entry(maddr >> PAGE_SHIFT, mattr);
-            write_pte(&third[third_table_offset(addr)], pte);
-            maddr += PAGE_SIZE;
+        switch (op) {
+            case ALLOCATE:
+                {
+                    struct page_info *page;
+                    lpae_t pte;
+
+                    rc = -ENOMEM;
+                    page = alloc_domheap_page(d, 0);
+                    if ( page == NULL ) {
+                        printk("p2m_populate_ram: failed to allocate page\n");
+                        goto out;
+                    }
+
+                    pte = mfn_to_p2m_entry(page_to_mfn(page), mattr);
+
+                    write_pte(&third[third_table_offset(addr)], pte);
+                }
+                break;
+            case INSERT:
+                {
+                    lpae_t pte = mfn_to_p2m_entry(maddr >> PAGE_SHIFT, mattr);
+                    write_pte(&third[third_table_offset(addr)], pte);
+                    maddr += PAGE_SIZE;
+                }
+                break;
+            case REMOVE:
+                {
+                    lpae_t pte;
+                    memset(&pte, 0x00, sizeof(pte));
+                    write_pte(&third[third_table_offset(addr)], pte);
+                    maddr += PAGE_SIZE;
+                }
+                break;
         }
     }
 
@@ -229,7 +249,7 @@ int p2m_populate_ram(struct domain *d,
                      paddr_t start,
                      paddr_t end)
 {
-    return create_p2m_entries(d, 1, start, end, 0, MATTR_MEM);
+    return create_p2m_entries(d, ALLOCATE, start, end, 0, MATTR_MEM);
 }
 
 int map_mmio_regions(struct domain *d,
@@ -237,7 +257,7 @@ int map_mmio_regions(struct domain *d,
                      paddr_t end_gaddr,
                      paddr_t maddr)
 {
-    return create_p2m_entries(d, 0, start_gaddr, end_gaddr, maddr, MATTR_DEV);
+    return create_p2m_entries(d, INSERT, start_gaddr, end_gaddr, maddr, MATTR_DEV);
 }
 
 int guest_physmap_add_page(struct domain *d,
@@ -245,7 +265,7 @@ int guest_physmap_add_page(struct domain *d,
                            unsigned long mfn,
                            unsigned int page_order)
 {
-    return create_p2m_entries(d, 0, gpfn << PAGE_SHIFT,
+    return create_p2m_entries(d, INSERT, gpfn << PAGE_SHIFT,
                               (gpfn + (1<<page_order)) << PAGE_SHIFT,
                               mfn << PAGE_SHIFT, MATTR_MEM);
 }
@@ -254,7 +274,9 @@ void guest_physmap_remove_page(struct domain *d,
                                unsigned long gpfn,
                                unsigned long mfn, unsigned int page_order)
 {
-    ASSERT(0);
+    create_p2m_entries(d, REMOVE, gpfn << PAGE_SHIFT,
+                       (gpfn + (1<<page_order)) << PAGE_SHIFT,
+                       mfn << PAGE_SHIFT, MATTR_MEM);
 }
 
 int p2m_alloc_table(struct domain *d)
@@ -318,6 +340,13 @@ int p2m_init(struct domain *d)
 
     return 0;
 }
+
+unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
+{
+    paddr_t p = p2m_lookup(d, gpfn << PAGE_SHIFT);
+    return p >> PAGE_SHIFT;
+}
+
 /*
  * Local variables:
  * mode: C
-- 
1.7.2.5

