[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] xen/arm: implement page reference and gnttab functions needed by grant_table.c
# HG changeset patch # User Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx> # Date 1349791532 -3600 # Node ID e6806ba46527ea2f163808ec4a7e17fad29bc550 # Parent ea1e095a6032ac10b96492b6b6a6904e2bb5a0d6 xen/arm: implement page reference and gnttab functions needed by grant_table.c The implementation is strongly "inspired" by their x86 counterparts, except that we assume paging_mode_external and paging_mode_translate. TODO: read_only mappings and gnttab_mark_dirty. Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx> Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx> Committed-by: Ian Campbell <ian.campbell@xxxxxxxxxx> --- diff -r ea1e095a6032 -r e6806ba46527 xen/arch/arm/dummy.S --- a/xen/arch/arm/dummy.S Tue Oct 09 15:05:31 2012 +0100 +++ b/xen/arch/arm/dummy.S Tue Oct 09 15:05:32 2012 +0100 @@ -23,18 +23,10 @@ DUMMY(arch_vcpu_reset); NOP(update_vcpu_system_time); /* Page Reference & Type Maintenance */ -DUMMY(get_page); DUMMY(get_page_type); -DUMMY(page_get_owner_and_reference); -DUMMY(put_page); DUMMY(put_page_type); /* Grant Tables */ -DUMMY(create_grant_host_mapping); -DUMMY(gnttab_clear_flag); -DUMMY(gnttab_mark_dirty); -DUMMY(is_iomem_page); -DUMMY(replace_grant_host_mapping); DUMMY(steal_page); /* Page Offlining */ @@ -45,7 +37,6 @@ DUMMY(domain_get_maximum_gpfn); DUMMY(domain_relinquish_resources); DUMMY(domain_set_time_offset); DUMMY(dom_cow); -DUMMY(gmfn_to_mfn); DUMMY(send_timer_event); DUMMY(share_xen_page_with_privileged_guests); DUMMY(wallclock_time); diff -r ea1e095a6032 -r e6806ba46527 xen/arch/arm/mm.c --- a/xen/arch/arm/mm.c Tue Oct 09 15:05:31 2012 +0100 +++ b/xen/arch/arm/mm.c Tue Oct 09 15:05:32 2012 +0100 @@ -525,6 +525,121 @@ long arch_memory_op(int op, XEN_GUEST_HA return 0; } + +struct domain *page_get_owner_and_reference(struct page_info *page) +{ + unsigned long x, y = page->count_info; + + do { + x = y; + /* + * Count == 0: Page is not allocated, so we cannot take a reference. 
+ * Count == -1: Reference count would wrap, which is invalid. + */ + if ( unlikely(((x + 1) & PGC_count_mask) <= 1) ) + return NULL; + } + while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x ); + + return page_get_owner(page); +} + +void put_page(struct page_info *page) +{ + unsigned long nx, x, y = page->count_info; + + do { + ASSERT((y & PGC_count_mask) != 0); + x = y; + nx = x - 1; + } + while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); + + if ( unlikely((nx & PGC_count_mask) == 0) ) + { + free_domheap_page(page); + } +} + +int get_page(struct page_info *page, struct domain *domain) +{ + struct domain *owner = page_get_owner_and_reference(page); + + if ( likely(owner == domain) ) + return 1; + + if ( owner != NULL ) + put_page(page); + + return 0; +} + +void gnttab_clear_flag(unsigned long nr, uint16_t *addr) +{ + /* + * Note that this cannot be clear_bit(), as the access must be + * confined to the specified 2 bytes. + */ + uint16_t mask = ~(1 << nr), old; + + do { + old = *addr; + } while (cmpxchg(addr, old, old & mask) != old); +} + +void gnttab_mark_dirty(struct domain *d, unsigned long l) +{ + /* XXX: mark dirty */ + static int warning; + if (!warning) { + gdprintk(XENLOG_WARNING, "gnttab_mark_dirty not implemented yet\n"); + warning = 1; + } +} + +int create_grant_host_mapping(unsigned long addr, unsigned long frame, + unsigned int flags, unsigned int cache_flags) +{ + int rc; + + if ( cache_flags || (flags & ~GNTMAP_readonly) != GNTMAP_host_map ) + return GNTST_general_error; + + /* XXX: read only mappings */ + if ( flags & GNTMAP_readonly ) + { + gdprintk(XENLOG_WARNING, "read only mappings not implemented yet\n"); + return GNTST_general_error; + } + + rc = guest_physmap_add_page(current->domain, + addr >> PAGE_SHIFT, frame, 0); + if ( rc ) + return GNTST_general_error; + else + return GNTST_okay; +} + +int replace_grant_host_mapping(unsigned long addr, unsigned long mfn, + unsigned long new_addr, unsigned int flags) +{ + unsigned 
long gfn = (unsigned long)(addr >> PAGE_SHIFT); + struct domain *d = current->domain; + + if ( new_addr != 0 || (flags & GNTMAP_contains_pte) ) + return GNTST_general_error; + + guest_physmap_remove_page(d, gfn, mfn, 0); + + return GNTST_okay; +} + +int is_iomem_page(unsigned long mfn) +{ + if ( !mfn_valid(mfn) ) + return 1; + return 0; +} /* * Local variables: * mode: C diff -r ea1e095a6032 -r e6806ba46527 xen/arch/arm/p2m.c --- a/xen/arch/arm/p2m.c Tue Oct 09 15:05:31 2012 +0100 +++ b/xen/arch/arm/p2m.c Tue Oct 09 15:05:32 2012 +0100 @@ -120,8 +120,14 @@ static int p2m_create_table(struct domai return 0; } +enum p2m_operation { + INSERT, + ALLOCATE, + REMOVE +}; + static int create_p2m_entries(struct domain *d, - int alloc, + enum p2m_operation op, paddr_t start_gpaddr, paddr_t end_gpaddr, paddr_t maddr, @@ -191,25 +197,39 @@ static int create_p2m_entries(struct dom } /* Allocate a new RAM page and attach */ - if (alloc) - { - struct page_info *page; - lpae_t pte; + switch (op) { + case ALLOCATE: + { + struct page_info *page; + lpae_t pte; - rc = -ENOMEM; - page = alloc_domheap_page(d, 0); - if ( page == NULL ) { - printk("p2m_populate_ram: failed to allocate page\n"); - goto out; - } + rc = -ENOMEM; + page = alloc_domheap_page(d, 0); + if ( page == NULL ) { + printk("p2m_populate_ram: failed to allocate page\n"); + goto out; + } - pte = mfn_to_p2m_entry(page_to_mfn(page), mattr); + pte = mfn_to_p2m_entry(page_to_mfn(page), mattr); - write_pte(&third[third_table_offset(addr)], pte); - } else { - lpae_t pte = mfn_to_p2m_entry(maddr >> PAGE_SHIFT, mattr); - write_pte(&third[third_table_offset(addr)], pte); - maddr += PAGE_SIZE; + write_pte(&third[third_table_offset(addr)], pte); + } + break; + case INSERT: + { + lpae_t pte = mfn_to_p2m_entry(maddr >> PAGE_SHIFT, mattr); + write_pte(&third[third_table_offset(addr)], pte); + maddr += PAGE_SIZE; + } + break; + case REMOVE: + { + lpae_t pte; + memset(&pte, 0x00, sizeof(pte)); + 
write_pte(&third[third_table_offset(addr)], pte); + maddr += PAGE_SIZE; + } + break; } } @@ -229,7 +249,7 @@ int p2m_populate_ram(struct domain *d, paddr_t start, paddr_t end) { - return create_p2m_entries(d, 1, start, end, 0, MATTR_MEM); + return create_p2m_entries(d, ALLOCATE, start, end, 0, MATTR_MEM); } int map_mmio_regions(struct domain *d, @@ -237,7 +257,7 @@ int map_mmio_regions(struct domain *d, paddr_t end_gaddr, paddr_t maddr) { - return create_p2m_entries(d, 0, start_gaddr, end_gaddr, maddr, MATTR_DEV); + return create_p2m_entries(d, INSERT, start_gaddr, end_gaddr, maddr, MATTR_DEV); } int guest_physmap_add_page(struct domain *d, @@ -245,7 +265,7 @@ int guest_physmap_add_page(struct domain unsigned long mfn, unsigned int page_order) { - return create_p2m_entries(d, 0, gpfn << PAGE_SHIFT, + return create_p2m_entries(d, INSERT, gpfn << PAGE_SHIFT, (gpfn + (1<<page_order)) << PAGE_SHIFT, mfn << PAGE_SHIFT, MATTR_MEM); } @@ -254,7 +274,9 @@ void guest_physmap_remove_page(struct do unsigned long gpfn, unsigned long mfn, unsigned int page_order) { - ASSERT(0); + create_p2m_entries(d, REMOVE, gpfn << PAGE_SHIFT, + (gpfn + (1<<page_order)) << PAGE_SHIFT, + mfn << PAGE_SHIFT, MATTR_MEM); } int p2m_alloc_table(struct domain *d) @@ -318,6 +340,13 @@ int p2m_init(struct domain *d) return 0; } + +unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn) +{ + paddr_t p = p2m_lookup(d, gpfn << PAGE_SHIFT); + return p >> PAGE_SHIFT; +} + /* * Local variables: * mode: C _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.