[Xen-changelog] Some arch/x86 gnttab cleanups in Xen.
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID dfd2ded7b7127652731200e46c1e6011fb7e0f9a
# Parent 3fe7b0b7f6c5ffde300b6518c50d6e05e84a4c10
Some arch/x86 gnttab cleanups in Xen.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 3fe7b0b7f6c5 -r dfd2ded7b712 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Sun Aug 14 09:17:54 2005
+++ b/xen/arch/x86/mm.c Sun Aug 14 17:32:30 2005
@@ -2269,36 +2269,20 @@
 }
 
-int update_grant_va_mapping_pte(unsigned long pte_addr,
-                                l1_pgentry_t _nl1e,
-                                struct domain *d,
-                                struct vcpu *v)
-{
-    /* Caller must:
-     * . own d's BIGLOCK
-     * . already have 'get_page' correctly on the to-be-installed nl1e
-     * . be responsible for flushing the TLB
-     * . check PTE being installed isn't DISALLOWED
-     */
-
+int update_grant_pte_mapping(
+    unsigned long pte_addr, l1_pgentry_t _nl1e,
+    struct domain *d, struct vcpu *v)
+{
     int rc = GNTST_okay;
     void *va;
     unsigned long gpfn, mfn;
     struct pfn_info *page;
-    struct domain_mmap_cache mapcache, sh_mapcache;
     u32 type_info;
-    l1_pgentry_t ol1e;
-
-    /* Grant tables and shadow mode don't currently work together. */
-    ASSERT( !shadow_mode_refcounts(d) );
-
-    /* There shouldn't be any strange bits set on the PTE. */
-    ASSERT( (l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
-
-    cleanup_writable_pagetable(d);
-
-    domain_mmap_cache_init(&mapcache);
-    domain_mmap_cache_init(&sh_mapcache);
+    l1_pgentry_t ol1e;
+
+    ASSERT(spin_is_locked(&d->big_lock));
+    ASSERT(!shadow_mode_refcounts(d));
+    ASSERT((l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
 
     gpfn = pte_addr >> PAGE_SHIFT;
     mfn = __gpfn_to_mfn(d, gpfn);
@@ -2306,88 +2290,61 @@
     if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
     {
         MEM_LOG("Could not get page for normal update");
-        rc = -EINVAL;
-        goto failed_norefs;
+        return GNTST_general_error;
     }
 
-    va = map_domain_page_with_cache(mfn, &mapcache);
-    va = (void *)((unsigned long)va +
-                  (unsigned long)(pte_addr & ~PAGE_MASK));
-    page = &frame_table[mfn];
+    va = map_domain_page(mfn);
+    va = (void *)((unsigned long)va + (pte_addr & ~PAGE_MASK));
+    page = pfn_to_page(mfn);
 
     type_info = page->u.inuse.type_info;
-    if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
+    if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
+         !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
+    {
         DPRINTK("Grant map attempted to update a non-L1 page\n");
-        rc = -EINVAL;
+        rc = GNTST_general_error;
         goto failed;
     }
 
-    if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
-    {
-
-        if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va,
-                                       sizeof(ol1e)) != 0) ) {
-            put_page_type(page);
-            rc = -EINVAL;
-            goto failed;
-        }
-
-        if ( update_l1e(va, ol1e, _nl1e) )
-        {
-            put_page_from_l1e(ol1e, d);
-
-            if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
-                rc = GNTST_flush_all; /* We don't know what vaddr to flush */
-            else
-                rc = GNTST_okay; /* Caller need not invalidate TLB entry */
-
-            if ( unlikely(shadow_mode_enabled(d)) )
-                shadow_l1_normal_pt_update(d, pte_addr, _nl1e, &sh_mapcache);
-        }
-        else
-            rc = -EINVAL;
+    if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) ||
+         !update_l1e(va, ol1e, _nl1e) )
+    {
+        put_page_type(page);
+        rc = GNTST_general_error;
+        goto failed;
+    }
+
+    put_page_from_l1e(ol1e, d);
+
+    rc = (l1e_get_flags(ol1e) & _PAGE_PRESENT) ?
+        GNTST_flush_all : GNTST_okay;
+
+    if ( unlikely(shadow_mode_enabled(d)) )
+    {
+        struct domain_mmap_cache sh_mapcache;
+        domain_mmap_cache_init(&sh_mapcache);
+        shadow_l1_normal_pt_update(d, pte_addr, _nl1e, &sh_mapcache);
+        domain_mmap_cache_destroy(&sh_mapcache);
+    }
+
+    put_page_type(page);
 
-        put_page_type(page);
-    }
-
 failed:
-    unmap_domain_page_with_cache(va, &mapcache);
+    unmap_domain_page(va);
     put_page(page);
-
- failed_norefs:
-    domain_mmap_cache_destroy(&mapcache);
-    domain_mmap_cache_destroy(&sh_mapcache);
-
     return rc;
 }
 
-
-
-int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
-                               struct domain *d)
-{
-    /* Caller must:
-     * . own d's BIGLOCK
-     * . already have 'get_page' correctly on the to-be-installed nl1e
-     * . be responsible for flushing the TLB
-     * . check PTE being installed isn't DISALLOWED
-     */
-
+int clear_grant_pte_mapping(
+    unsigned long addr, unsigned long frame, struct domain *d)
+{
     int rc = GNTST_okay;
     void *va;
     unsigned long gpfn, mfn;
     struct pfn_info *page;
-    struct domain_mmap_cache mapcache, sh_mapcache;
     u32 type_info;
-    l1_pgentry_t ol1e;
-
-    /* Grant tables and shadow mode don't work together. */
-    ASSERT( !shadow_mode_refcounts(d) );
-
-    cleanup_writable_pagetable(d);
-
-    domain_mmap_cache_init(&mapcache);
-    domain_mmap_cache_init(&sh_mapcache);
+    l1_pgentry_t ol1e;
+
+    ASSERT(!shadow_mode_refcounts(d));
 
     gpfn = addr >> PAGE_SHIFT;
     mfn = __gpfn_to_mfn(d, gpfn);
@@ -2395,119 +2352,91 @@
     if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
     {
         MEM_LOG("Could not get page for normal update");
-        rc = -EINVAL;
-        goto failed_norefs;
+        return GNTST_general_error;
     }
 
-    va = map_domain_page_with_cache(mfn, &mapcache);
-    va = (void *)((unsigned long)va +
-                  (unsigned long)(addr & ~PAGE_MASK));
-    page = &frame_table[mfn];
+    va = map_domain_page(mfn);
+    va = (void *)((unsigned long)va + (addr & ~PAGE_MASK));
+    page = pfn_to_page(mfn);
 
     type_info = page->u.inuse.type_info;
-    if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
+    if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
+         !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
+    {
         DPRINTK("Grant map attempted to update a non-L1 page\n");
-        rc = -EINVAL;
+        rc = GNTST_general_error;
         goto failed;
     }
 
-    if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
-    {
-        if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va,
-                                       sizeof(ol1e)) != 0) )
-        {
-            rc = -EINVAL;
-            put_page_type(page);
-            goto failed;
-        }
+    if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) )
+    {
+        put_page_type(page);
+        rc = GNTST_general_error;
+        goto failed;
+    }
 
-        /*
-         * Check that the virtual address supplied is actually mapped to frame.
-         */
-        if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame ))
-        {
-            DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
-                    (unsigned long)l1e_get_intpte(ol1e), addr, frame);
-            rc = -EINVAL;
-            put_page_type(page);
-            goto failed;
-        }
-
-        /* Delete pagetable entry. */
-        if ( unlikely(__put_user(0, (unsigned long *)va)))
-        {
-            DPRINTK("Cannot delete PTE entry at %p.\n", va);
-            rc = -EINVAL;
-        } else {
-            if ( unlikely(shadow_mode_enabled(d)) )
-                shadow_l1_normal_pt_update(d, addr, l1e_empty(),
-                                           &sh_mapcache);
-        }
+    /* Check that the virtual address supplied is actually mapped to frame. */
+    if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) )
+    {
+        DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
+                (unsigned long)l1e_get_intpte(ol1e), addr, frame);
         put_page_type(page);
-    }
+        rc = GNTST_general_error;
+        goto failed;
+    }
+
+    /* Delete pagetable entry. */
+    if ( unlikely(__put_user(0, (unsigned long *)va)))
+    {
+        DPRINTK("Cannot delete PTE entry at %p.\n", va);
+        put_page_type(page);
+        rc = GNTST_general_error;
+        goto failed;
+    }
+
+    if ( unlikely(shadow_mode_enabled(d)) )
+    {
+        struct domain_mmap_cache sh_mapcache;
+        domain_mmap_cache_init(&sh_mapcache);
+        shadow_l1_normal_pt_update(d, addr, l1e_empty(), &sh_mapcache);
+        domain_mmap_cache_destroy(&sh_mapcache);
+    }
+
+    put_page_type(page);
 
 failed:
-    unmap_domain_page_with_cache(va, &mapcache);
+    unmap_domain_page(va);
     put_page(page);
-
- failed_norefs:
-    domain_mmap_cache_destroy(&mapcache);
-    domain_mmap_cache_destroy(&sh_mapcache);
-
     return rc;
 }
 
-
-/* This function assumes the caller is holding the domain's BIGLOCK
- * and is running in a shadow mode
- */
-int update_grant_va_mapping(unsigned long va,
-                            l1_pgentry_t _nl1e,
-                            struct domain *d,
-                            struct vcpu *v)
-{
-    /* Caller must:
-     * . own d's BIGLOCK
-     * . already have 'get_page' correctly on the to-be-installed nl1e
-     * . be responsible for flushing the TLB
-     * . check PTE being installed isn't DISALLOWED
+int update_grant_va_mapping(
+    unsigned long va, l1_pgentry_t _nl1e, struct domain *d, struct vcpu *v)
+{
+    int rc = GNTST_okay;
+    l1_pgentry_t *pl1e, ol1e;
+
+    ASSERT(spin_is_locked(&d->big_lock));
+    ASSERT(!shadow_mode_refcounts(d));
+    ASSERT((l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
+
+    /*
+     * This is actually overkill - we don't need to sync the L1 itself,
+     * just everything involved in getting to this L1 (i.e. we need
+     * linear_pg_table[l1_linear_offset(va)] to be in sync)...
      */
-
-    int rc = GNTST_okay;
-    l1_pgentry_t *pl1e;
-    l1_pgentry_t ol1e;
-
-    cleanup_writable_pagetable(d);
-
-    // This is actually overkill - we don't need to sync the L1 itself,
-    // just everything involved in getting to this L1 (i.e. we need
-    // linear_pg_table[l1_linear_offset(va)] to be in sync)...
-    //
     __shadow_sync_va(v, va);
 
     pl1e = &linear_pg_table[l1_linear_offset(va)];
 
-    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
-        rc = -EINVAL;
-    else if ( !shadow_mode_refcounts(d) )
-    {
-        if ( update_l1e(pl1e, ol1e, _nl1e) )
-        {
-            put_page_from_l1e(ol1e, d);
-            if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
-                rc = GNTST_flush_one;
-            else
-                rc = GNTST_okay; /* Caller need not invalidate TLB entry */
-        }
-        else
-            rc = -EINVAL;
-    }
-    else
-    {
-        printk("grant tables and shadow mode currently don't work together\n");
-        BUG();
-    }
+    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) ||
+         !update_l1e(pl1e, ol1e, _nl1e) )
+        return GNTST_general_error;
+
+    put_page_from_l1e(ol1e, d);
+
+    rc = (l1e_get_flags(ol1e) & _PAGE_PRESENT) ?
+        GNTST_flush_one : GNTST_okay;
 
     if ( unlikely(shadow_mode_enabled(d)) )
         shadow_do_update_va_mapping(va, _nl1e, v);
@@ -2517,15 +2446,15 @@
 
 int clear_grant_va_mapping(unsigned long addr, unsigned long frame)
 {
-    l1_pgentry_t   *pl1e;
-    unsigned long  _ol1e;
+    l1_pgentry_t *pl1e;
+    unsigned long _ol1e;
 
     pl1e = &linear_pg_table[l1_linear_offset(addr)];
 
     if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
     {
         DPRINTK("Could not find PTE entry for address %lx\n", addr);
-        return -EINVAL;
+        return GNTST_general_error;
     }
 
     /*
@@ -2536,14 +2465,14 @@
     {
         DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
                 _ol1e, addr, frame);
-        return -EINVAL;
+        return GNTST_general_error;
     }
 
     /* Delete pagetable entry. */
     if ( unlikely(__put_user(0, (unsigned long *)pl1e)))
     {
         DPRINTK("Cannot delete PTE entry at %p.\n", (unsigned long *)pl1e);
-        return -EINVAL;
+        return GNTST_general_error;
     }
 
     return 0;
@@ -2583,10 +2512,11 @@
          (shadow_mode_translate(d) ||
           shadow_mode_translate(percpu_info[cpu].foreign))) )
     {
-        // The foreign domain's pfn's are in a different namespace.
-        // There's not enough information in just a gpte to figure out
-        // how to (re-)shadow this entry.
-        //
+        /*
+         * The foreign domain's pfn's are in a different namespace. There's
+         * not enough information in just a gpte to figure out how to
+         * (re-)shadow this entry.
+         */
         domain_crash();
     }
 
@@ -3054,7 +2984,7 @@
          */
         BUG();
     }
-    PTWR_PRINTK("[%c] disconnected_l1va at %p is %lx\n",
+    PTWR_PRINTK("[%c] disconnected_l1va at %p is %"PRIpte"\n",
                 PTWR_PRINT_WHICH, ptep, pte.l1);
 
     l1e_remove_flags(pte, _PAGE_RW);
@@ -3072,7 +3002,7 @@
     /* Ensure that there are no stale writable mappings in any TLB. */
     /* NB. INVLPG is a serialising instruction: flushes pending updates. */
     flush_tlb_one_mask(d->cpumask, l1va);
-    PTWR_PRINTK("[%c] disconnected_l1va at %p now %lx\n",
+    PTWR_PRINTK("[%c] disconnected_l1va at %p now %"PRIpte"\n",
                 PTWR_PRINT_WHICH, ptep, pte.l1);
 
     /*
diff -r 3fe7b0b7f6c5 -r dfd2ded7b712 xen/common/grant_table.c
--- a/xen/common/grant_table.c Sun Aug 14 09:17:54 2005
+++ b/xen/common/grant_table.c Sun Aug 14 17:32:30 2005
@@ -266,7 +266,6 @@
 
     spin_unlock(&granting_d->grant_table->lock);
 
-
     if ( (addr != 0) && (dev_hst_ro_flags & GNTMAP_host_map) )
     {
         /* Write update into the pagetable. */
@@ -278,14 +277,10 @@
         if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
             l1e_add_flags(pte,_PAGE_RW);
 
-        if (!(dev_hst_ro_flags & GNTMAP_contains_pte))
-        {
-            rc = update_grant_va_mapping( addr, pte,
-                                          mapping_d, mapping_ed );
-        } else {
-            rc = update_grant_va_mapping_pte( addr, pte,
-                                              mapping_d, mapping_ed );
-        }
+        if ( dev_hst_ro_flags & GNTMAP_contains_pte )
+            rc = update_grant_pte_mapping(addr, pte, mapping_d, mapping_ed);
+        else
+            rc = update_grant_va_mapping(addr, pte, mapping_d, mapping_ed);
 
         /* IMPORTANT: rc indicates the degree of TLB flush that is required.
          * GNTST_flush_one (1) or GNTST_flush_all (2). This is done in the
@@ -586,11 +581,13 @@
          (flags & GNTMAP_host_map) &&
          ((act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask)) > 0))
     {
-        if (flags & GNTMAP_contains_pte)
-        {
-            if ( (rc = clear_grant_va_mapping_pte(addr, frame, ld)) < 0 )
+        if ( flags & GNTMAP_contains_pte )
+        {
+            if ( (rc = clear_grant_pte_mapping(addr, frame, ld)) < 0 )
                 goto unmap_out;
-        } else {
+        }
+        else
+        {
             if ( (rc = clear_grant_va_mapping(addr, frame)) < 0 )
                 goto unmap_out;
         }
@@ -961,11 +958,14 @@
                             unsigned int cmd, void *uop, unsigned int count)
 {
     long rc;
+    struct domain *d = current->domain;
 
     if ( count > 512 )
        return -EINVAL;
 
-    LOCK_BIGLOCK(current->domain);
+    LOCK_BIGLOCK(d);
+
+    sync_pagetable_state(d);
 
     rc = -EFAULT;
     switch ( cmd )
@@ -1001,7 +1001,7 @@
     }
 
   out:
-    UNLOCK_BIGLOCK(current->domain);
+    UNLOCK_BIGLOCK(d);
 
     return rc;
 }
diff -r 3fe7b0b7f6c5 -r dfd2ded7b712 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Sun Aug 14 09:17:54 2005
+++ b/xen/include/asm-x86/mm.h Sun Aug 14 17:32:30 2005
@@ -377,17 +377,14 @@
  * Caller must own d's BIGLOCK, is responsible for flushing the TLB, and must
  * hold a reference to the page.
  */
-int update_grant_va_mapping(unsigned long va,
-                            l1_pgentry_t _nl1e,
-                            struct domain *d,
-                            struct vcpu *v);
-int update_grant_va_mapping_pte(unsigned long pte_addr,
-                                l1_pgentry_t _nl1e,
-                                struct domain *d,
-                                struct vcpu *v);
-
+int update_grant_va_mapping(
+    unsigned long va, l1_pgentry_t _nl1e,
+    struct domain *d, struct vcpu *v);
+int update_grant_pte_mapping(
+    unsigned long pte_addr, l1_pgentry_t _nl1e,
+    struct domain *d, struct vcpu *v);
 int clear_grant_va_mapping(unsigned long addr, unsigned long frame);
-int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
-                               struct domain *d);
+int clear_grant_pte_mapping(
+    unsigned long addr, unsigned long frame, struct domain *d);
 
 #endif /* __ASM_X86_MM_H__ */
diff -r 3fe7b0b7f6c5 -r dfd2ded7b712 xen/include/public/grant_table.h
--- a/xen/include/public/grant_table.h Sun Aug 14 09:17:54 2005
+++ b/xen/include/public/grant_table.h Sun Aug 14 17:32:30 2005
@@ -261,8 +261,6 @@
 /*
  * Values for error status returns. All errors are -ve.
  */
-#define GNTST_flush_all        (2)  /* Success, need to flush entire TLB.  */
-#define GNTST_flush_one        (1)  /* Success, need to flush a vaddr.     */
 #define GNTST_okay             (0)  /* Normal return.                      */
 #define GNTST_general_error    (-1) /* General undefined error.            */
 #define GNTST_bad_domain       (-2) /* Unrecognsed domain id.              */
diff -r 3fe7b0b7f6c5 -r dfd2ded7b712 xen/include/xen/grant_table.h
--- a/xen/include/xen/grant_table.h Sun Aug 14 09:17:54 2005
+++ b/xen/include/xen/grant_table.h Sun Aug 14 17:32:30 2005
@@ -53,8 +53,8 @@
 #define ORDER_GRANT_FRAMES 2
 #define NR_GRANT_FRAMES    (1U << ORDER_GRANT_FRAMES)
 
-#define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
-
+#define NR_GRANT_ENTRIES \
+    ((NR_GRANT_FRAMES << PAGE_SHIFT) / sizeof(grant_entry_t))
 
 /*
  * Tracks a mapping of another domain's grant reference. Each domain has a
@@ -65,8 +65,8 @@
     domid_t domid;              /* granting domain */
 } grant_mapping_t;
 #define MAPTRACK_GNTMAP_MASK  0x1f
-#define MAPTRACK_REF_SHIFT    5
-#define MAPTRACK_MAX_ENTRIES  ( 1 << (16 - MAPTRACK_REF_SHIFT) )
+#define MAPTRACK_REF_SHIFT    5
+#define MAPTRACK_MAX_ENTRIES  (1 << (16 - MAPTRACK_REF_SHIFT))
 
 /* Per-domain grant information. */
 typedef struct {
@@ -109,10 +109,15 @@
 
 /* Notify 'rd' of a completed transfer via an already-locked grant entry. */
 void gnttab_notify_transfer(
-    struct domain *rd, struct domain *ld, grant_ref_t ref, unsigned long frame);
+    struct domain *rd, struct domain *ld,
+    grant_ref_t ref, unsigned long frame);
 
-/* Pre-domain destruction release of granted device mappings of other domains.*/
+/* Domain death release of granted device mappings of other domains.*/
 void gnttab_release_dev_mappings(grant_table_t *gt);
 
+/* Extra GNTST_ values, for internal use only. */
+#define GNTST_flush_all (2) /* Success, need to flush entire TLB. */
+#define GNTST_flush_one (1) /* Success, need to flush a vaddr.    */
+
 #endif /* __XEN_GRANT_H__ */
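For anyone reading the patch top to bottom, the functional core of the
xen/common/grant_table.c change is the dispatch between the two renamed
helpers and the handling of the GNTST_flush_* values, which this changeset
makes internal-only. The sketch below illustrates that pattern; it is not
code from the patch. The wrapper name install_grant_mapping and the exact
flush calls are assumptions for illustration, while the helper names, the
GNTMAP_contains_pte flag and the GNTST_* values are the ones the patch uses.

    /* Illustrative sketch only: not part of the changeset. */
    static int install_grant_mapping(unsigned long addr, l1_pgentry_t pte,
                                     unsigned int flags,
                                     struct domain *d, struct vcpu *v)
    {
        int rc;

        if ( flags & GNTMAP_contains_pte )
            /* 'addr' is the machine address of the PTE slot to rewrite. */
            rc = update_grant_pte_mapping(addr, pte, d, v);
        else
            /* 'addr' is a guest virtual address, resolved via linear_pg_table. */
            rc = update_grant_va_mapping(addr, pte, d, v);

        /*
         * GNTST_flush_one/GNTST_flush_all are success codes carrying a TLB
         * flush hint; with this changeset they live in xen/grant_table.h and
         * must not leak to the guest, so the caller performs the indicated
         * flush and then reports plain GNTST_okay.  The flush primitives
         * used here are assumptions for the sketch.
         */
        if ( rc == GNTST_flush_one )
            flush_tlb_one_mask(d->cpumask, addr);
        else if ( rc == GNTST_flush_all )
            flush_tlb_mask(d->cpumask);

        return (rc < 0) ? rc : GNTST_okay;
    }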
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog