[Xen-changelog] This changeset allows grant mappings to arbitrary PTEs.

# HG changeset patch
# User akw27@xxxxxxxxxxxxxxxxxxxxxx
# Node ID 100837b7abecfa1bc336dd57d775d60090f747ad
# Parent 46cf225719801597532c44b24868d22706bfbebd
# Parent 4995d5f167c9e0a80087c4e6ece8c3381da6f737
This changeset allows grant mappings to arbitrary PTEs.

The map operation now takes PTE addresses as well as virtual addresses.
The contains_pte flag is used to indicate what sort of address is in the
map request.

Additionally, this patch:
 - fixes grant mappings to user space.
 - fixes overflow testing when you run out of maptrack entry space.

Signed-off-by: Andrew Warfield <andrew.warfield@xxxxxxxxxxxx>
Signed-off-by: Geoffrey Lefebvre <geoffrey.g.lefebvre@xxxxxxxxx>
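As a rough illustration of the new interface, the sketch below shows how a
guest-side caller might fill in a map request once this change is applied.
It is a minimal, hypothetical example: the structure is a simplified mirror
of gnttab_map_grant_ref_t (field widths here are illustrative, not the ABI),
the GNTMAP_* values come from the public-header hunk near the end of this
patch, and the hypercall that would actually submit the request is elided.

#include <stdint.h>

#define GNTMAP_device_map       (1 << 0)
#define GNTMAP_host_map         (1 << 1)
#define GNTMAP_readonly         (1 << 2)
#define GNTMAP_application_map  (1 << 3)
#define GNTMAP_contains_pte     (1 << 4)   /* new in this changeset */

struct map_req {                      /* simplified gnttab_map_grant_ref_t */
    union {
        uint64_t pte_addr;            /* machine address of the PTE to update */
        uint64_t host_virt_addr;      /* guest virtual address to map at */
    };
    uint16_t dom;                     /* granting domain */
    uint32_t ref;                     /* grant reference */
    uint16_t flags;                   /* GNTMAP_* */
};

/* Ask Xen to write the grant mapping directly into the PTE at machine
 * address pte_ma instead of resolving a guest virtual address. */
static void build_pte_map_req(struct map_req *req, uint16_t dom,
                              uint32_t ref, uint64_t pte_ma, int readonly)
{
    req->pte_addr = pte_ma;
    req->dom      = dom;
    req->ref      = ref;
    req->flags    = GNTMAP_host_map | GNTMAP_contains_pte;
    if ( readonly )
        req->flags |= GNTMAP_readonly;
}

A request built this way is what the new GNTMAP_contains_pte branch in
__gnttab_activate_grant_ref() (below) routes to update_grant_va_mapping_pte().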
diff -r 46cf22571980 -r 100837b7abec xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Aug 12 13:33:57 2005
+++ b/xen/arch/x86/mm.c Fri Aug 12 17:52:47 2005
@@ -2268,6 +2268,197 @@
     return rc;
 }
 
+
+int update_grant_va_mapping_pte(unsigned long pte_addr,
+                                l1_pgentry_t _nl1e,
+                                struct domain *d,
+                                struct vcpu *v)
+{
+    /* Caller must:
+     * . own d's BIGLOCK
+     * . already have 'get_page' correctly on the to-be-installed nl1e
+     * . be responsible for flushing the TLB
+     * . check PTE being installed isn't DISALLOWED
+     */
+
+    int rc = GNTST_okay;
+    void *va;
+    unsigned long gpfn, mfn;
+    struct pfn_info *page;
+    struct domain_mmap_cache mapcache, sh_mapcache;
+    u32 type_info;
+    l1_pgentry_t ol1e;
+
+    /* Grant tables and shadow mode don't currently work together. */
+    ASSERT( !shadow_mode_refcounts(d) );
+
+    /* There shouldn't be any strange bits set on the PTE. */
+    ASSERT( (l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
+
+    cleanup_writable_pagetable(d);
+
+    domain_mmap_cache_init(&mapcache);
+    domain_mmap_cache_init(&sh_mapcache);
+
+    gpfn = pte_addr >> PAGE_SHIFT;
+    mfn = __gpfn_to_mfn(d, gpfn);
+
+    if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
+    {
+        MEM_LOG("Could not get page for normal update");
+        rc = -EINVAL;
+        goto failed_norefs;
+    }
+
+    va = map_domain_page_with_cache(mfn, &mapcache);
+    va = (void *)((unsigned long)va +
+                  (unsigned long)(pte_addr & ~PAGE_MASK));
+    page = &frame_table[mfn];
+
+    type_info = page->u.inuse.type_info;
+    if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
+        DPRINTK("Grant map attempted to update a non-L1 page\n");
+        rc = -EINVAL;
+        goto failed;
+    }
+
+    if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
+    {
+
+        if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va,
+                                       sizeof(ol1e)) != 0) ) {
+            put_page_type(page);
+            rc = -EINVAL;
+            goto failed;
+        }
+
+        if ( update_l1e(va, ol1e, _nl1e) )
+        {
+            put_page_from_l1e(ol1e, d);
+
+            if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
+                rc = GNTST_flush_all; /* We don't know what vaddr to flush */
+            else
+                rc = GNTST_okay; /* Caller need not invalidate TLB entry */
+
+            if ( unlikely(shadow_mode_enabled(d)) )
+                shadow_l1_normal_pt_update(d, pte_addr, _nl1e, &sh_mapcache);
+        }
+        else
+            rc = -EINVAL;
+
+        put_page_type(page);
+    }
+
+ failed:
+    unmap_domain_page_with_cache(va, &mapcache);
+    put_page(page);
+
+ failed_norefs:
+    domain_mmap_cache_destroy(&mapcache);
+    domain_mmap_cache_destroy(&sh_mapcache);
+
+    return rc;
+}
+
+
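update_grant_va_mapping_pte() locates the target PTE by splitting the supplied
address into the guest page number that holds the page table and the byte
offset of the entry within that page, as in the gpfn/offset arithmetic above.
A standalone sketch of that split (assuming the usual x86 4 KiB pages;
PAGE_SHIFT and PAGE_MASK are defined here for illustration, not taken from
this patch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                        /* 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    uint64_t pte_addr = 0x001c2f48;          /* hypothetical PTE address */
    uint64_t gpfn   = pte_addr >> PAGE_SHIFT;    /* page holding the PTE */
    uint64_t offset = pte_addr & ~PAGE_MASK;     /* entry's offset in that page */

    printf("gpfn=0x%llx offset=0x%llx\n",
           (unsigned long long)gpfn, (unsigned long long)offset);
    return 0;
}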
+int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
+                               struct domain *d)
+{
+    /* Caller must:
+     * . own d's BIGLOCK
+     * . already have 'get_page' correctly on the to-be-installed nl1e
+     * . be responsible for flushing the TLB
+     * . check PTE being installed isn't DISALLOWED
+     */
+
+    int rc = GNTST_okay;
+    void *va;
+    unsigned long gpfn, mfn;
+    struct pfn_info *page;
+    struct domain_mmap_cache mapcache, sh_mapcache;
+    u32 type_info;
+    l1_pgentry_t ol1e;
+
+    /* Grant tables and shadow mode don't work together. */
+    ASSERT( !shadow_mode_refcounts(d) );
+
+    cleanup_writable_pagetable(d);
+
+    domain_mmap_cache_init(&mapcache);
+    domain_mmap_cache_init(&sh_mapcache);
+
+    gpfn = addr >> PAGE_SHIFT;
+    mfn = __gpfn_to_mfn(d, gpfn);
+
+    if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
+    {
+        MEM_LOG("Could not get page for normal update");
+        rc = -EINVAL;
+        goto failed_norefs;
+    }
+
+    va = map_domain_page_with_cache(mfn, &mapcache);
+    va = (void *)((unsigned long)va +
+                  (unsigned long)(addr & ~PAGE_MASK));
+    page = &frame_table[mfn];
+
+    type_info = page->u.inuse.type_info;
+    if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
+        DPRINTK("Grant map attempted to update a non-L1 page\n");
+        rc = -EINVAL;
+        goto failed;
+    }
+
+    if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
+    {
+        if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va,
+                                       sizeof(ol1e)) != 0) )
+        {
+            rc = -EINVAL;
+            put_page_type(page);
+            goto failed;
+        }
+
+        /*
+         * Check that the virtual address supplied is actually mapped to frame.
+         */
+        if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame ))
+        {
+            DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
+                    (unsigned long)l1e_get_intpte(ol1e), addr, frame);
+            rc = -EINVAL;
+            put_page_type(page);
+            goto failed;
+        }
+
+        /* Delete pagetable entry. */
+        if ( unlikely(__put_user(0, (unsigned long *)va)))
+        {
+            DPRINTK("Cannot delete PTE entry at %p.\n", va);
+            rc = -EINVAL;
+        } else {
+            if ( unlikely(shadow_mode_enabled(d)) )
+                shadow_l1_normal_pt_update(d, addr, l1e_empty(),
+                                           &sh_mapcache);
+        }
+        put_page_type(page);
+    }
+
+ failed:
+    unmap_domain_page_with_cache(va, &mapcache);
+    put_page(page);
+
+ failed_norefs:
+    domain_mmap_cache_destroy(&mapcache);
+    domain_mmap_cache_destroy(&sh_mapcache);
+
+    return rc;
+}
+
+
 /* This function assumes the caller is holding the domain's BIGLOCK
  * and is running in a shadow mode
  */
@@ -2283,7 +2474,7 @@
      * . check PTE being installed isn't DISALLOWED
      */
 
-    int rc = 0;
+    int rc = GNTST_okay;
     l1_pgentry_t *pl1e;
     l1_pgentry_t ol1e;
 
@@ -2305,9 +2496,9 @@
     {
         put_page_from_l1e(ol1e, d);
         if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
-            rc = 0; /* Caller needs to invalidate TLB entry */
+            rc = GNTST_flush_one;
         else
-            rc = 1; /* Caller need not invalidate TLB entry */
+            rc = GNTST_okay; /* Caller need not invalidate TLB entry */
     }
     else
         rc = -EINVAL;
 
@@ -2322,6 +2513,40 @@
         shadow_do_update_va_mapping(va, _nl1e, v);
 
     return rc;
+}
+
+int clear_grant_va_mapping(unsigned long addr, unsigned long frame)
+{
+    l1_pgentry_t *pl1e;
+    unsigned long _ol1e;
+
+    pl1e = &linear_pg_table[l1_linear_offset(addr)];
+
+    if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
+    {
+        DPRINTK("Could not find PTE entry for address %lx\n", addr);
+        return -EINVAL;
+    }
+
+    /*
+     * Check that the virtual address supplied is actually mapped to
+     * frame.
+     */
+    if ( unlikely((_ol1e >> PAGE_SHIFT) != frame ))
+    {
+        DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
+                _ol1e, addr, frame);
+        return -EINVAL;
+    }
+
+    /* Delete pagetable entry. */
+    if ( unlikely(__put_user(0, (unsigned long *)pl1e)))
+    {
+        DPRINTK("Cannot delete PTE entry at %p.\n", (unsigned long *)pl1e);
+        return -EINVAL;
+    }
+
+    return 0;
 }
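Both clear routines above refuse to zap a PTE unless it still refers to the
frame that was recorded when the grant was mapped; a stale or guest-modified
entry is left alone and the unmap fails. A standalone sketch of that guard
(PAGE_SHIFT is assumed to be the usual x86 value of 12; a real L1 entry also
carries flag bits in its low bits, which the shift discards):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Mirror of the (_ol1e >> PAGE_SHIFT) != frame test above: true if the
 * PTE still points at the expected frame and is therefore safe to clear. */
static bool pte_matches_frame(uint64_t pte, uint64_t expected_frame)
{
    return (pte >> PAGE_SHIFT) == expected_frame;
}

In the patch the same test appears twice: against l1e_get_intpte(ol1e) in
clear_grant_va_mapping_pte() and against the raw _ol1e word in
clear_grant_va_mapping().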
diff -r 46cf22571980 -r 100837b7abec xen/common/grant_table.c
--- a/xen/common/grant_table.c Fri Aug 12 13:33:57 2005
+++ b/xen/common/grant_table.c Fri Aug 12 17:52:47 2005
@@ -6,6 +6,8 @@
  *
  * Copyright (c) 2005 Christopher Clark
  * Copyright (c) 2004 K A Fraser
+ * Copyright (c) 2005 Andrew Warfield
+ * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -50,7 +52,7 @@
     grant_table_t *t)
 {
     unsigned int h;
-    if ( unlikely((h = t->maptrack_head) == t->maptrack_limit) )
+    if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
         return -1;
     t->maptrack_head = t->maptrack[h].ref_and_flags >> MAPTRACK_REF_SHIFT;
     t->map_count++;
@@ -73,7 +75,7 @@
     struct domain *granting_d,
     grant_ref_t ref,
     u16 dev_hst_ro_flags,
-    unsigned long host_virt_addr,
+    unsigned long addr,
    unsigned long *pframe ) /* OUT */
 {
     domid_t sdom;
@@ -121,6 +123,10 @@
     sflags = sha->flags;
     sdom = sha->domid;
 
+    /* This loop attempts to set the access (reading/writing) flags
+     * in the grant table entry.  It tries a cmpxchg on the field
+     * up to five times, and then fails under the assumption that
+     * the guest is misbehaving. */
     for ( ; ; )
     {
         u32 scombo, prev_scombo, new_scombo;
@@ -253,28 +259,37 @@
 
     /*
      * At this point:
-     * act->pin updated to reflect mapping.
+     * act->pin updated to reference count mappings.
      * sha->flags updated to indicate to granting domain mapping done.
      * frame contains the mfn.
      */
 
     spin_unlock(&granting_d->grant_table->lock);
 
-    if ( (host_virt_addr != 0) && (dev_hst_ro_flags & GNTMAP_host_map) )
+
+    if ( (addr != 0) && (dev_hst_ro_flags & GNTMAP_host_map) )
     {
         /* Write update into the pagetable. */
         l1_pgentry_t pte;
         pte = l1e_from_pfn(frame, GRANT_PTE_FLAGS);
+
+        if ( (dev_hst_ro_flags & GNTMAP_application_map) )
+            l1e_add_flags(pte,_PAGE_USER);
         if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
             l1e_add_flags(pte,_PAGE_RW);
-        rc = update_grant_va_mapping( host_virt_addr, pte,
-                                      mapping_d, mapping_ed );
-
-        /*
-         * IMPORTANT: (rc == 0) => must flush / invalidate entry in TLB.
-         * This is done in the outer gnttab_map_grant_ref.
-         */
-
+
+        if (!(dev_hst_ro_flags & GNTMAP_contains_pte))
+        {
+            rc = update_grant_va_mapping( addr, pte,
+                                          mapping_d, mapping_ed );
+        } else {
+            rc = update_grant_va_mapping_pte( addr, pte,
+                                              mapping_d, mapping_ed );
+        }
+
+        /* IMPORTANT: rc indicates the degree of TLB flush that is required.
+         * GNTST_flush_one (1) or GNTST_flush_all (2). This is done in the
+         * outer gnttab_map_grant_ref. */
         if ( rc < 0 )
         {
             /* Failure: undo and abort. */
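The hunk above builds the grant PTE from a base flag set and then widens it:
_PAGE_USER is added when the guest asked for an application-visible mapping
(the user-space fix mentioned in the changeset description), and _PAGE_RW is
added unless the grant is read-only. A standalone sketch of that flag
composition (the _PAGE_* values are the standard x86 PTE bits; GRANT_PTE_FLAGS
here is a stand-in base set for illustration, not the definition used by Xen):

#include <stdint.h>

#define _PAGE_PRESENT  0x001   /* standard x86 PTE bits */
#define _PAGE_RW       0x002
#define _PAGE_USER     0x004
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY    0x040

/* Stand-in for Xen's GRANT_PTE_FLAGS base set (illustrative only). */
#define GRANT_PTE_FLAGS (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY)

#define GNTMAP_readonly        (1 << 2)
#define GNTMAP_application_map (1 << 3)

/* Compose the low flag bits of a grant PTE the way the hunk above does:
 * user bit for application mappings, write bit unless read-only. */
static uint32_t grant_pte_flags(uint16_t map_flags)
{
    uint32_t flags = GRANT_PTE_FLAGS;

    if ( map_flags & GNTMAP_application_map )
        flags |= _PAGE_USER;
    if ( !(map_flags & GNTMAP_readonly) )
        flags |= _PAGE_RW;

    return flags;
}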
@@ -317,6 +332,9 @@
 /*
  * Returns 0 if TLB flush / invalidate required by caller.
  * va will indicate the address to be invalidated.
+ *
+ * addr is _either_ a host virtual address, or the address of the pte to
+ * update, as indicated by the GNTMAP_contains_pte flag.
  */
 static int
 __gnttab_map_grant_ref(
@@ -326,10 +344,10 @@
     domid_t dom;
     grant_ref_t ref;
     struct domain *ld, *rd;
-    struct vcpu *led;
+    struct vcpu *led;
     u16 dev_hst_ro_flags;
     int handle;
-    unsigned long frame = 0, host_virt_addr;
+    unsigned long frame = 0, addr;
     int rc;
 
     led = current;
@@ -338,19 +356,20 @@
     /* Bitwise-OR avoids short-circuiting which screws control flow. */
     if ( unlikely(__get_user(dom, &uop->dom) |
                   __get_user(ref, &uop->ref) |
-                  __get_user(host_virt_addr, &uop->host_virt_addr) |
+                  __get_user(addr, &uop->host_virt_addr) |
                   __get_user(dev_hst_ro_flags, &uop->flags)) )
     {
         DPRINTK("Fault while reading gnttab_map_grant_ref_t.\n");
         return -EFAULT; /* don't set status */
     }
-
-    if ( ((host_virt_addr != 0) || (dev_hst_ro_flags & GNTMAP_host_map)) &&
-         unlikely(!__addr_ok(host_virt_addr)))
+    if ( (dev_hst_ro_flags & GNTMAP_host_map) &&
+         ( (addr == 0) ||
+           (!(dev_hst_ro_flags & GNTMAP_contains_pte) &&
+            unlikely(!__addr_ok(addr))) ) )
     {
         DPRINTK("Bad virtual address (%lx) or flags (%x).\n",
-                host_virt_addr, dev_hst_ro_flags);
+                addr, dev_hst_ro_flags);
         (void)__put_user(GNTST_bad_virt_addr, &uop->handle);
         return GNTST_bad_gntref;
     }
@@ -386,12 +405,20 @@
         grant_mapping_t *new_mt;
         grant_table_t *lgt = ld->grant_table;
 
+        if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
+        {
+            put_domain(rd);
+            DPRINTK("Maptrack table is at maximum size.\n");
+            (void)__put_user(GNTST_no_device_space, &uop->handle);
+            return GNTST_no_device_space;
+        }
+
         /* Grow the maptrack table. */
         new_mt = alloc_xenheap_pages(lgt->maptrack_order + 1);
         if ( new_mt == NULL )
         {
             put_domain(rd);
-            DPRINTK("No more map handles available\n");
+            DPRINTK("No more map handles available.\n");
             (void)__put_user(GNTST_no_device_space, &uop->handle);
             return GNTST_no_device_space;
         }
@@ -405,7 +432,7 @@
         lgt->maptrack_order += 1;
         lgt->maptrack_limit <<= 1;
 
-        printk("Doubled maptrack size\n");
+        DPRINTK("Doubled maptrack size\n");
         handle = get_maptrack_handle(ld->grant_table);
     }
 
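The maptrack free list manipulated above packs both the GNTMAP_* flags and the
grant reference (or free-list link) of a mapping into a single 16-bit
ref_and_flags word. This changeset widens the flag field from 3 to 5 bits to
make room for GNTMAP_contains_pte (see the xen/include/xen/grant_table.h hunk
at the end), which is why the table can now hold at most 1 << (16 - 5) = 2048
entries and why the growth path above checks MAPTRACK_MAX_ENTRIES before
doubling. A standalone sketch of the packing implied by the new layout:

#include <stdint.h>

#define MAPTRACK_GNTMAP_MASK  0x1f      /* bits 0-4: GNTMAP_* flags     */
#define MAPTRACK_REF_SHIFT    5         /* bits 5-15: grant ref / link  */
#define MAPTRACK_MAX_ENTRIES  ( 1 << (16 - MAPTRACK_REF_SHIFT) )   /* 2048 */

static uint16_t maptrack_pack(uint16_t ref, uint16_t gntmap_flags)
{
    return (uint16_t)((ref << MAPTRACK_REF_SHIFT) |
                      (gntmap_flags & MAPTRACK_GNTMAP_MASK));
}

static uint16_t maptrack_ref(uint16_t ref_and_flags)
{
    return (uint16_t)(ref_and_flags >> MAPTRACK_REF_SHIFT);
}

static uint16_t maptrack_flags(uint16_t ref_and_flags)
{
    return (uint16_t)(ref_and_flags & MAPTRACK_GNTMAP_MASK);
}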
@@ -416,7 +443,7 @@
     if ( 0 <= ( rc = __gnttab_activate_grant_ref( ld, led, rd, ref,
                                                   dev_hst_ro_flags,
-                                                  host_virt_addr, &frame)))
+                                                  addr, &frame)))
     {
         /*
          * Only make the maptrack live _after_ writing the pte, in case we
@@ -430,8 +457,9 @@
 
         (void)__put_user(frame, &uop->dev_bus_addr);
 
-        if ( dev_hst_ro_flags & GNTMAP_host_map )
-            *va = host_virt_addr;
+        if ( ( dev_hst_ro_flags & GNTMAP_host_map ) &&
+             !( dev_hst_ro_flags & GNTMAP_contains_pte) )
+            *va = addr;
 
         (void)__put_user(handle, &uop->handle);
     }
@@ -449,12 +477,12 @@
 gnttab_map_grant_ref(
     gnttab_map_grant_ref_t *uop, unsigned int count)
 {
-    int i, flush = 0;
+    int i, rc, flush = 0;
     unsigned long va = 0;
 
     for ( i = 0; i < count; i++ )
-        if ( __gnttab_map_grant_ref(&uop[i], &va) == 0 )
-            flush++;
+        if ( (rc =__gnttab_map_grant_ref(&uop[i], &va)) >= 0 )
+            flush += rc;
 
     if ( flush == 1 )
         flush_tlb_one_mask(current->domain->cpumask, va);
@@ -479,12 +507,12 @@
     grant_mapping_t *map;
     u16 flags;
     s16 rc = 1;
-    unsigned long frame, virt;
+    unsigned long frame, addr;
 
     ld = current->domain;
 
     /* Bitwise-OR avoids short-circuiting which screws control flow. */
-    if ( unlikely(__get_user(virt, &uop->host_virt_addr) |
+    if ( unlikely(__get_user(addr, &uop->host_virt_addr) |
                   __get_user(frame, &uop->dev_bus_addr) |
                   __get_user(handle, &uop->handle)) )
     {
@@ -554,41 +582,17 @@
         /* Frame is now unmapped for device access. */
     }
 
-    if ( (virt != 0) &&
+    if ( (addr != 0) &&
          (flags & GNTMAP_host_map) &&
          ((act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask)) > 0))
     {
-        l1_pgentry_t *pl1e;
-        unsigned long _ol1e;
-
-        pl1e = &linear_pg_table[l1_linear_offset(virt)];
-
-        if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
-        {
-            DPRINTK("Could not find PTE entry for address %lx\n", virt);
-            rc = -EINVAL;
-            goto unmap_out;
-        }
-
-        /*
-         * Check that the virtual address supplied is actually mapped to
-         * act->frame.
-         */
-        if ( unlikely((_ol1e >> PAGE_SHIFT) != frame ))
-        {
-            DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
-                    _ol1e, virt, frame);
-            rc = -EINVAL;
-            goto unmap_out;
-        }
-
-        /* Delete pagetable entry. */
-        if ( unlikely(__put_user(0, (unsigned long *)pl1e)))
-        {
-            DPRINTK("Cannot delete PTE entry at %p for virtual address %lx\n",
-                    pl1e, virt);
-            rc = -EINVAL;
-            goto unmap_out;
+        if (flags & GNTMAP_contains_pte)
+        {
+            if ( (rc = clear_grant_va_mapping_pte(addr, frame, ld)) < 0 )
+                goto unmap_out;
+        } else {
+            if ( (rc = clear_grant_va_mapping(addr, frame)) < 0 )
+                goto unmap_out;
         }
 
         map->ref_and_flags &= ~GNTMAP_host_map;
@@ -606,7 +610,8 @@
         }
 
         rc = 0;
-        *va = virt;
+        if ( !( flags & GNTMAP_contains_pte) )
+            *va = addr;
     }
 
     if ( (map->ref_and_flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0)
@@ -630,6 +635,7 @@
     if ( act->pin == 0 )
     {
+        act->frame = 0xdeadbeef;
         clear_bit(_GTF_reading, &sha->flags);
         put_page(&frame_table[frame]);
     }
@@ -769,7 +775,8 @@
         {
             DPRINTK("Grant: dom (%hu) SHARED (%d) flags:(%hx) "
                     "dom:(%hu) frame:(%lx)\n",
-                    op.dom, i, sha_copy.flags, sha_copy.domid, sha_copy.frame);
+                    op.dom, i, sha_copy.flags, sha_copy.domid,
+                    (unsigned long) sha_copy.frame);
         }
     }
 
@@ -823,7 +830,7 @@
         gnttab_donate_t *gop = &uop[i];
 #if GRANT_DEBUG
         printk("gnttab_donate: i=%d mfn=%08x domid=%d gref=%08x\n",
-               i, gop->mfn, gop->domid, gop->handle);
+               i, (unsigned int)gop->mfn, gop->domid, gop->handle);
 #endif
         page = &frame_table[gop->mfn];
 
@@ -1027,7 +1034,7 @@
     if ( ld->domain_id != 0 )
     {
         DPRINTK("Foreign unref rd(%d) ld(%d) frm(%x) flgs(%x).\n",
-                rd->domain_id, ld->domain_id, frame, readonly);
+                rd->domain_id, ld->domain_id, (unsigned int)frame, readonly);
     }
 #endif
 
diff -r 46cf22571980 -r 100837b7abec xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Fri Aug 12 13:33:57 2005
+++ b/xen/include/asm-x86/mm.h Fri Aug 12 17:52:47 2005
@@ -365,4 +365,13 @@
                             l1_pgentry_t _nl1e,
                             struct domain *d,
                             struct vcpu *v);
+int update_grant_va_mapping_pte(unsigned long pte_addr,
+                                l1_pgentry_t _nl1e,
+                                struct domain *d,
+                                struct vcpu *v);
+
+int clear_grant_va_mapping(unsigned long addr, unsigned long frame);
+int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
+                               struct domain *d);
+
 #endif /* __ASM_X86_MM_H__ */
diff -r 46cf22571980 -r 100837b7abec xen/include/public/grant_table.h
--- a/xen/include/public/grant_table.h Fri Aug 12 13:33:57 2005
+++ b/xen/include/public/grant_table.h Fri Aug 12 17:52:47 2005
@@ -150,7 +150,10 @@
 #define GNTTABOP_map_grant_ref 0
 typedef struct gnttab_map_grant_ref {
     /* IN parameters. */
-    memory_t host_virt_addr;
+    union {
+        memory_t pte_addr;
+        memory_t host_virt_addr;
+    };
     domid_t dom;
     grant_ref_t ref;
     u16 flags; /* GNTMAP_* */
@@ -173,7 +176,10 @@
 #define GNTTABOP_unmap_grant_ref 1
 typedef struct gnttab_unmap_grant_ref {
     /* IN parameters. */
-    memory_t host_virt_addr;
+    union {
+        memory_t pte_addr;
+        memory_t host_virt_addr;
+    };
     memory_t dev_bus_addr;
     u16 handle;
     /* OUT parameters. */
@@ -247,10 +253,20 @@
 #define _GNTMAP_application_map (3)
 #define GNTMAP_application_map (1<<_GNTMAP_application_map)
 
+ /*
+  * GNTMAP_contains_pte subflag:
+  *  0 => This map request contains a host virtual address.
+  *  1 => This map request contains the machine addess of the PTE to update.
+  */
+#define _GNTMAP_contains_pte (4)
+#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
+
 /*
  * Values for error status returns. All errors are -ve.
  */
-#define GNTST_okay (0)
+#define GNTST_flush_all (2)  /* Success, need to flush entire TLB.   */
+#define GNTST_flush_one (1)  /* Success, need to flush a vaddr.      */
+#define GNTST_okay (0)       /* Normal return.                       */
 #define GNTST_general_error (-1) /* General undefined error. */
 #define GNTST_bad_domain (-2) /* Unrecognsed domain id. */
 #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
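The new status values double as a TLB-flush hint for successful maps:
GNTST_okay means no invalidation is needed, GNTST_flush_one means one virtual
address must be invalidated, and GNTST_flush_all is what the PTE-address path
returns because Xen cannot tell which virtual address the updated PTE backs.
The sketch below mirrors how the reworked gnttab_map_grant_ref() above folds
the per-entry results into a single flush decision; it is illustrative only,
and the flush_one()/flush_all() stubs stand in for the hypervisor's real flush
primitives (only the single-address flush appears in the hunk shown).

#define GNTST_flush_all 2   /* success, flush the entire TLB       */
#define GNTST_flush_one 1   /* success, flush one virtual address  */
#define GNTST_okay      0   /* success, no flush needed            */

/* Stand-ins for the hypervisor's flush primitives. */
static void flush_one(unsigned long va) { (void)va; /* e.g. invlpg     */ }
static void flush_all(void)             {           /* e.g. reload cr3 */ }

/* Fold per-entry map results into one flush, in the spirit of the
 * 'flush += rc' accumulation above: a single GNTST_flush_one can be
 * satisfied by invalidating just that address, anything more (or any
 * GNTST_flush_all) is promoted to a full flush.  Negative rc values
 * are failures and contribute nothing. */
static void flush_after_batch(const int *rc, int count, unsigned long va)
{
    int i, flush = 0;

    for ( i = 0; i < count; i++ )
        if ( rc[i] >= 0 )
            flush += rc[i];

    if ( flush == 1 )
        flush_one(va);
    else if ( flush > 1 )
        flush_all();
}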
diff -r 46cf22571980 -r 100837b7abec xen/include/xen/grant_table.h
--- a/xen/include/xen/grant_table.h Fri Aug 12 13:33:57 2005
+++ b/xen/include/xen/grant_table.h Fri Aug 12 17:52:47 2005
@@ -61,11 +61,12 @@
  * table of these, indexes into which are returned as a 'mapping handle'.
  */
 typedef struct {
-    u16 ref_and_flags; /* 0-2: GNTMAP_* ; 3-15: grant ref */
+    u16 ref_and_flags; /* 0-4: GNTMAP_* ; 5-15: grant ref */
     domid_t domid;     /* granting domain */
 } grant_mapping_t;
-#define MAPTRACK_GNTMAP_MASK 7
-#define MAPTRACK_REF_SHIFT   3
+#define MAPTRACK_GNTMAP_MASK 0x1f
+#define MAPTRACK_REF_SHIFT   5
+#define MAPTRACK_MAX_ENTRIES ( 1 << (16 - MAPTRACK_REF_SHIFT) )
 
 /* Per-domain grant information. */
 typedef struct {

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog