[Xen-changelog] [xen master] x86: introduce create_perdomain_mapping()
commit 703ac3abcfc5f649c038070867ee12c67f730548
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Feb 28 11:08:13 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Feb 28 11:08:13 2013 +0100

    x86: introduce create_perdomain_mapping()

    ... as well as free_perdomain_mappings(), and use them to carry out the
    existing per-domain mapping setup/teardown. This at once makes the setup
    of the first sub-range PV domain specific (with idle domains also
    excluded), as the GDT/LDT mapping area is needed only for those.

    Also fix an improperly scaled BUILD_BUG_ON() expression in
    mapcache_domain_init().

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/domain.c        |   92 +++++++----------------
 xen/arch/x86/domain_page.c   |  129 ++++++-----------------------
 xen/arch/x86/mm.c            |  188 ++++++++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/config.h |   10 +--
 xen/include/asm-x86/domain.h |   16 +---
 xen/include/asm-x86/mm.h     |    8 ++
 6 files changed, 251 insertions(+), 192 deletions(-)
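The new pair of helpers replaces the open-coded page-table setup that each
call site used to carry. As a minimal sketch of the calling convention (a
hypothetical caller, not part of the patch; the real call sites appear in
the hunks below):

    /* Back 'nr' pages of a per-domain VA range with freshly allocated,
     * zeroed pages.  NULL for the l1tab array lets the function keep its
     * intermediate L1 tables to itself; the NIL() sentinel for the page
     * array asks it to allocate and map leaf pages without handing their
     * struct page_info pointers back.  (example_map_range is made up.) */
    static int example_map_range(struct domain *d, unsigned long va,
                                 unsigned int nr)
    {
        return create_perdomain_mapping(d, va, nr, NULL,
                                        NIL(struct page_info *));
    }

On teardown, free_perdomain_mappings(d) walks the whole per-domain area and
frees everything at once, so callers no longer track individual pages.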
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index c1f96ff..3cdee86 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -372,37 +372,16 @@ int switch_compat(struct domain *d)
 int vcpu_initialise(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    unsigned int idx;
     int rc;
 
     v->arch.flags = TF_kernel_mode;
 
-    idx = perdomain_pt_idx(v);
-    if ( !d->arch.perdomain_pts[idx] )
-    {
-        void *pt;
-        l2_pgentry_t *l2tab;
-
-        pt = alloc_xenheap_pages(0, MEMF_node(vcpu_to_node(v)));
-        if ( !pt )
-            return -ENOMEM;
-        clear_page(pt);
-        d->arch.perdomain_pts[idx] = pt;
-
-        l2tab = __map_domain_page(d->arch.perdomain_l2_pg[0]);
-        l2tab[l2_table_offset(PERDOMAIN_VIRT_START) + idx]
-            = l2e_from_paddr(__pa(pt), __PAGE_HYPERVISOR);
-        unmap_domain_page(l2tab);
-    }
-
     rc = mapcache_vcpu_init(v);
     if ( rc )
         return rc;
 
     paging_vcpu_init(v);
 
-    v->arch.perdomain_ptes = perdomain_ptes(d, v);
-
     if ( (rc = vcpu_init_fpu(v)) != 0 )
         return rc;
 
@@ -420,6 +399,12 @@ int vcpu_initialise(struct vcpu *v)
 
     if ( !is_idle_domain(d) )
     {
+        rc = create_perdomain_mapping(d, GDT_VIRT_START(v),
+                                      1 << GDT_LDT_VCPU_SHIFT,
+                                      d->arch.pv_domain.gdt_ldt_l1tab, NULL);
+        if ( rc )
+            goto done;
+
         BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
                      PAGE_SIZE);
         v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
@@ -478,8 +463,6 @@ void vcpu_destroy(struct vcpu *v)
 
 int arch_domain_create(struct domain *d, unsigned int domcr_flags)
 {
-    struct page_info *pg;
-    l3_pgentry_t *l3tab;
     int i, paging_initialised = 0;
     int rc = -ENOMEM;
 
@@ -510,29 +493,24 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
                    d->domain_id);
     }
 
-    BUILD_BUG_ON(PDPT_L2_ENTRIES * sizeof(*d->arch.perdomain_pts)
-                 != PAGE_SIZE);
-    d->arch.perdomain_pts =
-        alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
-    if ( !d->arch.perdomain_pts )
-        goto fail;
-    clear_page(d->arch.perdomain_pts);
-
-    pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
-    if ( pg == NULL )
-        goto fail;
-    d->arch.perdomain_l2_pg[0] = pg;
-    clear_domain_page(page_to_mfn(pg));
+    if ( is_hvm_domain(d) )
+        rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, NULL, NULL);
+    else if ( is_idle_domain(d) )
+        rc = 0;
+    else
+    {
+        d->arch.pv_domain.gdt_ldt_l1tab =
+            alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
+        if ( !d->arch.pv_domain.gdt_ldt_l1tab )
+            goto fail;
+        clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
 
-    pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
-    if ( pg == NULL )
+        rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
+                                      GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
+                                      NULL, NULL);
+    }
+    if ( rc )
         goto fail;
-    d->arch.perdomain_l3_pg = pg;
-    l3tab = __map_domain_page(pg);
-    clear_page(l3tab);
-    l3tab[l3_table_offset(PERDOMAIN_VIRT_START)] =
-        l3e_from_page(d->arch.perdomain_l2_pg[0], __PAGE_HYPERVISOR);
-    unmap_domain_page(l3tab);
 
     mapcache_domain_init(d);
 
@@ -608,19 +586,14 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
     if ( paging_initialised )
         paging_final_teardown(d);
     mapcache_domain_exit(d);
-    for ( i = 0; i < PERDOMAIN_SLOTS; ++i)
-        if ( d->arch.perdomain_l2_pg[i] )
-            free_domheap_page(d->arch.perdomain_l2_pg[i]);
-    if ( d->arch.perdomain_l3_pg )
-        free_domheap_page(d->arch.perdomain_l3_pg);
-    free_xenheap_page(d->arch.perdomain_pts);
+    free_perdomain_mappings(d);
+    if ( !is_hvm_domain(d) )
+        free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
     return rc;
 }
 
 void arch_domain_destroy(struct domain *d)
 {
-    unsigned int i;
-
     if ( is_hvm_domain(d) )
         hvm_domain_destroy(d);
     else
@@ -634,13 +607,9 @@ void arch_domain_destroy(struct domain *d)
 
     mapcache_domain_exit(d);
 
-    for ( i = 0; i < PDPT_L2_ENTRIES; ++i )
-        free_xenheap_page(d->arch.perdomain_pts[i]);
-    free_xenheap_page(d->arch.perdomain_pts);
-    for ( i = 0; i < PERDOMAIN_SLOTS; ++i)
-        if ( d->arch.perdomain_l2_pg[i] )
-            free_domheap_page(d->arch.perdomain_l2_pg[i]);
-    free_domheap_page(d->arch.perdomain_l3_pg);
+    free_perdomain_mappings(d);
+    if ( !is_hvm_domain(d) )
+        free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
@@ -1515,10 +1484,11 @@ static void __context_switch(void)
         if ( need_full_gdt(n) )
         {
             unsigned long mfn = virt_to_mfn(gdt);
+            l1_pgentry_t *pl1e = gdt_ldt_ptes(n->domain, n);
             unsigned int i;
+
             for ( i = 0; i < NR_RESERVED_GDT_PAGES; i++ )
-                l1e_write(n->arch.perdomain_ptes +
-                          FIRST_RESERVED_GDT_PAGE + i,
+                l1e_write(pl1e + FIRST_RESERVED_GDT_PAGE + i,
                           l1e_from_pfn(mfn + i, __PAGE_HYPERVISOR));
         }
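In the arch_domain_create() hunk above, GDT_LDT_MBYTES << (20 - PAGE_SHIFT)
converts the size of the GDT/LDT area from megabytes into a page count. A
quick sanity check of the arithmetic, assuming the usual PAGE_SHIFT of 12:

    /* 1 MB = 1 << 20 bytes, 1 page = 1 << PAGE_SHIFT bytes, hence:      */
    /*   pages = mbytes << (20 - PAGE_SHIFT) = mbytes << 8 = mbytes * 256 */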
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index acc9486..989ec29 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -243,10 +243,7 @@ void copy_domain_page(unsigned long dmfn, unsigned long smfn)
 int mapcache_domain_init(struct domain *d)
 {
     struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
-    l3_pgentry_t *l3tab;
-    l2_pgentry_t *l2tab;
-    unsigned int i, bitmap_pages, memf = MEMF_node(domain_to_node(d));
-    unsigned long *end;
+    unsigned int bitmap_pages;
 
     if ( is_hvm_domain(d) || is_idle_domain(d) )
         return 0;
@@ -256,48 +253,23 @@ int mapcache_domain_init(struct domain *d)
         return 0;
 #endif
 
-    dcache->l1tab = xzalloc_array(l1_pgentry_t *, MAPCACHE_L2_ENTRIES + 1);
-    d->arch.perdomain_l2_pg[MAPCACHE_SLOT] = alloc_domheap_page(NULL, memf);
-    if ( !dcache->l1tab || !d->arch.perdomain_l2_pg[MAPCACHE_SLOT] )
+    dcache->l1tab = xzalloc_array(l1_pgentry_t *, MAPCACHE_L2_ENTRIES);
+    if ( !dcache->l1tab )
         return -ENOMEM;
 
-    clear_domain_page(page_to_mfn(d->arch.perdomain_l2_pg[MAPCACHE_SLOT]));
-    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
-    l3tab[l3_table_offset(MAPCACHE_VIRT_START)] =
-        l3e_from_page(d->arch.perdomain_l2_pg[MAPCACHE_SLOT],
-                      __PAGE_HYPERVISOR);
-    unmap_domain_page(l3tab);
-
-    l2tab = __map_domain_page(d->arch.perdomain_l2_pg[MAPCACHE_SLOT]);
-
-    BUILD_BUG_ON(MAPCACHE_VIRT_END + 3 +
-                 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long)) >
+    BUILD_BUG_ON(MAPCACHE_VIRT_END + PAGE_SIZE * (3 +
+                 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long))) >
                  MAPCACHE_VIRT_START + (PERDOMAIN_SLOT_MBYTES << 20));
     bitmap_pages = PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long));
     dcache->inuse = (void *)MAPCACHE_VIRT_END + PAGE_SIZE;
     dcache->garbage = dcache->inuse +
                       (bitmap_pages + 1) * PAGE_SIZE / sizeof(long);
-    end = dcache->garbage + bitmap_pages * PAGE_SIZE / sizeof(long);
-
-    for ( i = l2_table_offset((unsigned long)dcache->inuse);
-          i <= l2_table_offset((unsigned long)(end - 1)); ++i )
-    {
-        ASSERT(i <= MAPCACHE_L2_ENTRIES);
-        dcache->l1tab[i] = alloc_xenheap_pages(0, memf);
-        if ( !dcache->l1tab[i] )
-        {
-            unmap_domain_page(l2tab);
-            return -ENOMEM;
-        }
-        clear_page(dcache->l1tab[i]);
-        l2tab[i] = l2e_from_paddr(__pa(dcache->l1tab[i]), __PAGE_HYPERVISOR);
-    }
-
-    unmap_domain_page(l2tab);
 
     spin_lock_init(&dcache->lock);
 
-    return 0;
+    return create_perdomain_mapping(d, (unsigned long)dcache->inuse,
+                                    2 * bitmap_pages + 1,
+                                    NIL(l1_pgentry_t *), NULL);
 }
 
 void mapcache_domain_exit(struct domain *d)
@@ -307,94 +279,41 @@ void mapcache_domain_exit(struct domain *d)
     if ( is_hvm_domain(d) )
         return;
 
-    if ( dcache->l1tab )
-    {
-        unsigned long i;
-
-        for ( i = (unsigned long)dcache->inuse; ; i += PAGE_SIZE )
-        {
-            l1_pgentry_t *pl1e;
-
-            if ( l2_table_offset(i) > MAPCACHE_L2_ENTRIES ||
-                 !dcache->l1tab[l2_table_offset(i)] )
-                break;
-
-            pl1e = &dcache->l1tab[l2_table_offset(i)][l1_table_offset(i)];
-            if ( l1e_get_flags(*pl1e) )
-                free_domheap_page(l1e_get_page(*pl1e));
-        }
-
-        for ( i = 0; i < MAPCACHE_L2_ENTRIES + 1; ++i )
-            free_xenheap_page(dcache->l1tab[i]);
-
-        xfree(dcache->l1tab);
-    }
+    xfree(dcache->l1tab);
 }
 
 int mapcache_vcpu_init(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
-    l2_pgentry_t *l2tab;
     unsigned long i;
-    unsigned int memf = MEMF_node(vcpu_to_node(v));
+    unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
+    unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
 
     if ( is_hvm_vcpu(v) || !dcache->l1tab )
         return 0;
 
-    l2tab = __map_domain_page(d->arch.perdomain_l2_pg[MAPCACHE_SLOT]);
-
-    while ( dcache->entries < d->max_vcpus * MAPCACHE_VCPU_ENTRIES )
+    if ( ents > dcache->entries )
     {
-        unsigned int ents = dcache->entries + MAPCACHE_VCPU_ENTRIES;
-        l1_pgentry_t *pl1e;
-
         /* Populate page tables. */
-        if ( !dcache->l1tab[i = mapcache_l2_entry(ents - 1)] )
-        {
-            dcache->l1tab[i] = alloc_xenheap_pages(0, memf);
-            if ( !dcache->l1tab[i] )
-            {
-                unmap_domain_page(l2tab);
-                return -ENOMEM;
-            }
-            clear_page(dcache->l1tab[i]);
-            l2tab[i] = l2e_from_paddr(__pa(dcache->l1tab[i]),
-                                      __PAGE_HYPERVISOR);
-        }
+        int rc = create_perdomain_mapping(d, MAPCACHE_VIRT_START,
+                                          d->max_vcpus * MAPCACHE_VCPU_ENTRIES,
+                                          dcache->l1tab, NULL);
 
         /* Populate bit maps. */
-        i = (unsigned long)(dcache->inuse + BITS_TO_LONGS(ents));
-        pl1e = &dcache->l1tab[l2_table_offset(i)][l1_table_offset(i)];
-        if ( !l1e_get_flags(*pl1e) )
-        {
-            struct page_info *pg = alloc_domheap_page(NULL, memf);
-
-            if ( pg )
-            {
-                clear_domain_page(page_to_mfn(pg));
-                *pl1e = l1e_from_page(pg, __PAGE_HYPERVISOR);
-                pg = alloc_domheap_page(NULL, memf);
-            }
-            if ( !pg )
-            {
-                unmap_domain_page(l2tab);
-                return -ENOMEM;
-            }
-
-            i = (unsigned long)(dcache->garbage + BITS_TO_LONGS(ents));
-            pl1e = &dcache->l1tab[l2_table_offset(i)][l1_table_offset(i)];
-            ASSERT(!l1e_get_flags(*pl1e));
-
-            clear_domain_page(page_to_mfn(pg));
-            *pl1e = l1e_from_page(pg, __PAGE_HYPERVISOR);
-        }
+        if ( !rc )
+            rc = create_perdomain_mapping(d, (unsigned long)dcache->inuse,
+                                          nr, NULL, NIL(struct page_info *));
+        if ( !rc )
+            rc = create_perdomain_mapping(d, (unsigned long)dcache->garbage,
+                                          nr, NULL, NIL(struct page_info *));
+
+        if ( rc )
+            return rc;
 
         dcache->entries = ents;
     }
 
-    unmap_domain_page(l2tab);
-
     /* Mark all maphash entries as not in use. */
     BUILD_BUG_ON(MAPHASHENT_NOTINUSE < MAPCACHE_ENTRIES);
     for ( i = 0; i < MAPHASH_ENTRIES; i++ )
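The BUILD_BUG_ON() fix mentioned in the commit message is visible above: the
old expression added raw page counts (3 + 2 * PFN_UP(...)) to the byte
address MAPCACHE_VIRT_END, mixing units; the new one scales the page count
by PAGE_SIZE first. The layout being checked, as inferred from the
inuse/garbage pointer arithmetic (the guard-page reading is an assumption,
not stated in the patch):

    /* MAPCACHE_VIRT_END
     *   + 1 guard page
     *   + bitmap_pages        <- dcache->inuse bitmap
     *   + 1 guard page
     *   + bitmap_pages        <- dcache->garbage bitmap
     *   + 1 trailing page     (together: the "3 +" in the expression)
     * must fit below MAPCACHE_VIRT_START + (PERDOMAIN_SLOT_MBYTES << 20).
     */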
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index add93ac..335a85c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -511,6 +511,7 @@ void update_cr3(struct vcpu *v)
 
 static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
+    l1_pgentry_t *pl1e;
     int i;
     unsigned long pfn;
     struct page_info *page;
@@ -523,12 +524,13 @@ static void invalidate_shadow_ldt(struct vcpu *v, int flush)
         goto out;
 
     v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
+    pl1e = gdt_ldt_ptes(v->domain, v);
 
     for ( i = 16; i < 32; i++ )
     {
-        pfn = l1e_get_pfn(v->arch.perdomain_ptes[i]);
+        pfn = l1e_get_pfn(pl1e[i]);
         if ( pfn == 0 ) continue;
-        l1e_write(&v->arch.perdomain_ptes[i], l1e_empty());
+        l1e_write(&pl1e[i], l1e_empty());
         page = mfn_to_page(pfn);
         ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
         ASSERT_PAGE_IS_DOMAIN(page, v->domain);
@@ -596,7 +598,7 @@ int map_ldt_shadow_page(unsigned int off)
     nl1e = l1e_from_pfn(page_to_mfn(page), l1e_get_flags(l1e) | _PAGE_RW);
 
     spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
-    l1e_write(&v->arch.perdomain_ptes[off + 16], nl1e);
+    l1e_write(&gdt_ldt_ptes(d, v)[off + 16], nl1e);
     v->arch.pv_vcpu.shadow_ldt_mapcnt++;
     spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
 
@@ -4073,15 +4075,17 @@ long do_update_va_mapping_otherdomain(unsigned long va, u64 val64,
 
 void destroy_gdt(struct vcpu *v)
 {
+    l1_pgentry_t *pl1e;
     int i;
     unsigned long pfn;
 
     v->arch.pv_vcpu.gdt_ents = 0;
+    pl1e = gdt_ldt_ptes(v->domain, v);
     for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
     {
-        if ( (pfn = l1e_get_pfn(v->arch.perdomain_ptes[i])) != 0 )
+        if ( (pfn = l1e_get_pfn(pl1e[i])) != 0 )
             put_page_and_type(mfn_to_page(pfn));
-        l1e_write(&v->arch.perdomain_ptes[i], l1e_empty());
+        l1e_write(&pl1e[i], l1e_empty());
         v->arch.pv_vcpu.gdt_frames[i] = 0;
     }
 }
@@ -4092,6 +4096,7 @@ long set_gdt(struct vcpu *v,
              unsigned int entries)
 {
     struct domain *d = v->domain;
+    l1_pgentry_t *pl1e;
     /* NB. There are 512 8-byte entries per GDT page. */
     int i, nr_pages = (entries + 511) / 512;
     unsigned long mfn, *pfns;
@@ -4124,11 +4129,11 @@ long set_gdt(struct vcpu *v,
 
     /* Install the new GDT. */
     v->arch.pv_vcpu.gdt_ents = entries;
+    pl1e = gdt_ldt_ptes(d, v);
     for ( i = 0; i < nr_pages; i++ )
     {
         v->arch.pv_vcpu.gdt_frames[i] = frames[i];
-        l1e_write(&v->arch.perdomain_ptes[i],
-                  l1e_from_pfn(frames[i], __PAGE_HYPERVISOR));
+        l1e_write(&pl1e[i], l1e_from_pfn(frames[i], __PAGE_HYPERVISOR));
     }
 
     xfree(pfns);
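Every user of the old cached v->arch.perdomain_ptes pointer above now
recomputes the L1 entry pointer on demand via gdt_ldt_ptes() (defined in the
asm-x86/domain.h hunk further down). A worked example of the index
arithmetic, assuming PAGETABLE_ORDER == 9 and GDT_LDT_VCPU_SHIFT == 5 (32
pages, i.e. 64kB of GDT plus 64kB of LDT, per vCPU; both constants are
assumptions for illustration):

    /* For vcpu_id == 20:                                              */
    unsigned int idx = 20 >> (9 - 5);         /* L1 table 1 of the array */
    unsigned int off = (20 << 5) & (512 - 1); /* entry 128 in that table */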
@@ -5528,6 +5533,175 @@ void __iomem *ioremap(paddr_t pa, size_t len)
     return (void __force __iomem *)va;
 }
 
+int create_perdomain_mapping(struct domain *d, unsigned long va,
+                             unsigned int nr, l1_pgentry_t **pl1tab,
+                             struct page_info **ppg)
+{
+    struct page_info *pg;
+    l3_pgentry_t *l3tab;
+    l2_pgentry_t *l2tab;
+    l1_pgentry_t *l1tab;
+    unsigned int memf = MEMF_node(domain_to_node(d));
+    int rc = 0;
+
+    ASSERT(va >= PERDOMAIN_VIRT_START &&
+           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
+
+    if ( !d->arch.perdomain_l3_pg )
+    {
+        pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+        if ( !pg )
+            return -ENOMEM;
+        l3tab = __map_domain_page(pg);
+        clear_page(l3tab);
+        d->arch.perdomain_l3_pg = pg;
+        if ( !nr )
+        {
+            unmap_domain_page(l3tab);
+            return 0;
+        }
+    }
+    else if ( !nr )
+        return 0;
+    else
+        l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+
+    ASSERT(!l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
+
+    if ( !(l3e_get_flags(l3tab[l3_table_offset(va)]) & _PAGE_PRESENT) )
+    {
+        pg = alloc_domheap_page(NULL, memf);
+        if ( !pg )
+        {
+            unmap_domain_page(l3tab);
+            return -ENOMEM;
+        }
+        l2tab = __map_domain_page(pg);
+        clear_page(l2tab);
+        l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
+    }
+    else
+        l2tab = map_domain_page(l3e_get_pfn(l3tab[l3_table_offset(va)]));
+
+    unmap_domain_page(l3tab);
+
+    if ( !pl1tab && !ppg )
+    {
+        unmap_domain_page(l2tab);
+        return 0;
+    }
+
+    for ( l1tab = NULL; !rc && nr--; )
+    {
+        l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
+
+        if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
+        {
+            if ( pl1tab && !IS_NIL(pl1tab) )
+            {
+                l1tab = alloc_xenheap_pages(0, memf);
+                if ( !l1tab )
+                {
+                    rc = -ENOMEM;
+                    break;
+                }
+                ASSERT(!pl1tab[l2_table_offset(va)]);
+                pl1tab[l2_table_offset(va)] = l1tab;
+                pg = virt_to_page(l1tab);
+            }
+            else
+            {
+                pg = alloc_domheap_page(NULL, memf);
+                if ( !pg )
+                {
+                    rc = -ENOMEM;
+                    break;
+                }
+                l1tab = __map_domain_page(pg);
+            }
+            clear_page(l1tab);
+            *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR);
+        }
+        else if ( !l1tab )
+            l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+
+        if ( ppg &&
+             !(l1e_get_flags(l1tab[l1_table_offset(va)]) & _PAGE_PRESENT) )
+        {
+            pg = alloc_domheap_page(NULL, memf);
+            if ( pg )
+            {
+                clear_domain_page(page_to_mfn(pg));
+                if ( !IS_NIL(ppg) )
+                    *ppg++ = pg;
+                l1tab[l1_table_offset(va)] =
+                    l1e_from_page(pg, __PAGE_HYPERVISOR | _PAGE_AVAIL0);
+                l2e_add_flags(*pl2e, _PAGE_AVAIL0);
+            }
+            else
+                rc = -ENOMEM;
+        }
+
+        va += PAGE_SIZE;
+        if ( rc || !nr || !l1_table_offset(va) )
+        {
+            /* Note that this is a no-op for the alloc_xenheap_page() case. */
+            unmap_domain_page(l1tab);
+            l1tab = NULL;
+        }
+    }
+
+    ASSERT(!l1tab);
+    unmap_domain_page(l2tab);
+
+    return rc;
+}
+
+void free_perdomain_mappings(struct domain *d)
+{
+    l3_pgentry_t *l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+    unsigned int i;
+
+    for ( i = 0; i < PERDOMAIN_SLOTS; ++i)
+        if ( l3e_get_flags(l3tab[i]) & _PAGE_PRESENT )
+        {
+            struct page_info *l2pg = l3e_get_page(l3tab[i]);
+            l2_pgentry_t *l2tab = __map_domain_page(l2pg);
+            unsigned int j;
+
+            for ( j = 0; j < L2_PAGETABLE_ENTRIES; ++j )
+                if ( l2e_get_flags(l2tab[j]) & _PAGE_PRESENT )
+                {
+                    struct page_info *l1pg = l2e_get_page(l2tab[j]);
+
+                    if ( l2e_get_flags(l2tab[j]) & _PAGE_AVAIL0 )
+                    {
+                        l1_pgentry_t *l1tab = __map_domain_page(l1pg);
+                        unsigned int k;
+
+                        for ( k = 0; k < L1_PAGETABLE_ENTRIES; ++k )
+                            if ( (l1e_get_flags(l1tab[k]) &
+                                  (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
+                                 (_PAGE_PRESENT | _PAGE_AVAIL0) )
+                                free_domheap_page(l1e_get_page(l1tab[k]));
+
+                        unmap_domain_page(l1tab);
+                    }
+
+                    if ( is_xen_heap_page(l1pg) )
+                        free_xenheap_page(page_to_virt(l1pg));
+                    else
+                        free_domheap_page(l1pg);
+                }
+
+            unmap_domain_page(l2tab);
+            free_domheap_page(l2pg);
+        }
+
+    unmap_domain_page(l3tab);
+    free_domheap_page(d->arch.perdomain_l3_pg);
+}
+
 #ifdef MEMORY_GUARD
 
 void memguard_init(void)
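The pl1tab/ppg arguments select how much bookkeeping the caller gets back.
The modes below are inferred from this patch's call sites rather than spelled
out anywhere, so treat them as a reading of the code (va, nr and l1tab are
placeholders):

    /* nr == 0: only make sure the per-domain L3 page exists (HVM case). */
    create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, NULL, NULL);

    /* Both NULL, nr > 0: reserve the range (L3/L2 populated only); L1
     * entries are installed later, e.g. GDT frames via set_gdt().       */
    create_perdomain_mapping(d, GDT_LDT_VIRT_START, nr, NULL, NULL);

    /* pl1tab an array: xenheap L1 tables, recorded for direct access.   */
    create_perdomain_mapping(d, MAPCACHE_VIRT_START, nr, l1tab, NULL);

    /* ppg == NIL(): also allocate, zero and map leaf pages, without
     * handing their struct page_info pointers back to the caller.       */
    create_perdomain_mapping(d, va, nr, NULL, NIL(struct page_info *));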
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 70f70b3..dc928ed 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -304,19 +304,13 @@ extern unsigned long xen_phys_start;
 #define LDT_VIRT_START(v)    \
     (GDT_VIRT_START(v) + (64*1024))
 
-/* map_domain_page() map cache. The last per-domain-mapping sub-area. */
+/* map_domain_page() map cache. The second per-domain-mapping sub-area. */
 #define MAPCACHE_VCPU_ENTRIES    (CONFIG_PAGING_LEVELS * CONFIG_PAGING_LEVELS)
 #define MAPCACHE_ENTRIES         (MAX_VIRT_CPUS * MAPCACHE_VCPU_ENTRIES)
-#define MAPCACHE_SLOT            (PERDOMAIN_SLOTS - 1)
-#define MAPCACHE_VIRT_START      PERDOMAIN_VIRT_SLOT(MAPCACHE_SLOT)
+#define MAPCACHE_VIRT_START      PERDOMAIN_VIRT_SLOT(1)
 #define MAPCACHE_VIRT_END        (MAPCACHE_VIRT_START + \
                                   MAPCACHE_ENTRIES * PAGE_SIZE)
 
-#define PDPT_L1_ENTRIES       \
-    ((PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS - 1) - PERDOMAIN_VIRT_START) >> PAGE_SHIFT)
-#define PDPT_L2_ENTRIES       \
-    ((PDPT_L1_ENTRIES + (1 << PAGETABLE_ORDER) - 1) >> PAGETABLE_ORDER)
-
 #define ELFSIZE 64
 
 #define ARCH_CRASH_SAVE_VMCOREINFO
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 91d7d4f..30efe33 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -223,6 +223,8 @@ struct time_scale {
 
 struct pv_domain
 {
+    l1_pgentry_t **gdt_ldt_l1tab;
+
     /* Shared page for notifying that explicit PIRQ EOI is required. */
     unsigned long *pirq_eoi_map;
     unsigned long pirq_eoi_map_mfn;
@@ -241,8 +243,6 @@ struct pv_domain
 
 struct arch_domain
 {
-    void **perdomain_pts;
-    struct page_info *perdomain_l2_pg[PERDOMAIN_SLOTS];
     struct page_info *perdomain_l3_pg;
 
     unsigned int hv_compat_vstart;
@@ -318,10 +318,10 @@ struct arch_domain
 #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
 #define has_arch_mmios(d)    (!rangeset_is_empty((d)->iomem_caps))
 
-#define perdomain_pt_idx(v) \
+#define gdt_ldt_pt_idx(v) \
     ((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT))
-#define perdomain_ptes(d, v) \
-    ((l1_pgentry_t *)(d)->arch.perdomain_pts[perdomain_pt_idx(v)] + \
+#define gdt_ldt_ptes(d, v) \
+    ((d)->arch.pv_domain.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
      (((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)))
 
 struct pv_vcpu
@@ -406,12 +406,6 @@ struct arch_vcpu
         struct hvm_vcpu hvm_vcpu;
     };
 
-    /*
-     * Every domain has a L1 pagetable of its own. Per-domain mappings
-     * are put in this table (eg. the current GDT is mapped here).
-     */
-    l1_pgentry_t *perdomain_ptes;
-
     pagetable_t guest_table_user;       /* (MFN) x86/64 user-space pagetable */
     pagetable_t guest_table;            /* (MFN) guest notion of cr3 */
     /* guest_table holds a ref to the page, and also a type-count unless
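With the map cache now pinned to per-domain slot 1 instead of the last slot,
its extent is still derived the same way; plugging in the x86-64 value
CONFIG_PAGING_LEVELS == 4 for illustration:

    /* MAPCACHE_VCPU_ENTRIES = 4 * 4 = 16 mapping slots per vCPU        */
    /* MAPCACHE_ENTRIES      = MAX_VIRT_CPUS * 16 slots in total        */
    /* MAPCACHE_VIRT_END     = MAPCACHE_VIRT_START
     *                         + MAX_VIRT_CPUS * 16 * PAGE_SIZE         */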
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 4f89dae..3c65a7c 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -573,6 +573,14 @@ int donate_page(
 
 int map_ldt_shadow_page(unsigned int);
 
+#define NIL(type) ((type *)NULL - 1)
+#define IS_NIL(ptr) (!((ptr) + 1))
+
+int create_perdomain_mapping(struct domain *, unsigned long va,
+                             unsigned int nr, l1_pgentry_t **,
+                             struct page_info **);
+void free_perdomain_mappings(struct domain *);
+
 extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
 
 void domain_set_alloc_bitsize(struct domain *d);
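NIL() yields a non-NULL sentinel pointer (one object below the NULL address),
and IS_NIL() recognizes it because adding 1 brings it back to NULL. Callers
use it to say "this argument is in effect, but do not store anything through
it". A sketch of the idea:

    l1_pgentry_t **pl1tab = NIL(l1_pgentry_t *);

    ASSERT(pl1tab);          /* non-NULL: "argument supplied"          */
    ASSERT(IS_NIL(pl1tab));  /* but marked: "skip the write-back path" */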
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog