# HG changeset patch # User huangwei@xxxxxxxxxxxxxxxxx # Date 1208352176 18000 # Node ID c6d49e7e9f2a14c01458dfe57c4a640f4c9db234 # Parent 08e010c3f2519a585e2948871ab6be0fe25d93d4 add super page patch diff -r 08e010c3f251 -r c6d49e7e9f2a tools/libxc/xc_hvm_build.c --- a/tools/libxc/xc_hvm_build.c Tue Apr 15 16:39:00 2008 +0100 +++ b/tools/libxc/xc_hvm_build.c Wed Apr 16 08:22:56 2008 -0500 @@ -157,8 +157,10 @@ static int setup_guest(int xc_handle, char *image, unsigned long image_size) { xen_pfn_t *page_array = NULL; + xen_pfn_t *super_page_array = NULL; unsigned long i, nr_pages = (unsigned long)memsize << (20 - PAGE_SHIFT); - unsigned long special_page_nr, entry_eip, cur_pages; + unsigned long nr_super_pages; + unsigned long special_page_nr, entry_eip, cur_pages, limit; struct xen_add_to_physmap xatp; struct shared_info *shared_info; void *e820_page; @@ -167,6 +169,8 @@ static int setup_guest(int xc_handle, uint64_t v_start, v_end; int rc; xen_capabilities_info_t caps; + int super_page_shift; + int super_page_order; /* An HVM guest must be initialised with at least 2MB memory. */ if ( memsize < 2 ) @@ -189,6 +193,15 @@ static int setup_guest(int xc_handle, PERROR("Guest OS must load to a page boundary.\n"); goto error_out; } + + /* check for PAE support and setup page size shift appropriately */ + if ( strstr(caps, "x86_32p") ) + super_page_shift = 1; + else + super_page_shift = 2; + + nr_super_pages = (unsigned long)memsize >> super_page_shift; + super_page_order = 9 + (super_page_shift - 1); IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n" " Loader: %016"PRIx64"->%016"PRIx64"\n" @@ -198,7 +211,9 @@ static int setup_guest(int xc_handle, v_start, v_end, elf_uval(&elf, elf.ehdr, e_entry)); - if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL ) + if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL || + (super_page_array = + malloc(nr_super_pages * sizeof(xen_pfn_t))) == NULL ) { PERROR("Could not allocate memory.\n"); goto error_out; @@ -206,26 +221,46 @@ static int setup_guest(int xc_handle, for ( i = 0; i < nr_pages; i++ ) page_array[i] = i; + for ( i = 0; i < nr_super_pages; i++ ) + super_page_array[i] = i << super_page_order; for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ ) page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT; - - /* - * Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. - * We allocate pages in batches of no more than 2048 to ensure that - * we can be preempted and hence dom0 remains responsive. + for ( i = HVM_BELOW_4G_RAM_END >> (PAGE_SHIFT + super_page_order); + i < nr_super_pages; i++ ) + super_page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT; + + + /* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. */ rc = xc_domain_memory_populate_physmap( xc_handle, dom, 0xa0, 0, 0, &page_array[0x00]); - cur_pages = 0xc0; - while ( (rc == 0) && (nr_pages > cur_pages) ) - { - unsigned long count = nr_pages - cur_pages; - if ( count > 2048 ) - count = 2048; + if ( rc == 0 ) rc = xc_domain_memory_populate_physmap( - xc_handle, dom, count, 0, 0, &page_array[cur_pages]); + xc_handle, dom, (0x200<<(super_page_shift-1)) - 0xc0, 0, 0, + &page_array[0xc0]); + + /* We allocate pages in batches of no more than 8MB to ensure that + * we can be preempted and hence dom0 remains responsive. 
+ */ + limit = 4 / super_page_shift; + cur_pages = 1; + while ( (rc == 0) && (nr_super_pages > cur_pages) ) + { + unsigned long count = nr_super_pages - cur_pages; + if ( count > limit ) + count = limit; + rc = xc_domain_memory_populate_physmap(xc_handle, dom, count, + super_page_order, 0, + &super_page_array[cur_pages]); cur_pages += count; } + + /* handle the case of odd number physical memory size (such as 255MB) */ + if ( rc == 0 ) + rc = xc_domain_memory_populate_physmap( + xc_handle, dom, nr_pages - (nr_super_pages << super_page_order), + 0, 0, &page_array[nr_super_pages << super_page_order]); + if ( rc != 0 ) { PERROR("Could not allocate memory for HVM guest.\n"); @@ -314,10 +349,12 @@ static int setup_guest(int xc_handle, } free(page_array); + free(super_page_array); return 0; error_out: free(page_array); + free(super_page_array); return -1; } diff -r 08e010c3f251 -r c6d49e7e9f2a xen/arch/ia64/xen/mm.c --- a/xen/arch/ia64/xen/mm.c Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/arch/ia64/xen/mm.c Wed Apr 16 08:22:56 2008 -0500 @@ -2415,7 +2415,7 @@ steal_page(struct domain *d, struct page int guest_physmap_add_page(struct domain *d, unsigned long gpfn, - unsigned long mfn) + unsigned long mfn, unsigned int page_order) { BUG_ON(!mfn_valid(mfn)); BUG_ON(mfn_to_page(mfn)->count_info != (PGC_allocated | 1)); @@ -2432,7 +2432,7 @@ guest_physmap_add_page(struct domain *d, void guest_physmap_remove_page(struct domain *d, unsigned long gpfn, - unsigned long mfn) + unsigned long mfn, unsigned int page_order) { BUG_ON(mfn == 0);//XXX zap_domain_page_one(d, gpfn << PAGE_SHIFT, 0, mfn); @@ -2838,7 +2838,8 @@ arch_memory_op(int op, XEN_GUEST_HANDLE( if (prev_mfn && mfn_valid(prev_mfn)) { if (is_xen_heap_mfn(prev_mfn)) /* Xen heap frames are simply unhooked from this phys slot. */ - guest_physmap_remove_page(d, xatp.gpfn, prev_mfn); + guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, + NORMAL_PAGE_ORDER); else /* Normal domain memory is freed, to avoid leaking memory. */ guest_remove_page(d, xatp.gpfn); @@ -2847,10 +2848,10 @@ arch_memory_op(int op, XEN_GUEST_HANDLE( /* Unmap from old location, if any. */ gpfn = get_gpfn_from_mfn(mfn); if (gpfn != INVALID_M2P_ENTRY) - guest_physmap_remove_page(d, gpfn, mfn); + guest_physmap_remove_page(d, gpfn, mfn, NORMAL_PAGE_ORDER); /* Map at new location. */ - guest_physmap_add_page(d, xatp.gpfn, mfn); + guest_physmap_add_page(d, xatp.gpfn, mfn, NORMAL_PAGE_ORDER); out: domain_unlock(d); diff -r 08e010c3f251 -r c6d49e7e9f2a xen/arch/powerpc/mm.c --- a/xen/arch/powerpc/mm.c Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/arch/powerpc/mm.c Wed Apr 16 08:22:56 2008 -0500 @@ -350,7 +350,7 @@ uint allocate_extents(struct domain *d, /* Build p2m mapping for newly allocated extent. */ mfn = page_to_mfn(pg); for (i = 0; i < (1 << ext_order); i++) - guest_physmap_add_page(d, gpfn + i, mfn + i); + guest_physmap_add_page(d, gpfn + i, mfn + i, NORMAL_PAGE_ORDER); /* Bump starting PFN by extent size pages. */ gpfn += ext_nrpages; @@ -395,7 +395,7 @@ int allocate_rma(struct domain *d, unsig clear_page((void *)page_to_maddr(&d->arch.rma_page[i])); /* Set up p2m mapping for RMA. 
*/ - guest_physmap_add_page(d, i, mfn+i); + guest_physmap_add_page(d, i, mfn+i, NORMAL_PAGE_ORDER); } /* shared_info uses last page of RMA */ @@ -563,7 +563,8 @@ int guest_physmap_max_mem_pages(struct d } void guest_physmap_add_page( - struct domain *d, unsigned long gpfn, unsigned long mfn) + struct domain *d, unsigned long gpfn, unsigned long mfn, + unsigned int page_order) { if (page_get_owner(mfn_to_page(mfn)) != d) { printk("Won't map foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id); @@ -591,7 +592,8 @@ void guest_physmap_add_page( } void guest_physmap_remove_page( - struct domain *d, unsigned long gpfn, unsigned long mfn) + struct domain *d, unsigned long gpfn, unsigned long mfn, + unsigned int page_order) { if (page_get_owner(mfn_to_page(mfn)) != d) { printk("Won't unmap foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id); diff -r 08e010c3f251 -r c6d49e7e9f2a xen/arch/x86/mm.c --- a/xen/arch/x86/mm.c Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/arch/x86/mm.c Wed Apr 16 08:22:56 2008 -0500 @@ -3319,7 +3319,8 @@ long arch_memory_op(int op, XEN_GUEST_HA { if ( is_xen_heap_mfn(prev_mfn) ) /* Xen heap frames are simply unhooked from this phys slot. */ - guest_physmap_remove_page(d, xatp.gpfn, prev_mfn); + guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, + NORMAL_PAGE_ORDER); else /* Normal domain memory is freed, to avoid leaking memory. */ guest_remove_page(d, xatp.gpfn); @@ -3328,10 +3329,10 @@ long arch_memory_op(int op, XEN_GUEST_HA /* Unmap from old location, if any. */ gpfn = get_gpfn_from_mfn(mfn); if ( gpfn != INVALID_M2P_ENTRY ) - guest_physmap_remove_page(d, gpfn, mfn); + guest_physmap_remove_page(d, gpfn, mfn, NORMAL_PAGE_ORDER); /* Map at new location. */ - guest_physmap_add_page(d, xatp.gpfn, mfn); + guest_physmap_add_page(d, xatp.gpfn, mfn, NORMAL_PAGE_ORDER); domain_unlock(d); diff -r 08e010c3f251 -r c6d49e7e9f2a xen/arch/x86/mm/hap/p2m-ept.c --- a/xen/arch/x86/mm/hap/p2m-ept.c Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/arch/x86/mm/hap/p2m-ept.c Wed Apr 16 08:22:56 2008 -0500 @@ -91,7 +91,8 @@ static int ept_next_level(struct domain } static int -ept_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt) +ept_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, + unsigned int page_order, p2m_type_t p2mt) { ept_entry_t *table = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table))); diff -r 08e010c3f251 -r c6d49e7e9f2a xen/arch/x86/mm/p2m.c --- a/xen/arch/x86/mm/p2m.c Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/arch/x86/mm/p2m.c Wed Apr 16 08:22:56 2008 -0500 @@ -151,9 +151,11 @@ p2m_next_level(struct domain *d, mfn_t * unsigned long *gfn_remainder, unsigned long gfn, u32 shift, u32 max, unsigned long type) { + l1_pgentry_t *l1_entry; l1_pgentry_t *p2m_entry; l1_pgentry_t new_entry; void *next; + int i; ASSERT(d->arch.p2m->alloc_page); if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn, @@ -194,6 +196,44 @@ p2m_next_level(struct domain *d, mfn_t * break; } } + + ASSERT(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT); + + /* split single large page into 4KB page in P2M table */ + if ( type == PGT_l1_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) ) + { + unsigned long flags, pfn; + struct page_info *pg = d->arch.p2m->alloc_page(d); + if ( pg == NULL ) + return 0; + list_add_tail(&pg->list, &d->arch.p2m->pages); + pg->u.inuse.type_info = PGT_l1_page_table | 1 | PGT_validated; + pg->count_info = 1; + + /* New splintered mappings inherit the flags of the old superpage, + * with a little reorganisation for the _PAGE_PSE_PAT bit. 
*/ + flags = l1e_get_flags(*p2m_entry); + pfn = l1e_get_pfn(*p2m_entry); + if ( pfn & 1 ) /* ==> _PAGE_PSE_PAT was set */ + pfn -= 1; /* Clear it; _PAGE_PSE becomes _PAGE_PAT */ + else + flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */ + + l1_entry = map_domain_page(mfn_x(page_to_mfn(pg))); + for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) + { + new_entry = l1e_from_pfn(pfn + i, flags); + paging_write_p2m_entry(d, gfn, + l1_entry+i, *table_mfn, new_entry, 1); + } + unmap_domain_page(l1_entry); + + new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), + __PAGE_HYPERVISOR|_PAGE_USER); + paging_write_p2m_entry(d, gfn, + p2m_entry, *table_mfn, new_entry, 2); + } + *table_mfn = _mfn(l1e_get_pfn(*p2m_entry)); next = map_domain_page(mfn_x(*table_mfn)); unmap_domain_page(*table); @@ -204,7 +244,8 @@ p2m_next_level(struct domain *d, mfn_t * // Returns 0 on error (out of memory) static int -p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt) +p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, + unsigned int page_order, p2m_type_t p2mt) { // XXX -- this might be able to be faster iff current->domain == d mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table); @@ -212,6 +253,7 @@ p2m_set_entry(struct domain *d, unsigned unsigned long gfn_remainder = gfn; l1_pgentry_t *p2m_entry; l1_pgentry_t entry_content; + l2_pgentry_t l2e_content; int rv=0; #if CONFIG_PAGING_LEVELS >= 4 @@ -236,26 +278,54 @@ p2m_set_entry(struct domain *d, unsigned PGT_l2_page_table) ) goto out; #endif - if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn, - L2_PAGETABLE_SHIFT - PAGE_SHIFT, - L2_PAGETABLE_ENTRIES, PGT_l1_page_table) ) - goto out; - - p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn, - 0, L1_PAGETABLE_ENTRIES); - ASSERT(p2m_entry); - + + if ( page_order == NORMAL_PAGE_ORDER) + { + if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn, + L2_PAGETABLE_SHIFT - PAGE_SHIFT, + L2_PAGETABLE_ENTRIES, PGT_l1_page_table) ) + goto out; + + p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn, + 0, L1_PAGETABLE_ENTRIES); + ASSERT(p2m_entry); + + if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct) ) + entry_content = l1e_from_pfn(mfn_x(mfn), p2m_type_to_flags(p2mt)); + else + entry_content = l1e_empty(); + + /* level 1 entry */ + paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1); + } + else + { + p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn, + L2_PAGETABLE_SHIFT - PAGE_SHIFT, + L2_PAGETABLE_ENTRIES); + ASSERT(p2m_entry); + + if ( (l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) && + !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) ) + { + P2M_ERROR("configure P2M table 4KB L2 entry with large page\n"); + domain_crash(d); + goto out; + } + + if ( mfn_valid(mfn) ) + l2e_content = l2e_from_pfn(mfn_x(mfn), + p2m_type_to_flags(p2mt) | _PAGE_PSE); + else + l2e_content = l2e_empty(); + + entry_content.l1 = l2e_content.l2; + paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 2); + } + /* Track the highest gfn for which we have ever had a valid mapping */ if ( mfn_valid(mfn) && (gfn > d->arch.p2m->max_mapped_pfn) ) d->arch.p2m->max_mapped_pfn = gfn; - - if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct) ) - entry_content = l1e_from_pfn(mfn_x(mfn), p2m_type_to_flags(p2mt)); - else - entry_content = l1e_empty(); - - /* level 1 entry */ - paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1); if ( iommu_enabled && is_hvm_domain(d) ) { @@ -346,6 +416,16 @@ p2m_gfn_to_mfn(struct domain *d, unsigne unmap_domain_page(l2e); return 
_mfn(INVALID_MFN); } + else if ( (l2e_get_flags(*l2e) & _PAGE_PSE) ) + { + mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr)); + *t = p2m_flags_to_type(l2e_get_flags(*l2e)); + unmap_domain_page(l2e); + + ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t)); + return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN); + } + mfn = _mfn(l2e_get_pfn(*l2e)); unmap_domain_page(l2e); @@ -369,6 +449,8 @@ static mfn_t p2m_gfn_to_mfn_current(unsi { mfn_t mfn = _mfn(INVALID_MFN); p2m_type_t p2mt = p2m_mmio_dm; + paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT; + /* XXX This is for compatibility with the old model, where anything not * XXX marked as RAM was considered to be emulated MMIO space. * XXX Once we start explicitly registering MMIO regions in the p2m @@ -377,25 +459,47 @@ static mfn_t p2m_gfn_to_mfn_current(unsi if ( gfn <= current->domain->arch.p2m->max_mapped_pfn ) { l1_pgentry_t l1e = l1e_empty(); + l2_pgentry_t l2e = l2e_empty(); int ret; ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t)); - - /* Need to __copy_from_user because the p2m is sparse and this - * part might not exist */ - ret = __copy_from_user(&l1e, - &phys_to_machine_mapping[gfn], - sizeof(l1e)); - - if ( ret == 0 ) { - p2mt = p2m_flags_to_type(l1e_get_flags(l1e)); - ASSERT(l1e_get_pfn(l1e) != INVALID_MFN || !p2m_is_ram(p2mt)); + + /* need to copy_from_user because p2m is sparse and this part might not + * exist. + */ + ret = __copy_from_user(&l2e, + &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START) + l2_linear_offset(addr)], + sizeof(l2e)); + + + if ( (ret == 0) && (l2e_get_flags(l2e) & _PAGE_PRESENT) && + (l2e_get_flags(l2e) & _PAGE_PSE) ) + { + p2mt = p2m_flags_to_type(l2e_get_flags(l2e)); + ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt)); if ( p2m_is_valid(p2mt) ) - mfn = _mfn(l1e_get_pfn(l1e)); - else - /* XXX see above */ + mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr)); + else p2mt = p2m_mmio_dm; + } + else + { + /* Need to __copy_from_user because the p2m is sparse and this + * part might not exist */ + ret = __copy_from_user(&l1e, + &phys_to_machine_mapping[gfn], + sizeof(l1e)); + + if ( ret == 0 ) { + p2mt = p2m_flags_to_type(l1e_get_flags(l1e)); + ASSERT(l1e_get_pfn(l1e) != INVALID_MFN || !p2m_is_ram(p2mt)); + if ( p2m_is_valid(p2mt) ) + mfn = _mfn(l1e_get_pfn(l1e)); + else + /* XXX see above */ + p2mt = p2m_mmio_dm; + } } } @@ -441,9 +545,10 @@ void p2m_change_entry_type_global(struct } static inline -int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt) -{ - return d->arch.p2m->set_entry(d, gfn, mfn, p2mt); +int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, + unsigned int page_order, p2m_type_t p2mt) +{ + return d->arch.p2m->set_entry(d, gfn, mfn, page_order, p2mt); } // Allocate a new p2m table for a domain. @@ -506,7 +611,8 @@ int p2m_alloc_table(struct domain *d, P2M_PRINTK("populating p2m table\n"); /* Initialise physmap tables for slot zero. Other code assumes this. 
*/ - if ( !set_p2m_entry(d, 0, _mfn(INVALID_MFN), p2m_invalid) ) + if ( !set_p2m_entry(d, 0, _mfn(INVALID_MFN), NORMAL_PAGE_ORDER, + p2m_invalid) ) goto error; /* Copy all existing mappings from the page list and m2p */ @@ -525,7 +631,7 @@ int p2m_alloc_table(struct domain *d, (gfn != 0x55555555L) #endif && gfn != INVALID_M2P_ENTRY - && !set_p2m_entry(d, gfn, mfn, p2m_ram_rw) ) + && !set_p2m_entry(d, gfn, mfn, NORMAL_PAGE_ORDER, p2m_ram_rw) ) goto error; } @@ -710,6 +816,28 @@ static void audit_p2m(struct domain *d) gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT); continue; } + + /* check for super page */ + if ( l2e_get_flags(l2e[i2]) & _PAGE_PSE ) + { + mfn = l2e_get_pfn(l2e[i2]); + ASSERT(mfn_valid(_mfn(mfn))); + for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++) + { + m2pfn = get_gpfn_from_mfn(mfn+i1); + if ( m2pfn != (gfn + i) ) + { + pmbad++; + P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx" + " -> gfn %#lx\n", gfn+i, mfn+i, + m2pfn); + BUG(); + } + } + gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT); + continue; + } + l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2])))); for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ ) @@ -763,35 +891,40 @@ static void audit_p2m(struct domain *d) static void -p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn) -{ +p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn, + unsigned int page_order) +{ + int i; if ( !paging_mode_translate(d) ) return; P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn); - set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid); - set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY); + set_p2m_entry(d, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid); + for (i = 0; i < (1UL << page_order); i++ ) + set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY); } void guest_physmap_remove_page(struct domain *d, unsigned long gfn, - unsigned long mfn) + unsigned long mfn, unsigned int page_order) { p2m_lock(d->arch.p2m); audit_p2m(d); - p2m_remove_page(d, gfn, mfn); + p2m_remove_page(d, gfn, mfn, page_order); audit_p2m(d); p2m_unlock(d->arch.p2m); } int guest_physmap_add_entry(struct domain *d, unsigned long gfn, - unsigned long mfn, p2m_type_t t) + unsigned long mfn, unsigned int page_order, + p2m_type_t t) { unsigned long ogfn; p2m_type_t ot; mfn_t omfn; int rc = 0; + int i; if ( !paging_mode_translate(d) ) return -EINVAL; @@ -821,7 +954,8 @@ guest_physmap_add_entry(struct domain *d if ( p2m_is_ram(ot) ) { ASSERT(mfn_valid(omfn)); - set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY); + for ( i = 0; i < (1UL << page_order); i++ ) + set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY); } ogfn = mfn_to_gfn(d, _mfn(mfn)); @@ -844,21 +978,23 @@ guest_physmap_add_entry(struct domain *d P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n", ogfn , mfn_x(omfn)); if ( mfn_x(omfn) == mfn ) - p2m_remove_page(d, ogfn, mfn); + p2m_remove_page(d, ogfn, mfn, page_order); } } if ( mfn_valid(_mfn(mfn)) ) { - if ( !set_p2m_entry(d, gfn, _mfn(mfn), t) ) + if ( !set_p2m_entry(d, gfn, _mfn(mfn), page_order, t) ) rc = -EINVAL; - set_gpfn_from_mfn(mfn, gfn); + for (i = 0; i < (1UL << page_order); i++) + set_gpfn_from_mfn(mfn+i, gfn+i); } else { gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n", gfn, mfn); - if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid) ) + if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), page_order, + p2m_invalid) ) rc = -EINVAL; } @@ -877,7 +1013,7 @@ void p2m_change_type_global(struct domai l1_pgentry_t l1e_content; l1_pgentry_t *l1e; l2_pgentry_t *l2e; - mfn_t l1mfn; + mfn_t l1mfn, l2mfn; int i1, i2; #if 
CONFIG_PAGING_LEVELS >= 3 l3_pgentry_t *l3e; @@ -902,6 +1038,7 @@ void p2m_change_type_global(struct domai l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table))); #else /* CONFIG_PAGING_LEVELS == 2 */ l2e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table))); + l2mfn = mfn_x(pagetable_get_mfn(d->arch.phys_table); #endif #if CONFIG_PAGING_LEVELS >= 3 @@ -922,6 +1059,7 @@ void p2m_change_type_global(struct domai { continue; } + l2mfn = _mfn(l3e_get_pfn(l3e[i3])); l2e = map_domain_page(l3e_get_pfn(l3e[i3])); #endif /* all levels... */ for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ ) @@ -930,6 +1068,20 @@ void p2m_change_type_global(struct domai { continue; } + + if ( (l2e_get_flags(l2e[i2]) & _PAGE_PSE) ) + { + flags = l2e_get_flags(l2e[i2]); + if ( p2m_flags_to_type(flags) != ot ) + continue; + mfn = l2e_get_pfn(l2e[i2]); + gfn = get_gpfn_from_mfn(mfn); + flags = p2m_flags_to_type(nt); + l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE); + paging_write_p2m_entry(d, gfn, (l1_pgentry_t *)&l2e[i2], + l2mfn, l1e_content, 2); + continue; + } l1mfn = _mfn(l2e_get_pfn(l2e[i2])); l1e = map_domain_page(mfn_x(l1mfn)); @@ -980,7 +1132,7 @@ p2m_type_t p2m_change_type(struct domain mfn = gfn_to_mfn(d, gfn, &pt); if ( pt == ot ) - set_p2m_entry(d, gfn, mfn, nt); + set_p2m_entry(d, gfn, mfn, NORMAL_PAGE_ORDER, nt); p2m_unlock(d->arch.p2m); @@ -1004,7 +1156,7 @@ set_mmio_p2m_entry(struct domain *d, uns set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY); } - rc = set_p2m_entry(d, gfn, mfn, p2m_mmio_direct); + rc = set_p2m_entry(d, gfn, mfn, NORMAL_PAGE_ORDER, p2m_mmio_direct); if ( 0 == rc ) gdprintk(XENLOG_ERR, "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n", @@ -1028,7 +1180,7 @@ clear_mmio_p2m_entry(struct domain *d, u "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn); return 0; } - rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), 0); + rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), NORMAL_PAGE_ORDER, 0); return rc; } diff -r 08e010c3f251 -r c6d49e7e9f2a xen/arch/x86/mm/shadow/common.c --- a/xen/arch/x86/mm/shadow/common.c Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/arch/x86/mm/shadow/common.c Wed Apr 16 08:22:56 2008 -0500 @@ -2782,6 +2782,26 @@ shadow_write_p2m_entry(struct vcpu *v, u } } + /* If we're removing a superpage mapping from the p2m, remove all the + * MFNs covered by it from the shadows too. 
*/ + if ( level == 2 && (l1e_get_flags(*p) & _PAGE_PRESENT) && + (l1e_get_flags(*p) & _PAGE_PSE) ) + { + unsigned int i; + mfn_t mfn = _mfn(l1e_get_pfn(*p)); + p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p)); + if ( p2m_is_valid(p2mt) && mfn_valid(mfn) ) + { + for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) + { + sh_remove_all_shadows_and_parents(v, mfn); + if ( sh_remove_all_mappings(v, mfn) ) + flush_tlb_mask(d->domain_dirty_cpumask); + mfn = _mfn(mfn_x(mfn) + 1); + } + } + } + /* Update the entry with new content */ safe_write_pte(p, new); diff -r 08e010c3f251 -r c6d49e7e9f2a xen/common/grant_table.c --- a/xen/common/grant_table.c Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/common/grant_table.c Wed Apr 16 08:22:56 2008 -0500 @@ -1159,7 +1159,7 @@ gnttab_transfer( spin_lock(&e->grant_table->lock); sha = &shared_entry(e->grant_table, gop.ref); - guest_physmap_add_page(e, sha->frame, mfn); + guest_physmap_add_page(e, sha->frame, mfn, NORMAL_PAGE_ORDER); sha->frame = mfn; wmb(); sha->flags |= GTF_transfer_completed; diff -r 08e010c3f251 -r c6d49e7e9f2a xen/common/memory.c --- a/xen/common/memory.c Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/common/memory.c Wed Apr 16 08:22:56 2008 -0500 @@ -114,34 +114,60 @@ static void populate_physmap(struct memo page = alloc_domheap_pages( d, a->extent_order, a->memflags | MEMF_node(node)); - if ( unlikely(page == NULL) ) - { - gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: " - "id=%d memflags=%x (%ld of %d)\n", - a->extent_order, d->domain_id, a->memflags, - i, a->nr_extents); - goto out; - } - - mfn = page_to_mfn(page); - - if ( unlikely(paging_mode_translate(d)) ) - { + + if ( unlikely(page == NULL) ) + { + /* fail if it is not under translate mode */ + if ( !paging_mode_translate(d) ) + { + gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: " + "id=%d memflags=%x (%ld of %d)\n", + a->extent_order, d->domain_id, a->memflags, + i, a->nr_extents); + goto out; + } + + /* try to allocate using 4KB page instead */ for ( j = 0; j < (1 << a->extent_order); j++ ) - if ( guest_physmap_add_page(d, gpfn + j, mfn + j) ) + { + page = alloc_domheap_pages(d, 0, + a->memflags | MEMF_node(node)); + if ( page == NULL ) + { + gdprintk(XENLOG_INFO, "Could not allocate order=%d extent:" + "id=%d memflags=%x (%ld of %d)\n", + 0, d->domain_id, a->memflags, i, a->nr_extents); goto out; - } - else - { - for ( j = 0; j < (1 << a->extent_order); j++ ) - set_gpfn_from_mfn(mfn + j, gpfn + j); - - /* Inform the domain of the new page's machine address. */ - if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) ) - goto out; - } - } - + } + + mfn = page_to_mfn(page); + + if ( guest_physmap_add_page(d, gpfn+j, mfn, + NORMAL_PAGE_ORDER) ) + goto out; + } + } + else /* successful in allocating page of extent_order */ + { + mfn = page_to_mfn(page); + + if ( unlikely(paging_mode_translate(d)) ) + { + if ( guest_physmap_add_page(d, gpfn, mfn, a->extent_order) ) + goto out; + } + else + { + for ( j = 0; j < (1 << a->extent_order); j++ ) + set_gpfn_from_mfn(mfn + j, gpfn + j); + + /* Inform the domain of the new page's machine address. 
*/ + if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, + 1)) ) + goto out; + } + } + } out: a->nr_done = i; } @@ -172,7 +198,7 @@ int guest_remove_page(struct domain *d, if ( test_and_clear_bit(_PGC_allocated, &page->count_info) ) put_page(page); - guest_physmap_remove_page(d, gmfn, mfn); + guest_physmap_remove_page(d, gmfn, mfn, NORMAL_PAGE_ORDER); put_page(page); @@ -419,7 +445,8 @@ static long memory_exchange(XEN_GUEST_HA if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) ) BUG(); mfn = page_to_mfn(page); - guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn); + guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn, + NORMAL_PAGE_ORDER); put_page(page); } @@ -441,8 +468,8 @@ static long memory_exchange(XEN_GUEST_HA if ( unlikely(paging_mode_translate(d)) ) { /* Ignore failure here. There's nothing we can do. */ - for ( k = 0; k < (1UL << exch.out.extent_order); k++ ) - (void)guest_physmap_add_page(d, gpfn + k, mfn + k); + (void)guest_physmap_add_page(d, gpfn, mfn, + exch.out.extent_order); } else { diff -r 08e010c3f251 -r c6d49e7e9f2a xen/include/asm-ia64/shadow.h --- a/xen/include/asm-ia64/shadow.h Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/include/asm-ia64/shadow.h Wed Apr 16 08:22:56 2008 -0500 @@ -40,8 +40,10 @@ * Utilities to change relationship of gpfn->mfn for designated domain, * which is required by gnttab transfer, balloon, device model and etc. */ -int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn); -void guest_physmap_remove_page(struct domain *d, unsigned long gpfn, unsigned long mfn); +int guest_physmap_add_page(struct domain *d, unsigned long gpfn, + unsigned long mfn, unsigned int page_order); +void guest_physmap_remove_page(struct domain *d, unsigned long gpfn, + unsigned long mfn, unsigned int page_order); static inline int shadow_mode_enabled(struct domain *d) diff -r 08e010c3f251 -r c6d49e7e9f2a xen/include/asm-powerpc/mm.h --- a/xen/include/asm-powerpc/mm.h Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/include/asm-powerpc/mm.h Wed Apr 16 08:22:56 2008 -0500 @@ -278,9 +278,11 @@ extern int guest_physmap_max_mem_pages(s extern int guest_physmap_max_mem_pages(struct domain *d, unsigned long new_max); extern void guest_physmap_add_page( - struct domain *d, unsigned long gpfn, unsigned long mfn); + struct domain *d, unsigned long gpfn, unsigned long mfn, + unsigned int page_oder); extern void guest_physmap_remove_page( - struct domain *d, unsigned long gpfn, unsigned long mfn); + struct domain *d, unsigned long gpfn, unsigned long mfn, + unsigned int page_order); #endif diff -r 08e010c3f251 -r c6d49e7e9f2a xen/include/asm-x86/mm.h --- a/xen/include/asm-x86/mm.h Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/include/asm-x86/mm.h Wed Apr 16 08:22:56 2008 -0500 @@ -128,6 +128,15 @@ static inline u32 pickle_domptr(struct d #define SHADOW_MAX_ORDER 2 /* Need up to 16k allocs for 32-bit on PAE/64 */ #endif +/* The order of continuously allocated super page frames */ +#define NORMAL_PAGE_ORDER 0 /* 4KB page */ +#if CONFIG_PAGING_LEVELS == 2 +#define SUPER_PAGE_ORDER 10 /* 4MB page */ +#else +#define SUPER_PAGE_ORDER 9 /* 2MB page */ +#endif + + #define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain)) #define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d)) diff -r 08e010c3f251 -r c6d49e7e9f2a xen/include/asm-x86/p2m.h --- a/xen/include/asm-x86/p2m.h Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/include/asm-x86/p2m.h Wed Apr 16 08:22:56 2008 -0500 @@ -102,7 +102,8 @@ struct p2m_domain { void (*free_page )(struct 
domain *d, struct page_info *pg); int (*set_entry )(struct domain *d, unsigned long gfn, - mfn_t mfn, p2m_type_t p2mt); + mfn_t mfn, unsigned int page_order, + p2m_type_t p2mt); mfn_t (*get_entry )(struct domain *d, unsigned long gfn, p2m_type_t *p2mt); mfn_t (*get_entry_current)(unsigned long gfn, @@ -203,21 +204,23 @@ void p2m_final_teardown(struct domain *d /* Add a page to a domain's p2m table */ int guest_physmap_add_entry(struct domain *d, unsigned long gfn, - unsigned long mfn, p2m_type_t t); + unsigned long mfn, unsigned int page_order, + p2m_type_t t); /* Untyped version for RAM only, for compatibility * * Return 0 for success */ static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn, - unsigned long mfn) -{ - return guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw); + unsigned long mfn, + unsigned int page_order) +{ + return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw); } /* Remove a page from a domain's p2m table */ void guest_physmap_remove_page(struct domain *d, unsigned long gfn, - unsigned long mfn); + unsigned long mfn, unsigned int page_order); /* Change types across all p2m entries in a domain */ void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt); diff -r 08e010c3f251 -r c6d49e7e9f2a xen/include/xen/paging.h --- a/xen/include/xen/paging.h Tue Apr 15 16:39:00 2008 +0100 +++ b/xen/include/xen/paging.h Wed Apr 16 08:22:56 2008 -0500 @@ -17,9 +17,9 @@ #else -#define paging_mode_translate(d) (0) -#define guest_physmap_add_page(d, p, m) (0) -#define guest_physmap_remove_page(d, p, m) ((void)0) +#define paging_mode_translate(d) (0) +#define guest_physmap_add_page(d, p, m, o) (0) +#define guest_physmap_remove_page(d, p, m, o) ((void)0) #endif
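
Illustration (not part of the applied diff): the superpage sizing arithmetic in setup_guest() is easier to check in isolation. The sketch below repeats the same calculation with locally chosen names (show_layout, pae) and assumes PAGE_SHIFT == 12; the pae flag stands in for the strstr(caps, "x86_32p") test.

    /* Standalone sketch of the sizing math in setup_guest(); illustrative only. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static void show_layout(unsigned long memsize_mb, int pae)
    {
        unsigned long nr_pages = memsize_mb << (20 - PAGE_SHIFT);
        /* PAE guests get 2MB superpages (shift 1), other guests 4MB (shift 2). */
        int super_page_shift = pae ? 1 : 2;
        int super_page_order = 9 + (super_page_shift - 1); /* 512 or 1024 pages */
        unsigned long nr_super_pages = memsize_mb >> super_page_shift;
        /* Batches are capped at 8MB so dom0 stays responsive: 4x2MB or 2x4MB. */
        unsigned long limit = 4 / super_page_shift;
        /* Memory that is not a whole number of superpages falls back to 4KB pages. */
        unsigned long tail_pages = nr_pages - (nr_super_pages << super_page_order);

        printf("%lu MB: %lu pages = %lu superpages of order %d + %lu tail pages,"
               " batch limit %lu\n", memsize_mb, nr_pages, nr_super_pages,
               super_page_order, tail_pages, limit);
    }

    int main(void)
    {
        show_layout(255, 1); /* 255MB PAE guest: 127 x 2MB plus a 256-page tail */
        show_layout(256, 0); /* 256MB non-PAE guest: 64 x 4MB, no 4KB tail */
        return 0;
    }

For 255MB with PAE this gives 127 superpages and a 256-page (1MB) tail, which is exactly the non-multiple-of-superpage case handled by the final xc_domain_memory_populate_physmap() call in the diff above.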
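
Illustration (not part of the applied diff): the "little reorganisation for the _PAGE_PSE_PAT bit" in p2m_next_level() relies on the x86 encoding where the PAT attribute of a 2MB mapping sits in bit 12 of the PDE (bit 0 of the value l1e_get_pfn() returns), while a 4KB PTE keeps PAT in bit 7, the position a PDE uses for PSE. The sketch below performs the same flag shuffle on its own; the constant and function names are local to the example and assume the standard x86 bit layout.

    /* Flag/pfn reshuffle done when one 2MB p2m entry is split into 512 4KB
     * entries.  Bit 7 is PSE in a PDE and PAT in a PTE; the superpage PAT
     * bit is bit 12 of the PDE, i.e. bit 0 of its pfn field. */
    #include <stdio.h>

    #define PSE_OR_PAT (1UL << 7)

    static void split_superpage(unsigned long pde_flags, unsigned long pde_pfn,
                                unsigned long *pte_flags, unsigned long *pte_pfn)
    {
        if ( pde_pfn & 1 )            /* superpage PAT was set */
            pde_pfn -= 1;             /* drop it; bit 7 now reads as PAT in PTEs */
        else
            pde_flags &= ~PSE_OR_PAT; /* no PAT: clear what used to be PSE */
        *pte_flags = pde_flags;
        *pte_pfn = pde_pfn;           /* PTE i then maps pfn + i, i = 0..511 */
    }

    int main(void)
    {
        unsigned long f, p;
        split_superpage(0x1e3, 0x40000, &f, &p); /* PSE set, superpage PAT clear */
        printf("no PAT:   flags %#lx, base pfn %#lx\n", f, p);
        split_superpage(0x1e3, 0x40001, &f, &p); /* PSE and superpage PAT set */
        printf("with PAT: flags %#lx, base pfn %#lx\n", f, p);
        return 0;
    }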
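
Illustration (not part of the applied diff): once a PSE L2 entry is installed, p2m_gfn_to_mfn() and p2m_gfn_to_mfn_current() resolve a gfn by adding l1_table_offset(addr), i.e. the low nine bits of the gfn, to the pfn stored in the entry. A minimal sketch of that index arithmetic for the order-9 (2MB) case, with locally defined names:

    /* gfn -> mfn lookup through a single 2MB (order-9) p2m entry.
     * base_mfn is the (512-aligned) pfn held in the PSE L2 entry. */
    #include <assert.h>

    #define SUPERPAGE_ORDER 9
    #define SUPERPAGE_PAGES (1UL << SUPERPAGE_ORDER)

    static unsigned long superpage_gfn_to_mfn(unsigned long base_mfn,
                                              unsigned long gfn)
    {
        /* Same as l2e_get_pfn(*l2e) + l1_table_offset(gfn << PAGE_SHIFT). */
        return base_mfn + (gfn & (SUPERPAGE_PAGES - 1));
    }

    int main(void)
    {
        /* gfn 0x40123 lies in the superpage at gfn 0x40000; if that superpage
         * is backed by machine frames starting at 0x80000, it maps to 0x80123. */
        assert(superpage_gfn_to_mfn(0x80000, 0x40123) == 0x80123);
        return 0;
    }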