Re: [Xen-devel] [PATCH v2 5/5] libxc: create p2m list outside of kernel mapping if supported
On Fri, 2015-10-02 at 07:49 +0200, Juergen Gross wrote:

> +    /* Allocate p2m list if outside of initial kernel mapping. */
> +    if ( dom->arch_hooks->alloc_p2m_list && dom->parms.p2m_base != UNSET_ADDR )
> +    {
> +        if ( dom->arch_hooks->alloc_p2m_list(dom) != 0 )
> +            goto err;
> +        dom->p2m_seg.vend = dom->p2m_seg.vend - dom->p2m_seg.vstart;
> +        dom->p2m_seg.vstart = dom->parms.p2m_base;
> +        dom->p2m_seg.vend += dom->p2m_seg.vstart;

Here is this strange pattern again. It seems like you should be adding new
APIs to the dom builder's VA/PA allocation stuff and using those instead of
working around the behaviour of the existing ones.

> +    }

> +    /*
> +     * Build the page tables for mapping the p2m list at an address
> +     * specified by the to be loaded kernel.
> +     * l1pfn holds the pfn of the next page table to allocate.
> +     * At each level we might already have an entry filled when setting
> +     * up the initial kernel mapping. This can happen for the last entry
> +     * of each level only!
> +     */
> +    l3tab = NULL;
> +    l2tab = NULL;
> +    l1tab = NULL;
> +    l1pfn = round_pfn(dom->p2m_size * dom->arch_hooks->sizeof_pfn) +
> +            dom->p2m_seg.pfn;
> +
> +    for ( addr = dom->parms.p2m_base;
> +          addr < dom->parms.p2m_base +
> +                 dom->p2m_size * dom->arch_hooks->sizeof_pfn;
> +          addr += PAGE_SIZE_X86 )

This is replicating a bunch of existing setup_pgtable_* code. Please
refactor into a helper (one per PT layout) to map a region and use that for
the existing and new use cases.

> +    {
> +        if ( l3tab == NULL )
> +        {
> +            l4off = l4_table_offset_x86_64(addr);
> +            l3pfn = l4tab[l4off] ? l4pfn + dom->pg_l4 : l1pfn++;
> +            l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
> +            if ( l3tab == NULL )
> +                goto pfn_error;
> +            l4tab[l4off] =
> +                pfn_to_paddr(xc_dom_p2m_guest(dom, l3pfn)) | L4_PROT;
> +        }
> +
> +        if ( l2tab == NULL )
> +        {
> +            l3off = l3_table_offset_x86_64(addr);
> +            l2pfn = l3tab[l3off] ? l3pfn + dom->pg_l3 : l1pfn++;
> +            l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
> +            if ( l2tab == NULL )
> +                goto pfn_error;
> +            l3tab[l3off] =
> +                pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
> +        }
> +
> +        if ( l1tab == NULL )
> +        {
> +            l2off = l2_table_offset_x86_64(addr);
> +            l1pfn = l2tab[l2off] ? l2pfn + dom->pg_l2 : l1pfn;
> +            l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
> +            if ( l1tab == NULL )
> +                goto pfn_error;
> +            l2tab[l2off] =
> +                pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
> +            l1pfn++;
> +        }
> +
> +        l1off = l1_table_offset_x86_64(addr);
> +        pgpfn = ((addr - dom->parms.p2m_base) >> PAGE_SHIFT_X86) +
> +                dom->p2m_seg.pfn;
> +        l1tab[l1off] =
> +            pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
> +
> +        if ( l1off == (L1_PAGETABLE_ENTRIES_X86_64 - 1) )
> +        {
> +            l1tab = NULL;
> +            if ( l2off == (L2_PAGETABLE_ENTRIES_X86_64 - 1) )
> +            {
> +                l2tab = NULL;
> +                if ( l3off == (L3_PAGETABLE_ENTRIES_X86_64 - 1) )
> +                    l3tab = NULL;
> +            }
> +        }
> +    }
> +
>      return 0;
>
>  pfn_error:
> @@ -442,6 +519,27 @@ pfn_error:
>  static int alloc_p2m_list(struct xc_dom_image *dom)
>  {
>      size_t p2m_alloc_size = dom->p2m_size * dom->arch_hooks->sizeof_pfn;
> +    xen_vaddr_t from, to;
> +    xen_pfn_t tables;
> +
> +    p2m_alloc_size = round_pg(p2m_alloc_size);
> +    if ( dom->parms.p2m_base != UNSET_ADDR )
> +    {
> +        /* Add space for page tables, 64 bit only. */

Please make an alloc_p2m_list_x86_64 which does this and then calls the
common code and then use the appropriate hook for each sub arch.
> +        from = dom->parms.p2m_base;
> +        to = from + p2m_alloc_size - 1;
> +        tables = 0;
> +        tables += nr_page_tables(dom, from, to, L4_PAGETABLE_SHIFT_X86_64);
> +        if ( to > (xen_vaddr_t)(~0ULL << L4_PAGETABLE_SHIFT_X86_64) )
> +            tables--;
> +        tables += nr_page_tables(dom, from, to, L3_PAGETABLE_SHIFT_X86_64);
> +        if ( to > (xen_vaddr_t)(~0ULL << L3_PAGETABLE_SHIFT_X86_64) )
> +            tables--;
> +        tables += nr_page_tables(dom, from, to, L2_PAGETABLE_SHIFT_X86_64);
> +        if ( to > (xen_vaddr_t)(~0ULL << L2_PAGETABLE_SHIFT_X86_64) )
> +            tables--;
> +        p2m_alloc_size += tables << PAGE_SHIFT_X86;
> +    }
>
>      /* allocate phys2mach table */
>      if ( xc_dom_alloc_segment(dom, &dom->p2m_seg, "phys2mach",
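To make the review comments above more concrete, a few rough sketches follow; none of them are part of the patch under review or of existing libxc code. First, the "strange pattern" comment about rewriting p2m_seg.vstart/vend after the hook call: with a placement-aware segment allocator in the dom builder, the caller would not have to fix the virtual range up by hand. The name xc_dom_alloc_segment_at() is invented purely for illustration.

    /*
     * Hypothetical placement-aware allocator (illustration only, not an
     * existing libxc function):
     *
     *   int xc_dom_alloc_segment_at(struct xc_dom_image *dom,
     *                               struct xc_dom_seg *seg, const char *name,
     *                               xen_vaddr_t vstart, xen_vaddr_t size);
     *
     * If alloc_p2m_list() used such a helper to place the segment at
     * dom->parms.p2m_base itself, the call site would shrink to:
     */
        /* Allocate p2m list if outside of initial kernel mapping. */
        if ( dom->arch_hooks->alloc_p2m_list && dom->parms.p2m_base != UNSET_ADDR )
        {
            if ( dom->arch_hooks->alloc_p2m_list(dom) != 0 )
                goto err;
            /* p2m_seg.vstart/vend already describe the p2m_base mapping. */
        }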
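Second, the "one mapping helper per page-table layout" suggestion. Below is a rough sketch of an x86_64 region-mapping helper derived from the quoted loop. For brevity it deliberately ignores the "last entry of each level may already be filled" case that the patch comments on, so it only covers ranges that do not share partially filled tables with an earlier mapping; a real refactor would have to handle that. The helper name, its parameters and the plain uint64_t table type are assumptions; the xc_dom_* calls, the l?_table_offset_x86_64 macros and the L?_PROT / PAGE_* constants are taken from the quoted code.

    /*
     * Sketch: map npages pages, backed by pgpfn_base onwards, at virtual
     * address vstart, allocating intermediate page tables from *pt_pfn.
     * Assumes no partially filled tables are shared with earlier mappings.
     */
    static int map_range_x86_64(struct xc_dom_image *dom, xen_vaddr_t vstart,
                                xen_pfn_t npages, xen_pfn_t pgpfn_base,
                                xen_pfn_t l4pfn, xen_pfn_t *pt_pfn)
    {
        uint64_t *l4tab, *l3tab = NULL, *l2tab = NULL, *l1tab = NULL;
        uint64_t l4off, l3off = 0, l2off = 0, l1off;
        xen_vaddr_t addr = vstart;
        xen_pfn_t n, l3pfn, l2pfn, l1pfn;

        l4tab = xc_dom_pfn_to_ptr(dom, l4pfn, 1);
        if ( l4tab == NULL )
            return -1;

        for ( n = 0; n < npages; n++, addr += PAGE_SIZE_X86 )
        {
            if ( l3tab == NULL )
            {
                /* Install a fresh L3 table for this L4 slot. */
                l4off = l4_table_offset_x86_64(addr);
                l3pfn = (*pt_pfn)++;
                l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
                if ( l3tab == NULL )
                    return -1;
                l4tab[l4off] =
                    pfn_to_paddr(xc_dom_p2m_guest(dom, l3pfn)) | L4_PROT;
            }

            if ( l2tab == NULL )
            {
                l3off = l3_table_offset_x86_64(addr);
                l2pfn = (*pt_pfn)++;
                l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
                if ( l2tab == NULL )
                    return -1;
                l3tab[l3off] =
                    pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
            }

            if ( l1tab == NULL )
            {
                l2off = l2_table_offset_x86_64(addr);
                l1pfn = (*pt_pfn)++;
                l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
                if ( l1tab == NULL )
                    return -1;
                l2tab[l2off] =
                    pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
            }

            /* Point the L1 entry at the next backing page of the region. */
            l1off = l1_table_offset_x86_64(addr);
            l1tab[l1off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn_base + n)) | L1_PROT;

            /* Move on to new tables once the current ones are full. */
            if ( l1off == (L1_PAGETABLE_ENTRIES_X86_64 - 1) )
            {
                l1tab = NULL;
                if ( l2off == (L2_PAGETABLE_ENTRIES_X86_64 - 1) )
                {
                    l2tab = NULL;
                    if ( l3off == (L3_PAGETABLE_ENTRIES_X86_64 - 1) )
                        l3tab = NULL;
                }
            }
        }

        return 0;
    }

The idea would be that the existing setup_pgtables_x86_64() kernel mapping and the new p2m-list mapping both call such a helper with a shared page-table pfn cursor, instead of each open-coding the four-level walk.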
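Finally, a sketch of the requested split into per-sub-arch alloc_p2m_list hooks, keeping the page-table-space arithmetic exactly as quoted. The common helper's name is invented, and the trailing arguments to xc_dom_alloc_segment are assumed because the quoted hunk truncates that call.

    /* Common allocation, shared by both sub-arches (sketch only). */
    static int alloc_p2m_list_common(struct xc_dom_image *dom,
                                     size_t p2m_alloc_size)
    {
        /* allocate phys2mach table */
        if ( xc_dom_alloc_segment(dom, &dom->p2m_seg, "phys2mach",
                                  0, p2m_alloc_size) )
            return -1;
        /* ... remainder of the existing alloc_p2m_list() body, unchanged ... */
        return 0;
    }

    static int alloc_p2m_list_x86_32(struct xc_dom_image *dom)
    {
        size_t p2m_alloc_size = dom->p2m_size * dom->arch_hooks->sizeof_pfn;

        return alloc_p2m_list_common(dom, round_pg(p2m_alloc_size));
    }

    static int alloc_p2m_list_x86_64(struct xc_dom_image *dom)
    {
        size_t p2m_alloc_size = dom->p2m_size * dom->arch_hooks->sizeof_pfn;
        xen_vaddr_t from, to;
        xen_pfn_t tables;

        p2m_alloc_size = round_pg(p2m_alloc_size);
        if ( dom->parms.p2m_base != UNSET_ADDR )
        {
            /* Add space for page tables, 64 bit only. */
            from = dom->parms.p2m_base;
            to = from + p2m_alloc_size - 1;
            tables = 0;
            tables += nr_page_tables(dom, from, to, L4_PAGETABLE_SHIFT_X86_64);
            if ( to > (xen_vaddr_t)(~0ULL << L4_PAGETABLE_SHIFT_X86_64) )
                tables--;
            tables += nr_page_tables(dom, from, to, L3_PAGETABLE_SHIFT_X86_64);
            if ( to > (xen_vaddr_t)(~0ULL << L3_PAGETABLE_SHIFT_X86_64) )
                tables--;
            tables += nr_page_tables(dom, from, to, L2_PAGETABLE_SHIFT_X86_64);
            if ( to > (xen_vaddr_t)(~0ULL << L2_PAGETABLE_SHIFT_X86_64) )
                tables--;
            p2m_alloc_size += tables << PAGE_SHIFT_X86;
        }
        return alloc_p2m_list_common(dom, p2m_alloc_size);
    }

    /*
     * Each sub-arch's struct xc_dom_arch would then point .alloc_p2m_list
     * at alloc_p2m_list_x86_32 or alloc_p2m_list_x86_64 respectively.
     */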