[Xen-changelog] [xen-unstable] Fix pagetable accessor macros in Xen to have better names and to preserve top bits of PAE pgdirs situated above 4GB
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 9f937ecc4f544ca7f5eb0b3c3b8a71e76a6d3ee6
# Parent 8084304286224d09f9443c25a8dbeb9ed4025532
Fix pagetable accessor macros in Xen to have better names and to
preserve top bits of PAE pgdirs situated above 4GB.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/audit.c         |    4 +-
 xen/arch/x86/domain.c        |    8 ++---
 xen/arch/x86/domain_build.c  |    6 ++--
 xen/arch/x86/hvm/svm/svm.c   |   60 +++++++++++++++++++++----------------------
 xen/arch/x86/hvm/vmx/vmx.c   |   12 ++++----
 xen/arch/x86/mm.c            |    6 ++--
 xen/arch/x86/shadow.c        |    2 -
 xen/arch/x86/shadow32.c      |   14 +++++-----
 xen/arch/x86/shadow_public.c |   14 +++++-----
 xen/arch/x86/smpboot.c       |    2 -
 xen/arch/x86/x86_32/mm.c     |    3 +-
 xen/arch/x86/x86_64/mm.c     |    3 +-
 xen/arch/x86/x86_64/traps.c  |    2 -
 xen/include/asm-x86/page.h   |   11 +++++--
 14 files changed, 76 insertions(+), 71 deletions(-)

diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c      Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/audit.c      Thu Jun 01 22:21:39 2006 +0100
@@ -432,10 +432,10 @@ int audit_adjust_pgtables(struct domain 
 
         for_each_vcpu(d, v)
         {
-            if ( pagetable_get_paddr(v->arch.guest_table) )
+            if ( !pagetable_is_null(v->arch.guest_table) )
                 adjust(mfn_to_page(pagetable_get_pfn(v->arch.guest_table)),
                        !shadow_mode_refcounts(d));
-            if ( pagetable_get_paddr(v->arch.shadow_table) )
+            if ( !pagetable_is_null(v->arch.shadow_table) )
                 adjust(mfn_to_page(pagetable_get_pfn(v->arch.shadow_table)),
                        0);
             if ( v->arch.monitor_shadow_ref )
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/domain.c     Thu Jun 01 22:21:39 2006 +0100
@@ -327,7 +327,7 @@ int arch_set_info_guest(
             (gmfn_to_mfn(d, phys_basetab >> PAGE_SHIFT) << PAGE_SHIFT) |
             (phys_basetab & ~PAGE_MASK);
 
-        v->arch.guest_table = mk_pagetable(phys_basetab);
+        v->arch.guest_table = pagetable_from_paddr(phys_basetab);
     }
 
     if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
@@ -335,7 +335,7 @@ int arch_set_info_guest(
 
     if ( c->flags & VGCF_HVM_GUEST )
     {
-        v->arch.guest_table = mk_pagetable(0);
+        v->arch.guest_table = pagetable_null();
 
         if ( !hvm_initialize_guest_resources(v) )
             return -EINVAL;
@@ -935,7 +935,7 @@ void domain_relinquish_resources(struct 
                 put_page_type(mfn_to_page(pfn));
             put_page(mfn_to_page(pfn));
 
-            v->arch.guest_table = mk_pagetable(0);
+            v->arch.guest_table = pagetable_null();
         }
 
         if ( (pfn = pagetable_get_pfn(v->arch.guest_table_user)) != 0 )
@@ -944,7 +944,7 @@ void domain_relinquish_resources(struct 
                 put_page_type(mfn_to_page(pfn));
             put_page(mfn_to_page(pfn));
 
-            v->arch.guest_table_user = mk_pagetable(0);
+            v->arch.guest_table_user = pagetable_null();
         }
     }
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/domain_build.c       Thu Jun 01 22:21:39 2006 +0100
@@ -443,13 +443,13 @@ int construct_dom0(struct domain *d,
         l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
             l2e_from_paddr((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
     }
-    v->arch.guest_table = mk_pagetable((unsigned long)l3start);
+    v->arch.guest_table = pagetable_from_paddr((unsigned long)l3start);
 #else
     l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
     memcpy(l2tab, idle_pg_table, PAGE_SIZE);
     l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
         l2e_from_paddr((unsigned long)l2start, __PAGE_HYPERVISOR);
-    v->arch.guest_table = mk_pagetable((unsigned long)l2start);
+    v->arch.guest_table = pagetable_from_paddr((unsigned long)l2start);
 #endif
 
     for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
@@ -577,7 +577,7 @@ int construct_dom0(struct domain *d,
         l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
-    v->arch.guest_table = mk_pagetable(__pa(l4start));
+    v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
 
     l4tab += l4_table_offset(dsi.v_start);
     mfn = alloc_spfn;
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Jun 01 22:21:39 2006 +0100
@@ -744,34 +744,34 @@ static void svm_ctxt_switch_to(struct vc
 
 void svm_final_setup_guest(struct vcpu *v)
 {
+    struct domain *d = v->domain;
+    struct vcpu *vc;
+
     v->arch.schedule_tail = arch_svm_do_launch;
     v->arch.ctxt_switch_from = svm_ctxt_switch_from;
     v->arch.ctxt_switch_to = svm_ctxt_switch_to;
 
-    if (v == v->domain->vcpu[0])
-    {
-        struct domain *d = v->domain;
-        struct vcpu *vc;
-
-        /* Initialize monitor page table */
-        for_each_vcpu(d, vc)
-            vc->arch.monitor_table = mk_pagetable(0);
-
-        /*
-         * Required to do this once per domain
-         * TODO: add a seperate function to do these.
-         */
-        memset(&d->shared_info->evtchn_mask[0], 0xff,
-               sizeof(d->shared_info->evtchn_mask));
-
-        /*
-         * Put the domain in shadow mode even though we're going to be using
-         * the shared 1:1 page table initially. It shouldn't hurt
-         */
-        shadow_mode_enable(d,
-                           SHM_enable|SHM_refcounts|
-                           SHM_translate|SHM_external|SHM_wr_pt_pte);
-    }
+    if ( v != d->vcpu[0] )
+        return;
+
+    /* Initialize monitor page table */
+    for_each_vcpu( d, vc )
+        vc->arch.monitor_table = pagetable_null();
+
+    /*
+     * Required to do this once per domain
+     * TODO: add a seperate function to do these.
+     */
+    memset(&d->shared_info->evtchn_mask[0], 0xff,
+           sizeof(d->shared_info->evtchn_mask));
+
+    /*
+     * Put the domain in shadow mode even though we're going to be using
+     * the shared 1:1 page table initially. It shouldn't hurt
+     */
+    shadow_mode_enable(d,
+                       SHM_enable|SHM_refcounts|
+                       SHM_translate|SHM_external|SHM_wr_pt_pte);
 }
 
@@ -868,7 +868,7 @@ static int svm_do_page_fault(unsigned lo
     /* Use 1:1 page table to identify MMIO address space */
     if (mmio_space(gpa))
     {
-        /* No support for APIC */
+        /* No support for APIC */
         if (!hvm_apic_support(v->domain) && gpa >= 0xFEC00000)
         {
             int inst_len;
@@ -1568,7 +1568,7 @@ static int svm_set_cr0(unsigned long val
         }
 
         /* Now arch.guest_table points to machine physical. */
-        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
         update_pagetables(v);
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
@@ -1588,7 +1588,7 @@ static int svm_set_cr0(unsigned long val
         if ( v->arch.hvm_svm.cpu_cr3 ) {
             put_page(mfn_to_page(get_mfn_from_gpfn(
                       v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
-            v->arch.guest_table = mk_pagetable(0);
+            v->arch.guest_table = pagetable_null();
         }
 
         /*
@@ -1597,7 +1597,7 @@ static int svm_set_cr0(unsigned long val
          * created.
          */
         if ((value & X86_CR0_PE) == 0) {
-            if (value & X86_CR0_PG) {
+            if (value & X86_CR0_PG) {
                 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
                 return 0;
             }
@@ -1738,7 +1738,7 @@ static int mov_to_cr(int gpreg, int cr, 
         }
 
         old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
 
         if (old_base_mfn)
             put_page(mfn_to_page(old_base_mfn));
@@ -1795,7 +1795,7 @@ static int mov_to_cr(int gpreg, int cr, 
            /*
             * Now arch.guest_table points to machine physical.
             */
-           v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+           v->arch.guest_table = pagetable_from_pfn(mfn);
            update_pagetables(v);
 
            HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 01 22:21:39 2006 +0100
@@ -66,7 +66,7 @@ void vmx_final_setup_guest(struct vcpu *
 
         /* Initialize monitor page table */
         for_each_vcpu(d, vc)
-            vc->arch.monitor_table = mk_pagetable(0);
+            vc->arch.monitor_table = pagetable_null();
 
         /*
          * Required to do this once per domain
@@ -1223,7 +1223,7 @@ vmx_world_restore(struct vcpu *v, struct
         if(!get_page(mfn_to_page(mfn), v->domain))
                 return 0;
         old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
         if (old_base_mfn)
              put_page(mfn_to_page(old_base_mfn));
         /*
@@ -1459,7 +1459,7 @@ static int vmx_set_cr0(unsigned long val
         /*
          * Now arch.guest_table points to machine physical.
          */
-        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
         update_pagetables(v);
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
@@ -1477,7 +1477,7 @@ static int vmx_set_cr0(unsigned long val
         if ( v->arch.hvm_vmx.cpu_cr3 ) {
             put_page(mfn_to_page(get_mfn_from_gpfn(
                       v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
-            v->arch.guest_table = mk_pagetable(0);
+            v->arch.guest_table = pagetable_null();
         }
 
         /*
@@ -1635,7 +1635,7 @@ static int mov_to_cr(int gp, int cr, str
             domain_crash_synchronous(); /* need to take a clean path */
         }
         old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
         if (old_base_mfn)
             put_page(mfn_to_page(old_base_mfn));
         /*
@@ -1690,7 +1690,7 @@ static int mov_to_cr(int gp, int cr, str
             /*
              * Now arch.guest_table points to machine physical.
              */
-            v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+            v->arch.guest_table = pagetable_from_pfn(mfn);
             update_pagetables(v);
 
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/mm.c Thu Jun 01 22:21:39 2006 +0100
@@ -1714,7 +1714,7 @@ int new_guest_cr3(unsigned long mfn)
     {
         /* Switch to idle pagetable: this VCPU has no active p.t. now. */
         old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = mk_pagetable(0);
+        v->arch.guest_table = pagetable_null();
         update_pagetables(v);
         write_cr3(__pa(idle_pg_table));
         if ( old_base_mfn != 0 )
@@ -1736,7 +1736,7 @@ int new_guest_cr3(unsigned long mfn)
         invalidate_shadow_ldt(v);
 
         old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
         update_pagetables(v); /* update shadow_table and monitor_table */
 
         write_ptbase(v);
@@ -2003,7 +2003,7 @@ int do_mmuext_op(
         {
             unsigned long old_mfn =
                 pagetable_get_pfn(v->arch.guest_table_user);
-            v->arch.guest_table_user = mk_pagetable(mfn << PAGE_SHIFT);
+            v->arch.guest_table_user = pagetable_from_pfn(mfn);
             if ( old_mfn != 0 )
                 put_page_and_type(mfn_to_page(old_mfn));
         }
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/shadow.c     Thu Jun 01 22:21:39 2006 +0100
@@ -2472,7 +2472,7 @@ static void shadow_update_pagetables(str
         if ( !get_shadow_ref(smfn) )
             BUG();
         old_smfn = pagetable_get_pfn(v->arch.shadow_table);
-        v->arch.shadow_table = mk_pagetable((u64)smfn << PAGE_SHIFT);
+        v->arch.shadow_table = pagetable_from_pfn(smfn);
         if ( old_smfn )
             put_shadow_ref(old_smfn);
 
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/shadow32.c   Thu Jun 01 22:21:39 2006 +0100
@@ -583,7 +583,7 @@ static void free_shadow_pages(struct dom
         if ( pagetable_get_paddr(v->arch.shadow_table) )
         {
             put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
-            v->arch.shadow_table = mk_pagetable(0);
+            v->arch.shadow_table = pagetable_null();
 
             if ( shadow_mode_external(d) )
             {
@@ -765,7 +765,7 @@ static void alloc_monitor_pagetable(stru
     mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
     mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = l2e_empty();
 
-    v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
+    v->arch.monitor_table = pagetable_from_pfn(mmfn);
     v->arch.monitor_vtable = mpl2e;
 
     if ( v->vcpu_id == 0 )
@@ -830,7 +830,7 @@ void free_monitor_pagetable(struct vcpu 
     unmap_domain_page_global(v->arch.monitor_vtable);
     free_domheap_page(mfn_to_page(mfn));
 
-    v->arch.monitor_table = mk_pagetable(0);
+    v->arch.monitor_table = pagetable_null();
     v->arch.monitor_vtable = 0;
 }
@@ -992,7 +992,7 @@ alloc_p2m_table(struct domain *d)
 
         l1tab = map_domain_page(page_to_mfn(page));
         memset(l1tab, 0, PAGE_SIZE);
-        d->arch.phys_table = mk_pagetable(page_to_maddr(page));
+        d->arch.phys_table = pagetable_from_page(page);
     }
 
     list_ent = d->page_list.next;
@@ -1126,7 +1126,7 @@ int shadow_direct_map_init(struct domain
     memset(root, 0, PAGE_SIZE);
     unmap_domain_page(root);
 
-    d->arch.phys_table = mk_pagetable(page_to_maddr(page));
+    d->arch.phys_table = pagetable_from_page(page);
 
     return 1;
 }
@@ -1156,7 +1156,7 @@ void shadow_direct_map_clean(struct doma
 
     unmap_domain_page(l2e);
 
-    d->arch.phys_table = mk_pagetable(0);
+    d->arch.phys_table = pagetable_null();
 }
 
 int __shadow_mode_enable(struct domain *d, unsigned int mode)
@@ -3231,7 +3231,7 @@ void __update_pagetables(struct vcpu *v)
         if ( !get_shadow_ref(smfn) )
             BUG();
         old_smfn = pagetable_get_pfn(v->arch.shadow_table);
-        v->arch.shadow_table = mk_pagetable(smfn << PAGE_SHIFT);
+        v->arch.shadow_table = pagetable_from_pfn(smfn);
         if ( old_smfn )
             put_shadow_ref(old_smfn);
 
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/shadow_public.c      Thu Jun 01 22:21:39 2006 +0100
@@ -50,7 +50,7 @@ int shadow_direct_map_init(struct domain
     memset(root, 0, PAGE_SIZE);
     root[PAE_SHADOW_SELF_ENTRY] = l3e_from_page(page, __PAGE_HYPERVISOR);
 
-    d->arch.phys_table = mk_pagetable(page_to_maddr(page));
+    d->arch.phys_table = pagetable_from_page(page);
     unmap_domain_page(root);
 
     return 1;
@@ -92,7 +92,7 @@ void shadow_direct_map_clean(struct doma
 
     unmap_domain_page(l3e);
 
-    d->arch.phys_table = mk_pagetable(0);
+    d->arch.phys_table = pagetable_null();
 }
 
 /****************************************************************************/
@@ -338,7 +338,7 @@ static void alloc_monitor_pagetable(stru
 
     /* map the phys_to_machine map into the per domain Read-Only MPT space */
 
-    v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
+    v->arch.monitor_table = pagetable_from_pfn(mmfn);
     v->arch.monitor_vtable = (l2_pgentry_t *) mpl4e;
     mpl4e[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
@@ -380,7 +380,7 @@ void free_monitor_pagetable(struct vcpu 
     unmap_domain_page_global(v->arch.monitor_vtable);
     free_domheap_page(mfn_to_page(mfn));
 
-    v->arch.monitor_table = mk_pagetable(0);
+    v->arch.monitor_table = pagetable_null();
     v->arch.monitor_vtable = 0;
 }
 #elif CONFIG_PAGING_LEVELS == 3
@@ -431,7 +431,7 @@ static void alloc_monitor_pagetable(stru
     for ( i = 0; i < (MACHPHYS_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
         mpl2e[l2_table_offset(RO_MPT_VIRT_START) + i] = l2e_empty();
 
-    v->arch.monitor_table = mk_pagetable(m3mfn << PAGE_SHIFT); /* < 4GB */
+    v->arch.monitor_table = pagetable_from_pfn(m3mfn);
     v->arch.monitor_vtable = (l2_pgentry_t *) mpl3e;
 
     if ( v->vcpu_id == 0 )
@@ -492,7 +492,7 @@ void free_monitor_pagetable(struct vcpu 
     unmap_domain_page_global(v->arch.monitor_vtable);
     free_domheap_page(mfn_to_page(m3mfn));
 
-    v->arch.monitor_table = mk_pagetable(0);
+    v->arch.monitor_table = pagetable_null();
     v->arch.monitor_vtable = 0;
 }
 #endif
@@ -924,7 +924,7 @@ void free_shadow_pages(struct domain *d)
         if ( pagetable_get_paddr(v->arch.shadow_table) )
        {
             put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
-            v->arch.shadow_table = mk_pagetable(0);
+            v->arch.shadow_table = pagetable_null();
 
             if ( shadow_mode_external(d) )
             {
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/smpboot.c    Thu Jun 01 22:21:39 2006 +0100
@@ -908,7 +908,7 @@ static int __devinit do_boot_cpu(int api
 	idle_vcpu[cpu] = v;
 	BUG_ON(v == NULL);
 
-	v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+	v->arch.monitor_table = pagetable_from_paddr(__pa(idle_pg_table));
 
 	/* start_eip had better be page-aligned! */
 	start_eip = setup_trampoline();
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/x86_32/mm.c  Thu Jun 01 22:21:39 2006 +0100
@@ -75,7 +75,8 @@ void __init paging_init(void)
     printk("PAE disabled.\n");
 #endif
 
-    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+    idle_vcpu[0]->arch.monitor_table =
+        pagetable_from_paddr(__pa(idle_pg_table));
 
     if ( cpu_has_pge )
     {
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Thu Jun 01 22:21:39 2006 +0100
@@ -81,7 +81,8 @@ void __init paging_init(void)
     l2_pgentry_t *l2_ro_mpt;
     struct page_info *pg;
 
-    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+    idle_vcpu[0]->arch.monitor_table =
+        pagetable_from_paddr(__pa(idle_pg_table));
 
     /* Create user-accessible L2 directory to map the MPT for guests. */
     l3_ro_mpt = alloc_xenheap_page();
diff -r 808430428622 -r 9f937ecc4f54 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/arch/x86/x86_64/traps.c       Thu Jun 01 22:21:39 2006 +0100
@@ -195,7 +195,7 @@ unsigned long do_iret(void)
     /* Returning to user mode? */
     if ( (iret_saved.cs & 3) == 3 )
     {
-        if ( unlikely(pagetable_get_paddr(v->arch.guest_table_user) == 0) )
+        if ( unlikely(pagetable_is_null(v->arch.guest_table_user)) )
         {
             DPRINTK("Guest switching to user mode with no user page tables\n");
             domain_crash_synchronous();
diff -r 808430428622 -r 9f937ecc4f54 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h        Thu Jun 01 21:49:25 2006 +0100
+++ b/xen/include/asm-x86/page.h        Thu Jun 01 22:21:39 2006 +0100
@@ -172,10 +172,13 @@ typedef struct { u32 pfn; } pagetable_t;
 /* x86_64 */
 typedef struct { u64 pfn; } pagetable_t;
 #endif
-#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT)
-#define pagetable_get_pfn(x)   ((x).pfn)
-#define mk_pagetable(pa)       \
-    ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })
+#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
+#define pagetable_get_pfn(x)    ((x).pfn)
+#define pagetable_is_null(x)    ((x).pfn == 0)
+#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
+#define pagetable_from_page(pg) pagetable_from_pfn(page_to_mfn(pg))
+#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
+#define pagetable_null()        pagetable_from_pfn(0)
 #endif
 
 #define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
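
The xen/include/asm-x86/page.h hunk is the heart of the change: callers now hand the accessor a frame number (or a struct page_info pointer, or a paddr_t) instead of pre-shifting an MFN into a physical address themselves. The short standalone sketch below is illustrative and not part of the changeset; it borrows PAGE_SHIFT, pagetable_t and the macro bodies from the patch, while main() and the sample MFN are invented for the demonstration. It shows the truncation the patch title refers to: with a 32-bit unsigned long (x86_32 PAE), the old "mk_pagetable(mfn << PAGE_SHIFT)" idiom overflows in 32-bit arithmetic and drops the top bits of any page directory located at or above 4GB.

/* Illustrative sketch only. Build with gcc (mk_pagetable uses a GCC
 * statement expression, as in the original Xen header). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef struct { uint64_t pfn; } pagetable_t;   /* PAE/x86_64 flavour */

/* Old constructor: takes a physical address and shifts it down. The
 * trouble is on the caller's side: with a 32-bit unsigned long,
 * "mfn << PAGE_SHIFT" wraps before this macro even runs. */
#define mk_pagetable(pa) \
    ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })

/* New constructors from the patch: take the frame number directly. */
#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
#define pagetable_get_pfn(x)    ((x).pfn)
#define pagetable_is_null(x)    ((x).pfn == 0)
#define pagetable_null()        pagetable_from_pfn(0)

int main(void)
{
    /* A frame just above 4GB: frame 0x100000 is the 4GB boundary. */
    uint32_t mfn = 0x100123;

    /* 32-bit shift wraps: 0x100123000 becomes 0x00123000. */
    uint32_t pa_truncated = mfn << PAGE_SHIFT;

    pagetable_t oldpt = mk_pagetable(pa_truncated);   /* pfn == 0x123    */
    pagetable_t newpt = pagetable_from_pfn(mfn);      /* pfn == 0x100123 */

    printf("old-style pfn = %#llx\n",
           (unsigned long long)pagetable_get_pfn(oldpt));
    printf("new-style pfn = %#llx\n",
           (unsigned long long)pagetable_get_pfn(newpt));
    printf("null check    = %d\n", pagetable_is_null(pagetable_null()));
    return 0;
}

Compiled with gcc, the first printf reports 0x123 and the second 0x100123, which is exactly the difference between losing and preserving the top bits of a PAE pgdir situated above 4GB.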