[Xen-changelog] [xen-unstable] x86: Move guest_l*e definitions into common code
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1226581282 0
# Node ID b87cc4de3ca676e895f6374daed1d33a79849b9d
# Parent  9f68b6ae6243dd8af575ce79bc60d74e1f2364c4
x86: Move guest_l*e definitions into common code

Move the definitions of guest pagetable types and the guest pagetable
walk record out of the shadow-code headers into asm-x86.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/multi.c |   16 +--
 xen/arch/x86/mm/shadow/types.h |  203 +----------------------------------------
 xen/include/asm-x86/guest_pt.h |  202 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 215 insertions(+), 206 deletions(-)

diff -r 9f68b6ae6243 -r b87cc4de3ca6 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Nov 13 10:50:50 2008 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Nov 13 13:01:22 2008 +0000
@@ -35,6 +35,7 @@
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/cacheattr.h>
 #include <asm/mtrr.h>
+#include <asm/guest_pt.h>
 #include "private.h"
 #include "types.h"
@@ -254,7 +255,7 @@ static uint32_t set_ad_bits(void *guest_
  * Return 1 to indicate success and 0 for inconsistency */
 static inline uint32_t
-shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw)
+shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw, int version)
 {
     struct domain *d = v->domain;
     guest_l1e_t *l1p;
@@ -267,9 +268,8 @@ shadow_check_gwalk(struct vcpu *v, unsig
     ASSERT(shadow_locked_by_me(d));
 
-    if ( gw->version ==
-         atomic_read(&d->arch.paging.shadow.gtable_dirty_version) )
-        return 1;
+    if ( version == atomic_read(&d->arch.paging.shadow.gtable_dirty_version) )
+        return 1;
 
     /* We may consider caching guest page mapping from last
      * guest table walk. However considering this check happens
@@ -401,9 +401,6 @@ guest_walk_tables(struct vcpu *v, unsign
     perfc_incr(shadow_guest_walk);
     memset(gw, 0, sizeof(*gw));
     gw->va = va;
-
-    gw->version = atomic_read(&d->arch.paging.shadow.gtable_dirty_version);
-    rmb();
 
     /* Mandatory bits that must be set in every entry.  We invert NX, to
      * calculate as if there were an "X" bit that allowed access.
@@ -3173,6 +3170,7 @@ static int sh_page_fault(struct vcpu *v,
     fetch_type_t ft = 0;
     p2m_type_t p2mt;
     uint32_t rc;
+    int version;
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
     int fast_emul = 0;
 #endif
@@ -3316,6 +3314,8 @@ static int sh_page_fault(struct vcpu *v,
     }
 
  rewalk:
+    version = atomic_read(&d->arch.paging.shadow.gtable_dirty_version);
+    rmb();
     rc = guest_walk_tables(v, va, &gw, regs->error_code);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -3392,7 +3392,7 @@ static int sh_page_fault(struct vcpu *v,
     }
 #endif /* OOS */
 
-    if ( !shadow_check_gwalk(v, va, &gw) )
+    if ( !shadow_check_gwalk(v, va, &gw, version) )
     {
         perfc_incr(shadow_inconsistent_gwalk);
         shadow_unlock(d);
diff -r 9f68b6ae6243 -r b87cc4de3ca6 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Thu Nov 13 10:50:50 2008 +0000
+++ b/xen/arch/x86/mm/shadow/types.h    Thu Nov 13 13:01:22 2008 +0000
@@ -191,169 +191,13 @@ static inline shadow_l4e_t shadow_l4e_fr
 })
 #endif
-
-/* Type of the guest's frame numbers */
-TYPE_SAFE(unsigned long,gfn)
-#define SH_PRI_gfn "05lx"
-
-#define VALID_GFN(m) (m != INVALID_GFN)
-
-static inline int
-valid_gfn(gfn_t m)
-{
-    return VALID_GFN(gfn_x(m));
-}
-
-static inline paddr_t
-gfn_to_paddr(gfn_t gfn)
-{
-    return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
-}
-
-/* Override gfn_to_mfn to work with gfn_t */
-#undef gfn_to_mfn
-#define gfn_to_mfn(d, g, t) _gfn_to_mfn((d), gfn_x(g), (t))
+/* The shadow types needed for the various levels. */
 
 #if GUEST_PAGING_LEVELS == 2
-
-#include "../page-guest32.h"
-
-#define GUEST_L1_PAGETABLE_ENTRIES 1024
-#define GUEST_L2_PAGETABLE_ENTRIES 1024
-#define GUEST_L1_PAGETABLE_SHIFT 12
-#define GUEST_L2_PAGETABLE_SHIFT 22
-
-/* Types of the guest's page tables */
-typedef l1_pgentry_32_t guest_l1e_t;
-typedef l2_pgentry_32_t guest_l2e_t;
-typedef intpte_32_t guest_intpte_t;
-
-/* Access functions for them */
-static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
-{ return l1e_get_paddr_32(gl1e); }
-static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
-{ return l2e_get_paddr_32(gl2e); }
-
-static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
-{ return _gfn(l1e_get_paddr_32(gl1e) >> PAGE_SHIFT); }
-static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
-{ return _gfn(l2e_get_paddr_32(gl2e) >> PAGE_SHIFT); }
-
-static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
-{ return l1e_get_flags_32(gl1e); }
-static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
-{ return l2e_get_flags_32(gl2e); }
-
-static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
-{ l1e_add_flags_32(gl1e, flags); return gl1e; }
-static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
-{ l2e_add_flags_32(gl2e, flags); return gl2e; }
-
-static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
-{ return l1e_from_pfn_32(gfn_x(gfn), flags); }
-static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
-{ return l2e_from_pfn_32(gfn_x(gfn), flags); }
-
-#define guest_l1_table_offset(a) l1_table_offset_32(a)
-#define guest_l2_table_offset(a) l2_table_offset_32(a)
-
-/* The shadow types needed for the various levels. */
 #define SH_type_l1_shadow  SH_type_l1_32_shadow
 #define SH_type_l2_shadow  SH_type_l2_32_shadow
 #define SH_type_fl1_shadow SH_type_fl1_32_shadow
-
-#else /* GUEST_PAGING_LEVELS != 2 */
-
-#if GUEST_PAGING_LEVELS == 3
-#define GUEST_L1_PAGETABLE_ENTRIES 512
-#define GUEST_L2_PAGETABLE_ENTRIES 512
-#define GUEST_L3_PAGETABLE_ENTRIES 4
-#define GUEST_L1_PAGETABLE_SHIFT 12
-#define GUEST_L2_PAGETABLE_SHIFT 21
-#define GUEST_L3_PAGETABLE_SHIFT 30
-#else /* GUEST_PAGING_LEVELS == 4 */
-#define GUEST_L1_PAGETABLE_ENTRIES 512
-#define GUEST_L2_PAGETABLE_ENTRIES 512
-#define GUEST_L3_PAGETABLE_ENTRIES 512
-#define GUEST_L4_PAGETABLE_ENTRIES 512
-#define GUEST_L1_PAGETABLE_SHIFT 12
-#define GUEST_L2_PAGETABLE_SHIFT 21
-#define GUEST_L3_PAGETABLE_SHIFT 30
-#define GUEST_L4_PAGETABLE_SHIFT 39
-#endif
-
-/* Types of the guest's page tables */
-typedef l1_pgentry_t guest_l1e_t;
-typedef l2_pgentry_t guest_l2e_t;
-typedef l3_pgentry_t guest_l3e_t;
-#if GUEST_PAGING_LEVELS >= 4
-typedef l4_pgentry_t guest_l4e_t;
-#endif
-typedef intpte_t guest_intpte_t;
-
-/* Access functions for them */
-static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
-{ return l1e_get_paddr(gl1e); }
-static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
-{ return l2e_get_paddr(gl2e); }
-static inline paddr_t guest_l3e_get_paddr(guest_l3e_t gl3e)
-{ return l3e_get_paddr(gl3e); }
-#if GUEST_PAGING_LEVELS >= 4
-static inline paddr_t guest_l4e_get_paddr(guest_l4e_t gl4e)
-{ return l4e_get_paddr(gl4e); }
-#endif
-
-static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
-{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
-static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
-{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
-static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
-{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
-#if GUEST_PAGING_LEVELS >= 4
-static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
-{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
-#endif
-
-static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
-{ return l1e_get_flags(gl1e); }
-static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
-{ return l2e_get_flags(gl2e); }
-static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
-{ return l3e_get_flags(gl3e); }
-#if GUEST_PAGING_LEVELS >= 4
-static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
-{ return l4e_get_flags(gl4e); }
-#endif
-
-static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
-{ l1e_add_flags(gl1e, flags); return gl1e; }
-static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
-{ l2e_add_flags(gl2e, flags); return gl2e; }
-static inline guest_l3e_t guest_l3e_add_flags(guest_l3e_t gl3e, u32 flags)
-{ l3e_add_flags(gl3e, flags); return gl3e; }
-#if GUEST_PAGING_LEVELS >= 4
-static inline guest_l4e_t guest_l4e_add_flags(guest_l4e_t gl4e, u32 flags)
-{ l4e_add_flags(gl4e, flags); return gl4e; }
-#endif
-
-static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
-{ return l1e_from_pfn(gfn_x(gfn), flags); }
-static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
-{ return l2e_from_pfn(gfn_x(gfn), flags); }
-static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
-{ return l3e_from_pfn(gfn_x(gfn), flags); }
-#if GUEST_PAGING_LEVELS >= 4
-static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
-{ return l4e_from_pfn(gfn_x(gfn), flags); }
-#endif
-
-#define guest_l1_table_offset(a) l1_table_offset(a)
-#define guest_l2_table_offset(a) l2_table_offset(a)
-#define guest_l3_table_offset(a) l3_table_offset(a)
-#define guest_l4_table_offset(a) l4_table_offset(a)
-
-/* The shadow types needed for the various levels. */
-#if GUEST_PAGING_LEVELS == 3
+#elif GUEST_PAGING_LEVELS == 3
 #define SH_type_l1_shadow  SH_type_l1_pae_shadow
 #define SH_type_fl1_shadow SH_type_fl1_pae_shadow
 #define SH_type_l2_shadow  SH_type_l2_pae_shadow
@@ -366,35 +210,6 @@ static inline guest_l4e_t guest_l4e_from
 #define SH_type_l3_shadow  SH_type_l3_64_shadow
 #define SH_type_l4_shadow  SH_type_l4_64_shadow
 #endif
-
-#endif /* GUEST_PAGING_LEVELS != 2 */
-
-
-/* Type used for recording a walk through guest pagetables.  It is
- * filled in by the pagetable walk function, and also used as a cache
- * for later walks.  When we encounter a suporpage l2e, we fabricate an
- * l1e for propagation to the shadow (for splintering guest superpages
- * into many shadow l1 entries). */
-typedef struct shadow_walk_t walk_t;
-struct shadow_walk_t
-{
-    unsigned long va;   /* Address we were looking for */
-#if GUEST_PAGING_LEVELS >= 3
-#if GUEST_PAGING_LEVELS >= 4
-    guest_l4e_t l4e;    /* Guest's level 4 entry */
-#endif
-    guest_l3e_t l3e;    /* Guest's level 3 entry */
-#endif
-    guest_l2e_t l2e;    /* Guest's level 2 entry */
-    guest_l1e_t l1e;    /* Guest's level 1 entry (or fabrication) */
-#if GUEST_PAGING_LEVELS >= 4
-    mfn_t l4mfn;        /* MFN that the level 4 entry was in */
-    mfn_t l3mfn;        /* MFN that the level 3 entry was in */
-#endif
-    mfn_t l2mfn;        /* MFN that the level 2 entry was in */
-    mfn_t l1mfn;        /* MFN that the level 1 entry was in */
-    int version;        /* Saved guest dirty version */
-};
 
 /* macros for dealing with the naming of the internal function names of the
  * shadow code's external entry points.
@@ -460,17 +275,9 @@ struct shadow_walk_t
 #define MFN_FITS_IN_HVM_CR3(_MFN) !(mfn_x(_MFN) >> 20)
 #endif
 
-#define SH_PRI_pte PRIpte
-
-#if GUEST_PAGING_LEVELS == 2
-#define SH_PRI_gpte "08x"
-#else /* GUEST_PAGING_LEVELS >= 3 */
-#ifndef __x86_64__
-#define SH_PRI_gpte "016llx"
-#else
-#define SH_PRI_gpte "016lx"
-#endif
-#endif /* GUEST_PAGING_LEVELS >= 3 */
+#define SH_PRI_pte PRIpte
+#define SH_PRI_gpte PRI_gpte
+#define SH_PRI_gfn PRI_gfn
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
diff -r 9f68b6ae6243 -r b87cc4de3ca6 xen/include/asm-x86/guest_pt.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/guest_pt.h    Thu Nov 13 13:01:22 2008 +0000
@@ -0,0 +1,202 @@
+/******************************************************************************
+ * xen/asm-x86/guest_pt.h
+ *
+ * Types and accessors for guest pagetable entries, as distinct from
+ * Xen's pagetable types.
+ *
+ * Users must #define GUEST_PAGING_LEVELS to 2, 3 or 4 before including
+ * this file.
+ *
+ * Parts of this code are Copyright (c) 2006 by XenSource Inc.
+ * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
+ * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _XEN_ASM_GUEST_PT_H
+#define _XEN_ASM_GUEST_PT_H
+
+/* Type of the guest's frame numbers */
+TYPE_SAFE(unsigned long,gfn)
+#define PRI_gfn "05lx"
+
+#define VALID_GFN(m) (m != INVALID_GFN)
+
+static inline int
+valid_gfn(gfn_t m)
+{
+    return VALID_GFN(gfn_x(m));
+}
+
+static inline paddr_t
+gfn_to_paddr(gfn_t gfn)
+{
+    return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
+}
+
+/* Override gfn_to_mfn to work with gfn_t */
+#undef gfn_to_mfn
+#define gfn_to_mfn(d, g, t) _gfn_to_mfn((d), gfn_x(g), (t))
+
+
+/* Types of the guest's page tables and access functions for them */
+
+#if GUEST_PAGING_LEVELS == 2
+
+#define GUEST_L1_PAGETABLE_ENTRIES 1024
+#define GUEST_L2_PAGETABLE_ENTRIES 1024
+#define GUEST_L1_PAGETABLE_SHIFT 12
+#define GUEST_L2_PAGETABLE_SHIFT 22
+
+typedef uint32_t guest_intpte_t;
+typedef struct { guest_intpte_t l1; } guest_l1e_t;
+typedef struct { guest_intpte_t l2; } guest_l2e_t;
+
+#define PRI_gpte "08x"
+
+static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
+{ return ((paddr_t) gl1e.l1) & (PADDR_MASK & PAGE_MASK); }
+static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
+{ return ((paddr_t) gl2e.l2) & (PADDR_MASK & PAGE_MASK); }
+
+static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
+{ return _gfn(guest_l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
+static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
+{ return _gfn(guest_l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
+
+static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
+{ return gl1e.l1 & 0xfff; }
+static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
+{ return gl2e.l2 & 0xfff; }
+
+static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
+{ return (guest_l1e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }
+static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
+{ return (guest_l2e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }
+
+#define guest_l1_table_offset(_va)                                          \
+    (((_va) >> GUEST_L1_PAGETABLE_SHIFT) & (GUEST_L1_PAGETABLE_ENTRIES - 1))
+#define guest_l2_table_offset(_va)                                          \
+    (((_va) >> GUEST_L2_PAGETABLE_SHIFT) & (GUEST_L2_PAGETABLE_ENTRIES - 1))
+
+#else /* GUEST_PAGING_LEVELS != 2 */
+
+#if GUEST_PAGING_LEVELS == 3
+#define GUEST_L1_PAGETABLE_ENTRIES 512
+#define GUEST_L2_PAGETABLE_ENTRIES 512
+#define GUEST_L3_PAGETABLE_ENTRIES 4
+#define GUEST_L1_PAGETABLE_SHIFT 12
+#define GUEST_L2_PAGETABLE_SHIFT 21
+#define GUEST_L3_PAGETABLE_SHIFT 30
+#else /* GUEST_PAGING_LEVELS == 4 */
+#define GUEST_L1_PAGETABLE_ENTRIES 512
+#define GUEST_L2_PAGETABLE_ENTRIES 512
+#define GUEST_L3_PAGETABLE_ENTRIES 512
+#define GUEST_L4_PAGETABLE_ENTRIES 512
+#define GUEST_L1_PAGETABLE_SHIFT 12
+#define GUEST_L2_PAGETABLE_SHIFT 21
+#define GUEST_L3_PAGETABLE_SHIFT 30
+#define GUEST_L4_PAGETABLE_SHIFT 39
+#endif
+
+typedef l1_pgentry_t guest_l1e_t;
+typedef l2_pgentry_t guest_l2e_t;
+typedef l3_pgentry_t guest_l3e_t;
+#if GUEST_PAGING_LEVELS >= 4
+typedef l4_pgentry_t guest_l4e_t;
+#endif
+typedef intpte_t guest_intpte_t;
+
+#define PRI_gpte "016"PRIx64
+
+static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
+{ return l1e_get_paddr(gl1e); }
+static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
+{ return l2e_get_paddr(gl2e); }
+static inline paddr_t guest_l3e_get_paddr(guest_l3e_t gl3e)
+{ return l3e_get_paddr(gl3e); }
+#if GUEST_PAGING_LEVELS >= 4
+static inline paddr_t guest_l4e_get_paddr(guest_l4e_t gl4e)
+{ return l4e_get_paddr(gl4e); }
+#endif
+
+static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
+{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
+static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
+{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
+static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
+{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
+#if GUEST_PAGING_LEVELS >= 4
+static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
+{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
+#endif
+
+static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
+{ return l1e_get_flags(gl1e); }
+static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
+{ return l2e_get_flags(gl2e); }
+static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
+{ return l3e_get_flags(gl3e); }
+#if GUEST_PAGING_LEVELS >= 4
+static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
+{ return l4e_get_flags(gl4e); }
+#endif
+
+static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
+{ return l1e_from_pfn(gfn_x(gfn), flags); }
+static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
+{ return l2e_from_pfn(gfn_x(gfn), flags); }
+static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
+{ return l3e_from_pfn(gfn_x(gfn), flags); }
+#if GUEST_PAGING_LEVELS >= 4
+static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
+{ return l4e_from_pfn(gfn_x(gfn), flags); }
+#endif
+
+#define guest_l1_table_offset(a) l1_table_offset(a)
+#define guest_l2_table_offset(a) l2_table_offset(a)
+#define guest_l3_table_offset(a) l3_table_offset(a)
+#define guest_l4_table_offset(a) l4_table_offset(a)
+
+#endif /* GUEST_PAGING_LEVELS != 2 */
+
+
+/* Type used for recording a walk through guest pagetables.  It is
+ * filled in by the pagetable walk function, and also used as a cache
+ * for later walks.  When we encounter a superpage l2e, we fabricate an
+ * l1e for propagation to the shadow (for splintering guest superpages
+ * into many shadow l1 entries). */
+typedef struct guest_pagetable_walk walk_t;
+struct guest_pagetable_walk
+{
+    unsigned long va;   /* Address we were looking for */
+#if GUEST_PAGING_LEVELS >= 3
+#if GUEST_PAGING_LEVELS >= 4
+    guest_l4e_t l4e;    /* Guest's level 4 entry */
+#endif
+    guest_l3e_t l3e;    /* Guest's level 3 entry */
+#endif
+    guest_l2e_t l2e;    /* Guest's level 2 entry */
+    guest_l1e_t l1e;    /* Guest's level 1 entry (or fabrication) */
+#if GUEST_PAGING_LEVELS >= 4
+    mfn_t l4mfn;        /* MFN that the level 4 entry was in */
+    mfn_t l3mfn;        /* MFN that the level 3 entry was in */
+#endif
+    mfn_t l2mfn;        /* MFN that the level 2 entry was in */
+    mfn_t l1mfn;        /* MFN that the level 1 entry was in */
+};
+
+#endif /* _XEN_ASM_GUEST_PT_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
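As the new header's comment notes, a user picks a guest paging mode by defining GUEST_PAGING_LEVELS before including it, which is how the shadow code builds multi.c once per mode. The following is a minimal, hypothetical sketch of a consumer (the function and file are illustrative, not part of the patch); it only uses the accessors introduced above plus standard Xen page-flag macros such as _PAGE_PRESENT and _PAGE_RW:

```c
/* Hypothetical example, not part of the changeset. */
#define GUEST_PAGING_LEVELS 3          /* PAE guest; 2 or 4 work the same way */
#include <asm/guest_pt.h>

/* Build a present, writable guest L1 entry for 'gfn', read it back, and
 * work out which pagetable slots 'va' would index at each level. */
static void example_guest_l1e(unsigned long va, gfn_t gfn)
{
    guest_l1e_t gl1e = guest_l1e_from_gfn(gfn, _PAGE_PRESENT | _PAGE_RW);
    paddr_t pa = gfn_to_paddr(guest_l1e_get_gfn(gl1e));
    unsigned int l1_slot = guest_l1_table_offset(va);
    unsigned int l2_slot = guest_l2_table_offset(va);

    /* The accessors round-trip: flags and frame number come back intact. */
    ASSERT(guest_l1e_get_flags(gl1e) & _PAGE_PRESENT);
    ASSERT(pa == gfn_to_paddr(gfn));

    (void)l1_slot; (void)l2_slot;
}
```

The same accessor names resolve to 32-bit or full pgentry operations depending on the level chosen, which is what lets guest-walk code be written once and compiled for each paging mode.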