[Xen-devel] [patch rfc 1/3] xen arch header rework.

This patch postfixes the arch-specific types and structs with the
architecture name; the native ones are defined to the non-postfixed
versions. Struct members are also changed to uint{32,64}_t everywhere, so
we get the correct sizes no matter what sizeof(long) happens to be. The
goal of this work is to be able to include foreign arch headers within one
source file and to deal with foreign structs in the xen tools. For now
I've started with x86_32 and x86_64; we'll need that first when supporting
32-on-64 paravirtualized guests.

Signed-off-by: Gerd Hoffmann <kraxel@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxxxx>
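As a rough illustration of where this is heading, here is a hypothetical
sketch (not part of this patch; the helper name, the include line and the
frame number are made up, and how the x86_32 definitions become visible to
a 64-bit tools build is left out). A 64-bit toolstack preparing a 32-bit
guest would fill the foreign context layout through the postfixed names:

    #include <stdint.h>
    #include <string.h>
    #include <xen/xen.h>  /* illustrative; assumes the x86_32 definitions
                           * from arch-x86_32.h are visible here */

    /* 'pgd_mfn' is an illustrative machine frame number of the guest's
     * page directory; error handling and the remaining fields omitted. */
    static void build_ctxt32(vcpu_guest_context_x86_32_t *c, uint32_t pgd_mfn)
    {
        memset(c, 0, sizeof(*c));
        c->flags = VGCF_I387_VALID | VGCF_in_kernel;
        /* PAE/x86_32 packs the MFN into %cr3 differently than x86_64 does,
         * hence the arch-postfixed accessor. */
        c->ctrlreg[3] = xen_pfn_to_cr3_x86_32(pgd_mfn);
    }

The same code also works on a native 32-bit build, since the postfixed
names are always defined and the non-postfixed ones simply map to them.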
---
 xen/include/public/arch-x86_32.h | 207 ++++++++++++++++++++++-----------------
 xen/include/public/arch-x86_64.h | 181 ++++++++++++++++++++--------------
 2 files changed, 227 insertions(+), 161 deletions(-)

Index: build-64-unstable-11624/xen/include/public/arch-x86_32.h
===================================================================
--- build-64-unstable-11624.orig/xen/include/public/arch-x86_32.h
+++ build-64-unstable-11624/xen/include/public/arch-x86_32.h
@@ -43,20 +43,6 @@
 #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
 #endif
-#ifndef __ASSEMBLY__
-/* Guest handles for primitive C types. */
-__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
-__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-DEFINE_XEN_GUEST_HANDLE(char);
-DEFINE_XEN_GUEST_HANDLE(int);
-DEFINE_XEN_GUEST_HANDLE(long);
-DEFINE_XEN_GUEST_HANDLE(void);
-
-typedef unsigned long xen_pfn_t;
-DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
-#endif
-
 /*
  * SEGMENT DESCRIPTOR TABLES
  */
@@ -82,44 +68,21 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #define FLAT_RING3_DS 0xe033 /* GDT index 262 */
 #define FLAT_RING3_SS 0xe033 /* GDT index 262 */
-#define FLAT_KERNEL_CS FLAT_RING1_CS
-#define FLAT_KERNEL_DS FLAT_RING1_DS
-#define FLAT_KERNEL_SS FLAT_RING1_SS
-#define FLAT_USER_CS FLAT_RING3_CS
-#define FLAT_USER_DS FLAT_RING3_DS
-#define FLAT_USER_SS FLAT_RING3_SS
-
-/*
- * Virtual addresses beyond this are not modifiable by guest OSes. The
- * machine->physical mapping table starts at this address, read-only.
- */
-#ifdef CONFIG_X86_PAE
-#define __HYPERVISOR_VIRT_START 0xF5800000
-#define __MACH2PHYS_VIRT_START 0xF5800000
-#define __MACH2PHYS_VIRT_END 0xF6800000
-#else
-#define __HYPERVISOR_VIRT_START 0xFC000000
-#define __MACH2PHYS_VIRT_START 0xFC000000
-#define __MACH2PHYS_VIRT_END 0xFC400000
-#endif
-
-#ifndef HYPERVISOR_VIRT_START
-#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-#endif
-
-#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
-#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
-#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
-#endif
+#define FLAT_KERNEL_CS_X86_32 FLAT_RING1_CS
+#define FLAT_KERNEL_DS_X86_32 FLAT_RING1_DS
+#define FLAT_KERNEL_SS_X86_32 FLAT_RING1_SS
+#define FLAT_USER_CS_X86_32 FLAT_RING3_CS
+#define FLAT_USER_DS_X86_32 FLAT_RING3_DS
+#define FLAT_USER_SS_X86_32 FLAT_RING3_SS
 /* Maximum number of virtual CPUs in multi-processor guests. */
 #define MAX_VIRT_CPUS 32
 #ifndef __ASSEMBLY__
-typedef unsigned long xen_ulong_t;
+typedef uint32_t xen_ulong_x86_32_t;
+typedef uint32_t xen_pfn_x86_32_t;
+typedef uint64_t tsc_timestamp_x86_32_t; /* RDTSC timestamp */
 /*
  * Send an array of these to HYPERVISOR_set_trap_table()
@@ -128,16 +91,15 @@ typedef unsigned long xen_ulong_t;
 #define TI_GET_IF(_ti) ((_ti)->flags & 4)
 #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
 #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
-struct trap_info {
+struct trap_info_x86_32 {
     uint8_t vector; /* exception vector */
     uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
     uint16_t cs; /* code selector */
-    unsigned long address; /* code offset */
+    uint32_t address; /* code offset */
 };
-typedef struct trap_info trap_info_t;
-DEFINE_XEN_GUEST_HANDLE(trap_info_t);
+typedef struct trap_info_x86_32 trap_info_x86_32_t;
-struct cpu_user_regs {
+struct cpu_user_regs_x86_32 {
     uint32_t ebx;
     uint32_t ecx;
     uint32_t edx;
@@ -159,16 +121,13 @@ struct cpu_user_regs {
     uint16_t fs, _pad4;
     uint16_t gs, _pad5;
 };
-typedef struct cpu_user_regs cpu_user_regs_t;
-DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
-
-typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
+typedef struct cpu_user_regs_x86_32 cpu_user_regs_x86_32_t;
 /*
  * The following is all CPU context. Note that the fpu_ctxt block is filled
  * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
  */
-struct vcpu_guest_context {
+struct vcpu_guest_context_x86_32 {
     /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
     struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
 #define VGCF_I387_VALID (1<<0)
@@ -182,54 +141,128 @@ struct vcpu_guest_context {
 #define VGCF_in_kernel (1<<_VGCF_in_kernel)
 #define _VGCF_failsafe_disables_events 3
 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
-    unsigned long flags; /* VGCF_* flags */
-    struct cpu_user_regs user_regs; /* User-level CPU registers */
-    struct trap_info trap_ctxt[256]; /* Virtual IDT */
-    unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
-    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
-    unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
-    unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
-    unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
-    unsigned long event_callback_cs; /* CS:EIP of event callback */
-    unsigned long event_callback_eip;
-    unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
-    unsigned long failsafe_callback_eip;
-    unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
+    uint32_t flags; /* VGCF_* flags */
+    struct cpu_user_regs_x86_32 user_regs; /* User-level CPU registers */
+    struct trap_info_x86_32 trap_ctxt[256]; /* Virtual IDT */
+    uint32_t ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+    uint32_t gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+    uint32_t kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
+    uint32_t ctrlreg[8]; /* CR0-CR7 (control registers) */
+    uint32_t debugreg[8]; /* DB0-DB7 (debug registers) */
+    uint32_t event_callback_cs; /* CS:EIP of event callback */
+    uint32_t event_callback_eip;
+    uint32_t failsafe_callback_cs; /* CS:EIP of failsafe callback */
+    uint32_t failsafe_callback_eip;
+    uint32_t vm_assist; /* VMASST_TYPE_* bitmap */
 };
-typedef struct vcpu_guest_context vcpu_guest_context_t;
-DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+typedef struct vcpu_guest_context_x86_32 vcpu_guest_context_x86_32_t;
 /*
  * Page-directory addresses above 4GB do not fit into architectural %cr3.
  * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
  * must use the following accessor macros to pack/unpack valid MFNs.
  */
-#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
-#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
+#define xen_pfn_to_cr3_x86_32(pfn) (((uint32_t)(pfn) << 12) | ((uint32_t)(pfn) >> 20))
+#define xen_cr3_to_pfn_x86_32(cr3) (((uint32_t)(cr3) >> 12) | ((uint32_t)(cr3) << 20))
-struct arch_shared_info {
-    unsigned long max_pfn; /* max pfn that appears in table */
+struct arch_shared_info_x86_32 {
+    uint32_t max_pfn; /* max pfn that appears in table */
     /* Frame containing list of mfns containing list of mfns containing p2m. */
-    xen_pfn_t pfn_to_mfn_frame_list_list;
-    unsigned long nmi_reason;
-    uint64_t pad[32];
+    xen_pfn_x86_32_t pfn_to_mfn_frame_list_list;
+    uint32_t nmi_reason;
+    uint32_t pad[64];
 };
-typedef struct arch_shared_info arch_shared_info_t;
+typedef struct arch_shared_info_x86_32 arch_shared_info_x86_32_t;
-struct arch_vcpu_info {
-    unsigned long cr2;
-    unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
+struct arch_vcpu_info_x86_32 {
+    uint32_t cr2;
+    uint32_t pad[5]; /* sizeof(vcpu_info_t) == 64 */
 };
-typedef struct arch_vcpu_info arch_vcpu_info_t;
+typedef struct arch_vcpu_info_x86_32 arch_vcpu_info_x86_32_t;
-struct xen_callback {
-    unsigned long cs;
-    unsigned long eip;
+struct xen_callback_x86_32 {
+    uint32_t cs;
+    uint32_t eip;
 };
-typedef struct xen_callback xen_callback_t;
+typedef struct xen_callback_x86_32 xen_callback_x86_32_t;
 #endif /* !__ASSEMBLY__ */
+#ifdef __i386__
+/* make also available without _x86_32 postfix when native */
+
+#ifndef __ASSEMBLY__
+
+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_XEN_GUEST_HANDLE(char);
+DEFINE_XEN_GUEST_HANDLE(int);
+DEFINE_XEN_GUEST_HANDLE(long);
+DEFINE_XEN_GUEST_HANDLE(void);
+
+__DEFINE_XEN_GUEST_HANDLE(xen_pfn_t, xen_pfn_x86_32_t);
+__DEFINE_XEN_GUEST_HANDLE(trap_info_t, trap_info_x86_32_t);
+__DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t, cpu_user_regs_x86_32_t);
+__DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t, vcpu_guest_context_x86_32_t);
+
+#define xen_pfn_t xen_pfn_x86_32_t
+#define PRIpfn PRIx32
+
+#define xen_ulong_t xen_ulong_x86_32_t
+#define tsc_timestamp_t tsc_timestamp_x86_32_t
+#define trap_info trap_info_x86_32
+#define trap_info_t trap_info_x86_32_t
+#define cpu_user_regs cpu_user_regs_x86_32
+#define cpu_user_regs_t cpu_user_regs_x86_32_t
+#define vcpu_guest_context vcpu_guest_context_x86_32
+#define vcpu_guest_context_t vcpu_guest_context_x86_32_t
+#define arch_shared_info arch_shared_info_x86_32
+#define arch_shared_info_t arch_shared_info_x86_32_t
+#define arch_vcpu_info arch_vcpu_info_x86_32
+#define arch_vcpu_info_t arch_vcpu_info_x86_32_t
+#define xen_callback xen_callback_x86_32
+#define xen_callback_t xen_callback_x86_32_t
+
+#define xen_pfn_to_cr3 xen_pfn_to_cr3_x86_32
+#define xen_cr3_to_pfn xen_cr3_to_pfn_x86_32
+
+#endif /* !__ASSEMBLY__ */
+
+#define FLAT_KERNEL_DS FLAT_KERNEL_DS_X86_32
+#define FLAT_KERNEL_CS FLAT_KERNEL_CS_X86_32
+#define FLAT_KERNEL_SS FLAT_KERNEL_SS_X86_32
+#define FLAT_USER_DS FLAT_USER_DS_X86_32
+#define FLAT_USER_CS FLAT_USER_CS_X86_32
+#define FLAT_USER_SS FLAT_USER_SS_X86_32
+
+/*
+ * Virtual addresses beyond this are not modifiable by guest OSes. The
+ * machine->physical mapping table starts at this address, read-only.
+ */
+#ifdef CONFIG_X86_PAE
+#define __HYPERVISOR_VIRT_START 0xF5800000
+#define __MACH2PHYS_VIRT_START 0xF5800000
+#define __MACH2PHYS_VIRT_END 0xF6800000
+#else
+#define __HYPERVISOR_VIRT_START 0xFC000000
+#define __MACH2PHYS_VIRT_START 0xFC000000
+#define __MACH2PHYS_VIRT_END 0xFC400000
+#endif
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#endif
+
+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
+#endif
+
+#endif /* i386 native */
+
 /*
  * Prefix forces emulation of some non-trapping instructions.
  * Currently only CPUID.

Index: build-64-unstable-11624/xen/include/public/arch-x86_64.h
===================================================================
--- build-64-unstable-11624.orig/xen/include/public/arch-x86_64.h
+++ build-64-unstable-11624/xen/include/public/arch-x86_64.h
@@ -44,20 +44,6 @@
 #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
 #endif
-#ifndef __ASSEMBLY__
-/* Guest handles for primitive C types. */
-__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
-__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-DEFINE_XEN_GUEST_HANDLE(char);
-DEFINE_XEN_GUEST_HANDLE(int);
-DEFINE_XEN_GUEST_HANDLE(long);
-DEFINE_XEN_GUEST_HANDLE(void);
-
-typedef unsigned long xen_pfn_t;
-DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
-#endif
-
 /*
  * SEGMENT DESCRIPTOR TABLES
  */
@@ -87,47 +73,32 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #define FLAT_KERNEL_DS64 FLAT_RING3_DS64
 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32
-#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64
 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32
-#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64
 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32
-#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
+#define FLAT_KERNEL_DS_X86_64 FLAT_KERNEL_DS64
+#define FLAT_KERNEL_CS_X86_64 FLAT_KERNEL_CS64
+#define FLAT_KERNEL_SS_X86_64 FLAT_KERNEL_SS64
 #define FLAT_USER_DS64 FLAT_RING3_DS64
 #define FLAT_USER_DS32 FLAT_RING3_DS32
-#define FLAT_USER_DS FLAT_USER_DS64
 #define FLAT_USER_CS64 FLAT_RING3_CS64
 #define FLAT_USER_CS32 FLAT_RING3_CS32
-#define FLAT_USER_CS FLAT_USER_CS64
 #define FLAT_USER_SS64 FLAT_RING3_SS64
 #define FLAT_USER_SS32 FLAT_RING3_SS32
-#define FLAT_USER_SS FLAT_USER_SS64
-
-#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
-#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
-#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
-#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
-
-#ifndef HYPERVISOR_VIRT_START
-#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
-#endif
-
-#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
-#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
-#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
+#define FLAT_USER_DS_X86_64 FLAT_USER_DS64
+#define FLAT_USER_CS_X86_64 FLAT_USER_CS64
+#define FLAT_USER_SS_X86_64 FLAT_USER_SS64
 /* Maximum number of virtual CPUs in multi-processor guests. */
 #define MAX_VIRT_CPUS 32
 #ifndef __ASSEMBLY__
-typedef unsigned long xen_ulong_t;
+typedef uint64_t xen_ulong_x86_64_t;
+typedef uint64_t xen_pfn_x86_64_t;
+typedef uint64_t tsc_timestamp_x86_64_t; /* RDTSC timestamp */
 /*
  * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
@@ -181,14 +152,14 @@ struct iret_context {
 #define TI_GET_IF(_ti) ((_ti)->flags & 4)
 #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
 #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
-struct trap_info {
+struct trap_info_x86_64 {
     uint8_t vector; /* exception vector */
     uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
     uint16_t cs; /* code selector */
-    unsigned long address; /* code offset */
+    uint32_t unused; /* alignment */
+    uint64_t address; /* code offset */
 };
-typedef struct trap_info trap_info_t;
-DEFINE_XEN_GUEST_HANDLE(trap_info_t);
+typedef struct trap_info_x86_64 trap_info_x86_64_t;
 #ifdef __GNUC__
 /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
@@ -198,7 +169,7 @@ DEFINE_XEN_GUEST_HANDLE(trap_info_t);
 #define __DECL_REG(name) uint64_t r ## name
 #endif
-struct cpu_user_regs {
+struct cpu_user_regs_x86_64 {
     uint64_t r15;
     uint64_t r14;
     uint64_t r13;
@@ -228,18 +199,15 @@ struct cpu_user_regs {
     uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
     uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
 };
-typedef struct cpu_user_regs cpu_user_regs_t;
-DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+typedef struct cpu_user_regs_x86_64 cpu_user_regs_x86_64_t;
 #undef __DECL_REG
-typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
-
 /*
  * The following is all CPU context. Note that the fpu_ctxt block is filled
  * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
  */
-struct vcpu_guest_context {
+struct vcpu_guest_context_x86_64 {
     /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
     struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
 #define VGCF_I387_VALID (1<<0)
@@ -255,48 +223,113 @@ struct vcpu_guest_context {
 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
 #define _VGCF_syscall_disables_events 4
 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events)
-    unsigned long flags; /* VGCF_* flags */
-    struct cpu_user_regs user_regs; /* User-level CPU registers */
-    struct trap_info trap_ctxt[256]; /* Virtual IDT */
-    unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
-    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
-    unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
-    unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
-    unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
-    unsigned long event_callback_eip;
-    unsigned long failsafe_callback_eip;
-    unsigned long syscall_callback_eip;
-    unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
+    uint64_t flags; /* VGCF_* flags */
+    struct cpu_user_regs_x86_64 user_regs; /* User-level CPU registers */
+    struct trap_info_x86_64 trap_ctxt[256]; /* Virtual IDT */
+    uint64_t ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+    uint64_t gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+    uint64_t kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
+    uint64_t ctrlreg[8]; /* CR0-CR7 (control registers) */
+    uint64_t debugreg[8]; /* DB0-DB7 (debug registers) */
+    uint64_t event_callback_eip;
+    uint64_t failsafe_callback_eip;
+    uint64_t syscall_callback_eip;
+    uint64_t vm_assist; /* VMASST_TYPE_* bitmap */
     /* Segment base addresses. */
     uint64_t fs_base;
     uint64_t gs_base_kernel;
     uint64_t gs_base_user;
 };
-typedef struct vcpu_guest_context vcpu_guest_context_t;
-DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+typedef struct vcpu_guest_context_x86_64 vcpu_guest_context_x86_64_t;
-#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
-#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
+#define xen_pfn_to_cr3_x86_64(pfn) ((uint64_t)(pfn) << 12)
+#define xen_cr3_to_pfn_x86_64(cr3) ((uint64_t)(cr3) >> 12)
-struct arch_shared_info {
-    unsigned long max_pfn; /* max pfn that appears in table */
+struct arch_shared_info_x86_64 {
+    uint64_t max_pfn; /* max pfn that appears in table */
     /* Frame containing list of mfns containing list of mfns containing p2m. */
-    xen_pfn_t pfn_to_mfn_frame_list_list;
-    unsigned long nmi_reason;
+    xen_pfn_x86_64_t pfn_to_mfn_frame_list_list;
+    uint64_t nmi_reason;
     uint64_t pad[32];
 };
-typedef struct arch_shared_info arch_shared_info_t;
+typedef struct arch_shared_info_x86_64 arch_shared_info_x86_64_t;
-struct arch_vcpu_info {
-    unsigned long cr2;
-    unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
+struct arch_vcpu_info_x86_64 {
+    uint64_t cr2;
+    uint64_t pad; /* sizeof(vcpu_info_t) == 64 */
 };
-typedef struct arch_vcpu_info arch_vcpu_info_t;
+typedef struct arch_vcpu_info_x86_64 arch_vcpu_info_x86_64_t;
-typedef unsigned long xen_callback_t;
+typedef uint64_t xen_callback_x86_64_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef __x86_64__
+/* make also available without _x86_64 postfix when native */
+
+#ifndef __ASSEMBLY__
+
+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_XEN_GUEST_HANDLE(char);
+DEFINE_XEN_GUEST_HANDLE(int);
+DEFINE_XEN_GUEST_HANDLE(long);
+DEFINE_XEN_GUEST_HANDLE(void);
+
+__DEFINE_XEN_GUEST_HANDLE(xen_pfn_t, xen_pfn_x86_64_t);
+__DEFINE_XEN_GUEST_HANDLE(trap_info_t, trap_info_x86_64_t);
+__DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t, cpu_user_regs_x86_64_t);
+__DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t, vcpu_guest_context_x86_64_t);
+
+#define xen_pfn_t xen_pfn_x86_64_t
+#define PRIpfn PRIx64
+
+#define xen_ulong_t xen_ulong_x86_64_t
+#define tsc_timestamp_t tsc_timestamp_x86_64_t
+#define trap_info trap_info_x86_64
+#define trap_info_t trap_info_x86_64_t
+#define cpu_user_regs cpu_user_regs_x86_64
+#define cpu_user_regs_t cpu_user_regs_x86_64_t
+#define vcpu_guest_context vcpu_guest_context_x86_64
+#define vcpu_guest_context_t vcpu_guest_context_x86_64_t
+#define arch_shared_info arch_shared_info_x86_64
+#define arch_shared_info_t arch_shared_info_x86_64_t
+#define arch_vcpu_info arch_vcpu_info_x86_64
+#define arch_vcpu_info_t arch_vcpu_info_x86_64_t
+#define xen_callback_t xen_callback_x86_64_t
+
+#define xen_pfn_to_cr3 xen_pfn_to_cr3_x86_64
+#define xen_cr3_to_pfn xen_cr3_to_pfn_x86_64
 #endif /* !__ASSEMBLY__ */
+#define FLAT_KERNEL_DS FLAT_KERNEL_DS_X86_64
+#define FLAT_KERNEL_CS FLAT_KERNEL_CS_X86_64
+#define FLAT_KERNEL_SS FLAT_KERNEL_SS_X86_64
+#define FLAT_USER_DS FLAT_USER_DS_X86_64
+#define FLAT_USER_CS FLAT_USER_CS_X86_64
+#define FLAT_USER_SS FLAT_USER_SS_X86_64
+
+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
+#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
+#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
+#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
+#endif
+
+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+#endif /* x86_64 native */
+
 /*
  * Prefix forces emulation of some non-trapping instructions.
  * Currently only CPUID.

--
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel