[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] Adjust emulation code to deal with compatibility mode guests. This
# HG changeset patch # User Emmanuel Ackaouy <ack@xxxxxxxxxxxxx> # Date 1168018470 0 # Node ID 3870aff51ae384163b9ba20db1b5266d89565382 # Parent f632c0c3697657c0df031820dafeb74bf0b0f5bb Adjust emulation code to deal with compatibility mode guests. This includes enhancements to emulate_privileged_op() that aren't directly related to such guests. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx> --- xen/arch/x86/domain.c | 14 + xen/arch/x86/mm.c | 31 +++ xen/arch/x86/traps.c | 296 ++++++++++++++++++++++++++++------- xen/arch/x86/x86_64/mm.c | 6 xen/include/asm-x86/desc.h | 7 xen/include/asm-x86/mm.h | 5 xen/include/asm-x86/x86_32/uaccess.h | 2 xen/include/asm-x86/x86_64/uaccess.h | 2 8 files changed, 303 insertions(+), 60 deletions(-) diff -r f632c0c36976 -r 3870aff51ae3 xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c Fri Jan 05 17:34:30 2007 +0000 +++ b/xen/arch/x86/domain.c Fri Jan 05 17:34:30 2007 +0000 @@ -1055,6 +1055,20 @@ void domain_relinquish_resources(struct { /* Drop ref to guest_table (from new_guest_cr3(), svm/vmx cr3 handling, * or sh_update_paging_modes()) */ +#ifdef CONFIG_COMPAT + if ( IS_COMPAT(d) ) + { + pfn = l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table))); + if ( pfn != 0 ) + { + if ( shadow_mode_refcounts(d) ) + put_page(mfn_to_page(pfn)); + else + put_page_and_type(mfn_to_page(pfn)); + } + continue; + } +#endif pfn = pagetable_get_pfn(v->arch.guest_table); if ( pfn != 0 ) { diff -r f632c0c36976 -r 3870aff51ae3 xen/arch/x86/mm.c --- a/xen/arch/x86/mm.c Fri Jan 05 17:34:30 2007 +0000 +++ b/xen/arch/x86/mm.c Fri Jan 05 17:34:30 2007 +0000 @@ -1765,6 +1765,33 @@ int new_guest_cr3(unsigned long mfn) if ( is_hvm_domain(d) && !hvm_paging_enabled(v) ) return 0; +#ifdef CONFIG_COMPAT + if ( IS_COMPAT(d) ) + { + l4_pgentry_t l4e = l4e_from_pfn(mfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED); + + if ( shadow_mode_refcounts(d) ) + { + okay = get_page_from_pagenr(mfn, d); + old_base_mfn = l4e_get_pfn(l4e); + if ( okay && 
old_base_mfn ) + put_page(mfn_to_page(old_base_mfn)); + } + else + okay = mod_l4_entry(__va(pagetable_get_paddr(v->arch.guest_table)), + l4e, 0); + if ( unlikely(!okay) ) + { + MEM_LOG("Error while installing new compat baseptr %lx", mfn); + return 0; + } + + invalidate_shadow_ldt(v); + write_ptbase(v); + + return 1; + } +#endif if ( shadow_mode_refcounts(d) ) { okay = get_page_from_pagenr(mfn, d); @@ -3204,7 +3231,7 @@ static int ptwr_emulated_update( nl1e = l1e_from_intpte(val); if ( unlikely(!get_page_from_l1e(gl1e_to_ml1e(d, nl1e), d)) ) { - if ( (CONFIG_PAGING_LEVELS == 3) && + if ( (CONFIG_PAGING_LEVELS == 3 || IS_COMPAT(d)) && (bytes == 4) && !do_cmpxchg && (l1e_get_flags(nl1e) & _PAGE_PRESENT) ) @@ -3347,7 +3374,7 @@ int ptwr_do_page_fault(struct vcpu *v, u goto bail; ptwr_ctxt.ctxt.regs = guest_cpu_user_regs(); - ptwr_ctxt.ctxt.mode = X86EMUL_MODE_HOST; + ptwr_ctxt.ctxt.mode = !IS_COMPAT(d) ? X86EMUL_MODE_HOST : X86EMUL_MODE_PROT32; ptwr_ctxt.cr2 = addr; ptwr_ctxt.pte = pte; if ( x86_emulate(&ptwr_ctxt.ctxt, &ptwr_emulate_ops) ) diff -r f632c0c36976 -r 3870aff51ae3 xen/arch/x86/traps.c --- a/xen/arch/x86/traps.c Fri Jan 05 17:34:30 2007 +0000 +++ b/xen/arch/x86/traps.c Fri Jan 05 17:34:30 2007 +0000 @@ -977,6 +977,64 @@ long do_fpu_taskswitch(int set) return 0; } +static int read_descriptor(unsigned int sel, + const struct vcpu *v, + const struct cpu_user_regs * regs, + unsigned long *base, + unsigned long *limit, + unsigned int *ar, + unsigned int vm86attr) +{ + struct desc_struct desc; + + if ( !vm86_mode(regs) ) + { + if ( sel < 4) + desc.b = desc.a = 0; + else if ( __get_user(desc, + (const struct desc_struct *)(!(sel & 4) + ? 
GDT_VIRT_START(v) + : LDT_VIRT_START(v)) + + (sel >> 3)) ) + return 0; + if ( !(vm86attr & _SEGMENT_CODE) ) + desc.b &= ~_SEGMENT_L; + } + else + { + desc.a = (sel << 20) | 0xffff; + desc.b = vm86attr | (sel >> 12); + } + + *ar = desc.b & 0x00f0ff00; + if ( !(desc.b & _SEGMENT_L) ) + { + *base = (desc.a >> 16) + ((desc.b & 0xff) << 16) + (desc.b & 0xff000000); + *limit = (desc.a & 0xffff) | (desc.b & 0x000f0000); + if ( desc.b & _SEGMENT_G ) + *limit = ((*limit + 1) << 12) - 1; +#ifndef NDEBUG + if ( !vm86_mode(regs) && sel > 3 ) + { + unsigned int a, l; + unsigned char valid; + + __asm__("larl %2, %0\n\tsetz %1" : "=r" (a), "=rm" (valid) : "rm" (sel)); + BUG_ON(valid && (a & 0x00f0ff00) != *ar); + __asm__("lsll %2, %0\n\tsetz %1" : "=r" (l), "=rm" (valid) : "rm" (sel)); + BUG_ON(valid && l != *limit); + } +#endif + } + else + { + *base = 0UL; + *limit = ~0UL; + } + + return 1; +} + /* Has the guest requested sufficient permission for this I/O access? */ static inline int guest_io_okay( unsigned int port, unsigned int bytes, @@ -1041,65 +1099,113 @@ unsigned long guest_to_host_gpr_switch(u __attribute__((__regparm__(1))); /* Instruction fetch with error handling. 
*/ -#define insn_fetch(_type, _size, cs, eip) \ -({ unsigned long _rc, _x, _ptr = eip; \ - if ( vm86_mode(regs) ) \ - _ptr += cs << 4; \ - if ( (_rc = copy_from_user(&_x, (_type *)_ptr, sizeof(_type))) != 0 ) \ +#define insn_fetch(type, base, eip, limit) \ +({ unsigned long _rc, _ptr = (base) + (eip); \ + type _x; \ + if ( (limit) < sizeof(_x) - 1 || (eip) > (limit) - (sizeof(_x) - 1) ) \ + goto fail; \ + if ( (_rc = copy_from_user(&_x, (type *)_ptr, sizeof(_x))) != 0 ) \ { \ - propagate_page_fault(eip + sizeof(_type) - _rc, 0); \ + propagate_page_fault(_ptr + sizeof(_x) - _rc, 0); \ return EXCRET_fault_fixed; \ } \ - eip += _size; (_type)_x; }) + (eip) += sizeof(_x); _x; }) + +#if defined(CONFIG_X86_32) +# define read_sreg(regs, sr) ((regs)->sr) +#elif defined(CONFIG_X86_64) +# define read_sreg(regs, sr) read_segment_register(sr) +#endif static int emulate_privileged_op(struct cpu_user_regs *regs) { struct vcpu *v = current; - unsigned long *reg, eip = regs->eip, cs = regs->cs, res; - u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0; - unsigned int port, i, op_bytes = 4, data, rc; + unsigned long *reg, eip = regs->eip, res; + u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0, rex = 0; + enum { lm_seg_none, lm_seg_fs, lm_seg_gs } lm_ovr = lm_seg_none; + unsigned int port, i, data_sel, ar, data, rc; + unsigned int op_bytes, op_default, ad_bytes, ad_default; +#define rd_ad(reg) (ad_bytes >= sizeof(regs->reg) \ + ? regs->reg \ + : ad_bytes == 4 \ + ? (u32)regs->reg \ + : (u16)regs->reg) +#define wr_ad(reg, val) (ad_bytes >= sizeof(regs->reg) \ + ? regs->reg = (val) \ + : ad_bytes == 4 \ + ? 
(*(u32 *)®s->reg = (val)) \ + : (*(u16 *)®s->reg = (val))) + unsigned long code_base, code_limit; char io_emul_stub[16]; void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1))); u32 l, h; + if ( !read_descriptor(regs->cs, v, regs, + &code_base, &code_limit, &ar, + _SEGMENT_CODE|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P) ) + goto fail; + op_default = op_bytes = (ar & (_SEGMENT_L|_SEGMENT_DB)) ? 4 : 2; + ad_default = ad_bytes = (ar & _SEGMENT_L) ? 8 : op_default; + if ( !(ar & (_SEGMENT_CODE|_SEGMENT_S|_SEGMENT_P)) ) + goto fail; + + /* emulating only opcodes not allowing SS to be default */ + data_sel = read_sreg(regs, ds); + /* Legacy prefixes. */ - for ( i = 0; i < 8; i++ ) - { - switch ( opcode = insn_fetch(u8, 1, cs, eip) ) + for ( i = 0; i < 8; i++, rex == opcode || (rex = 0) ) + { + switch ( opcode = insn_fetch(u8, code_base, eip, code_limit) ) { case 0x66: /* operand-size override */ - op_bytes ^= 6; /* switch between 2/4 bytes */ - break; + op_bytes = op_default ^ 6; /* switch between 2/4 bytes */ + continue; case 0x67: /* address-size override */ + ad_bytes = ad_default != 4 ? 4 : 2; /* switch to 2/4 bytes */ + continue; case 0x2e: /* CS override */ + data_sel = regs->cs; + continue; case 0x3e: /* DS override */ + data_sel = read_sreg(regs, ds); + continue; case 0x26: /* ES override */ + data_sel = read_sreg(regs, es); + continue; case 0x64: /* FS override */ + data_sel = read_sreg(regs, fs); + lm_ovr = lm_seg_fs; + continue; case 0x65: /* GS override */ + data_sel = read_sreg(regs, gs); + lm_ovr = lm_seg_gs; + continue; case 0x36: /* SS override */ + data_sel = regs->ss; + continue; case 0xf0: /* LOCK */ + continue; case 0xf2: /* REPNE/REPNZ */ - break; case 0xf3: /* REP/REPE/REPZ */ rep_prefix = 1; - break; + continue; default: - goto done_prefixes; - } - } - done_prefixes: - -#ifdef __x86_64__ + if ( (ar & _SEGMENT_L) && (opcode & 0xf0) == 0x40 ) + { + rex = opcode; + continue; + } + break; + } + break; + } + /* REX prefix. 
*/ - if ( (opcode & 0xf0) == 0x40 ) - { - modrm_reg = (opcode & 4) << 1; /* REX.R */ - modrm_rm = (opcode & 1) << 3; /* REX.B */ - - /* REX.W and REX.X do not need to be decoded. */ - opcode = insn_fetch(u8, 1, cs, eip); - } -#endif + if ( rex & 8 ) /* REX.W */ + op_bytes = 4; /* emulating only opcodes not supporting 64-bit operands */ + modrm_reg = (rex & 4) << 1; /* REX.R */ + /* REX.X does not need to be decoded. */ + modrm_rm = (rex & 1) << 3; /* REX.B */ if ( opcode == 0x0f ) goto twobyte_opcode; @@ -1107,8 +1213,58 @@ static int emulate_privileged_op(struct /* Input/Output String instructions. */ if ( (opcode >= 0x6c) && (opcode <= 0x6f) ) { - if ( rep_prefix && (regs->ecx == 0) ) + unsigned long data_base, data_limit; + + if ( rep_prefix && (rd_ad(ecx) == 0) ) goto done; + + if ( !(opcode & 2) ) + { + data_sel = read_sreg(regs, es); + lm_ovr = lm_seg_none; + } + + if ( !(ar & _SEGMENT_L) ) + { + if ( !read_descriptor(data_sel, v, regs, + &data_base, &data_limit, &ar, + _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P) ) + goto fail; + if ( !(ar & (_SEGMENT_S|_SEGMENT_P)) || + (opcode & 2 ? 
+ (ar & _SEGMENT_CODE) && !(ar & _SEGMENT_WR) : + (ar & _SEGMENT_CODE) || !(ar & _SEGMENT_WR)) ) + goto fail; + } +#ifdef CONFIG_X86_64 + else + { + if ( lm_ovr == lm_seg_none || data_sel < 4 ) + { + switch ( lm_ovr ) + { + case lm_seg_none: + data_base = 0UL; + break; + case lm_seg_fs: + data_base = v->arch.guest_context.fs_base; + break; + case lm_seg_gs: + if ( guest_kernel_mode(v, regs) ) + data_base = v->arch.guest_context.gs_base_kernel; + else + data_base = v->arch.guest_context.gs_base_user; + break; + } + } + else + read_descriptor(data_sel, v, regs, + &data_base, &data_limit, &ar, + 0); + data_limit = ~0UL; + ar = _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P; + } +#endif continue_io_string: switch ( opcode ) @@ -1116,7 +1272,9 @@ static int emulate_privileged_op(struct case 0x6c: /* INSB */ op_bytes = 1; case 0x6d: /* INSW/INSL */ - if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) ) + if ( data_limit < op_bytes - 1 || + rd_ad(edi) > data_limit - (op_bytes - 1) || + !guest_io_okay((u16)regs->edx, op_bytes, v, regs) ) goto fail; port = (u16)regs->edx; switch ( op_bytes ) @@ -1134,24 +1292,26 @@ static int emulate_privileged_op(struct data = (u32)(guest_inl_okay(port, v, regs) ? inl(port) : ~0); break; } - if ( (rc = copy_to_user((void *)regs->edi, &data, op_bytes)) != 0 ) + if ( (rc = copy_to_user((void *)data_base + rd_ad(edi), &data, op_bytes)) != 0 ) { - propagate_page_fault(regs->edi + op_bytes - rc, + propagate_page_fault(data_base + rd_ad(edi) + op_bytes - rc, PFEC_write_access); return EXCRET_fault_fixed; } - regs->edi += (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes); + wr_ad(edi, regs->edi + (int)((regs->eflags & EF_DF) ? 
-op_bytes : op_bytes)); break; case 0x6e: /* OUTSB */ op_bytes = 1; case 0x6f: /* OUTSW/OUTSL */ - if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) ) + if ( data_limit < op_bytes - 1 || + rd_ad(esi) > data_limit - (op_bytes - 1) || + !guest_io_okay((u16)regs->edx, op_bytes, v, regs) ) goto fail; - rc = copy_from_user(&data, (void *)regs->esi, op_bytes); + rc = copy_from_user(&data, (void *)data_base + rd_ad(esi), op_bytes); if ( rc != 0 ) { - propagate_page_fault(regs->esi + op_bytes - rc, 0); + propagate_page_fault(data_base + rd_ad(esi) + op_bytes - rc, 0); return EXCRET_fault_fixed; } port = (u16)regs->edx; @@ -1172,11 +1332,11 @@ static int emulate_privileged_op(struct outl((u32)data, port); break; } - regs->esi += (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes); - break; - } - - if ( rep_prefix && (--regs->ecx != 0) ) + wr_ad(esi, regs->esi + (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes)); + break; + } + + if ( rep_prefix && (wr_ad(ecx, regs->ecx - 1) != 0) ) { if ( !hypercall_preempt_check() ) goto continue_io_string; @@ -1216,7 +1376,7 @@ static int emulate_privileged_op(struct case 0xe4: /* IN imm8,%al */ op_bytes = 1; case 0xe5: /* IN imm8,%eax */ - port = insn_fetch(u8, 1, cs, eip); + port = insn_fetch(u8, code_base, eip, code_limit); io_emul_stub[7] = port; /* imm8 */ exec_in: if ( !guest_io_okay(port, op_bytes, v, regs) ) @@ -1258,7 +1418,7 @@ static int emulate_privileged_op(struct case 0xe6: /* OUT %al,imm8 */ op_bytes = 1; case 0xe7: /* OUT %eax,imm8 */ - port = insn_fetch(u8, 1, cs, eip); + port = insn_fetch(u8, code_base, eip, code_limit); io_emul_stub[7] = port; /* imm8 */ exec_out: if ( !guest_io_okay(port, op_bytes, v, regs) ) @@ -1311,7 +1471,7 @@ static int emulate_privileged_op(struct goto fail; /* Privileged (ring 0) instructions. 
*/ - opcode = insn_fetch(u8, 1, cs, eip); + opcode = insn_fetch(u8, code_base, eip, code_limit); switch ( opcode ) { case 0x06: /* CLTS */ @@ -1329,7 +1489,7 @@ static int emulate_privileged_op(struct break; case 0x20: /* MOV CR?,<reg> */ - opcode = insn_fetch(u8, 1, cs, eip); + opcode = insn_fetch(u8, code_base, eip, code_limit); modrm_reg |= (opcode >> 3) & 7; modrm_rm |= (opcode >> 0) & 7; reg = decode_register(modrm_rm, regs, 0); @@ -1345,8 +1505,14 @@ static int emulate_privileged_op(struct break; case 3: /* Read CR3 */ - *reg = xen_pfn_to_cr3(mfn_to_gmfn( - v->domain, pagetable_get_pfn(v->arch.guest_table))); + if ( !IS_COMPAT(v->domain) ) + *reg = xen_pfn_to_cr3(mfn_to_gmfn( + v->domain, pagetable_get_pfn(v->arch.guest_table))); +#ifdef CONFIG_COMPAT + else + *reg = compat_pfn_to_cr3(mfn_to_gmfn( + v->domain, l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table))))); +#endif break; case 4: /* Read CR4 */ @@ -1363,7 +1529,7 @@ static int emulate_privileged_op(struct break; case 0x21: /* MOV DR?,<reg> */ - opcode = insn_fetch(u8, 1, cs, eip); + opcode = insn_fetch(u8, code_base, eip, code_limit); modrm_reg |= (opcode >> 3) & 7; modrm_rm |= (opcode >> 0) & 7; reg = decode_register(modrm_rm, regs, 0); @@ -1373,7 +1539,7 @@ static int emulate_privileged_op(struct break; case 0x22: /* MOV <reg>,CR? 
*/ - opcode = insn_fetch(u8, 1, cs, eip); + opcode = insn_fetch(u8, code_base, eip, code_limit); modrm_reg |= (opcode >> 3) & 7; modrm_rm |= (opcode >> 0) & 7; reg = decode_register(modrm_rm, regs, 0); @@ -1396,7 +1562,12 @@ static int emulate_privileged_op(struct case 3: /* Write CR3 */ LOCK_BIGLOCK(v->domain); - rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg))); + if ( !IS_COMPAT(v->domain) ) + rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg))); +#ifdef CONFIG_COMPAT + else + rc = new_guest_cr3(gmfn_to_mfn(v->domain, compat_cr3_to_pfn(*reg))); +#endif UNLOCK_BIGLOCK(v->domain); if ( rc == 0 ) /* not okay */ goto fail; @@ -1416,7 +1587,7 @@ static int emulate_privileged_op(struct break; case 0x23: /* MOV <reg>,DR? */ - opcode = insn_fetch(u8, 1, cs, eip); + opcode = insn_fetch(u8, code_base, eip, code_limit); modrm_reg |= (opcode >> 3) & 7; modrm_rm |= (opcode >> 0) & 7; reg = decode_register(modrm_rm, regs, 0); @@ -1429,18 +1600,24 @@ static int emulate_privileged_op(struct { #ifdef CONFIG_X86_64 case MSR_FS_BASE: + if ( IS_COMPAT(v->domain) ) + goto fail; if ( wrmsr_safe(MSR_FS_BASE, regs->eax, regs->edx) ) goto fail; v->arch.guest_context.fs_base = ((u64)regs->edx << 32) | regs->eax; break; case MSR_GS_BASE: + if ( IS_COMPAT(v->domain) ) + goto fail; if ( wrmsr_safe(MSR_GS_BASE, regs->eax, regs->edx) ) goto fail; v->arch.guest_context.gs_base_kernel = ((u64)regs->edx << 32) | regs->eax; break; case MSR_SHADOW_GS_BASE: + if ( IS_COMPAT(v->domain) ) + goto fail; if ( wrmsr_safe(MSR_SHADOW_GS_BASE, regs->eax, regs->edx) ) goto fail; v->arch.guest_context.gs_base_user = @@ -1465,14 +1642,20 @@ static int emulate_privileged_op(struct { #ifdef CONFIG_X86_64 case MSR_FS_BASE: + if ( IS_COMPAT(v->domain) ) + goto fail; regs->eax = v->arch.guest_context.fs_base & 0xFFFFFFFFUL; regs->edx = v->arch.guest_context.fs_base >> 32; break; case MSR_GS_BASE: + if ( IS_COMPAT(v->domain) ) + goto fail; regs->eax = v->arch.guest_context.gs_base_kernel & 
0xFFFFFFFFUL; regs->edx = v->arch.guest_context.gs_base_kernel >> 32; break; case MSR_SHADOW_GS_BASE: + if ( IS_COMPAT(v->domain) ) + goto fail; regs->eax = v->arch.guest_context.gs_base_user & 0xFFFFFFFFUL; regs->edx = v->arch.guest_context.gs_base_user >> 32; break; @@ -1500,6 +1683,9 @@ static int emulate_privileged_op(struct default: goto fail; } + +#undef wr_ad +#undef rd_ad done: regs->eip = eip; diff -r f632c0c36976 -r 3870aff51ae3 xen/arch/x86/x86_64/mm.c --- a/xen/arch/x86/x86_64/mm.c Fri Jan 05 17:34:30 2007 +0000 +++ b/xen/arch/x86/x86_64/mm.c Fri Jan 05 17:34:30 2007 +0000 @@ -376,7 +376,11 @@ int check_descriptor(const struct domain /* All code and data segments are okay. No base/limit checking. */ if ( (b & _SEGMENT_S) ) - goto good; + { + if ( !IS_COMPAT(dom) || !(b & _SEGMENT_L) ) + goto good; + goto bad; + } /* Invalid type 0 is harmless. It is used for 2nd half of a call gate. */ if ( (b & _SEGMENT_TYPE) == 0x000 ) diff -r f632c0c36976 -r 3870aff51ae3 xen/include/asm-x86/desc.h --- a/xen/include/asm-x86/desc.h Fri Jan 05 17:34:30 2007 +0000 +++ b/xen/include/asm-x86/desc.h Fri Jan 05 17:34:30 2007 +0000 @@ -113,12 +113,19 @@ /* These are bitmasks for the high 32 bits of a descriptor table entry. 
*/ #define _SEGMENT_TYPE (15<< 8) +#define _SEGMENT_WR ( 1<< 9) /* Writeable (data) or Readable (code) + segment */ #define _SEGMENT_EC ( 1<<10) /* Expand-down or Conforming segment */ #define _SEGMENT_CODE ( 1<<11) /* Code (vs data) segment for non-system segments */ #define _SEGMENT_S ( 1<<12) /* System descriptor (yes iff S==0) */ #define _SEGMENT_DPL ( 3<<13) /* Descriptor Privilege Level */ #define _SEGMENT_P ( 1<<15) /* Segment Present */ +#ifdef __x86_64 +#define _SEGMENT_L ( 1<<21) /* 64-bit segment */ +#else +#define _SEGMENT_L 0 +#endif #define _SEGMENT_DB ( 1<<22) /* 16- or 32-bit segment */ #define _SEGMENT_G ( 1<<23) /* Granularity */ diff -r f632c0c36976 -r 3870aff51ae3 xen/include/asm-x86/mm.h --- a/xen/include/asm-x86/mm.h Fri Jan 05 17:34:30 2007 +0000 +++ b/xen/include/asm-x86/mm.h Fri Jan 05 17:34:30 2007 +0000 @@ -279,6 +279,11 @@ int check_descriptor(const struct domain #define INVALID_MFN (~0UL) +#ifdef CONFIG_COMPAT +#define compat_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) +#define compat_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) +#endif + #ifdef MEMORY_GUARD void memguard_init(void); void memguard_guard_range(void *p, unsigned long l); diff -r f632c0c36976 -r 3870aff51ae3 xen/include/asm-x86/x86_32/uaccess.h --- a/xen/include/asm-x86/x86_32/uaccess.h Fri Jan 05 17:34:30 2007 +0000 +++ b/xen/include/asm-x86/x86_32/uaccess.h Fri Jan 05 17:34:30 2007 +0000 @@ -83,7 +83,7 @@ do { \ case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \ case 8: __get_user_u64(x,ptr,retval,errret);break; \ - default: (x) = __get_user_bad(); \ + default: __get_user_bad(); \ } \ } while (0) diff -r f632c0c36976 -r 3870aff51ae3 xen/include/asm-x86/x86_64/uaccess.h --- a/xen/include/asm-x86/x86_64/uaccess.h Fri Jan 05 17:34:30 2007 +0000 +++ b/xen/include/asm-x86/x86_64/uaccess.h Fri Jan 05 17:34:30 2007 +0000 @@ -48,7 +48,7 @@ do { \ case 2: 
__get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ case 4: __get_user_asm(x,ptr,retval,"l","k","=r",errret);break; \ case 8: __get_user_asm(x,ptr,retval,"q","","=r",errret); break; \ - default: (x) = __get_user_bad(); \ + default: __get_user_bad(); \ } \ } while (0) _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support. |