
[Xen-changelog] [xen-unstable] hvm: Support hardware task switching.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1190112582 -3600
# Node ID 35fb20c4822c40ca404fe61b1734b063b80047a6
# Parent  49700bb716bb71b8a3ed23216522a62cf8f95259
hvm: Support hardware task switching.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c          |  406 ++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c      |   26 +-
 xen/arch/x86/hvm/vmx/vmx.c      |   28 ++
 xen/arch/x86/mm/shadow/common.c |   88 +-------
 xen/include/asm-x86/hvm/hvm.h   |   17 +
 5 files changed, 479 insertions(+), 86 deletions(-)
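
Both hvm_load_segment_selector() and hvm_task_switch() below unpack raw
GDT/LDT descriptor words by hand. As a reading aid, here is that unpacking
as a minimal standalone C sketch (struct and function names are
illustrative, not part of the patch):

    #include <stdint.h>

    struct seg_fields {
        uint32_t base, limit;
        uint16_t attr;
    };

    /* a = low descriptor word (desc.a), b = high word (desc.b). */
    static void unpack_descriptor(uint32_t a, uint32_t b, struct seg_fields *s)
    {
        s->base  = ( b        & 0xff000000u)  /* base[31:24]           */
                 | ((b << 16) & 0x00ff0000u)  /* base[23:16]           */
                 | ((a >> 16) & 0x0000ffffu); /* base[15:0]            */
        s->attr  = ((b >>  8) & 0x00ffu)      /* A/type/S/DPL/P        */
                 | ((b >> 12) & 0x0f00u);     /* AVL/L/D-B/G           */
        s->limit = (b & 0x000f0000u) | (a & 0x0000ffffu);
        if ( s->attr & 0x0800 )               /* G: 4K granularity     */
            s->limit = (s->limit << 12) | 0xfffu;
    }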

diff -r 49700bb716bb -r 35fb20c4822c xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Sep 17 13:33:09 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Tue Sep 18 11:49:42 2007 +0100
@@ -670,6 +670,412 @@ int hvm_set_cr4(unsigned long value)
  gpf:
     hvm_inject_exception(TRAP_gp_fault, 0, 0);
     return 0;
+}
+
+int hvm_virtual_to_linear_addr(
+    enum x86_segment seg,
+    struct segment_register *reg,
+    unsigned long offset,
+    unsigned int bytes,
+    enum hvm_access_type access_type,
+    unsigned int addr_size,
+    unsigned long *linear_addr)
+{
+    unsigned long addr = offset;
+    uint32_t last_byte;
+
+    if ( addr_size != 64 )
+    {
+        /*
+         * COMPATIBILITY MODE: Apply segment checks and add base.
+         */
+
+        switch ( access_type )
+        {
+        case hvm_access_read:
+            if ( (reg->attr.fields.type & 0xa) == 0x8 )
+                goto gpf; /* execute-only code segment */
+            break;
+        case hvm_access_write:
+            if ( (reg->attr.fields.type & 0xa) != 0x2 )
+                goto gpf; /* not a writable data segment */
+            break;
+        default:
+            break;
+        }
+
+        last_byte = offset + bytes - 1;
+
+        /* Is this a grows-down data segment? Special limit check if so. */
+        if ( (reg->attr.fields.type & 0xc) == 0x4 )
+        {
+            /* Is upper limit 0xFFFF or 0xFFFFFFFF? */
+            if ( !reg->attr.fields.db )
+                last_byte = (uint16_t)last_byte;
+
+            /* Check first byte and last byte against respective bounds. */
+            if ( (offset <= reg->limit) || (last_byte < offset) )
+                goto gpf;
+        }
+        else if ( (last_byte > reg->limit) || (last_byte < offset) )
+            goto gpf; /* last byte is beyond limit or wraps 0xFFFFFFFF */
+
+        /*
+         * Hardware truncates to 32 bits in compatibility mode.
+         * It does not truncate to 16 bits in 16-bit address-size mode.
+         */
+        addr = (uint32_t)(addr + reg->base);
+    }
+    else
+    {
+        /*
+         * LONG MODE: FS and GS add segment base. Addresses must be canonical.
+         */
+
+        if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) )
+            addr += reg->base;
+
+        if ( !is_canonical_address(addr) )
+            goto gpf;
+    }
+
+    *linear_addr = addr;
+    return 1;
+
+ gpf:
+    return 0;
+}
+
+static void *hvm_map(unsigned long va, int size)
+{
+    unsigned long gfn, mfn;
+    p2m_type_t p2mt;
+
+    if ( ((va & ~PAGE_MASK) + size) > PAGE_SIZE )
+    {
+        hvm_inject_exception(TRAP_page_fault, PFEC_write_access,
+                             (va + PAGE_SIZE - 1) & PAGE_MASK);
+        return NULL;
+    }
+
+    gfn = paging_gva_to_gfn(current, va);
+    mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
+    if ( !p2m_is_ram(p2mt) )
+    {
+        hvm_inject_exception(TRAP_page_fault, PFEC_write_access, va);
+        return NULL;
+    }
+
+    ASSERT(mfn_valid(mfn));
+
+    paging_mark_dirty(current->domain, mfn);
+
+    return (char *)map_domain_page(mfn) + (va & ~PAGE_MASK);
+}
+
+static void hvm_unmap(void *p)
+{
+    if ( p )
+        unmap_domain_page(p);
+}
+
+static int hvm_load_segment_selector(
+    struct vcpu *v, enum x86_segment seg, uint16_t sel)
+{
+    struct segment_register desctab, cs, segr;
+    struct desc_struct *pdesc, desc;
+    u8 dpl, rpl, cpl;
+    int fault_type = TRAP_invalid_tss;
+
+    /* NULL selector? */
+    if ( (sel & 0xfffc) == 0 )
+    {
+        if ( (seg == x86_seg_cs) || (seg == x86_seg_ss) )
+            goto fail;
+        memset(&segr, 0, sizeof(segr));
+        hvm_set_segment_register(v, seg, &segr);
+        return 0;
+    }
+
+    /* LDT descriptor must be in the GDT. */
+    if ( (seg == x86_seg_ldtr) && (sel & 4) )
+        goto fail;
+
+    hvm_get_segment_register(v, x86_seg_cs, &cs);
+    hvm_get_segment_register(
+        v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
+
+    /* Check against descriptor table limit. */
+    if ( ((sel & 0xfff8) + 7) > desctab.limit )
+        goto fail;
+
+    pdesc = hvm_map(desctab.base + (sel & 0xfff8), 8);
+    if ( pdesc == NULL )
+        goto hvm_map_fail;
+
+    do {
+        desc = *pdesc;
+
+        /* Segment present in memory? */
+        if ( !(desc.b & (1u<<15)) )
+        {
+            fault_type = TRAP_no_segment;
+            goto unmap_and_fail;
+        }
+
+        /* LDT descriptor is a system segment. All others are code/data. */
+        if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) )
+            goto unmap_and_fail;
+
+        dpl = (desc.b >> 13) & 3;
+        rpl = sel & 3;
+        cpl = cs.sel & 3;
+
+        switch ( seg )
+        {
+        case x86_seg_cs:
+            /* Code segment? */
+            if ( !(desc.b & (1u<<11)) )
+                goto unmap_and_fail;
+            /* Non-conforming segment: check DPL against RPL. */
+            if ( ((desc.b & (6u<<9)) != (6u<<9)) && (dpl != rpl) )
+                goto unmap_and_fail;
+            break;
+        case x86_seg_ss:
+            /* Writable data segment? */
+            if ( (desc.b & (5u<<9)) != (1u<<9) )
+                goto unmap_and_fail;
+            if ( (dpl != cpl) || (dpl != rpl) )
+                goto unmap_and_fail;
+            break;
+        case x86_seg_ldtr:
+            /* LDT system segment? */
+            if ( (desc.b & (15u<<8)) != (2u<<8) )
+                goto unmap_and_fail;
+            goto skip_accessed_flag;
+        default:
+            /* Readable code or data segment? */
+            if ( (desc.b & (5u<<9)) == (4u<<9) )
+                goto unmap_and_fail;
+            /* Non-conforming segment: check DPL against RPL and CPL. */
+            if ( ((desc.b & (6u<<9)) != (6u<<9)) && ((dpl < cpl) || (dpl < rpl)) )
+                goto unmap_and_fail;
+            break;
+        }
+    } while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */
+              (cmpxchg(&pdesc->b, desc.b, desc.b | 0x100) != desc.b) );
+
+    /* Force the Accessed flag in our local copy. */
+    desc.b |= 0x100;
+
+ skip_accessed_flag:
+    hvm_unmap(pdesc);
+
+    segr.base = (((desc.b <<  0) & 0xff000000u) |
+                 ((desc.b << 16) & 0x00ff0000u) |
+                 ((desc.a >> 16) & 0x0000ffffu));
+    segr.attr.bytes = (((desc.b >>  8) & 0x00ffu) |
+                       ((desc.b >> 12) & 0x0f00u));
+    segr.limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu);
+    if ( segr.attr.fields.g )
+        segr.limit = (segr.limit << 12) | 0xfffu;
+    segr.sel = sel;
+    hvm_set_segment_register(v, seg, &segr);
+
+    return 0;
+
+ unmap_and_fail:
+    hvm_unmap(pdesc);
+ fail:
+    hvm_inject_exception(fault_type, sel & 0xfffc, 0);
+ hvm_map_fail:
+    return 1;
+}
+
+void hvm_task_switch(
+    uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
+    int32_t errcode)
+{
+    struct vcpu *v = current;
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    struct segment_register gdt, tr, prev_tr, segr;
+    struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
+    unsigned long eflags;
+    int exn_raised;
+    struct {
+        u16 back_link,__blh;
+        u32 esp0;
+        u16 ss0, _0;
+        u32 esp1;
+        u16 ss1, _1;
+        u32 esp2;
+        u16 ss2, _2;
+        u32 cr3, eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi;
+        u16 es, _3, cs, _4, ss, _5, ds, _6, fs, _7, gs, _8, ldt, _9;
+        u16 trace, iomap;
+    } *ptss, tss;
+
+    hvm_get_segment_register(v, x86_seg_gdtr, &gdt);
+    hvm_get_segment_register(v, x86_seg_tr, &prev_tr);
+
+    if ( ((tss_sel & 0xfff8) + 7) > gdt.limit )
+    {
+        hvm_inject_exception((taskswitch_reason == TSW_iret) ?
+                             TRAP_invalid_tss : TRAP_gp_fault,
+                             tss_sel & 0xfff8, 0);
+        goto out;
+    }
+
+    optss_desc = hvm_map(gdt.base + (prev_tr.sel & 0xfff8), 8);
+    if ( optss_desc == NULL )
+        goto out;
+
+    nptss_desc = hvm_map(gdt.base + (tss_sel & 0xfff8), 8);
+    if ( nptss_desc == NULL )
+        goto out;
+
+    tss_desc = *nptss_desc;
+    tr.sel = tss_sel;
+    tr.base = (((tss_desc.b <<  0) & 0xff000000u) |
+               ((tss_desc.b << 16) & 0x00ff0000u) |
+               ((tss_desc.a >> 16) & 0x0000ffffu));
+    tr.limit = (tss_desc.b & 0x000f0000u) | (tss_desc.a & 0x0000ffffu);
+    tr.attr.bytes = (((tss_desc.b >>  8) & 0x00ffu) |
+                     ((tss_desc.b >> 12) & 0x0f00u));
+
+    if ( !tr.attr.fields.p )
+    {
+        hvm_inject_exception(TRAP_no_segment, tss_sel & 0xfff8, 0);
+        goto out;
+    }
+
+    if ( tr.attr.fields.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
+    {
+        hvm_inject_exception(
+            (taskswitch_reason == TSW_iret) ? TRAP_invalid_tss : TRAP_gp_fault,
+            tss_sel & 0xfff8, 0);
+        goto out;
+    }
+
+    if ( !tr.attr.fields.g && (tr.limit < (sizeof(tss)-1)) )
+    {
+        hvm_inject_exception(TRAP_invalid_tss, tss_sel & 0xfff8, 0);
+        goto out;
+    }
+
+    hvm_store_cpu_guest_regs(v, regs, NULL);
+
+    ptss = hvm_map(prev_tr.base, sizeof(tss));
+    if ( ptss == NULL )
+        goto out;
+
+    eflags = regs->eflags;
+    if ( taskswitch_reason == TSW_iret )
+        eflags &= ~X86_EFLAGS_NT;
+
+    ptss->cr3    = v->arch.hvm_vcpu.guest_cr[3];
+    ptss->eip    = regs->eip;
+    ptss->eflags = eflags;
+    ptss->eax    = regs->eax;
+    ptss->ecx    = regs->ecx;
+    ptss->edx    = regs->edx;
+    ptss->ebx    = regs->ebx;
+    ptss->esp    = regs->esp;
+    ptss->ebp    = regs->ebp;
+    ptss->esi    = regs->esi;
+    ptss->edi    = regs->edi;
+
+    hvm_get_segment_register(v, x86_seg_es, &segr);
+    ptss->es = segr.sel;
+    hvm_get_segment_register(v, x86_seg_cs, &segr);
+    ptss->cs = segr.sel;
+    hvm_get_segment_register(v, x86_seg_ss, &segr);
+    ptss->ss = segr.sel;
+    hvm_get_segment_register(v, x86_seg_ds, &segr);
+    ptss->ds = segr.sel;
+    hvm_get_segment_register(v, x86_seg_fs, &segr);
+    ptss->fs = segr.sel;
+    hvm_get_segment_register(v, x86_seg_gs, &segr);
+    ptss->gs = segr.sel;
+    hvm_get_segment_register(v, x86_seg_ldtr, &segr);
+    ptss->ldt = segr.sel;
+
+    hvm_unmap(ptss);
+
+    ptss = hvm_map(tr.base, sizeof(tss));
+    if ( ptss == NULL )
+        goto out;
+
+    if ( !hvm_set_cr3(ptss->cr3) )
+    {
+        hvm_unmap(ptss);
+        goto out;
+    }
+
+    regs->eip    = ptss->eip;
+    regs->eflags = ptss->eflags;
+    regs->eax    = ptss->eax;
+    regs->ecx    = ptss->ecx;
+    regs->edx    = ptss->edx;
+    regs->ebx    = ptss->ebx;
+    regs->esp    = ptss->esp;
+    regs->ebp    = ptss->ebp;
+    regs->esi    = ptss->esi;
+    regs->edi    = ptss->edi;
+
+    if ( taskswitch_reason == TSW_call_or_int )
+    {
+        regs->eflags |= X86_EFLAGS_NT;
+        ptss->back_link = prev_tr.sel;
+    }
+
+    exn_raised = 0;
+    if ( hvm_load_segment_selector(v, x86_seg_es, ptss->es) ||
+         hvm_load_segment_selector(v, x86_seg_cs, ptss->cs) ||
+         hvm_load_segment_selector(v, x86_seg_ss, ptss->ss) ||
+         hvm_load_segment_selector(v, x86_seg_ds, ptss->ds) ||
+         hvm_load_segment_selector(v, x86_seg_fs, ptss->fs) ||
+         hvm_load_segment_selector(v, x86_seg_gs, ptss->gs) ||
+         hvm_load_segment_selector(v, x86_seg_ldtr, ptss->ldt) )
+        exn_raised = 1;
+
+    if ( (ptss->trace & 1) && !exn_raised )
+        hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);
+
+    hvm_unmap(ptss);
+
+    tr.attr.fields.type = 0xb; /* busy 32-bit tss */
+    hvm_set_segment_register(v, x86_seg_tr, &tr);
+    paging_update_cr3(v);
+
+    v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_TS;
+    hvm_update_guest_cr(v, 0);
+
+    if ( (taskswitch_reason == TSW_iret) ||
+         (taskswitch_reason == TSW_jmp) )
+        clear_bit(41, optss_desc); /* clear B flag of old task */
+
+    if ( taskswitch_reason != TSW_iret )
+        set_bit(41, nptss_desc); /* set B flag of new task */
+
+    if ( errcode >= 0 )
+    {
+        struct segment_register reg;
+        unsigned long linear_addr;
+        regs->esp -= 4;
+        hvm_get_segment_register(current, x86_seg_ss, &reg);
+        /* Todo: do not ignore access faults here. */
+        if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp,
+                                        4, hvm_access_write, 32,
+                                        &linear_addr) )
+            hvm_copy_to_guest_virt(linear_addr, &errcode, 4);
+    }
+
+    hvm_load_cpu_guest_regs(v, regs);
+
+ out:
+    hvm_unmap(optss_desc);
+    hvm_unmap(nptss_desc);
 }
 
 /*
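
The grows-down branch in hvm_virtual_to_linear_addr() above is easy to
misread, so here is the same limit rule extracted into a standalone checker
with a few worked cases (illustrative only; it mirrors the patch's logic
under the assumption that reg->limit is already byte-granular):

    #include <assert.h>
    #include <stdint.h>

    /* Expand-down data segment: valid bytes lie strictly above the
     * limit and must not wrap the (16- or 32-bit) upper bound. */
    static int expand_down_ok(uint32_t limit, int db,
                              uint32_t offset, uint32_t bytes)
    {
        uint32_t last_byte = offset + bytes - 1;
        if ( !db )                  /* upper bound 0xFFFF, not 0xFFFFFFFF */
            last_byte = (uint16_t)last_byte;
        return !((offset <= limit) || (last_byte < offset));
    }

    int main(void)
    {
        assert( expand_down_ok(0x0fff, 1, 0x1000, 4));     /* just above limit */
        assert(!expand_down_ok(0x0fff, 1, 0x0800, 4));     /* below limit: #GP */
        assert(!expand_down_ok(0x0fff, 1, 0xfffffffe, 4)); /* wraps: #GP       */
        return 0;
    }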
diff -r 49700bb716bb -r 35fb20c4822c xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Sep 17 13:33:09 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Sep 18 11:49:42 2007 +0100
@@ -648,6 +648,8 @@ static void svm_get_segment_register(str
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
+    ASSERT(v == current);
+
     switch ( seg )
     {
     case x86_seg_cs:
@@ -694,10 +696,13 @@ static void svm_set_segment_register(str
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
+    ASSERT(v == current);
+
     switch ( seg )
     {
     case x86_seg_cs:
         memcpy(&vmcb->cs, reg, sizeof(*reg));
+        guest_cpu_user_regs()->cs = reg->sel;
         break;
     case x86_seg_ds:
         memcpy(&vmcb->ds, reg, sizeof(*reg));
@@ -717,6 +722,7 @@ static void svm_set_segment_register(str
         break;
     case x86_seg_ss:
         memcpy(&vmcb->ss, reg, sizeof(*reg));
+        guest_cpu_user_regs()->ss = reg->sel;
         break;
     case x86_seg_tr:
         svm_sync_vmcb(v);
@@ -2299,12 +2305,20 @@ asmlinkage void svm_vmexit_handler(struc
         svm_vmexit_do_invd(v);
         break;
 
-    case VMEXIT_GDTR_WRITE:
-        printk("WRITE to GDTR\n");
-        break;
-
-    case VMEXIT_TASK_SWITCH:
-        goto exit_and_crash;
+    case VMEXIT_TASK_SWITCH: {
+        enum hvm_task_switch_reason reason;
+        int32_t errcode = -1;
+        if ( (vmcb->exitinfo2 >> 36) & 1 )
+            reason = TSW_iret;
+        else if ( (vmcb->exitinfo2 >> 38) & 1 )
+            reason = TSW_jmp;
+        else
+            reason = TSW_call_or_int;
+        if ( (vmcb->exitinfo2 >> 44) & 1 )
+            errcode = (uint32_t)vmcb->exitinfo2;
+        hvm_task_switch((uint16_t)vmcb->exitinfo1, reason, errcode);
+        break;
+    }
 
     case VMEXIT_CPUID:
         svm_vmexit_do_cpuid(vmcb, regs);
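
For readers without the AMD manual to hand, the VMEXIT_TASK_SWITCH handler
above relies on the following EXITINFO field decode (a sketch; the macro
names are made up here, only the bit positions are taken from the handler):

    #include <stdint.h>

    #define TSW_EXITINFO1_SEL(x)    ((uint16_t)(x))    /* new TSS selector   */
    #define TSW_EXITINFO2_IRET(x)   (((x) >> 36) & 1)  /* switch via IRET    */
    #define TSW_EXITINFO2_JMP(x)    (((x) >> 38) & 1)  /* switch via far JMP */
    #define TSW_EXITINFO2_HAS_EC(x) (((x) >> 44) & 1)  /* error code valid   */
    #define TSW_EXITINFO2_EC(x)     ((uint32_t)(x))    /* the error code     */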
diff -r 49700bb716bb -r 35fb20c4822c xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Sep 17 13:33:09 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Sep 18 11:49:42 2007 +0100
@@ -885,7 +885,7 @@ static void vmx_get_segment_register(str
 static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
 {
-    u16 attr = 0;
+    uint32_t attr = 0;
 
     ASSERT(v == current);
 
@@ -960,12 +960,16 @@ static void vmx_set_segment_register(str
 static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
 {
-    u16 attr;
+    uint32_t attr;
 
     ASSERT(v == current);
 
     attr = reg->attr.bytes;
     attr = ((attr & 0xf00) << 4) | (attr & 0xff);
+
+    /* Not-present must mean unusable. */
+    if ( !reg->attr.fields.p )
+        attr |= (1u << 16);
 
     switch ( seg )
     {
@@ -974,6 +978,7 @@ static void vmx_set_segment_register(str
         __vmwrite(GUEST_CS_LIMIT, reg->limit);
         __vmwrite(GUEST_CS_BASE, reg->base);
         __vmwrite(GUEST_CS_AR_BYTES, attr);
+        guest_cpu_user_regs()->cs = reg->sel;
         break;
     case x86_seg_ds:
         __vmwrite(GUEST_DS_SELECTOR, reg->sel);
@@ -1004,6 +1009,7 @@ static void vmx_set_segment_register(str
         __vmwrite(GUEST_SS_LIMIT, reg->limit);
         __vmwrite(GUEST_SS_BASE, reg->base);
         __vmwrite(GUEST_SS_AR_BYTES, attr);
+        guest_cpu_user_regs()->ss = reg->sel;
         break;
     case x86_seg_tr:
         __vmwrite(GUEST_TR_SELECTOR, reg->sel);
@@ -2668,7 +2674,8 @@ asmlinkage void vmx_vmexit_handler(struc
 
     /* Event delivery caused this intercept? Queue for redelivery. */
     idtv_info = __vmread(IDT_VECTORING_INFO);
-    if ( unlikely(idtv_info & INTR_INFO_VALID_MASK) )
+    if ( unlikely(idtv_info & INTR_INFO_VALID_MASK) &&
+         (exit_reason != EXIT_REASON_TASK_SWITCH) )
     {
         if ( hvm_event_needs_reinjection((idtv_info>>8)&7, idtv_info&0xff) )
         {
@@ -2785,8 +2792,19 @@ asmlinkage void vmx_vmexit_handler(struc
         __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                   v->arch.hvm_vmx.exec_control);
         break;
-    case EXIT_REASON_TASK_SWITCH:
-        goto exit_and_crash;
+    case EXIT_REASON_TASK_SWITCH: {
+        const enum hvm_task_switch_reason reasons[] = {
+            TSW_call_or_int, TSW_iret, TSW_jmp, TSW_call_or_int };
+        int32_t errcode = -1;
+        exit_qualification = __vmread(EXIT_QUALIFICATION);
+        if ( (idtv_info & INTR_INFO_VALID_MASK) &&
+             (idtv_info & INTR_INFO_DELIVER_CODE_MASK) )
+            errcode = __vmread(IDT_VECTORING_ERROR_CODE);
+        hvm_task_switch((uint16_t)exit_qualification,
+                        reasons[(exit_qualification >> 30) & 3],
+                        errcode);
+        break;
+    }
     case EXIT_REASON_CPUID:
         inst_len = __get_instruction_length(); /* Safe: CPUID */
         __update_guest_eip(inst_len);
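
On the VMX side the exit qualification packs the new TSS selector into bits
15:0 and the switch source into bits 31:30, which is what the reasons[]
lookup above indexes on. A sketch of that decode (helper names are
illustrative):

    #include <stdint.h>

    /* Source encoding assumed above:
     * 0 = CALL, 1 = IRET, 2 = JMP, 3 = task gate in the IDT. */
    static inline uint16_t tsw_selector(uint64_t qual)
    {
        return (uint16_t)qual;
    }

    static inline unsigned int tsw_source(uint64_t qual)
    {
        return (qual >> 30) & 3;
    }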
diff -r 49700bb716bb -r 35fb20c4822c xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Sep 17 13:33:09 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Tue Sep 18 11:49:42 2007 +0100
@@ -101,7 +101,7 @@ int _shadow_mode_refcounts(struct domain
 /* x86 emulator support for the shadow code
  */
 
-struct segment_register *hvm_get_seg_reg(
+static struct segment_register *hvm_get_seg_reg(
     enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
 {
     struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
@@ -109,10 +109,6 @@ struct segment_register *hvm_get_seg_reg
         hvm_get_segment_register(current, seg, seg_reg);
     return seg_reg;
 }
-
-enum hvm_access_type {
-    hvm_access_insn_fetch, hvm_access_read, hvm_access_write
-};
 
 static int hvm_translate_linear_addr(
     enum x86_segment seg,
@@ -123,76 +119,18 @@ static int hvm_translate_linear_addr(
     unsigned long *paddr)
 {
     struct segment_register *reg = hvm_get_seg_reg(seg, sh_ctxt);
-    unsigned long limit, addr = offset;
-    uint32_t last_byte;
-
-    if ( sh_ctxt->ctxt.addr_size != 64 )
-    {
-        /*
-         * COMPATIBILITY MODE: Apply segment checks and add base.
-         */
-
-        switch ( access_type )
-        {
-        case hvm_access_read:
-            if ( (reg->attr.fields.type & 0xa) == 0x8 )
-                goto gpf; /* execute-only code segment */
-            break;
-        case hvm_access_write:
-            if ( (reg->attr.fields.type & 0xa) != 0x2 )
-                goto gpf; /* not a writable data segment */
-            break;
-        default:
-            break;
-        }
-
-        /* Calculate the segment limit, including granularity flag. */
-        limit = reg->limit;
-        if ( reg->attr.fields.g )
-            limit = (limit << 12) | 0xfff;
-
-        last_byte = offset + bytes - 1;
-
-        /* Is this a grows-down data segment? Special limit check if so. */
-        if ( (reg->attr.fields.type & 0xc) == 0x4 )
-        {
-            /* Is upper limit 0xFFFF or 0xFFFFFFFF? */
-            if ( !reg->attr.fields.db )
-                last_byte = (uint16_t)last_byte;
-
-            /* Check first byte and last byte against respective bounds. */
-            if ( (offset <= limit) || (last_byte < offset) )
-                goto gpf;
-        }
-        else if ( (last_byte > limit) || (last_byte < offset) )
-            goto gpf; /* last byte is beyond limit or wraps 0xFFFFFFFF */
-
-        /*
-         * Hardware truncates to 32 bits in compatibility mode.
-         * It does not truncate to 16 bits in 16-bit address-size mode.
-         */
-        addr = (uint32_t)(addr + reg->base);
-    }
-    else
-    {
-        /*
-         * LONG MODE: FS and GS add segment base. Addresses must be canonical.
-         */
-
-        if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) )
-            addr += reg->base;
-
-        if ( !is_canonical_address(addr) )
-            goto gpf;
-    }
-
-    *paddr = addr;
-    return 0;    
-
- gpf:
-    /* Inject #GP(0). */
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
-    return X86EMUL_EXCEPTION;
+    int okay;
+
+    okay = hvm_virtual_to_linear_addr(
+        seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
+
+    if ( !okay )
+    {
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return X86EMUL_EXCEPTION;
+    }
+
+    return 0;
 }
 
 static int
diff -r 49700bb716bb -r 35fb20c4822c xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Mon Sep 17 13:33:09 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Tue Sep 18 11:49:42 2007 +0100
@@ -362,4 +362,21 @@ static inline void hvm_cpu_down(void)
         hvm_funcs.cpu_down();
 }
 
+enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
+void hvm_task_switch(
+    uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
+    int32_t errcode);
+
+enum hvm_access_type {
+    hvm_access_insn_fetch, hvm_access_read, hvm_access_write
+};
+int hvm_virtual_to_linear_addr(
+    enum x86_segment seg,
+    struct segment_register *reg,
+    unsigned long offset,
+    unsigned int bytes,
+    enum hvm_access_type access_type,
+    unsigned int addr_size,
+    unsigned long *linear_addr);
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
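
The on-stack structure in hvm_task_switch() must match the architected
32-bit TSS layout byte for byte. A standalone sanity check of the offsets
it assumes (C11 _Static_assert; illustrative, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    struct tss32 {
        uint16_t back_link, _blh;
        uint32_t esp0; uint16_t ss0, _0;
        uint32_t esp1; uint16_t ss1, _1;
        uint32_t esp2; uint16_t ss2, _2;
        uint32_t cr3, eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi;
        uint16_t es, _3, cs, _4, ss, _5, ds, _6, fs, _7, gs, _8, ldt, _9;
        uint16_t trace, iomap;
    };

    _Static_assert(offsetof(struct tss32, cr3)   == 0x1c, "cr3");
    _Static_assert(offsetof(struct tss32, eip)   == 0x20, "eip");
    _Static_assert(offsetof(struct tss32, es)    == 0x48, "es");
    _Static_assert(offsetof(struct tss32, ldt)   == 0x60, "ldt");
    _Static_assert(offsetof(struct tss32, iomap) == 0x66, "iomap");
    _Static_assert(sizeof(struct tss32)          == 0x68, "size");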
