[Xen-changelog] [xen-unstable] merge with xen-unstable.hg
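Editor's note: the most intricate part of this merge is the SVM NMI-mask emulation in the xen/arch/x86/hvm/svm hunks below. As the patch comment explains, SVM does not virtualise the NMI mask, so the changeset arms the IRET intercept (GENERAL1_INTERCEPT_IRET) when an NMI is injected, treats "intercept armed" as the virtual NMI mask, and on the IRET vmexit disarms the intercept while raising a one-instruction interrupt shadow so a pending NMI cannot land before the guest's IRET has retired. The following stand-alone sketch only models that state machine; it is not Xen code, and the struct, the function names, and the main() driver are invented for illustration.

/*
 * Simplified model of the NMI-mask emulation introduced by this changeset.
 * NOT Xen code: vmcb_model, inject_nmi(), on_vmexit_iret() etc. are made-up
 * stand-ins for the real logic in xen/arch/x86/hvm/svm/{intr.c,svm.c},
 * which operates on the hardware VMCB.
 */
#include <stdbool.h>
#include <stdio.h>

struct vmcb_model {
    bool intr_shadow;     /* STI/MOV-SS style one-instruction shadow       */
    bool iret_intercept;  /* stands in for GENERAL1_INTERCEPT_IRET         */
    bool nmi_pending;     /* an NMI waiting to be delivered to the guest   */
};

/* Injecting an NMI: arm the IRET intercept so further NMIs stay blocked. */
static void inject_nmi(struct vmcb_model *v)
{
    printf("NMI injected\n");
    v->iret_intercept = true;
}

/* Emulated NMI mask: NMIs are blocked while the IRET intercept is armed. */
static bool nmi_blocked(const struct vmcb_model *v)
{
    return v->iret_intercept || v->intr_shadow;
}

/* Guest executes IRET in its NMI handler: the intercept fires first.     */
static void on_vmexit_iret(struct vmcb_model *v)
{
    /*
     * Clearing the intercept ends the emulated NMI mask, but the
     * interrupt shadow keeps a pending NMI from being injected before
     * the IRET instruction itself has completed.
     */
    v->iret_intercept = false;
    v->intr_shadow = true;
}

/* One instruction boundary: the shadow expires, pending NMIs may go in.  */
static void instruction_retired(struct vmcb_model *v)
{
    v->intr_shadow = false;
    if (v->nmi_pending && !nmi_blocked(v)) {
        v->nmi_pending = false;
        inject_nmi(v);
    }
}

int main(void)
{
    struct vmcb_model v = { 0 };

    inject_nmi(&v);            /* first NMI delivered, mask now "set"     */
    v.nmi_pending = true;      /* second NMI arrives while handler runs   */
    instruction_retired(&v);   /* still blocked: IRET intercept is armed  */
    on_vmexit_iret(&v);        /* handler's IRET: unmask, raise shadow    */
    instruction_retired(&v);   /* shadow drops, pending NMI injected now  */
    return 0;
}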
# HG changeset patch
# User Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
# Date 1211250795 -32400
# Node ID 4269ab4b37eecae7cc026b4366acd92b9e6c39e9
# Parent e78f5dbedbe0a5e34b4af43c9c248413e5340190
# Parent 2ada81810ddb73f29dfd1eb00de466eec2881ce6
merge with xen-unstable.hg
---
 tools/ioemu/hw/pci.c        |    2 +-
 tools/python/xen/xm/main.py |    2 +-
 xen/arch/x86/hvm/stdvga.c   |   36 +++++++++++++++++++++++++++++-------
 xen/arch/x86/hvm/svm/intr.c |    6 ++++++
 xen/arch/x86/hvm/svm/svm.c  |   38 ++++++++++++++++++++++++++++++++++++--
 xen/arch/x86/setup.c        |    2 +-
 6 files changed, 74 insertions(+), 12 deletions(-)

diff -r e78f5dbedbe0 -r 4269ab4b37ee tools/ioemu/hw/pci.c
--- a/tools/ioemu/hw/pci.c      Fri May 16 22:25:47 2008 +0900
+++ b/tools/ioemu/hw/pci.c      Tue May 20 11:33:15 2008 +0900
@@ -101,7 +101,7 @@ int pci_device_load(PCIDevice *s, QEMUFi
         int i;
         qemu_get_buffer(f, &irq_state, 1);
         for (i = 0; i < 4; i++)
-            pci_set_irq(s, i, !!(irq_state >> i));
+            pci_set_irq(s, i, (irq_state >> i) & 1);
     }
     return 0;
 }
diff -r e78f5dbedbe0 -r 4269ab4b37ee tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Fri May 16 22:25:47 2008 +0900
+++ b/tools/python/xen/xm/main.py       Tue May 20 11:33:15 2008 +0900
@@ -1096,7 +1096,7 @@ def xm_vcpu_list(args):
             # normalize cpumap by modulus nr_cpus, and drop duplicates
             cpumap = dict.fromkeys(
-                map(lambda x: x % nr_cpus, cpumap)).keys()
+                filter(lambda x: x < nr_cpus, cpumap)).keys()
             if len(cpumap) == nr_cpus:
                 return "any cpu"
diff -r e78f5dbedbe0 -r 4269ab4b37ee xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Fri May 16 22:25:47 2008 +0900
+++ b/xen/arch/x86/hvm/stdvga.c Tue May 20 11:33:15 2008 +0900
@@ -271,9 +271,9 @@ static uint8_t stdvga_mem_readb(uint64_t
     return ret;
 }
 
-static uint32_t stdvga_mem_read(uint32_t addr, uint32_t size)
-{
-    uint32_t data = 0;
+static uint64_t stdvga_mem_read(uint64_t addr, uint64_t size)
+{
+    uint64_t data = 0;
 
     switch ( size )
     {
@@ -293,8 +293,19 @@ static uint32_t stdvga_mem_read(uint32_t
         data |= stdvga_mem_readb(addr + 3) << 24;
         break;
 
+    case 8:
+        data = (uint64_t)(stdvga_mem_readb(addr));
+        data |= (uint64_t)(stdvga_mem_readb(addr + 1)) << 8;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 2)) << 16;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 3)) << 24;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 4)) << 32;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 5)) << 40;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 6)) << 48;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 7)) << 56;
+        break;
+
     default:
-        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
+        gdprintk(XENLOG_WARNING, "invalid io size: %"PRId64"\n", size);
         break;
     }
 
@@ -409,7 +420,7 @@ static void stdvga_mem_writeb(uint64_t a
     }
 }
 
-static void stdvga_mem_write(uint32_t addr, uint32_t data, uint32_t size)
+static void stdvga_mem_write(uint64_t addr, uint64_t data, uint64_t size)
 {
     /* Intercept mmio write */
     switch ( size )
@@ -430,8 +441,19 @@ static void stdvga_mem_write(uint32_t ad
         stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
         break;
 
+    case 8:
+        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
+        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
+        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
+        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
+        stdvga_mem_writeb(addr+4, (data >> 32) & 0xff);
+        stdvga_mem_writeb(addr+5, (data >> 40) & 0xff);
+        stdvga_mem_writeb(addr+6, (data >> 48) & 0xff);
+        stdvga_mem_writeb(addr+7, (data >> 56) & 0xff);
+        break;
+
     default:
-        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
+        gdprintk(XENLOG_WARNING, "invalid io size: %"PRId64"\n", size);
         break;
     }
 }
@@ -447,7 +469,7 @@ static int mmio_move(struct hvm_hw_stdvg
 {
     if ( p->dir == IOREQ_READ )
     {
-        uint32_t addr = p->addr, data = p->data, tmp;
+        uint64_t addr = p->addr, data = p->data, tmp;
         for ( i = 0; i < p->count; i++ )
         {
             tmp = stdvga_mem_read(addr, p->size);
diff -r e78f5dbedbe0 -r 4269ab4b37ee xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Fri May 16 22:25:47 2008 +0900
+++ b/xen/arch/x86/hvm/svm/intr.c       Tue May 20 11:33:15 2008 +0900
@@ -51,6 +51,12 @@ static void svm_inject_nmi(struct vcpu *
 
     ASSERT(vmcb->eventinj.fields.v == 0);
     vmcb->eventinj = event;
+
+    /*
+     * SVM does not virtualise the NMI mask, so we emulate it by intercepting
+     * the next IRET and blocking NMI injection until the intercept triggers.
+     */
+    vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
 }
 
 static void svm_inject_extint(struct vcpu *v, int vector)
diff -r e78f5dbedbe0 -r 4269ab4b37ee xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Fri May 16 22:25:47 2008 +0900
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue May 20 11:33:15 2008 +0900
@@ -367,15 +367,27 @@ static unsigned int svm_get_interrupt_sh
 static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    return (vmcb->interrupt_shadow ?
-            (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI) : 0);
+    unsigned int intr_shadow = 0;
+
+    if ( vmcb->interrupt_shadow )
+        intr_shadow |= HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI;
+
+    if ( vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET )
+        intr_shadow |= HVM_INTR_SHADOW_NMI;
+
+    return intr_shadow;
 }
 
 static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    vmcb->interrupt_shadow = !!(intr_shadow & (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI));
+
+    vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+    if ( intr_shadow & HVM_INTR_SHADOW_NMI )
+        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
 }
 
 static int svm_guest_x86_mode(struct vcpu *v)
@@ -1266,6 +1278,15 @@ asmlinkage void svm_vmexit_handler(struc
             reason = TSW_call_or_int;
         if ( (vmcb->exitinfo2 >> 44) & 1 )
             errcode = (uint32_t)vmcb->exitinfo2;
+
+        /*
+         * Some processors set the EXITINTINFO field when the task switch
+         * is caused by a task gate in the IDT. In this case we will be
+         * emulating the event injection, so we do not want the processor
+         * to re-inject the original event!
+         */
+        vmcb->eventinj.bytes = 0;
+
         hvm_task_switch((uint16_t)vmcb->exitinfo1, reason, errcode);
         break;
     }
@@ -1331,6 +1352,19 @@ asmlinkage void svm_vmexit_handler(struc
         svm_do_nested_pgfault(vmcb->exitinfo2, regs);
         break;
 
+    case VMEXIT_IRET:
+        /*
+         * IRET clears the NMI mask. However because we clear the mask
+         * /before/ executing IRET, we set the interrupt shadow to prevent
+         * a pending NMI from being injected immediately. This will work
+         * perfectly unless the IRET instruction faults: in that case we
+         * may inject an NMI before the NMI handler's IRET instruction is
+         * retired.
+         */
+        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+        vmcb->interrupt_shadow = 1;
+        break;
+
     default:
     exit_and_crash:
         gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "
diff -r e78f5dbedbe0 -r 4269ab4b37ee xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Fri May 16 22:25:47 2008 +0900
+++ b/xen/arch/x86/setup.c      Tue May 20 11:33:15 2008 +0900
@@ -362,7 +362,7 @@ void __init kexec_reserve_area(struct e8
 
     is_reserved = 1;
 
-    if ( !reserve_e820_ram(e820, kdump_start, kdump_size) )
+    if ( !reserve_e820_ram(e820, kdump_start, kdump_start + kdump_size) )
     {
         printk("Kdump: DISABLED (failed to reserve %luMB (%lukB) at 0x%lx)"
                "\n", kdump_size >> 20, kdump_size >> 10, kdump_start);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog