[Xen-changelog] merge?
# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID e2f0a6fdb7d9a8dd3b9920266460a48fc132c74a
# Parent  383f1336c30590eaef6f24881693aaf27ce75a4a
# Parent  5959fae4722aa8f4638876deb05cbd7f5ae3b590
merge?

diff -r 383f1336c305 -r e2f0a6fdb7d9 extras/mini-os/mm.c
--- a/extras/mini-os/mm.c       Wed Sep 14 13:37:03 2005
+++ b/extras/mini-os/mm.c       Wed Sep 14 14:43:34 2005
@@ -432,7 +432,7 @@
     /* Pin the page to provide correct protection */
     pin_request.cmd = MMUEXT_PIN_L1_TABLE;
-    pin_request.mfn = pfn_to_mfn(pt_frame);
+    pin_request.arg1.mfn = pfn_to_mfn(pt_frame);
     if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
     {
         printk("ERROR: pinning failed\n");
diff -r 383f1336c305 -r e2f0a6fdb7d9 linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Wed Sep 14 13:37:03 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Wed Sep 14 14:43:34 2005
@@ -115,7 +115,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_NEW_BASEPTR;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -123,7 +123,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_NEW_USER_BASEPTR;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -138,7 +138,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_INVLPG_LOCAL;
-    op.linear_addr = ptr & PAGE_MASK;
+    op.arg1.linear_addr = ptr & PAGE_MASK;
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -157,7 +157,7 @@
     if ( cpus_empty(*mask) )
         return;
     op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-    op.vcpumask = mask->bits;
+    op.arg2.vcpumask = mask->bits;
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -165,7 +165,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_INVLPG_ALL;
-    op.linear_addr = ptr & PAGE_MASK;
+    op.arg1.linear_addr = ptr & PAGE_MASK;
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -175,8 +175,8 @@
     if ( cpus_empty(*mask) )
         return;
     op.cmd = MMUEXT_INVLPG_MULTI;
-    op.vcpumask = mask->bits;
-    op.linear_addr = ptr & PAGE_MASK;
+    op.arg1.linear_addr = ptr & PAGE_MASK;
+    op.arg2.vcpumask = mask->bits;
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -193,7 +193,7 @@
 #else
     op.cmd = MMUEXT_PIN_L2_TABLE;
 #endif
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -201,7 +201,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -209,7 +209,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_PIN_L1_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -217,7 +217,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -226,7 +226,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_PIN_L3_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -234,7 +234,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -242,7 +242,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_PIN_L2_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
@@ -250,7 +250,7 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+    op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 #endif /* CONFIG_X86_64 */
@@ -260,8 +260,8 @@
 {
     struct mmuext_op op;
     op.cmd = MMUEXT_SET_LDT;
-    op.linear_addr = ptr;
-    op.nr_ents = len;
+    op.arg1.linear_addr = ptr;
+    op.arg2.nr_ents = len;
     BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
diff -r 383f1336c305 -r e2f0a6fdb7d9 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Wed Sep 14 13:37:03 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Wed Sep 14 14:43:34 2005
@@ -294,7 +294,7 @@
         mcl++;
         mmuext->cmd = MMUEXT_REASSIGN_PAGE;
-        mmuext->mfn = old_mfn;
+        mmuext->arg1.mfn = old_mfn;
         mmuext++;
 #endif
         mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
@@ -355,7 +355,7 @@
 #ifdef CONFIG_XEN_NETDEV_GRANT
         old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
 #else
-        old_mfn = mmuext[0].mfn;
+        old_mfn = mmuext[0].arg1.mfn;
 #endif
         atomic_set(&(skb_shinfo(skb)->dataref), 1);
         skb_shinfo(skb)->nr_frags = 0;
diff -r 383f1336c305 -r e2f0a6fdb7d9 linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h       Wed Sep 14 13:37:03 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h       Wed Sep 14 14:43:34 2005
@@ -67,7 +67,7 @@
         /* Re-load page tables: load_cr3(next->pgd) */
         per_cpu(cur_pgd, cpu) = next->pgd;
         op->cmd = MMUEXT_NEW_BASEPTR;
-        op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
+        op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
         op++;

         /*
@@ -76,8 +76,8 @@
         if (unlikely(prev->context.ldt != next->context.ldt)) {
             /* load_LDT_nolock(&next->context, cpu) */
             op->cmd = MMUEXT_SET_LDT;
-            op->linear_addr = (unsigned long)next->context.ldt;
-            op->nr_ents = next->context.size;
+            op->arg1.linear_addr = (unsigned long)next->context.ldt;
+            op->arg2.nr_ents = next->context.size;
             op++;
         }
diff -r 383f1336c305 -r e2f0a6fdb7d9 linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h     Wed Sep 14 13:37:03 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h     Wed Sep 14 14:43:34 2005
@@ -83,19 +83,19 @@
         /* load_cr3(next->pgd) */
         per_cpu(cur_pgd, smp_processor_id()) = next->pgd;
         op->cmd = MMUEXT_NEW_BASEPTR;
-        op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
+        op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
         op++;

         /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
         op->cmd = MMUEXT_NEW_USER_BASEPTR;
-        op->mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
+        op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
         op++;

         if (unlikely(next->context.ldt != prev->context.ldt)) {
             /* load_LDT_nolock(&next->context, cpu) */
             op->cmd = MMUEXT_SET_LDT;
-            op->linear_addr = (unsigned long)next->context.ldt;
-            op->nr_ents = next->context.size;
+            op->arg1.linear_addr = (unsigned long)next->context.ldt;
+            op->arg2.nr_ents = next->context.size;
             op++;
         }
diff -r 383f1336c305 -r e2f0a6fdb7d9 tools/ioemu/hw/i8259.c
--- a/tools/ioemu/hw/i8259.c    Wed Sep 14 13:37:03 2005
+++ b/tools/ioemu/hw/i8259.c    Wed Sep 14 14:43:34 2005
@@ -128,21 +128,23 @@
 /* pic[1] is connected to pin2 of pic[0] */
 #define CASCADE_IRQ 2

-static void shared_page_update()
-{
-    extern shared_iopage_t *shared_page;
-    uint8_t * pmask = (uint8_t *)&(shared_page->sp_global.pic_mask[0]);
-    int index;
+extern shared_iopage_t *shared_page;
+
+static void xen_update_shared_imr(void)
+{
+    uint8_t *pmask = (uint8_t *)shared_page->sp_global.pic_mask;
+    int index;

     index = pics[0].irq_base/8;
     pmask[index] = pics[0].imr;
+
     index = pics[1].irq_base/8;
-
-    if ( pics[0].imr & (1 << CASCADE_IRQ) ) {
-        pmask[index] = 0xff;
-    } else {
-        pmask[index] = pics[1].imr;
-    }
+    pmask[index] = (pics[0].imr & (1 << CASCADE_IRQ)) ? 0xff : pics[1].imr;
+}
+
+static void xen_clear_shared_irr(void)
+{
+    memset(shared_page->sp_global.pic_intr, 0, INTR_LEN);
 }

 /* raise irq to CPU if necessary. must be called every time the active
@@ -174,7 +176,8 @@
 #endif
         cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
     }
-    shared_page_update();
+
+    xen_update_shared_imr();
 }

 #ifdef DEBUG_IRQ_LATENCY
@@ -283,7 +286,9 @@
     tmp = s->elcr_mask;
     memset(s, 0, sizeof(PicState));
     s->elcr_mask = tmp;
-    shared_page_update();
+
+    xen_update_shared_imr();
+    xen_clear_shared_irr();
 }

 static void pic_ioport_write(void *opaque, uint32_t addr, uint32_t val)
diff -r 383f1336c305 -r e2f0a6fdb7d9 tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c    Wed Sep 14 13:37:03 2005
+++ b/tools/libxc/xc_linux_restore.c    Wed Sep 14 14:43:34 2005
@@ -421,7 +421,7 @@
                 pin[nr_pins].cmd = MMUEXT_PIN_L1_TABLE;
             else /* pfn_type[i] == (L2TAB|LPINTAB) */
                 pin[nr_pins].cmd = MMUEXT_PIN_L2_TABLE;
-            pin[nr_pins].mfn = pfn_to_mfn_table[i];
+            pin[nr_pins].arg1.mfn = pfn_to_mfn_table[i];
             if ( ++nr_pins == MAX_PIN_BATCH )
             {
                 if ( xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0 )
diff -r 383f1336c305 -r e2f0a6fdb7d9 tools/libxc/xc_vmx_build.c
--- a/tools/libxc/xc_vmx_build.c        Wed Sep 14 13:37:03 2005
+++ b/tools/libxc/xc_vmx_build.c        Wed Sep 14 14:43:34 2005
@@ -169,21 +169,35 @@
     l2_pgentry_t *vl2tab;

     mmio_addr = mmio_range_start & PAGE_MASK;
-    for (; mmio_addr < mmio_range_end; mmio_addr += PAGE_SIZE) {
+    for ( ; mmio_addr < mmio_range_end; mmio_addr += PAGE_SIZE )
+    {
         vl3e = vl3tab[l3_table_offset(mmio_addr)];
-        if (vl3e == 0)
+        if ( vl3e == 0 )
             continue;
-        vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
-                                      PROT_READ|PROT_WRITE, vl3e >> PAGE_SHIFT);
-        if (vl2tab == 0) {
+
+        vl2tab = xc_map_foreign_range(
+            xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, vl3e>>PAGE_SHIFT);
+        if ( vl2tab == NULL )
+        {
             PERROR("Failed zap MMIO range");
             return -1;
         }
+
         vl2e = vl2tab[l2_table_offset(mmio_addr)];
-        if (vl2e == 0)
+        if ( vl2e == 0 )
+        {
+            munmap(vl2tab, PAGE_SIZE);
             continue;
-        vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
-                                      PROT_READ|PROT_WRITE, vl2e >> PAGE_SHIFT);
+        }
+
+        vl1tab = xc_map_foreign_range(
+            xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, vl2e>>PAGE_SHIFT);
+        if ( vl1tab == NULL )
+        {
+            PERROR("Failed zap MMIO range");
+            munmap(vl2tab, PAGE_SIZE);
+            return -1;
+        }
         vl1tab[l1_table_offset(mmio_addr)] = 0;
         munmap(vl2tab, PAGE_SIZE);
diff -r 383f1336c305 -r e2f0a6fdb7d9 tools/libxc/xg_private.c
--- a/tools/libxc/xg_private.c  Wed Sep 14 13:37:03 2005
+++ b/tools/libxc/xg_private.c  Wed Sep 14 14:43:34 2005
@@ -65,7 +65,7 @@
     struct mmuext_op op;

     op.cmd = type;
-    op.mfn = mfn;
+    op.arg1.mfn = mfn;

     if ( xc_mmuext_op(xc_handle, &op, 1, dom) < 0 )
         return 1;
diff -r 383f1336c305 -r e2f0a6fdb7d9 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Sep 14 13:37:03 2005
+++ b/xen/arch/x86/mm.c Wed Sep 14 14:43:34 2005
@@ -1659,7 +1659,7 @@
 {
     struct mmuext_op op;
     int rc = 0, i = 0, okay, cpu = smp_processor_id();
-    unsigned long type, done = 0;
+    unsigned long mfn, type, done = 0;
     struct pfn_info *page;
     struct vcpu *v = current;
     struct domain *d = v->domain, *e;
@@ -1706,7 +1706,8 @@
         }

         okay = 1;
-        page = &frame_table[op.mfn];
+        mfn = op.arg1.mfn;
+        page = &frame_table[mfn];

         switch ( op.cmd )
         {
@@ -1717,17 +1718,17 @@
             if ( shadow_mode_refcounts(FOREIGNDOM) )
                 type = PGT_writable_page;

-            okay = get_page_and_type_from_pagenr(op.mfn, type, FOREIGNDOM);
+            okay = get_page_and_type_from_pagenr(mfn, type, FOREIGNDOM);
             if ( unlikely(!okay) )
             {
-                MEM_LOG("Error while pinning mfn %lx", op.mfn);
+                MEM_LOG("Error while pinning mfn %lx", mfn);
                 break;
             }

             if ( unlikely(test_and_set_bit(_PGT_pinned,
                                            &page->u.inuse.type_info)) )
             {
-                MEM_LOG("Mfn %lx already pinned", op.mfn);
+                MEM_LOG("Mfn %lx already pinned", mfn);
                 put_page_and_type(page);
                 okay = 0;
                 break;
@@ -1750,10 +1751,10 @@
             goto pin_page;

         case MMUEXT_UNPIN_TABLE:
-            if ( unlikely(!(okay = get_page_from_pagenr(op.mfn, FOREIGNDOM))) )
+            if ( unlikely(!(okay = get_page_from_pagenr(mfn, FOREIGNDOM))) )
             {
                 MEM_LOG("Mfn %lx bad domain (dom=%p)",
-                        op.mfn, page_get_owner(page));
+                        mfn, page_get_owner(page));
             }
             else if ( likely(test_and_clear_bit(_PGT_pinned,
                                                 &page->u.inuse.type_info)) )
@@ -1765,28 +1766,28 @@
             {
                 okay = 0;
                 put_page(page);
-                MEM_LOG("Mfn %lx not pinned", op.mfn);
+                MEM_LOG("Mfn %lx not pinned", mfn);
             }
             break;

         case MMUEXT_NEW_BASEPTR:
-            okay = new_guest_cr3(op.mfn);
+            okay = new_guest_cr3(mfn);
             percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
             break;

 #ifdef __x86_64__
         case MMUEXT_NEW_USER_BASEPTR:
             okay = get_page_and_type_from_pagenr(
-                op.mfn, PGT_root_page_table, d);
+                mfn, PGT_root_page_table, d);
             if ( unlikely(!okay) )
             {
-                MEM_LOG("Error while installing new mfn %lx", op.mfn);
+                MEM_LOG("Error while installing new mfn %lx", mfn);
             }
             else
             {
                 unsigned long old_mfn =
                     pagetable_get_pfn(v->arch.guest_table_user);
-                v->arch.guest_table_user = mk_pagetable(op.mfn << PAGE_SHIFT);
+                v->arch.guest_table_user = mk_pagetable(mfn << PAGE_SHIFT);
                 if ( old_mfn != 0 )
                     put_page_and_type(&frame_table[old_mfn]);
             }
@@ -1799,8 +1800,8 @@

         case MMUEXT_INVLPG_LOCAL:
             if ( shadow_mode_enabled(d) )
-                shadow_invlpg(v, op.linear_addr);
-            local_flush_tlb_one(op.linear_addr);
+                shadow_invlpg(v, op.arg1.linear_addr);
+            local_flush_tlb_one(op.arg1.linear_addr);
             break;

         case MMUEXT_TLB_FLUSH_MULTI:
@@ -1808,7 +1809,7 @@
         {
             unsigned long vmask;
             cpumask_t pmask;
-            if ( unlikely(get_user(vmask, (unsigned long *)op.vcpumask)) )
+            if ( unlikely(get_user(vmask, (unsigned long *)op.arg2.vcpumask)) )
             {
                 okay = 0;
                 break;
@@ -1818,7 +1819,7 @@
             if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
                 flush_tlb_mask(pmask);
             else
-                flush_tlb_one_mask(pmask, op.linear_addr);
+                flush_tlb_one_mask(pmask, op.arg1.linear_addr);
             break;
         }
@@ -1827,7 +1828,7 @@
             break;

         case MMUEXT_INVLPG_ALL:
-            flush_tlb_one_mask(d->cpumask, op.linear_addr);
+            flush_tlb_one_mask(d->cpumask, op.arg1.linear_addr);
             break;

         case MMUEXT_FLUSH_CACHE:
@@ -1852,8 +1853,8 @@
                 break;
             }

-            unsigned long ptr = op.linear_addr;
-            unsigned long ents = op.nr_ents;
+            unsigned long ptr = op.arg1.linear_addr;
+            unsigned long ents = op.arg2.nr_ents;
             if ( ((ptr & (PAGE_SIZE-1)) != 0) || (ents > 8192) ||
                  !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
@@ -1886,7 +1887,7 @@
             e = percpu_info[cpu].foreign;
             if ( unlikely(e == NULL) )
             {
-                MEM_LOG("No FOREIGNDOM to reassign mfn %lx to", op.mfn);
+                MEM_LOG("No FOREIGNDOM to reassign mfn %lx to", mfn);
                 okay = 0;
                 break;
             }
@@ -1919,7 +1920,7 @@
             {
                 MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
                         "page is in Xen heap (%lx), or dom is dying (%ld).",
-                        e->tot_pages, e->max_pages, op.mfn, e->domain_flags);
+                        e->tot_pages, e->max_pages, mfn, e->domain_flags);
                 okay = 0;
                 goto reassign_fail;
             }
diff -r 383f1336c305 -r e2f0a6fdb7d9 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Wed Sep 14 13:37:03 2005
+++ b/xen/arch/x86/vmx.c        Wed Sep 14 14:43:34 2005
@@ -1021,7 +1021,7 @@
      * CR0: We don't want to lose PE and PG.
      */
     paging_enabled = vmx_paging_enabled(d);
-    __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
+    __vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE);
     __vmwrite(CR0_READ_SHADOW, value);
     VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
diff -r 383f1336c305 -r e2f0a6fdb7d9 xen/arch/x86/vmx_intercept.c
--- a/xen/arch/x86/vmx_intercept.c      Wed Sep 14 13:37:03 2005
+++ b/xen/arch/x86/vmx_intercept.c      Wed Sep 14 14:43:34 2005
@@ -227,6 +227,7 @@
     u64 *intr = &(sp->sp_global.pic_intr[0]);
     struct vmx_virpit_t *vpit = &(d->domain->arch.vmx_platform.vmx_pit);
     int rw_mode, reinit = 0;
+    int oldvec = 0;

     /* load init count*/
     if (p->state == STATE_IORESP_HOOK) {
@@ -235,6 +236,7 @@
         VMX_DBG_LOG(DBG_LEVEL_1, "VMX_PIT: guest reset PIT with channel %lx!\n", (unsigned long) ((p->u.data >> 24) & 0x3) );
         rem_ac_timer(&(vpit->pit_timer));
         reinit = 1;
+        oldvec = vpit->vector;
     }
     else
         init_ac_timer(&vpit->pit_timer, pit_timer_fn, vpit, d->processor);
@@ -250,6 +252,12 @@
         vpit->period = 1000000;
     }
     vpit->vector = ((p->u.data >> 16) & 0xFF);
+
+    if( reinit && oldvec != vpit->vector){
+        clear_bit(oldvec, intr);
+        vpit->pending_intr_nr = 0;
+    }
+
     vpit->channel = ((p->u.data >> 24) & 0x3);
     vpit->first_injected = 0;
diff -r 383f1336c305 -r e2f0a6fdb7d9 xen/include/public/arch-x86_64.h
--- a/xen/include/public/arch-x86_64.h  Wed Sep 14 13:37:03 2005
+++ b/xen/include/public/arch-x86_64.h  Wed Sep 14 14:43:34 2005
@@ -124,36 +124,46 @@
     unsigned long address; /* code offset */
 } trap_info_t;

+#ifdef __GNUC__
+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
+#define __DECL_REG(name) union { u64 r ## name, e ## name; }
+#else
+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
+#define __DECL_REG(name) u64 r ## name
+#endif
+
 typedef struct cpu_user_regs {
     u64 r15;
     u64 r14;
     u64 r13;
     u64 r12;
-    union { u64 rbp, ebp; };
-    union { u64 rbx, ebx; };
+    __DECL_REG(bp);
+    __DECL_REG(bx);
     u64 r11;
     u64 r10;
     u64 r9;
     u64 r8;
-    union { u64 rax, eax; };
-    union { u64 rcx, ecx; };
-    union { u64 rdx, edx; };
-    union { u64 rsi, esi; };
-    union { u64 rdi, edi; };
+    __DECL_REG(ax);
+    __DECL_REG(cx);
+    __DECL_REG(dx);
+    __DECL_REG(si);
+    __DECL_REG(di);
     u32 error_code;    /* private */
     u32 entry_vector;  /* private */
-    union { u64 rip, eip; };
+    __DECL_REG(ip);
     u16 cs, _pad0[1];
     u8  saved_upcall_mask;
    u8  _pad1[3];
-    union { u64 rflags, eflags; };
-    union { u64 rsp, esp; };
+    __DECL_REG(flags);
+    __DECL_REG(sp);
     u16 ss, _pad2[3];
     u16 es, _pad3[3];
     u16 ds, _pad4[3];
     u16 fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.      */
     u16 gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
 } cpu_user_regs_t;
+
+#undef __DECL_REG

 typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
diff -r 383f1336c305 -r e2f0a6fdb7d9 xen/include/public/xen.h
--- a/xen/include/public/xen.h  Wed Sep 14 13:37:03 2005
+++ b/xen/include/public/xen.h  Wed Sep 14 14:43:34 2005
@@ -174,13 +174,13 @@
         unsigned long mfn;
         /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
         unsigned long linear_addr;
-    };
+    } arg1;
     union {
         /* SET_LDT */
         unsigned int nr_ents;
         /* TLB_FLUSH_MULTI, INVLPG_MULTI */
         void *vcpumask;
-    };
+    } arg2;
 };

 #endif

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
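The interface change that runs through most of this merge is the renaming of struct mmuext_op's two anonymous unions (in xen/include/public/xen.h) to arg1 and arg2, so every caller now writes op.arg1.mfn / op.arg1.linear_addr / op.arg2.nr_ents / op.arg2.vcpumask instead of the bare member names. The following stand-alone C sketch illustrates the new spelling only; the command values and the issue_mmuext_op() stub are hypothetical placeholders, not the real Xen command numbers or hypercall wrappers.

/* Minimal sketch of the renamed mmuext_op interface from this patch.
 * The struct mirrors xen/include/public/xen.h after the change; the
 * hypercall itself is stubbed out, so this only shows how callers now
 * spell the union members (op.arg1.mfn, op.arg2.nr_ents, ...). */
#include <stdio.h>

#define MMUEXT_PIN_L1_TABLE 0   /* illustrative value, not from the Xen headers */
#define MMUEXT_SET_LDT      15  /* illustrative value, not from the Xen headers */

struct mmuext_op {
    unsigned int cmd;
    union {
        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
        unsigned long mfn;
        /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
        unsigned long linear_addr;
    } arg1;
    union {
        /* SET_LDT */
        unsigned int nr_ents;
        /* TLB_FLUSH_MULTI, INVLPG_MULTI */
        void *vcpumask;
    } arg2;
};

/* Stand-in for HYPERVISOR_mmuext_op()/xc_mmuext_op(); just prints the op. */
static int issue_mmuext_op(const struct mmuext_op *op)
{
    printf("cmd=%u arg1=%#lx\n", op->cmd, op->arg1.mfn);
    return 0;
}

int main(void)
{
    struct mmuext_op op;

    /* Before this patch: op.mfn = ...; afterwards the union is named arg1. */
    op.cmd = MMUEXT_PIN_L1_TABLE;
    op.arg1.mfn = 0x1234;               /* hypothetical machine frame number */
    issue_mmuext_op(&op);

    /* Two-argument commands fill both named unions, e.g. MMUEXT_SET_LDT. */
    op.cmd = MMUEXT_SET_LDT;
    op.arg1.linear_addr = 0xdeadb000;   /* hypothetical LDT base address */
    op.arg2.nr_ents = 32;               /* hypothetical LDT entry count */
    issue_mmuext_op(&op);

    return 0;
}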