[Xen-changelog] [xen-unstable] x86: split struct domain
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxxxx>
# Date 1302004977 -3600
# Node ID 37c4f7d492a419b8dd819f7d0e0902128e85bba8
# Parent  2f7f24fe5924cdd20d6bdbb02e2b62ffa63a7aee
x86: split struct domain

This is accomplished by converting a couple of embedded arrays (in one
case a structure containing an array) into separately allocated
pointers, and (just as for struct arch_vcpu in a prior patch)
overlaying some PV-only fields with HVM-only ones.

One particularly noteworthy change in the opposite direction is that of
PITState: this field so far lived in the HVM-only portion, but it is
used by PV guests too, and hence needed to be moved out of
struct hvm_domain.

The change to XENMEM_set_memory_map (and hence libxl__build_pre() and
the movement of the E820-related pieces to struct pv_domain) is subject
to a positive response to a query sent to xen-devel regarding the need
for this to happen for HVM guests (see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg01848.html).

The protection of arch.hvm_domain.irq.dpci accesses by is_hvm_domain()
is subject to confirmation that the field is used for HVM guests only
(see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg02004.html).

In the absence of any reply to these queries, and given the early state
of 4.2 development, I think it should be acceptable to take the risk of
having to later undo/redo some of this.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
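To make the overlay technique concrete, here is a minimal sketch of the
resulting layout. The field subsets and the *_sketch names are
illustrative only, not the exact Xen definitions; the key point is that
the PV-only and HVM-only state share storage through an anonymous union
(a C11/GNU feature), so either member may only be touched after
checking the domain type.

    /* Minimal sketch, assuming illustrative field subsets. */
    struct pv_domain_sketch {
        unsigned long *pirq_eoi_map;  /* PV-only: PIRQ EOI notification page */
        unsigned int nr_e820;         /* PV-only: pseudophysical e820 size */
    };

    struct hvm_domain_sketch {
        char *pbuf;                   /* HVM-only: hvm_print_line() buffer */
        unsigned long long *params;   /* HVM-only: parameter array */
    };

    struct arch_domain_sketch {
        int is_hvm;                   /* stands in for is_hvm_domain(d) */
        union {                       /* a domain is only ever PV or HVM,
                                         so the two halves can overlay */
            struct pv_domain_sketch pv_domain;
            struct hvm_domain_sketch hvm_domain;
        };
    };

An access such as d->arch.pv_domain.pirq_eoi_map is consequently valid
only when !is_hvm_domain(d), which is why several hunks below add
exactly that check before dereferencing union members.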
diff -r 2f7f24fe5924 -r 37c4f7d492a4 tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c Tue Apr 05 13:02:00 2011 +0100
+++ b/tools/libxl/libxl_dom.c Tue Apr 05 13:02:57 2011 +0100
@@ -72,9 +72,9 @@
     libxl_ctx *ctx = libxl__gc_owner(gc);
     xc_domain_max_vcpus(ctx->xch, domid, info->max_vcpus);
     xc_domain_setmaxmem(ctx->xch, domid, info->target_memkb + LIBXL_MAXMEM_CONSTANT);
-    xc_domain_set_memmap_limit(ctx->xch, domid,
-            (info->hvm) ? info->max_memkb :
-            (info->max_memkb + info->u.pv.slack_memkb));
+    if (!info->hvm)
+        xc_domain_set_memmap_limit(ctx->xch, domid,
+                (info->max_memkb + info->u.pv.slack_memkb));
     xc_domain_set_tsc_info(ctx->xch, domid, info->tsc_mode, 0, 0, 0);
     if ( info->disable_migrate )
         xc_domain_disable_migrate(ctx->xch, domid);
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/domain.c Tue Apr 05 13:02:57 2011 +0100
@@ -187,16 +187,17 @@
 #ifdef __x86_64__
     bits += pfn_pdx_hole_shift;
 #endif
-    d = alloc_xenheap_pages(get_order_from_bytes(sizeof(*d)), MEMF_bits(bits));
+    BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
+    d = alloc_xenheap_pages(0, MEMF_bits(bits));
     if ( d != NULL )
-        memset(d, 0, sizeof(*d));
+        clear_page(d);
     return d;
 }

 void free_domain_struct(struct domain *d)
 {
     lock_profile_deregister_struct(LOCKPROF_TYPE_PERDOM, d);
-    free_xenheap_pages(d, get_order_from_bytes(sizeof(*d)));
+    free_xenheap_page(d);
 }

 struct vcpu *alloc_vcpu_struct(void)
@@ -531,6 +532,17 @@

     if ( !is_idle_domain(d) )
     {
+        d->arch.cpuids = xmalloc_array(cpuid_input_t, MAX_CPUID_INPUT);
+        rc = -ENOMEM;
+        if ( d->arch.cpuids == NULL )
+            goto fail;
+        memset(d->arch.cpuids, 0, MAX_CPUID_INPUT * sizeof(*d->arch.cpuids));
+        for ( i = 0; i < MAX_CPUID_INPUT; i++ )
+        {
+            d->arch.cpuids[i].input[0] = XEN_CPUID_INPUT_UNUSED;
+            d->arch.cpuids[i].input[1] = XEN_CPUID_INPUT_UNUSED;
+        }
+
         d->arch.ioport_caps =
             rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
         rc = -ENOMEM;
@@ -599,13 +611,6 @@
                (CONFIG_PAGING_LEVELS != 4);
     }

-    memset(d->arch.cpuids, 0, sizeof(d->arch.cpuids));
-    for ( i = 0; i < MAX_CPUID_INPUT; i++ )
-    {
-        d->arch.cpuids[i].input[0] = XEN_CPUID_INPUT_UNUSED;
-        d->arch.cpuids[i].input[1] = XEN_CPUID_INPUT_UNUSED;
-    }
-
     /* initialize default tsc behavior in case tools don't */
     tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
     spin_lock_init(&d->arch.vtsc_lock);
@@ -2067,11 +2072,12 @@
             unmap_vcpu_info(v);
         }

-        if ( d->arch.pirq_eoi_map != NULL )
+        if ( d->arch.pv_domain.pirq_eoi_map != NULL )
         {
-            unmap_domain_page_global(d->arch.pirq_eoi_map);
-            put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
-            d->arch.pirq_eoi_map = NULL;
+            unmap_domain_page_global(d->arch.pv_domain.pirq_eoi_map);
+            put_page_and_type(
+                mfn_to_page(d->arch.pv_domain.pirq_eoi_map_mfn));
+            d->arch.pv_domain.pirq_eoi_map = NULL;
         }
     }
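The alloc_domain_struct() hunk above is the immediate payoff of the
split: with the big embedded arrays gone, struct domain is asserted at
build time to fit in a single page, so an order-0 allocation and
clear_page() suffice. Below is a standalone sketch of the same pattern,
assuming C11 userland (_Static_assert and aligned_alloc) in place of
Xen's BUILD_BUG_ON and the xenheap allocator.

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    struct domain_sketch { char state[2048]; };  /* must stay under a page */

    struct domain_sketch *alloc_domain_sketch(void)
    {
        /* The build fails if the structure ever outgrows one page,
         * rather than silently needing a multi-page allocation. */
        _Static_assert(sizeof(struct domain_sketch) <= PAGE_SIZE,
                       "struct domain_sketch must fit in one page");

        struct domain_sketch *d = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        if (d != NULL)
            memset(d, 0, PAGE_SIZE);  /* analogous to clear_page(d) */
        return d;
    }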
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/domctl.c Tue Apr 05 13:02:57 2011 +0100
@@ -900,6 +900,10 @@
             break;
         bind = &(domctl->u.bind_pt_irq);

+        ret = -EINVAL;
+        if ( !is_hvm_domain(d) )
+            goto bind_out;
+
         ret = xsm_bind_pt_irq(d, bind);
         if ( ret )
             goto bind_out;
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/hvm/hpet.c
--- a/xen/arch/x86/hvm/hpet.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/hvm/hpet.c Tue Apr 05 13:02:57 2011 +0100
@@ -237,8 +237,7 @@
     {
         /* HPET specification requires PIT shouldn't generate
          * interrupts if LegacyReplacementRoute is set for timer0 */
-        PITState *pit = &vhpet_domain(h)->arch.hvm_domain.pl_time.vpit;
-        pit_stop_channel0_irq(pit);
+        pit_stop_channel0_irq(&vhpet_domain(h)->arch.vpit);
     }

     if ( !timer_enabled(h, tn) )
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c Tue Apr 05 13:02:57 2011 +0100
@@ -412,7 +412,7 @@
     spin_lock(&hd->pbuf_lock);
     hd->pbuf[hd->pbuf_idx++] = c;
-    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
+    if ( (hd->pbuf_idx == (HVM_PBUF_SIZE - 2)) || (c == '\n') )
     {
         if ( c != '\n' )
             hd->pbuf[hd->pbuf_idx++] = '\n';
@@ -443,6 +443,19 @@
     INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
     spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);

+    d->arch.hvm_domain.pbuf = xmalloc_array(char, HVM_PBUF_SIZE);
+    d->arch.hvm_domain.params = xmalloc_array(uint64_t, HVM_NR_PARAMS);
+    d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
+    rc = -ENOMEM;
+    if ( !d->arch.hvm_domain.pbuf || !d->arch.hvm_domain.params ||
+         !d->arch.hvm_domain.io_handler )
+        goto fail0;
+    memset(d->arch.hvm_domain.pbuf, 0,
+           HVM_PBUF_SIZE * sizeof(*d->arch.hvm_domain.pbuf));
+    memset(d->arch.hvm_domain.params, 0,
+           HVM_NR_PARAMS * sizeof(*d->arch.hvm_domain.params));
+    d->arch.hvm_domain.io_handler->num_slot = 0;
+
     hvm_init_guest_time(d);

     d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
@@ -480,6 +493,10 @@
     vioapic_deinit(d);
  fail1:
     hvm_destroy_cacheattr_region_list(d);
+ fail0:
+    xfree(d->arch.hvm_domain.io_handler);
+    xfree(d->arch.hvm_domain.params);
+    xfree(d->arch.hvm_domain.pbuf);
     return rc;
 }

@@ -500,6 +517,10 @@
         pmtimer_deinit(d);
         hpet_deinit(d);
     }
+
+    xfree(d->arch.hvm_domain.io_handler);
+    xfree(d->arch.hvm_domain.params);
+    xfree(d->arch.hvm_domain.pbuf);
 }

 void hvm_domain_destroy(struct domain *d)
@@ -2533,10 +2554,20 @@

 static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE(void) arg)
 {
-    long rc = do_memory_op(cmd, arg);
-    if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
+    long rc;
+
+    switch ( cmd & MEMOP_CMD_MASK )
+    {
+    case XENMEM_memory_map:
+    case XENMEM_machine_memory_map:
+    case XENMEM_machphys_mapping:
+        return -ENOSYS;
+    case XENMEM_decrease_reservation:
+        rc = do_memory_op(cmd, arg);
         current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1;
-    return rc;
+        return rc;
+    }
+    return do_memory_op(cmd, arg);
 }

 static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
@@ -2613,10 +2644,20 @@

 static long hvm_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
 {
-    long rc = compat_memory_op(cmd, arg);
-    if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
+    int rc;
+
+    switch ( cmd & MEMOP_CMD_MASK )
+    {
+    case XENMEM_memory_map:
+    case XENMEM_machine_memory_map:
+    case XENMEM_machphys_mapping:
+        return -ENOSYS;
+    case XENMEM_decrease_reservation:
+        rc = compat_memory_op(cmd, arg);
         current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1;
-    return rc;
+        return rc;
+    }
+    return compat_memory_op(cmd, arg);
 }

 static long hvm_vcpu_op_compat32(
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/hvm/i8254.c Tue Apr 05 13:02:57 2011 +0100
@@ -38,10 +38,9 @@
 #include <asm/hvm/vpt.h>
 #include <asm/current.h>

-#define domain_vpit(x)   (&(x)->arch.hvm_domain.pl_time.vpit)
+#define domain_vpit(x)   (&(x)->arch.vpit)
 #define vcpu_vpit(x)     (domain_vpit((x)->domain))
-#define vpit_domain(x)   (container_of((x), struct domain, \
-                                       arch.hvm_domain.pl_time.vpit))
+#define vpit_domain(x)   (container_of((x), struct domain, arch.vpit))
 #define vpit_vcpu(x)     (pt_global_vcpu_target(vpit_domain(x)))

 #define RW_STATE_LSB 1
@@ -450,14 +449,18 @@

 void pit_init(struct vcpu *v, unsigned long cpu_khz)
 {
-    PITState *pit = vcpu_vpit(v);
+    struct domain *d = v->domain;
+    PITState *pit = domain_vpit(d);

     spin_lock_init(&pit->lock);

-    register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
-    register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
+    if ( is_hvm_domain(d) )
+    {
+        register_portio_handler(d, PIT_BASE, 4, handle_pit_io);
+        register_portio_handler(d, 0x61, 1, handle_speaker_io);
+    }

-    pit_reset(v->domain);
+    pit_reset(d);
 }

 void pit_deinit(struct domain *d)
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/hvm/intercept.c Tue Apr 05 13:02:57 2011 +0100
@@ -195,8 +195,7 @@
 int hvm_io_intercept(ioreq_t *p, int type)
 {
     struct vcpu *v = current;
-    struct hvm_io_handler *handler =
-        &v->domain->arch.hvm_domain.io_handler;
+    struct hvm_io_handler *handler = v->domain->arch.hvm_domain.io_handler;
     int i;
     unsigned long addr, size;

@@ -230,7 +229,7 @@
     struct domain *d, unsigned long addr, unsigned long size,
     void *action, int type)
 {
-    struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
+    struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
     int num = handler->num_slot;

     BUG_ON(num >= MAX_IO_HANDLER);
@@ -246,7 +245,7 @@
     struct domain *d, unsigned long old_addr, unsigned long new_addr,
     unsigned long size, int type)
 {
-    struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
+    struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
     int i;

     for ( i = 0; i < handler->num_slot; i++ )
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/hvm/vioapic.c Tue Apr 05 13:02:57 2011 +0100
@@ -272,8 +272,7 @@

 static inline int pit_channel0_enabled(void)
 {
-    PITState *pit = &current->domain->arch.hvm_domain.pl_time.vpit;
-    return pt_active(&pit->pt0);
+    return pt_active(&current->domain->arch.vpit.pt0);
 }

 static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/hvm/vpt.c Tue Apr 05 13:02:57 2011 +0100
@@ -463,18 +463,21 @@

 void pt_adjust_global_vcpu_target(struct vcpu *v)
 {
+    struct PITState *vpit;
     struct pl_time *pl_time;
     int i;

     if ( v == NULL )
         return;

+    vpit = &v->domain->arch.vpit;
+
+    spin_lock(&vpit->lock);
+    pt_adjust_vcpu(&vpit->pt0, v);
+    spin_unlock(&vpit->lock);
+
     pl_time = &v->domain->arch.hvm_domain.pl_time;

-    spin_lock(&pl_time->vpit.lock);
-    pt_adjust_vcpu(&pl_time->vpit.pt0, v);
-    spin_unlock(&pl_time->vpit.lock);
-
     spin_lock(&pl_time->vrtc.lock);
     pt_adjust_vcpu(&pl_time->vrtc.pt, v);
     spin_unlock(&pl_time->vrtc.lock);
@@ -507,7 +510,7 @@

     if ( d )
     {
-        pt_resume(&d->arch.hvm_domain.pl_time.vpit.pt0);
+        pt_resume(&d->arch.vpit.pt0);
         pt_resume(&d->arch.hvm_domain.pl_time.vrtc.pt);
         for ( i = 0; i < HPET_TIMER_NUM; i++ )
             pt_resume(&d->arch.hvm_domain.pl_time.vhpet.pt[i]);
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/irq.c Tue Apr 05 13:02:57 2011 +0100
@@ -764,14 +764,14 @@

 static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
 {
-    if ( d->arch.pirq_eoi_map )
-        set_bit(irq, d->arch.pirq_eoi_map);
+    if ( !is_hvm_domain(d) && d->arch.pv_domain.pirq_eoi_map )
+        set_bit(irq, d->arch.pv_domain.pirq_eoi_map);
 }

 static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
 {
-    if ( d->arch.pirq_eoi_map )
-        clear_bit(irq, d->arch.pirq_eoi_map);
+    if ( !is_hvm_domain(d) && d->arch.pv_domain.pirq_eoi_map )
+        clear_bit(irq, d->arch.pv_domain.pirq_eoi_map);
 }

 static void _irq_guest_eoi(struct irq_desc *desc)
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/mm.c Tue Apr 05 13:02:57 2011 +0100
@@ -4710,7 +4710,7 @@
         if ( copy_from_guest(&fmap, arg, 1) )
             return -EFAULT;

-        if ( fmap.map.nr_entries > ARRAY_SIZE(d->arch.e820) )
+        if ( fmap.map.nr_entries > ARRAY_SIZE(d->arch.pv_domain.e820) )
            return -EINVAL;

        rc = rcu_lock_target_domain_by_id(fmap.domid, &d);
@@ -4724,9 +4724,15 @@
            return rc;
        }

-        rc = copy_from_guest(d->arch.e820, fmap.map.buffer,
+        if ( is_hvm_domain(d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
+
+        rc = copy_from_guest(d->arch.pv_domain.e820, fmap.map.buffer,
                              fmap.map.nr_entries) ? -EFAULT : 0;
-        d->arch.nr_e820 = fmap.map.nr_entries;
+        d->arch.pv_domain.nr_e820 = fmap.map.nr_entries;

        rcu_unlock_domain(d);
        return rc;
@@ -4738,14 +4744,15 @@
        struct domain *d = current->domain;

        /* Backwards compatibility. */
-        if ( d->arch.nr_e820 == 0 )
+        if ( d->arch.pv_domain.nr_e820 == 0 )
            return -ENOSYS;

        if ( copy_from_guest(&map, arg, 1) )
            return -EFAULT;

-        map.nr_entries = min(map.nr_entries, d->arch.nr_e820);
-        if ( copy_to_guest(map.buffer, d->arch.e820, map.nr_entries) ||
+        map.nr_entries = min(map.nr_entries, d->arch.pv_domain.nr_e820);
+        if ( copy_to_guest(map.buffer, d->arch.pv_domain.e820,
+                           map.nr_entries) ||
             copy_to_guest(arg, &map, 1) )
            return -EFAULT;
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/arch/x86/physdev.c Tue Apr 05 13:02:57 2011 +0100
@@ -264,7 +264,8 @@
         ret = -EINVAL;
         if ( eoi.irq >= v->domain->nr_pirqs )
             break;
-        if ( v->domain->arch.pirq_eoi_map )
+        if ( !is_hvm_domain(v->domain) &&
+             v->domain->arch.pv_domain.pirq_eoi_map )
             evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
         if ( !is_hvm_domain(v->domain) ||
              domain_pirq_to_emuirq(v->domain, eoi.irq) == IRQ_PT )
@@ -289,17 +290,18 @@
                                         PGT_writable_page) )
             break;

-        if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn, 0, mfn) != 0 )
+        if ( cmpxchg(&v->domain->arch.pv_domain.pirq_eoi_map_mfn,
+                     0, mfn) != 0 )
         {
             put_page_and_type(mfn_to_page(mfn));
             ret = -EBUSY;
             break;
         }

-        v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
-        if ( v->domain->arch.pirq_eoi_map == NULL )
+        v->domain->arch.pv_domain.pirq_eoi_map = map_domain_page_global(mfn);
+        if ( v->domain->arch.pv_domain.pirq_eoi_map == NULL )
         {
-            v->domain->arch.pirq_eoi_map_mfn = 0;
+            v->domain->arch.pv_domain.pirq_eoi_map_mfn = 0;
             put_page_and_type(mfn_to_page(mfn));
             ret = -ENOSPC;
             break;
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/drivers/passthrough/io.c Tue Apr 05 13:02:57 2011 +0100
@@ -85,6 +85,14 @@
     }
 }

+struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *d)
+{
+    if ( !d || !is_hvm_domain(d) )
+        return NULL;
+
+    return d->arch.hvm_domain.irq.dpci;
+}
+
 void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
 {
     xfree(dpci->mirq);
@@ -150,12 +158,7 @@
         for ( int i = 0; i < NR_HVM_IRQS; i++ )
             INIT_LIST_HEAD(&hvm_irq_dpci->girq[i]);

-        if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
-        {
-            spin_unlock(&d->event_lock);
-            free_hvm_irq_dpci(hvm_irq_dpci);
-            return -EINVAL;
-        }
+        d->arch.hvm_domain.irq.dpci = hvm_irq_dpci;
     }

     if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
@@ -392,8 +395,7 @@
     struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);

     ASSERT(spin_is_locked(&irq_desc[domain_pirq_to_irq(d, mirq)].lock));
-    if ( !iommu_enabled || (d == dom0) || !dpci ||
-         !test_bit(mirq, dpci->mapping))
+    if ( !iommu_enabled || !dpci || !test_bit(mirq, dpci->mapping))
         return 0;

     set_bit(mirq, dpci->dirq_mask);
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/drivers/passthrough/vtd/ia64/vtd.c
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c Tue Apr 05 13:02:57 2011 +0100
@@ -70,23 +70,6 @@
     return (void *) ( maddr + __IA64_UNCACHED_OFFSET);
 }

-struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
-{
-    if ( !domain )
-        return NULL;
-
-    return domain->arch.hvm_domain.irq.dpci;
-}
-
-int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
-{
-    if ( !domain || !dpci )
-        return 0;
-
-    domain->arch.hvm_domain.irq.dpci = dpci;
-    return 1;
-}
-
 void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
 {
     /* dummy */
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c Tue Apr 05 13:02:57 2011 +0100
@@ -68,23 +68,6 @@
     return (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
 }

-struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
-{
-    if ( !domain )
-        return NULL;
-
-    return domain->arch.hvm_domain.irq.dpci;
-}
-
-int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
-{
-    if ( !domain || !dpci )
-        return 0;
-
-    domain->arch.hvm_domain.irq.dpci = dpci;
-    return 1;
-}
-
 void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/include/asm-x86/domain.h Tue Apr 05 13:02:57 2011 +0100
@@ -231,6 +231,17 @@
     u32 mul_frac;
 };

+struct pv_domain
+{
+    /* Shared page for notifying that explicit PIRQ EOI is required. */
+    unsigned long *pirq_eoi_map;
+    unsigned long pirq_eoi_map_mfn;
+
+    /* Pseudophysical e820 map (XENMEM_memory_map). */
+    struct e820entry e820[3];
+    unsigned int nr_e820;
+};
+
 struct arch_domain
 {
 #ifdef CONFIG_X86_64
@@ -253,7 +264,11 @@
     uint32_t pci_cf8;

     struct list_head pdev_list;
-    struct hvm_domain hvm_domain;
+
+    union {
+        struct pv_domain pv_domain;
+        struct hvm_domain hvm_domain;
+    };

     struct paging_domain paging;
     struct p2m_domain *p2m;
@@ -265,14 +280,6 @@
     int *emuirq_pirq;
     int *pirq_emuirq;

-    /* Shared page for notifying that explicit PIRQ EOI is required. */
-    unsigned long *pirq_eoi_map;
-    unsigned long pirq_eoi_map_mfn;
-
-    /* Pseudophysical e820 map (XENMEM_memory_map). */
-    struct e820entry e820[3];
-    unsigned int nr_e820;
-
     /* Maximum physical-address bitwidth supported by this guest. */
     unsigned int physaddr_bitsize;

@@ -294,7 +301,9 @@
     } relmem;
     struct page_list_head relmem_list;

-    cpuid_input_t cpuids[MAX_CPUID_INPUT];
+    cpuid_input_t *cpuids;
+
+    struct PITState vpit;

     /* For Guest vMCA handling */
     struct domain_mca_msrs *vmca_msrs;
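Since pv_domain and hvm_domain now share storage in the union above,
any code path reachable for both guest types has to discriminate before
dereferencing either member; the patch does so with open-coded
is_hvm_domain() checks in irq.c, physdev.c and mm.c. A hypothetical
accessor in the style of domain_get_irq_dpci() (not part of this
changeset) could centralize that check:

    /* Hypothetical helper, not in this patch: return the PV-only e820
     * map, or NULL when the union holds HVM state instead. */
    static inline const struct e820entry *domain_pv_e820(
        const struct domain *d)
    {
        if ( d == NULL || is_hvm_domain(d) )
            return NULL;
        return d->arch.pv_domain.e820;
    }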
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/include/asm-x86/hvm/domain.h Tue Apr 05 13:02:57 2011 +0100
@@ -47,7 +47,7 @@

     struct pl_time         pl_time;

-    struct hvm_io_handler  io_handler;
+    struct hvm_io_handler *io_handler;

     /* Lock protects access to irq, vpic and vioapic. */
     spinlock_t             irq_lock;
@@ -60,11 +60,12 @@
     struct vcpu           *i8259_target;

     /* hvm_print_line() logging. */
-    char                   pbuf[80];
+#define HVM_PBUF_SIZE 80
+    char                  *pbuf;
     int                    pbuf_idx;
     spinlock_t             pbuf_lock;

-    uint64_t               params[HVM_NR_PARAMS];
+    uint64_t              *params;

     /* Memory ranges with pinned cache attributes. */
     struct list_head       pinned_cacheattr_ranges;
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/include/asm-x86/hvm/vpt.h
--- a/xen/include/asm-x86/hvm/vpt.h Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/include/asm-x86/hvm/vpt.h Tue Apr 05 13:02:57 2011 +0100
@@ -124,7 +124,6 @@
 } PMTState;

 struct pl_time {    /* platform time */
-    struct PITState  vpit;
     struct RTCState  vrtc;
     struct HPETState vhpet;
     struct PMTState  vpmt;
@@ -143,7 +142,9 @@
 void pt_adjust_global_vcpu_target(struct vcpu *v);
 #define pt_global_vcpu_target(d) \
-    ((d)->arch.hvm_domain.i8259_target ? : (d)->vcpu ? (d)->vcpu[0] : NULL)
+    (is_hvm_domain(d) && (d)->arch.hvm_domain.i8259_target ? \
+     (d)->arch.hvm_domain.i8259_target : \
+     (d)->vcpu ? (d)->vcpu[0] : NULL)

 void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt);
diff -r 2f7f24fe5924 -r 37c4f7d492a4 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h Tue Apr 05 13:02:00 2011 +0100
+++ b/xen/include/xen/iommu.h Tue Apr 05 13:02:57 2011 +0100
@@ -106,8 +106,7 @@
 struct ir_ctrl *iommu_ir_ctrl(struct iommu *iommu);
 struct iommu_flush *iommu_get_flush(struct iommu *iommu);
 void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq);
-struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain);
-int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci);
+struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
 void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci);
 bool_t pt_irq_need_timer(uint32_t flags);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog