[Xen-changelog] [xen-unstable] passthrough: use domain pirq as index of struct hvm_irq_dpci's hvm_timer array
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxxxx>
# Date 1302005105 -3600
# Node ID fbfee2a01a917d1a02677c48e2935dafbadaa00a
# Parent  4fe0442aa5b7434ed10e63c027bbe9e9f6642dae
passthrough: use domain pirq as index of struct hvm_irq_dpci's hvm_timer array

Since d->nr_pirqs is guaranteed to be no larger than nr_irqs, indexing
arrays by the former ought to be preferred.  In the case at hand, the
indices used so far had to be computed specially in a number of places,
whereas the indices used now are all readily available.

This opens the possibility to fold the ->mirq[] and ->hvm_timer[]
members of struct hvm_irq_dpci into a single array, possibly with some
members overlaid in a union to reduce size (see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg02006.html).
Such a space saving would not, however, suffice to generally bring the
respective allocation sizes below PAGE_SIZE, not even when converting
the array of structures into an array of pointers to structures.
Whether a multi-level lookup mechanism would make sense here is
questionable, as it can be expected that for domains other than Dom0
(which isn't HVM, and hence shouldn't use these data structures - see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg02004.html)
only very few entries would commonly be used.  An obvious alternative
would be to use rb or radix trees (both currently only used in tmem).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---

diff -r 4fe0442aa5b7 -r fbfee2a01a91 xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c	Tue Apr 05 13:03:29 2011 +0100
+++ b/xen/drivers/passthrough/io.c	Tue Apr 05 13:05:05 2011 +0100
@@ -135,7 +135,7 @@
                                                 BITS_TO_LONGS(d->nr_pirqs));
         hvm_irq_dpci->mapping = xmalloc_array(unsigned long,
                                               BITS_TO_LONGS(d->nr_pirqs));
-        hvm_irq_dpci->hvm_timer = xmalloc_array(struct timer, nr_irqs);
+        hvm_irq_dpci->hvm_timer = xmalloc_array(struct timer, d->nr_pirqs);
         if ( !hvm_irq_dpci->mirq ||
              !hvm_irq_dpci->dirq_mask ||
              !hvm_irq_dpci->mapping ||
@@ -150,7 +150,7 @@
         bitmap_zero(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
         bitmap_zero(hvm_irq_dpci->mapping, d->nr_pirqs);
         memset(hvm_irq_dpci->hvm_timer, 0,
-               nr_irqs * sizeof(*hvm_irq_dpci->hvm_timer));
+               d->nr_pirqs * sizeof(*hvm_irq_dpci->hvm_timer));
         for ( int i = 0; i < d->nr_pirqs; i++ ) {
             INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
             hvm_irq_dpci->mirq[i].gmsi.dest_vcpu_id = -1;
@@ -258,7 +258,6 @@
         /* Bind the same mirq once in the same domain */
         if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
         {
-            unsigned int irq = domain_pirq_to_irq(d, machine_gsi);
             unsigned int share;
 
             hvm_irq_dpci->mirq[machine_gsi].dom = d;
@@ -278,14 +277,14 @@
 
             /* Init timer before binding */
             if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
-                init_timer(&hvm_irq_dpci->hvm_timer[irq],
+                init_timer(&hvm_irq_dpci->hvm_timer[machine_gsi],
                            pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
             /* Deal with gsi for legacy devices */
             rc = pirq_guest_bind(d->vcpu[0], machine_gsi, share);
             if ( unlikely(rc) )
             {
                 if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
-                    kill_timer(&hvm_irq_dpci->hvm_timer[irq]);
+                    kill_timer(&hvm_irq_dpci->hvm_timer[machine_gsi]);
                 hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
                 clear_bit(machine_gsi, hvm_irq_dpci->mapping);
                 list_del(&girq->list);
@@ -374,7 +373,7 @@
             pirq_guest_unbind(d, machine_gsi);
             msixtbl_pt_unregister(d, machine_gsi);
             if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
-                kill_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, machine_gsi)]);
+                kill_timer(&hvm_irq_dpci->hvm_timer[machine_gsi]);
             hvm_irq_dpci->mirq[machine_gsi].dom   = NULL;
             hvm_irq_dpci->mirq[machine_gsi].flags = 0;
             clear_bit(machine_gsi, hvm_irq_dpci->mapping);
@@ -516,7 +515,7 @@
          * will never be deasserted.
          */
         if ( pt_irq_need_timer(hvm_irq_dpci->mirq[pirq].flags) )
-            set_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, pirq)],
+            set_timer(&hvm_irq_dpci->hvm_timer[pirq],
                       NOW() + PT_IRQ_TIME_OUT);
         spin_unlock(&d->event_lock);
     }
@@ -544,7 +543,7 @@
          ! pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
         return;
 
-    stop_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, machine_gsi)]);
+    stop_timer(&hvm_irq_dpci->hvm_timer[machine_gsi]);
     pirq_guest_eoi(d, machine_gsi);
 }
 
diff -r 4fe0442aa5b7 -r fbfee2a01a91 xen/drivers/passthrough/pci.c
--- a/xen/drivers/passthrough/pci.c	Tue Apr 05 13:03:29 2011 +0100
+++ b/xen/drivers/passthrough/pci.c	Tue Apr 05 13:05:05 2011 +0100
@@ -262,7 +262,7 @@
             pirq_guest_unbind(d, i);
 
             if ( pt_irq_need_timer(hvm_irq_dpci->mirq[i].flags) )
-                kill_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, i)]);
+                kill_timer(&hvm_irq_dpci->hvm_timer[i]);
 
             list_for_each_safe ( digl_list, tmp,
                                  &hvm_irq_dpci->mirq[i].digl_list )
diff -r 4fe0442aa5b7 -r fbfee2a01a91 xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c	Tue Apr 05 13:03:29 2011 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Tue Apr 05 13:05:05 2011 +0100
@@ -101,7 +101,7 @@
             hvm_pci_intx_deassert(d, digl->device, digl->intx);
             if ( --dpci->mirq[i].pending == 0 )
             {
-                stop_timer(&dpci->hvm_timer[domain_pirq_to_irq(d, i)]);
+                stop_timer(&dpci->hvm_timer[i]);
                 pirq_guest_eoi(d, i);
             }
         }
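
[Editorial illustration, not part of this changeset: the fold of ->mirq[] and
->hvm_timer[] hinted at in the description could take roughly the shape
sketched below, with both pieces of per-pirq state living in one array sized
by d->nr_pirqs.  The structure name hvm_pirq_dpci, the member layout, and the
assumption that the timer and the MSI bookkeeping are never needed for the
same pirq (and so could share a union) are hypothetical here.]

    /*
     * Hypothetical sketch only: one per-pirq element combining what is
     * currently hvm_irq_dpci->mirq[pirq] and hvm_irq_dpci->hvm_timer[pirq],
     * both indexed by the domain pirq after this changeset.
     */
    struct hvm_pirq_dpci {
        uint32_t flags;
        int pending;
        struct list_head digl_list;    /* guest IRQ lines bound to this pirq */
        struct domain *dom;
        union {
            struct hvm_gmsi_info gmsi; /* MSI-delivered pirqs */
            struct timer timer;        /* pirqs for which pt_irq_need_timer() is true */
        } u;                           /* the overlay is an assumption, not verified */
    };

    /* The allocation in pt_irq_create_bind_vtd() would then become, e.g.:
     *     hvm_irq_dpci->pirq_dpci =
     *         xmalloc_array(struct hvm_pirq_dpci, d->nr_pirqs);
     * which, as noted above, would still not generally fit below PAGE_SIZE.
     */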