[Xen-changelog] [xen-unstable] Share VT-d code between x86 and IA64
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1207744544 -3600
# Node ID a8ce3e934abd5d9979566186c7618d4c9f8d7db3
# Parent  cb1f41538756efc93e632e8896b0c2955a82bb51
Share VT-d code between x86 and IA64

Declare arch-dependent functions in vtd.h, and implement them for x86.

Signed-off-by: Weidong Han <weidong.han@xxxxxxxxx>
---
 xen/drivers/passthrough/io.c             |   74 +--------
 xen/drivers/passthrough/iommu.c          |    2
 xen/drivers/passthrough/vtd/Makefile     |    2
 xen/drivers/passthrough/vtd/vtd.h        |    9
 xen/drivers/passthrough/vtd/x86/Makefile |    1
 xen/drivers/passthrough/vtd/x86/vtd.c    |  282 +++++++++++++++++++++++++++++
 6 files changed, 311 insertions(+), 59 deletions(-)

diff -r cb1f41538756 -r a8ce3e934abd xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c      Wed Apr 09 13:32:21 2008 +0100
+++ b/xen/drivers/passthrough/io.c      Wed Apr 09 13:35:44 2008 +0100
@@ -25,7 +25,7 @@ static void pt_irq_time_out(void *data)
 {
     struct hvm_mirq_dpci_mapping *irq_map = data;
     unsigned int guest_gsi, machine_gsi = 0;
-    struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
+    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
     struct dev_intx_gsi_link *digl;
     uint32_t device, intx;
 
@@ -49,7 +49,7 @@ int pt_irq_create_bind_vtd(
 int pt_irq_create_bind_vtd(
     struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
 {
-    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
     uint32_t machine_gsi, guest_gsi;
     uint32_t device, intx, link;
     struct dev_intx_gsi_link *digl;
@@ -65,11 +65,8 @@ int pt_irq_create_bind_vtd(
         for ( int i = 0; i < NR_IRQS; i++ )
             INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
 
-        if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
-                     0, (unsigned long)hvm_irq_dpci) != 0 )
+        if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
             xfree(hvm_irq_dpci);
-
-        hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     }
 
     machine_gsi = pt_irq_bind->machine_irq;
@@ -116,7 +113,7 @@ int pt_irq_destroy_bind_vtd(
 int pt_irq_destroy_bind_vtd(
     struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
 {
-    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
     uint32_t machine_gsi, guest_gsi;
     uint32_t device, intx, link;
     struct list_head *digl_list, *tmp;
@@ -133,14 +130,15 @@ int pt_irq_destroy_bind_vtd(
     hvm_irq_dpci->link_cnt[link]--;
 
     gdprintk(XENLOG_INFO,
-             "pt_irq_destroy_bind_vtd: machine_gsi=%d, guest_gsi=%d, device=%d, intx=%d.\n",
-             machine_gsi, guest_gsi, device, intx);
-    memset(&hvm_irq_dpci->girq[guest_gsi], 0, sizeof(struct hvm_girq_dpci_mapping));
+             "pt_irq_destroy_bind_vtd: machine_gsi=%d "
+             "guest_gsi=%d, device=%d, intx=%d.\n",
+             machine_gsi, guest_gsi, device, intx);
+    memset(&hvm_irq_dpci->girq[guest_gsi], 0,
+           sizeof(struct hvm_girq_dpci_mapping));
 
     /* clear the mirq info */
     if ( hvm_irq_dpci->mirq[machine_gsi].valid )
     {
-
         list_for_each_safe ( digl_list, tmp,
                              &hvm_irq_dpci->mirq[machine_gsi].digl_list )
         {
@@ -174,10 +172,10 @@ int pt_irq_destroy_bind_vtd(
 
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
 {
-    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
-
-    if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
-         !hvm_irq->dpci->mirq[mirq].valid )
+    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+
+    if ( !iommu_enabled || (d == dom0) || !dpci ||
+         !dpci->mirq[mirq].valid )
         return 0;
 
     /*
@@ -186,58 +184,18 @@ int hvm_do_IRQ_dpci(struct domain *d, un
      * this case the guest may not pick up the interrupt (e.g., masked at the
      * PIC) and we need to detect that.
      */
-    set_bit(mirq, hvm_irq->dpci->dirq_mask);
-    set_timer(&hvm_irq->dpci->hvm_timer[irq_to_vector(mirq)],
+    set_bit(mirq, dpci->dirq_mask);
+    set_timer(&dpci->hvm_timer[irq_to_vector(mirq)],
               NOW() + PT_IRQ_TIME_OUT);
     vcpu_kick(d->vcpu[0]);
 
     return 1;
 }
 
-static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
-{
-    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
-    struct hvm_irq_dpci *dpci = hvm_irq->dpci;
-    struct dev_intx_gsi_link *digl, *tmp;
-    int i;
-
-    ASSERT(isairq < NR_ISAIRQS);
-    if ( !iommu_enabled || !dpci ||
-         !test_bit(isairq, dpci->isairq_map) )
-        return;
-
-    /* Multiple mirq may be mapped to one isa irq */
-    for ( i = 0; i < NR_IRQS; i++ )
-    {
-        if ( !dpci->mirq[i].valid )
-            continue;
-
-        list_for_each_entry_safe ( digl, tmp,
-                                   &dpci->mirq[i].digl_list, list )
-        {
-            if ( hvm_irq->pci_link.route[digl->link] == isairq )
-            {
-                hvm_pci_intx_deassert(d, digl->device, digl->intx);
-                spin_lock(&dpci->dirq_lock);
-                if ( --dpci->mirq[i].pending == 0 )
-                {
-                    spin_unlock(&dpci->dirq_lock);
-                    gdprintk(XENLOG_INFO VTDPREFIX,
-                             "hvm_dpci_isairq_eoi:: mirq = %x\n", i);
-                    stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
-                    pirq_guest_eoi(d, i);
-                }
-                else
-                    spin_unlock(&dpci->dirq_lock);
-            }
-        }
-    }
-}
-
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
                   union vioapic_redir_entry *ent)
 {
-    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
     uint32_t device, intx, machine_gsi;
 
     if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
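The io.c hunks above replace every direct dereference of d->arch.hvm_domain.irq.dpci with the new domain_get_irq_dpci()/domain_set_irq_dpci() accessors, so the common passthrough code no longer depends on the x86 layout of struct hvm_domain. A minimal sketch of the resulting arch-neutral calling pattern (the caller below is hypothetical, not part of the patch):

    /* Hypothetical caller, illustrating the accessor pattern this patch
     * introduces; any arch providing the two accessors can compile this
     * common code unchanged. */
    static int common_mirq_is_bound(struct domain *d, unsigned int mirq)
    {
        struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d); /* arch hook */

        /* NULL until pt_irq_create_bind_vtd() installs a mapping. */
        return ( dpci != NULL && dpci->mirq[mirq].valid );
    }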
diff -r cb1f41538756 -r a8ce3e934abd xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Wed Apr 09 13:32:21 2008 +0100
+++ b/xen/drivers/passthrough/iommu.c   Wed Apr 09 13:35:44 2008 +0100
@@ -58,7 +58,7 @@ int assign_device(struct domain *d, u8 b
 
 void iommu_domain_destroy(struct domain *d)
 {
-    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
     uint32_t i;
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
     struct list_head *ioport_list, *digl_list, *tmp;
diff -r cb1f41538756 -r a8ce3e934abd xen/drivers/passthrough/vtd/Makefile
--- a/xen/drivers/passthrough/vtd/Makefile      Wed Apr 09 13:32:21 2008 +0100
+++ b/xen/drivers/passthrough/vtd/Makefile      Wed Apr 09 13:35:44 2008 +0100
@@ -1,3 +1,5 @@ obj-y += iommu.o
+subdir-$(x86) += x86
+
 obj-y += iommu.o
 obj-y += dmar.o
 obj-y += utils.o
diff -r cb1f41538756 -r a8ce3e934abd xen/drivers/passthrough/vtd/vtd.h
--- a/xen/drivers/passthrough/vtd/vtd.h Wed Apr 09 13:32:21 2008 +0100
+++ b/xen/drivers/passthrough/vtd/vtd.h Wed Apr 09 13:35:44 2008 +0100
@@ -42,4 +42,13 @@ struct IO_APIC_route_remap_entry {
     };
 };
 
+unsigned int get_clflush_size(void);
+u64 alloc_pgtable_maddr(void);
+void free_pgtable_maddr(u64 maddr);
+void *map_vtd_domain_page(u64 maddr);
+void unmap_vtd_domain_page(void *va);
+
+void iommu_flush_cache_entry(struct iommu *iommu, void *addr);
+void iommu_flush_cache_page(struct iommu *iommu, void *addr);
+
 #endif // _VTD_H_
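The vtd.h additions above are the arch contract this changeset introduces: page-table allocation, page mapping, and cache flushing are exactly the operations whose implementations differ between x86 and IA64. A minimal sketch of how common VT-d code is expected to combine these hooks (illustrative only; locking and error handling trimmed, and the helper name is made up):

    /* Sketch: allocate a VT-d page-table page and fill one entry,
     * using only the arch-neutral hooks declared in vtd.h above. */
    static u64 sketch_alloc_and_flush(struct iommu *iommu)
    {
        u64 maddr = alloc_pgtable_maddr();      /* arch: zeroed page */
        struct dma_pte *pte;

        if ( maddr == 0 )
            return 0;

        pte = map_vtd_domain_page(maddr);       /* arch: map by maddr */
        /* ... set address/permission bits in *pte ... */
        iommu_flush_cache_entry(iommu, pte);    /* keep IOMMU view coherent */
        unmap_vtd_domain_page(pte);
        return maddr;
    }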
diff -r cb1f41538756 -r a8ce3e934abd xen/drivers/passthrough/vtd/x86/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/drivers/passthrough/vtd/x86/Makefile  Wed Apr 09 13:35:44 2008 +0100
@@ -0,0 +1,1 @@
+obj-y += vtd.o
diff -r cb1f41538756 -r a8ce3e934abd xen/drivers/passthrough/vtd/x86/vtd.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c     Wed Apr 09 13:35:44 2008 +0100
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) Allen Kay <allen.m.kay@xxxxxxxxx>
+ * Copyright (C) Weidong Han <weidong.han@xxxxxxxxx>
+ */
+
+#include <xen/sched.h>
+#include <xen/domain_page.h>
+#include <xen/iommu.h>
+#include "../iommu.h"
+#include "../dmar.h"
+#include "../vtd.h"
+
+void *map_vtd_domain_page(u64 maddr)
+{
+    return map_domain_page(maddr >> PAGE_SHIFT_4K);
+}
+
+void unmap_vtd_domain_page(void *va)
+{
+    unmap_domain_page(va);
+}
+
+void iommu_set_pgd(struct domain *d)
+{
+    struct hvm_iommu *hd  = domain_hvm_iommu(d);
+    unsigned long p2m_table;
+    int level = agaw_to_level(hd->agaw);
+    l3_pgentry_t *l3e;
+    unsigned long flags;
+
+    p2m_table = mfn_x(pagetable_get_mfn(d->arch.phys_table));
+
+#if CONFIG_PAGING_LEVELS == 3
+    {
+        struct dma_pte *pte = NULL, *pgd_vaddr = NULL, *pmd_vaddr = NULL;
+        int i;
+        u64 pmd_maddr;
+
+        spin_lock_irqsave(&hd->mapping_lock, flags);
+        hd->pgd_maddr = alloc_pgtable_maddr();
+        if ( hd->pgd_maddr == 0 )
+        {
+            spin_unlock_irqrestore(&hd->mapping_lock, flags);
+            gdprintk(XENLOG_ERR VTDPREFIX,
+                     "Allocate pgd memory failed!\n");
+            return;
+        }
+
+        pgd_vaddr = map_vtd_domain_page(hd->pgd_maddr);
+        l3e = map_domain_page(p2m_table);
+        switch ( level )
+        {
+        case VTD_PAGE_TABLE_LEVEL_3:        /* Weybridge */
+            /* We only support 8 entries for the PAE L3 p2m table */
+            for ( i = 0; i < 8 ; i++ )
+            {
+                /* Don't create new L2 entry, use ones from p2m table */
+                pgd_vaddr[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
+            }
+            break;
+
+        case VTD_PAGE_TABLE_LEVEL_4:        /* Stoakley */
+            /* We allocate one more page for the top vtd page table. */
+            pmd_maddr = alloc_pgtable_maddr();
+            if ( pmd_maddr == 0 )
+            {
+                unmap_vtd_domain_page(pgd_vaddr);
+                unmap_domain_page(l3e);
+                spin_unlock_irqrestore(&hd->mapping_lock, flags);
+                gdprintk(XENLOG_ERR VTDPREFIX,
+                         "Allocate pmd memory failed!\n");
+                return;
+            }
+
+            pte = &pgd_vaddr[0];
+            dma_set_pte_addr(*pte, pmd_maddr);
+            dma_set_pte_readable(*pte);
+            dma_set_pte_writable(*pte);
+
+            pmd_vaddr = map_vtd_domain_page(pmd_maddr);
+            for ( i = 0; i < 8; i++ )
+            {
+                /* Don't create new L2 entry, use ones from p2m table */
+                pmd_vaddr[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
+            }
+
+            unmap_vtd_domain_page(pmd_vaddr);
+            break;
+        default:
+            gdprintk(XENLOG_ERR VTDPREFIX,
+                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
+            break;
+        }
+
+        unmap_vtd_domain_page(pgd_vaddr);
+        unmap_domain_page(l3e);
+        spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    }
+#elif CONFIG_PAGING_LEVELS == 4
+    {
+        mfn_t pgd_mfn;
+
+        spin_lock_irqsave(&hd->mapping_lock, flags);
+        hd->pgd_maddr = alloc_pgtable_maddr();
+        if ( hd->pgd_maddr == 0 )
+        {
+            spin_unlock_irqrestore(&hd->mapping_lock, flags);
+            gdprintk(XENLOG_ERR VTDPREFIX,
+                     "Allocate pgd memory failed!\n");
+            return;
+        }
+
+        switch ( level )
+        {
+        case VTD_PAGE_TABLE_LEVEL_3:
+            l3e = map_domain_page(p2m_table);
+            if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
+            {
+                spin_unlock_irqrestore(&hd->mapping_lock, flags);
+                gdprintk(XENLOG_ERR VTDPREFIX,
+                         "iommu_set_pgd: second level wasn't there\n");
+                unmap_domain_page(l3e);
+                return;
+            }
+
+            pgd_mfn = _mfn(l3e_get_pfn(*l3e));
+            hd->pgd_maddr = mfn_x(pgd_mfn) << PAGE_SHIFT_4K;
+            unmap_domain_page(l3e);
+            break;
+        case VTD_PAGE_TABLE_LEVEL_4:
+            pgd_mfn = _mfn(p2m_table);
+            hd->pgd_maddr = mfn_x(pgd_mfn) << PAGE_SHIFT_4K;
+            break;
+        default:
+            gdprintk(XENLOG_ERR VTDPREFIX,
+                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
+            break;
+        }
+        spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    }
+#endif
+}
+
+void iommu_free_pgd(struct domain *d)
+{
+#if CONFIG_PAGING_LEVELS == 3
+    struct hvm_iommu *hd  = domain_hvm_iommu(d);
+    int level = agaw_to_level(hd->agaw);
+    struct dma_pte *pgd_vaddr = NULL;
+
+    switch ( level )
+    {
+    case VTD_PAGE_TABLE_LEVEL_3:
+        if ( hd->pgd_maddr != 0 )
+        {
+            free_pgtable_maddr(hd->pgd_maddr);
+            hd->pgd_maddr = 0;
+        }
+        break;
+    case VTD_PAGE_TABLE_LEVEL_4:
+        if ( hd->pgd_maddr != 0 )
+        {
+            pgd_vaddr = (struct dma_pte*)map_vtd_domain_page(hd->pgd_maddr);
+            if ( pgd_vaddr[0].val != 0 )
+                free_pgtable_maddr(pgd_vaddr[0].val);
+            unmap_vtd_domain_page(pgd_vaddr);
+            free_pgtable_maddr(hd->pgd_maddr);
+            hd->pgd_maddr = 0;
+        }
+        break;
+    default:
+        gdprintk(XENLOG_ERR VTDPREFIX,
+                 "Unsupported p2m table sharing level!\n");
+        break;
+    }
+#endif
+}
+
+/* Allocate page table, return its machine address */
+u64 alloc_pgtable_maddr(void)
+{
+    struct page_info *pg;
+    u64 *vaddr;
+    struct acpi_drhd_unit *drhd;
+    struct iommu *iommu;
+
+    pg = alloc_domheap_page(NULL, 0);
+    vaddr = map_domain_page(page_to_mfn(pg));
+    if ( !vaddr )
+        return 0;
+    memset(vaddr, 0, PAGE_SIZE);
+
+    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
+    iommu = drhd->iommu;
+    iommu_flush_cache_page(iommu, vaddr);
+    unmap_domain_page(vaddr);
+
+    return page_to_maddr(pg);
+}
+
+void free_pgtable_maddr(u64 maddr)
+{
+    if ( maddr != 0 )
+        free_domheap_page(maddr_to_page(maddr));
+}
+
+unsigned int get_clflush_size(void)
+{
+    return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
+}
+
+struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
+{
+    if ( !domain )
+        return NULL;
+
+    return domain->arch.hvm_domain.irq.dpci;
+}
+
+int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
+{
+    if ( !domain || !dpci )
+        return 0;
+
+    domain->arch.hvm_domain.irq.dpci = dpci;
+    return 1;
+}
+
+void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+    struct dev_intx_gsi_link *digl, *tmp;
+    int i;
+
+    ASSERT(isairq < NR_ISAIRQS);
+    if ( !vtd_enabled || !dpci ||
+         !test_bit(isairq, dpci->isairq_map) )
+        return;
+
+    /* Multiple mirq may be mapped to one isa irq */
+    for ( i = 0; i < NR_IRQS; i++ )
+    {
+        if ( !dpci->mirq[i].valid )
+            continue;
+
+        list_for_each_entry_safe ( digl, tmp,
+                                   &dpci->mirq[i].digl_list, list )
+        {
+            if ( hvm_irq->pci_link.route[digl->link] == isairq )
+            {
+                hvm_pci_intx_deassert(d, digl->device, digl->intx);
+                spin_lock(&dpci->dirq_lock);
+                if ( --dpci->mirq[i].pending == 0 )
+                {
+                    spin_unlock(&dpci->dirq_lock);
+                    gdprintk(XENLOG_INFO VTDPREFIX,
+                             "hvm_dpci_isairq_eoi:: mirq = %x\n", i);
+                    stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
+                    pirq_guest_eoi(d, i);
+                }
+                else
+                    spin_unlock(&dpci->dirq_lock);
+            }
+        }
+    }
+}
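Two notes on the x86 implementations above. get_clflush_size() decodes CPUID leaf 1: EBX bits 15:8 report the CLFLUSH line size in units of 8 bytes, hence the multiply by 8. And the IA64 side this changeset prepares for must fill in the same vtd.h contract separately; purely as a hypothetical sketch (the IA64 code is not part of this patch), the mapping hooks could collapse to the hypervisor's direct map:

    /* Hypothetical IA64 fill-in for two of the vtd.h hooks -- an
     * assumption for illustration, not code from this changeset. */
    void *map_vtd_domain_page(u64 maddr)
    {
        return maddr_to_virt(maddr);   /* direct-mapped hypervisor VA */
    }

    void unmap_vtd_domain_page(void *va)
    {
        /* Nothing to undo when using the direct map. */
    }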
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog