diff -r 88818d55e95a xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c	Tue Feb 12 11:37:45 2008 +0000
+++ b/xen/arch/x86/domctl.c	Wed Feb 13 14:48:25 2008 +0100
@@ -530,7 +530,7 @@ long arch_do_domctl(
         u8 bus, devfn;
 
         ret = -EINVAL;
-        if ( !vtd_enabled )
+        if ( !iommu_enabled )
             break;
 
         bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
@@ -553,7 +553,7 @@ long arch_do_domctl(
         u8 bus, devfn;
 
         ret = -EINVAL;
-        if ( !vtd_enabled )
+        if ( !iommu_enabled )
             break;
 
         if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
@@ -589,7 +589,7 @@ long arch_do_domctl(
         if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
             break;
         bind = &(domctl->u.bind_pt_irq);
-        if (vtd_enabled)
+        if (iommu_enabled)
             ret = pt_irq_create_bind_vtd(d, bind);
         if (ret < 0)
             gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
diff -r 88818d55e95a xen/arch/x86/hvm/Makefile
--- a/xen/arch/x86/hvm/Makefile	Tue Feb 12 11:37:45 2008 +0000
+++ b/xen/arch/x86/hvm/Makefile	Wed Feb 13 14:48:25 2008 +0100
@@ -18,3 +18,4 @@ obj-y += vpic.o
 obj-y += vpic.o
 obj-y += save.o
 obj-y += stdvga.o
+obj-y += iommu.o
diff -r 88818d55e95a xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c	Tue Feb 12 11:37:45 2008 +0000
+++ b/xen/arch/x86/hvm/vioapic.c	Wed Feb 13 14:48:25 2008 +0100
@@ -458,7 +458,7 @@ void vioapic_update_EOI(struct domain *d
 
         ent->fields.remote_irr = 0;
 
-        if ( vtd_enabled )
+        if ( iommu_enabled )
         {
             spin_unlock(&d->arch.hvm_domain.irq_lock);
             hvm_dpci_eoi(current->domain, gsi, ent);
diff -r 88818d55e95a xen/arch/x86/hvm/vmx/vtd/io.c
--- a/xen/arch/x86/hvm/vmx/vtd/io.c	Tue Feb 12 11:37:45 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c	Wed Feb 13 14:48:25 2008 +0100
@@ -141,7 +141,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
 
-    if ( !vtd_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
+    if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
          !hvm_irq->dpci->mirq[mirq].valid )
         return 0;
 
@@ -167,7 +167,7 @@ static void hvm_dpci_isairq_eoi(struct d
     int i;
 
     ASSERT(isairq < NR_ISAIRQS);
-    if ( !vtd_enabled || !dpci ||
+    if ( !iommu_enabled || !dpci ||
          !test_bit(isairq, dpci->isairq_map) )
         return;
 
@@ -205,7 +205,7 @@ void hvm_dpci_eoi(struct domain *d, unsi
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t device, intx, machine_gsi;
 
-    if ( !vtd_enabled || (hvm_irq_dpci == NULL) ||
+    if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
         (guest_gsi >= NR_ISAIRQS && !hvm_irq_dpci->girq[guest_gsi].valid) )
         return;
 
@@ -235,50 +235,3 @@ void hvm_dpci_eoi(struct domain *d, unsi
     else
         spin_unlock(&hvm_irq_dpci->dirq_lock);
 }
-
-void iommu_domain_destroy(struct domain *d)
-{
-    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
-    uint32_t i;
-    struct hvm_iommu *hd = domain_hvm_iommu(d);
-    struct list_head *ioport_list, *digl_list, *tmp;
-    struct g2m_ioport *ioport;
-    struct dev_intx_gsi_link *digl;
-
-    if ( !vtd_enabled )
-        return;
-
-    if ( hvm_irq_dpci != NULL )
-    {
-        for ( i = 0; i < NR_IRQS; i++ )
-            if ( hvm_irq_dpci->mirq[i].valid )
-            {
-                pirq_guest_unbind(d, i);
-                kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
-
-                list_for_each_safe ( digl_list, tmp,
-                                     &hvm_irq_dpci->mirq[i].digl_list )
-                {
-                    digl = list_entry(digl_list,
-                                      struct dev_intx_gsi_link, list);
-                    list_del(&digl->list);
-                    xfree(digl);
-                }
-            }
-
-        d->arch.hvm_domain.irq.dpci = NULL;
-        xfree(hvm_irq_dpci);
-    }
-
-    if ( hd )
-    {
-        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
-        {
-            ioport = list_entry(ioport_list, struct g2m_ioport, list);
-            list_del(&ioport->list);
-            xfree(ioport);
-        }
-    }
-
-    iommu_domain_teardown(d);
-}
diff -r 88818d55e95a xen/include/asm-x86/hvm/iommu.h
--- a/xen/include/asm-x86/hvm/iommu.h	Tue Feb 12 11:37:45 2008 +0000
+++ b/xen/include/asm-x86/hvm/iommu.h	Wed Feb 13 14:48:25 2008 +0100
@@ -48,6 +48,9 @@ struct hvm_iommu {
     int domain_id;
     int paging_mode;
     void *root_table;
+
+    /* iommu_ops */
+    struct iommu_ops *platform_ops;
 };
 
 #endif // __ASM_X86_HVM_IOMMU_H__
diff -r 88818d55e95a xen/include/asm-x86/iommu.h
--- a/xen/include/asm-x86/iommu.h	Tue Feb 12 11:37:45 2008 +0000
+++ b/xen/include/asm-x86/iommu.h	Wed Feb 13 14:48:25 2008 +0100
@@ -28,7 +28,9 @@
 #include <public/domctl.h>
 
 extern int vtd_enabled;
+extern int amd_iommu_enabled;
 
+#define iommu_enabled ( amd_iommu_enabled || vtd_enabled )
 #define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
 #define domain_vmx_iommu(d)     (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
 #define iommu_qi_ctrl(iommu)    (&(iommu->intel.qi_ctrl));
@@ -89,4 +91,13 @@ void io_apic_write_remap_rte(unsigned in
 #define PT_IRQ_TIME_OUT MILLISECS(8)
 #define VTDPREFIX "[VT-D]"
 
+/* iommu interface */
+struct iommu_ops {
+    int (*init)(struct domain *d);
+    int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
+    void (*teardown)(struct domain *d);
+    int (*map_page)(struct domain *d, dma_addr_t gfn, dma_addr_t mfn);
+    int (*unmap_page)(struct domain *d, dma_addr_t gfn);
+};
+
 #endif /* _IOMMU_H_ */
diff -r 88818d55e95a xen/arch/x86/hvm/iommu.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/iommu.c	Wed Feb 13 14:48:25 2008 +0100
@@ -0,0 +1,132 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <xen/init.h>
+#include <xen/irq.h>
+#include <xen/spinlock.h>
+#include <xen/sched.h>
+#include <xen/xmalloc.h>
+#include <xen/domain_page.h>
+#include <asm/delay.h>
+#include <asm/string.h>
+#include <asm/mm.h>
+#include <asm/iommu.h>
+#include <asm/hvm/vmx/intel-iommu.h>
+
+extern struct iommu_ops intel_iommu_ops;
+extern struct iommu_ops amd_iommu_ops;
+
+int iommu_domain_init(struct domain *domain)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
+
+    spin_lock_init(&hd->mapping_lock);
+    spin_lock_init(&hd->iommu_list_lock);
+    INIT_LIST_HEAD(&hd->pdev_list);
+    INIT_LIST_HEAD(&hd->g2m_ioport_list);
+
+    if ( !iommu_enabled )
+        return 0;
+
+    switch ( boot_cpu_data.x86_vendor )
+    {
+    case X86_VENDOR_INTEL:
+        hd->platform_ops = &intel_iommu_ops;
+        break;
+    case X86_VENDOR_AMD:
+        hd->platform_ops = &amd_iommu_ops;
+        break;
+
+    default:
+        dprintk(XENLOG_ERR, "IOMMU: Unknown CPU Vendor\n");
+        return 0;
+    }
+
+    return hd->platform_ops->init(domain);
+}
+
+int assign_device(struct domain *d, u8 bus, u8 devfn)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops )
+        return 0;
+    return hd->platform_ops->assign_device(d, bus, devfn);
+}
+
+void iommu_domain_destroy(struct domain *d)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    uint32_t i;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    struct list_head *ioport_list, *digl_list, *tmp;
+    struct g2m_ioport *ioport;
+    struct dev_intx_gsi_link *digl;
+
+    if ( !iommu_enabled || !hd->platform_ops )
+        return;
+
+    if ( hvm_irq_dpci != NULL )
+    {
+        for ( i = 0; i < NR_IRQS; i++ )
+            if ( hvm_irq_dpci->mirq[i].valid )
+            {
+                pirq_guest_unbind(d, i);
+                kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
+
+                list_for_each_safe ( digl_list, tmp,
+                                     &hvm_irq_dpci->mirq[i].digl_list )
+                {
+                    digl = list_entry(digl_list,
+                                      struct dev_intx_gsi_link, list);
+                    list_del(&digl->list);
+                    xfree(digl);
+                }
+            }
+
+        d->arch.hvm_domain.irq.dpci = NULL;
+        xfree(hvm_irq_dpci);
+    }
+
+    if ( hd )
+    {
+        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
+        {
+            ioport = list_entry(ioport_list, struct g2m_ioport, list);
+            list_del(&ioport->list);
+            xfree(ioport);
+        }
+    }
+
+    hd->platform_ops->teardown(d);
+}
+
+int iommu_map_page(struct domain *d, dma_addr_t gfn, dma_addr_t mfn)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops )
+        return 0;
+    return hd->platform_ops->map_page(d, gfn, mfn);
+}
+
+int iommu_unmap_page(struct domain *d, dma_addr_t gfn)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops )
+        return 0;
+    return hd->platform_ops->unmap_page(d, gfn);
+}
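
Note on the pattern this patch introduces: direct compile-time calls into VT-d code are replaced by per-domain dispatch through an iommu_ops function table, selected once in iommu_domain_init() by CPU vendor. Below is a self-contained C sketch of that same ops-table idiom, outside the hypervisor; every demo_ and fake_ name is invented for illustration only and is not part of the patch or of Xen.

/*
 * Standalone illustration of the ops-table dispatch used above.
 * All demo_ and fake_ identifiers are hypothetical, not Xen APIs.
 */
#include <stdio.h>

struct demo_domain;                         /* stand-in for Xen's struct domain */

struct demo_iommu_ops {
    int (*init)(struct demo_domain *d);
    int (*map_page)(struct demo_domain *d, unsigned long gfn, unsigned long mfn);
};

struct demo_domain {
    const struct demo_iommu_ops *platform_ops;  /* mirrors hd->platform_ops */
};

static int fake_vtd_init(struct demo_domain *d)
{
    (void)d;
    puts("vtd: domain init");
    return 0;
}

static int fake_vtd_map_page(struct demo_domain *d, unsigned long gfn,
                             unsigned long mfn)
{
    (void)d;
    printf("vtd: map gfn %#lx -> mfn %#lx\n", gfn, mfn);
    return 0;
}

static const struct demo_iommu_ops fake_intel_ops = {
    .init     = fake_vtd_init,
    .map_page = fake_vtd_map_page,
};

/* Generic wrapper: check the pointer, then dispatch, as iommu_map_page() does. */
static int demo_map_page(struct demo_domain *d, unsigned long gfn,
                         unsigned long mfn)
{
    if ( !d->platform_ops )
        return 0;                           /* no IOMMU present: nothing to do */
    return d->platform_ops->map_page(d, gfn, mfn);
}

int main(void)
{
    /* Vendor-specific table chosen once, as iommu_domain_init() does. */
    struct demo_domain d = { .platform_ops = &fake_intel_ops };

    d.platform_ops->init(&d);
    return demo_map_page(&d, 0x1000, 0x2000);
}

The payoff of this indirection is that supporting another vendor only requires providing a new ops table and one extra case in the vendor switch; generic callers such as iommu_map_page() and assign_device() remain unchanged.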