[PATCH v2 2/2] AMD/IOMMU: re-work locking around sending of commands
It appears unhelpful to me for flush_command_buffer() to block all
progress elsewhere for the given IOMMU by holding its lock while
waiting for command completion. There's no real need for callers of
that function or of send_iommu_command() to hold the lock. Contain all
command sending related locking to the latter function.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Re-work to contain locking to send_iommu_command().

--- a/xen/drivers/passthrough/amd/iommu_cmd.c
+++ b/xen/drivers/passthrough/amd/iommu_cmd.c
@@ -27,6 +27,9 @@ static void send_iommu_command(struct am
                                const uint32_t cmd[4])
 {
     uint32_t tail;
+    unsigned long flags;
+
+    spin_lock_irqsave(&iommu->lock, flags);
 
     tail = iommu->cmd_buffer.tail + sizeof(cmd_entry_t);
     if ( tail == iommu->cmd_buffer.size )
@@ -47,6 +50,8 @@ static void send_iommu_command(struct am
 
     iommu->cmd_buffer.tail = tail;
     writel(tail, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void flush_command_buffer(struct amd_iommu *iommu,
@@ -273,7 +278,6 @@ static void invalidate_iommu_all(struct
 void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
                            daddr_t daddr, unsigned int order)
 {
-    unsigned long flags;
     struct amd_iommu *iommu;
     unsigned int req_id, queueid, maxpend;
 
@@ -300,10 +304,8 @@ void amd_iommu_flush_iotlb(u8 devfn, con
     maxpend = pdev->ats.queue_depth & 0xff;
 
     /* send INVALIDATE_IOTLB_PAGES command */
-    spin_lock_irqsave(&iommu->lock, flags);
     invalidate_iotlb_pages(iommu, maxpend, 0, queueid, daddr, req_id, order);
     flush_command_buffer(iommu, iommu_dev_iotlb_timeout);
-    spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void amd_iommu_flush_all_iotlbs(struct domain *d, daddr_t daddr,
@@ -330,17 +332,14 @@ static void amd_iommu_flush_all_iotlbs(s
 static void _amd_iommu_flush_pages(struct domain *d,
                                    daddr_t daddr, unsigned int order)
 {
-    unsigned long flags;
     struct amd_iommu *iommu;
     unsigned int dom_id = d->domain_id;
 
     /* send INVALIDATE_IOMMU_PAGES command */
     for_each_amd_iommu ( iommu )
     {
-        spin_lock_irqsave(&iommu->lock, flags);
         invalidate_iommu_pages(iommu, daddr, dom_id, order);
         flush_command_buffer(iommu, 0);
-        spin_unlock_irqrestore(&iommu->lock, flags);
     }
 
     if ( ats_enabled )
@@ -360,37 +359,25 @@ void amd_iommu_flush_pages(struct domain
 
 void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf)
 {
-    ASSERT( spin_is_locked(&iommu->lock) );
-
     invalidate_dev_table_entry(iommu, bdf);
     flush_command_buffer(iommu, 0);
 }
 
 void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf)
 {
-    ASSERT( spin_is_locked(&iommu->lock) );
-
     invalidate_interrupt_table(iommu, bdf);
     flush_command_buffer(iommu, 0);
 }
 
 void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
 {
-    ASSERT( spin_is_locked(&iommu->lock) );
-
     invalidate_iommu_all(iommu);
     flush_command_buffer(iommu, 0);
 }
 
 void amd_iommu_send_guest_cmd(struct amd_iommu *iommu, u32 cmd[])
 {
-    unsigned long flags;
-
-    spin_lock_irqsave(&iommu->lock, flags);
-
     send_iommu_command(iommu, cmd);
     /* TBD: Timeout selection may require peeking into cmd[]. */
     flush_command_buffer(iommu, 0);
-
-    spin_unlock_irqrestore(&iommu->lock, flags);
 }
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -449,9 +449,10 @@ static int do_invalidate_dte(struct doma
 
     spin_lock_irqsave(&iommu->lock, flags);
     dte_set_gcr3_table(mdte, hdom_id, gcr3_mfn << PAGE_SHIFT, gv, glx);
-    amd_iommu_flush_device(iommu, req_id);
     spin_unlock_irqrestore(&iommu->lock, flags);
 
+    amd_iommu_flush_device(iommu, req_id);
+
     return 0;
 }
 
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -871,7 +871,10 @@ static void enable_iommu(struct amd_iomm
     spin_lock_irqsave(&iommu->lock, flags);
 
     if ( unlikely(iommu->enabled) )
-        goto out;
+    {
+        spin_unlock_irqrestore(&iommu->lock, flags);
+        return;
+    }
 
     amd_iommu_erratum_746_workaround(iommu);
 
@@ -921,13 +924,12 @@ static void enable_iommu(struct amd_iomm
 
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
 
-    if ( iommu->features.flds.ia_sup )
-        amd_iommu_flush_all_caches(iommu);
-
     iommu->enabled = 1;
 
- out:
     spin_unlock_irqrestore(&iommu->lock, flags);
+
+    if ( iommu->features.flds.ia_sup )
+        amd_iommu_flush_all_caches(iommu);
 }
 
 static void disable_iommu(struct amd_iommu *iommu)
@@ -1544,7 +1546,6 @@ static int _invalidate_all_devices(
 {
     unsigned int bdf;
     u16 req_id;
-    unsigned long flags;
     struct amd_iommu *iommu;
 
     for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
@@ -1553,10 +1554,8 @@ static int _invalidate_all_devices(
         req_id = ivrs_mappings[bdf].dte_requestor_id;
        if ( iommu )
         {
-            spin_lock_irqsave(&iommu->lock, flags);
             amd_iommu_flush_device(iommu, req_id);
             amd_iommu_flush_intremap(iommu, req_id);
-            spin_unlock_irqrestore(&iommu->lock, flags);
         }
     }
 
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -310,9 +310,7 @@ static int update_intremap_entry_from_io
         entry.ptr32->flds.remap_en = false;
         spin_unlock(lock);
 
-        spin_lock(&iommu->lock);
         amd_iommu_flush_intremap(iommu, req_id);
-        spin_unlock(&iommu->lock);
 
         spin_lock(lock);
     }
@@ -527,11 +525,9 @@ static int update_intremap_entry_from_ms
 
     if ( iommu->enabled )
     {
-        spin_lock_irqsave(&iommu->lock, flags);
         amd_iommu_flush_intremap(iommu, req_id);
         if ( alias_id != req_id )
             amd_iommu_flush_intremap(iommu, alias_id);
-        spin_unlock_irqrestore(&iommu->lock, flags);
     }
 
     return 0;
@@ -567,11 +563,9 @@ static int update_intremap_entry_from_ms
         entry.ptr32->flds.remap_en = false;
         spin_unlock(lock);
 
-        spin_lock(&iommu->lock);
         amd_iommu_flush_intremap(iommu, req_id);
         if ( alias_id != req_id )
             amd_iommu_flush_intremap(iommu, alias_id);
-        spin_unlock(&iommu->lock);
 
         spin_lock(lock);
     }
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -129,6 +129,8 @@ static void amd_iommu_setup_domain_devic
              iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
             dte->i = ats_enabled;
 
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
         amd_iommu_flush_device(iommu, req_id);
 
         AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, type = %#x, "
@@ -138,8 +140,8 @@ static void amd_iommu_setup_domain_devic
                         page_to_maddr(hd->arch.amd.root_table),
                         domain->domain_id, hd->arch.amd.paging_mode);
     }
-
-    spin_unlock_irqrestore(&iommu->lock, flags);
+    else
+        spin_unlock_irqrestore(&iommu->lock, flags);
 
     ASSERT(pcidevs_locked());
 
@@ -307,6 +309,8 @@ static void amd_iommu_disable_domain_dev
         smp_wmb();
         dte->v = true;
 
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
         amd_iommu_flush_device(iommu, req_id);
 
         AMD_IOMMU_DEBUG("Disable: device id = %#x, "
@@ -314,7 +318,8 @@ static void amd_iommu_disable_domain_dev
                         req_id, domain->domain_id,
                         dom_iommu(domain)->arch.amd.paging_mode);
     }
-    spin_unlock_irqrestore(&iommu->lock, flags);
+    else
+        spin_unlock_irqrestore(&iommu->lock, flags);
 
     ASSERT(pcidevs_locked());
 
@@ -455,9 +460,9 @@ static int amd_iommu_add_device(u8 devfn
                 iommu->dev_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE),
                 ivrs_mappings[bdf].intremap_table, iommu, iommu_intremap);
 
-            amd_iommu_flush_device(iommu, bdf);
-
             spin_unlock_irqrestore(&iommu->lock, flags);
+
+            amd_iommu_flush_device(iommu, bdf);
         }
 
     amd_iommu_setup_domain_device(pdev->domain, iommu, devfn, pdev);
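As a side note for readers following the locking argument above, the
standalone sketch below illustrates the shape the patch converges on:
the per-IOMMU lock only guards queuing a command into the ring, while
waiting for completion happens with no lock held. None of this is Xen
code; a pthread mutex stands in for iommu->lock, and the command ring,
completion flag and all names (mock_iommu, CMD_ENTRIES, ...) are
illustrative assumptions. It builds with e.g. "cc -std=c11 -pthread".

/*
 * Standalone sketch (not Xen code) of the locking shape established by
 * this patch: send_iommu_command() contains all command-ring locking;
 * flush_command_buffer() waits for completion without holding the lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_ENTRIES 16

struct mock_iommu {
    pthread_mutex_t lock;            /* stand-in for iommu->lock */
    uint32_t ring[CMD_ENTRIES][4];   /* stand-in for the command buffer */
    unsigned int tail;
    atomic_uint completed;           /* stand-in for the completion flag */
};

/* All command-ring locking lives here, mirroring the patched function. */
static void send_iommu_command(struct mock_iommu *iommu, const uint32_t cmd[4])
{
    pthread_mutex_lock(&iommu->lock);

    memcpy(iommu->ring[iommu->tail], cmd, sizeof(iommu->ring[0]));
    iommu->tail = (iommu->tail + 1) % CMD_ENTRIES;
    /* The real code would also write the new tail to an MMIO register here. */

    pthread_mutex_unlock(&iommu->lock);
}

/* Queue a completion-wait command, then poll with no lock held, so other
 * CPUs can keep submitting commands to the same IOMMU meanwhile. */
static void flush_command_buffer(struct mock_iommu *iommu)
{
    const uint32_t completion_wait[4] = { 0 /* opcode etc. elided */ };

    send_iommu_command(iommu, completion_wait);

    while ( !atomic_load(&iommu->completed) )
        ;                            /* spin without holding iommu->lock */
    atomic_store(&iommu->completed, 0);
}

int main(void)
{
    struct mock_iommu iommu = { .lock = PTHREAD_MUTEX_INITIALIZER };
    const uint32_t inv_dte[4] = { 0x2 /* pretend INVALIDATE_DEV_TABLE_ENTRY */ };

    atomic_store(&iommu.completed, 1);   /* pretend the hardware is instant */
    send_iommu_command(&iommu, inv_dte);
    flush_command_buffer(&iommu);
    puts("flush completed without holding iommu->lock while waiting");
    return 0;
}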