|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v4 10/12] AMD/IOMMU: correct IRTE updating
On Thu, Jul 25, 2019 at 01:33:02PM +0000, Jan Beulich wrote:
> Flushing didn't get done along the lines of what the specification says.
> Mark entries to be updated as not remapped (which will result in
> interrupt requests to get target aborted, but the interrupts should be
> masked anyway at that point in time), issue the flush, and only then
> write the new entry.
>
> In update_intremap_entry_from_msi_msg() also fold the duplicate initial
> lock determination and acquire into just a single instance.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Brian Woods <brian.woods@xxxxxxx>
> ---
> RFC: Putting the flush invocations in loops isn't overly nice, but I
> don't think this can really be abused, since callers up the stack
> hold further locks. Nevertheless I'd like to ask for better
> suggestions.
> ---
> v4: Re-base.
> v3: Remove stale parts of description. Re-base.
> v2: Parts morphed into earlier patch.
>
> --- a/xen/drivers/passthrough/amd/iommu_intr.c
> +++ b/xen/drivers/passthrough/amd/iommu_intr.c
> @@ -213,15 +213,13 @@ static void update_intremap_entry(const
> },
> };
>
> - ACCESS_ONCE(entry.ptr128->raw[0]) = 0;
> + ASSERT(!entry.ptr128->full.remap_en);
> + entry.ptr128->raw[1] = irte.raw[1];
> /*
> - * Low half, in particular RemapEn, needs to be cleared first. See
> + * High half needs to be set before low one (containing RemapEn). See
> * comment in free_intremap_entry() regarding the choice of barrier.
> */
> smp_wmb();
> - entry.ptr128->raw[1] = irte.raw[1];
> - /* High half needs to be set before low one (containing RemapEn). */
> - smp_wmb();
> ACCESS_ONCE(entry.ptr128->raw[0]) = irte.raw[0];
> }
> else
> @@ -296,6 +294,20 @@ static int update_intremap_entry_from_io
> }
>
> entry = get_intremap_entry(iommu, req_id, offset);
> +
> + /* The RemapEn fields match for all formats. */
> + while ( iommu->enabled && entry.ptr32->flds.remap_en )
> + {
> + entry.ptr32->flds.remap_en = false;
> + spin_unlock(lock);
> +
> + spin_lock(&iommu->lock);
> + amd_iommu_flush_intremap(iommu, req_id);
> + spin_unlock(&iommu->lock);
> +
> + spin_lock(lock);
> + }
> +
> if ( fresh )
> /* nothing */;
> else if ( !lo_update )
> @@ -325,13 +337,6 @@ static int update_intremap_entry_from_io
>
> spin_unlock_irqrestore(lock, flags);
>
> - if ( iommu->enabled && !fresh )
> - {
> - spin_lock_irqsave(&iommu->lock, flags);
> - amd_iommu_flush_intremap(iommu, req_id);
> - spin_unlock_irqrestore(&iommu->lock, flags);
> - }
> -
> set_rte_index(rte, offset);
>
> return 0;
> @@ -587,19 +592,27 @@ static int update_intremap_entry_from_ms
> req_id = get_dma_requestor_id(iommu->seg, bdf);
> alias_id = get_intremap_requestor_id(iommu->seg, bdf);
>
> + lock = get_intremap_lock(iommu->seg, req_id);
> + spin_lock_irqsave(lock, flags);
> +
> if ( msg == NULL )
> {
> - lock = get_intremap_lock(iommu->seg, req_id);
> - spin_lock_irqsave(lock, flags);
> for ( i = 0; i < nr; ++i )
> free_intremap_entry(iommu, req_id, *remap_index + i);
> spin_unlock_irqrestore(lock, flags);
> - goto done;
> - }
>
> - lock = get_intremap_lock(iommu->seg, req_id);
> + if ( iommu->enabled )
> + {
> + spin_lock_irqsave(&iommu->lock, flags);
> + amd_iommu_flush_intremap(iommu, req_id);
> + if ( alias_id != req_id )
> + amd_iommu_flush_intremap(iommu, alias_id);
> + spin_unlock_irqrestore(&iommu->lock, flags);
> + }
> +
> + return 0;
> + }
>
> - spin_lock_irqsave(lock, flags);
> dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
> delivery_mode = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
> vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK;
> @@ -623,6 +636,22 @@ static int update_intremap_entry_from_ms
> }
>
> entry = get_intremap_entry(iommu, req_id, offset);
> +
> + /* The RemapEn fields match for all formats. */
> + while ( iommu->enabled && entry.ptr32->flds.remap_en )
> + {
> + entry.ptr32->flds.remap_en = false;
> + spin_unlock(lock);
> +
> + spin_lock(&iommu->lock);
> + amd_iommu_flush_intremap(iommu, req_id);
> + if ( alias_id != req_id )
> + amd_iommu_flush_intremap(iommu, alias_id);
> + spin_unlock(&iommu->lock);
> +
> + spin_lock(lock);
> + }
> +
> update_intremap_entry(iommu, entry, vector, delivery_mode, dest_mode,
>                       dest);
> spin_unlock_irqrestore(lock, flags);
>
> @@ -642,16 +671,6 @@ static int update_intremap_entry_from_ms
> get_ivrs_mappings(iommu->seg)[alias_id].intremap_table);
> }
>
> -done:
> - if ( iommu->enabled )
> - {
> - spin_lock_irqsave(&iommu->lock, flags);
> - amd_iommu_flush_intremap(iommu, req_id);
> - if ( alias_id != req_id )
> - amd_iommu_flush_intremap(iommu, alias_id);
> - spin_unlock_irqrestore(&iommu->lock, flags);
> - }
> -
> return 0;
> }
>
>
--
Brian Woods
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
Lists.xenproject.org is hosted with RackSpace.