
Re: [Xen-devel] [PATCH v4 10/12] AMD/IOMMU: correct IRTE updating


  • To: Jan Beulich <JBeulich@xxxxxxxx>
  • From: "Woods, Brian" <Brian.Woods@xxxxxxx>
  • Date: Thu, 25 Jul 2019 14:04:51 +0000
  • Accept-language: en-US
  • Cc: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, "Woods, Brian" <Brian.Woods@xxxxxxx>, "Suthikulpanit, Suravee" <Suravee.Suthikulpanit@xxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
  • Delivery-date: Thu, 25 Jul 2019 14:05:02 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [PATCH v4 10/12] AMD/IOMMU: correct IRTE updating

On Thu, Jul 25, 2019 at 01:33:02PM +0000, Jan Beulich wrote:
> Flushing wasn't done in line with what the specification requires.
> Mark entries to be updated as not remapped (which will result in
> interrupt requests getting target aborted, but the interrupts should
> be masked anyway at that point), issue the flush, and only then write
> the new entry.
> 
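
(For readers following along: the sequence the spec requires boils down
to the sketch below.  This is illustrative only -- irte_t, irte_disable()
and irte_flush() are made-up names, not the actual Xen helpers.)

    /* Hedged sketch of the update protocol, not the real Xen code. */
    static void irte_update(volatile irte_t *irte, irte_t new)
    {
        irte_disable(irte);   /* clear RemapEn: requests now get target
                               * aborted, but the IRQ is masked anyway */
        irte_flush(irte);     /* make the IOMMU drop any cached copy */
        *irte = new;          /* only then install the new entry */
    }
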
> In update_intremap_entry_from_msi_msg() also fold the duplicate initial
> lock determination and acquire into just a single instance.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Acked-by: Brian Woods <brian.woods@xxxxxxx>

> ---
> RFC: Putting the flush invocations in loops isn't overly nice, but I
>       don't think this can really be abused, since callers up the stack
>       hold further locks. Nevertheless I'd like to ask for better
>       suggestions.
> ---
> v4: Re-base.
> v3: Remove stale parts of description. Re-base.
> v2: Parts morphed into earlier patch.
> 
> --- a/xen/drivers/passthrough/amd/iommu_intr.c
> +++ b/xen/drivers/passthrough/amd/iommu_intr.c
> @@ -213,15 +213,13 @@ static void update_intremap_entry(const
>               },
>           };
>   
> -        ACCESS_ONCE(entry.ptr128->raw[0]) = 0;
> +        ASSERT(!entry.ptr128->full.remap_en);
> +        entry.ptr128->raw[1] = irte.raw[1];
>           /*
> -         * Low half, in particular RemapEn, needs to be cleared first.  See
> +         * High half needs to be set before low one (containing RemapEn).  See
>            * comment in free_intremap_entry() regarding the choice of barrier.
>            */
>           smp_wmb();
> -        entry.ptr128->raw[1] = irte.raw[1];
> -        /* High half needs to be set before low one (containing RemapEn). */
> -        smp_wmb();
>           ACCESS_ONCE(entry.ptr128->raw[0]) = irte.raw[0];
>       }
>       else
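
The single barrier now suffices because the entry is guaranteed disabled
on entry (the new ASSERT()): only the "payload before enable bit"
ordering is left to enforce.  As a stand-alone illustration (hypothetical
two-word layout, with the enable bit living in word 0):

    /* Sketch only: publish a two-word descriptor whose word 0 holds
     * the enable bit.  Assumes the entry is currently disabled. */
    entry->raw[1] = new.raw[1];               /* payload first */
    smp_wmb();                                /* order before enabling */
    ACCESS_ONCE(entry->raw[0]) = new.raw[0];  /* word with RemapEn last */
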
> @@ -296,6 +294,20 @@ static int update_intremap_entry_from_io
>       }
>   
>       entry = get_intremap_entry(iommu, req_id, offset);
> +
> +    /* The RemapEn fields match for all formats. */
> +    while ( iommu->enabled && entry.ptr32->flds.remap_en )
> +    {
> +        entry.ptr32->flds.remap_en = false;
> +        spin_unlock(lock);
> +
> +        spin_lock(&iommu->lock);
> +        amd_iommu_flush_intremap(iommu, req_id);
> +        spin_unlock(&iommu->lock);
> +
> +        spin_lock(lock);
> +    }
> +
>       if ( fresh )
>           /* nothing */;
>       else if ( !lo_update )
> @@ -325,13 +337,6 @@ static int update_intremap_entry_from_io
>   
>       spin_unlock_irqrestore(lock, flags);
>   
> -    if ( iommu->enabled && !fresh )
> -    {
> -        spin_lock_irqsave(&iommu->lock, flags);
> -        amd_iommu_flush_intremap(iommu, req_id);
> -        spin_unlock_irqrestore(&iommu->lock, flags);
> -    }
> -
>       set_rte_index(rte, offset);
>   
>       return 0;
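
Worth noting for anyone touching this later: the per-table lock is
dropped while the IOMMU lock is taken for the flush (the two must not
nest the other way around), so the entry can change in that window and
has to be re-checked -- hence a while loop rather than an if.  In
outline (illustrative names, mirroring the hunk above):

    /* Sketch of the drop-flush-retake pattern, not the literal code. */
    while ( iommu->enabled && entry_remap_enabled(entry) )
    {
        entry_disable_remap(entry);   /* requests now get target aborted */
        spin_unlock(table_lock);      /* give up the table lock ... */
        spin_lock(&iommu->lock);      /* ... before taking the IOMMU's */
        amd_iommu_flush_intremap(iommu, req_id);
        spin_unlock(&iommu->lock);
        spin_lock(table_lock);        /* retaken: loop re-checks entry */
    }
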
> @@ -587,19 +592,27 @@ static int update_intremap_entry_from_ms
>       req_id = get_dma_requestor_id(iommu->seg, bdf);
>       alias_id = get_intremap_requestor_id(iommu->seg, bdf);
>   
> +    lock = get_intremap_lock(iommu->seg, req_id);
> +    spin_lock_irqsave(lock, flags);
> +
>       if ( msg == NULL )
>       {
> -        lock = get_intremap_lock(iommu->seg, req_id);
> -        spin_lock_irqsave(lock, flags);
>           for ( i = 0; i < nr; ++i )
>               free_intremap_entry(iommu, req_id, *remap_index + i);
>           spin_unlock_irqrestore(lock, flags);
> -        goto done;
> -    }
>   
> -    lock = get_intremap_lock(iommu->seg, req_id);
> +        if ( iommu->enabled )
> +        {
> +            spin_lock_irqsave(&iommu->lock, flags);
> +            amd_iommu_flush_intremap(iommu, req_id);
> +            if ( alias_id != req_id )
> +                amd_iommu_flush_intremap(iommu, alias_id);
> +            spin_unlock_irqrestore(&iommu->lock, flags);
> +        }
> +
> +        return 0;
> +    }
>   
> -    spin_lock_irqsave(lock, flags);
>       dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
>       delivery_mode = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
>       vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK;
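
(The fields picked apart here follow the standard x86 MSI encoding; a
self-contained restatement with open-coded constants -- the real code
uses Xen's MSI_* macros instead:)

    /* x86 MSI layout, for reference:
     *   address_lo: bit 2 = destination mode, bits 19:12 = dest ID
     *   data:       bits 7:0 = vector, bits 10:8 = delivery mode
     * Only the low delivery-mode bit is of interest here, matching
     * the 0x1 mask in the code above. */
    dest_mode     = (msg->address_lo >> 2) & 0x1;
    delivery_mode = (msg->data >> 8) & 0x1;
    vector        = msg->data & 0xff;
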
> @@ -623,6 +636,22 @@ static int update_intremap_entry_from_ms
>       }
>   
>       entry = get_intremap_entry(iommu, req_id, offset);
> +
> +    /* The RemapEn fields match for all formats. */
> +    while ( iommu->enabled && entry.ptr32->flds.remap_en )
> +    {
> +        entry.ptr32->flds.remap_en = false;
> +        spin_unlock(lock);
> +
> +        spin_lock(&iommu->lock);
> +        amd_iommu_flush_intremap(iommu, req_id);
> +        if ( alias_id != req_id )
> +            amd_iommu_flush_intremap(iommu, alias_id);
> +        spin_unlock(&iommu->lock);
> +
> +        spin_lock(lock);
> +    }
> +
>       update_intremap_entry(iommu, entry, vector, delivery_mode, dest_mode, dest);
>       spin_unlock_irqrestore(lock, flags);
>   
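
One more note on the alias handling: when the ID the interrupt-remapping
logic sees (alias_id) differs from the DMA requestor ID, the IOMMU may
have cached entries under either ID, so both get invalidated:

    /* Mirrors the loop above -- both IDs may index cached IRTEs. */
    amd_iommu_flush_intremap(iommu, req_id);
    if ( alias_id != req_id )
        amd_iommu_flush_intremap(iommu, alias_id);
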
> @@ -642,16 +671,6 @@ static int update_intremap_entry_from_ms
>                  get_ivrs_mappings(iommu->seg)[alias_id].intremap_table);
>       }
>   
> -done:
> -    if ( iommu->enabled )
> -    {
> -        spin_lock_irqsave(&iommu->lock, flags);
> -        amd_iommu_flush_intremap(iommu, req_id);
> -        if ( alias_id != req_id )
> -            amd_iommu_flush_intremap(iommu, alias_id);
> -        spin_unlock_irqrestore(&iommu->lock, flags);
> -    }
> -
>       return 0;
>   }
>   
> 

-- 
Brian Woods

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
