
Re: swiotlb cleanups v3



On Sat, Apr 17, 2021 at 11:39:22AM -0500, Tom Lendacky wrote:
> Somewhere between the 1st and 2nd patch, specifying a specific swiotlb
> for an SEV guest is no longer honored. For example, if I start an SEV
> guest with 16GB of memory and specify swiotlb=131072, I used to get a
> 256MB SWIOTLB. However, after the 2nd patch, the swiotlb=131072 is no
> longer honored and I get a 982MB SWIOTLB (as set via sev_setup_arch() in
> arch/x86/mm/mem_encrypt.c).
> 
> I can't be sure which patch caused the issue since an SEV guest fails to
> boot with the 1st patch but can boot with the 2nd patch, at which point
> the SWIOTLB comes in at 982MB (I haven't had a chance to debug it and so
> I'm hoping you might be able to quickly spot what's going on).
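
(For reference: swiotlb= takes a slab count, and one slab is
1 << IO_TLB_SHIFT = 2KB, so ALIGN(131072, IO_TLB_SEGSIZE) << IO_TLB_SHIFT
works out to 131072 * 2048 bytes = 256MB, the size you used to get.)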

Can you try this patch?

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 0a5b6f7e75bce6..ac81ef97df32f5 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -71,15 +71,17 @@ struct io_tlb_mem *io_tlb_default_mem;
  */
 static unsigned int max_segment;
 
-static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
+static unsigned long swiotlb_cmdline_size;
 
 static int __init
 setup_io_tlb_npages(char *str)
 {
        if (isdigit(*str)) {
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
-               default_nslabs =
-                       ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
+               unsigned long nslabs = simple_strtoul(str, &str, 0);
+
+               swiotlb_cmdline_size =
+                       ALIGN(nslabs, IO_TLB_SEGSIZE) << IO_TLB_SHIFT;
        }
        if (*str == ',')
                ++str;
@@ -108,7 +110,9 @@ void swiotlb_set_max_segment(unsigned int val)
 }
 
 unsigned long swiotlb_size_or_default(void)
 {
-       return default_nslabs << IO_TLB_SHIFT;
+       if (swiotlb_cmdline_size)
+               return swiotlb_cmdline_size;
+       return IO_TLB_DEFAULT_SIZE;
 }
 
 void __init swiotlb_adjust_size(unsigned long size)
@@ -118,9 +122,10 @@ void __init swiotlb_adjust_size(unsigned long size)
         * architectures such as those supporting memory encryption to
         * adjust/expand SWIOTLB size for their use.
         */
-       size = ALIGN(size, IO_TLB_SIZE);
-       default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
-       pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
+       if (!swiotlb_cmdline_size)
+               swiotlb_cmdline_size = ALIGN(size, IO_TLB_SIZE);
+       pr_info("SWIOTLB bounce buffer size adjusted to %luMB",
+               swiotlb_cmdline_size >> 20);
 }
 
 void swiotlb_print_info(void)
@@ -209,7 +214,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 void  __init
 swiotlb_init(int verbose)
 {
-       size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
+       size_t bytes = PAGE_ALIGN(swiotlb_size_or_default());
        void *tlb;
 
        if (swiotlb_force == SWIOTLB_NO_FORCE)
@@ -219,7 +224,7 @@ swiotlb_init(int verbose)
        /* Get IO TLB memory from the low pages */
        tlb = memblock_alloc_low(bytes, PAGE_SIZE);
        if (!tlb)
                goto fail;
-       if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose))
+       if (swiotlb_init_with_tbl(tlb, bytes >> IO_TLB_SHIFT, verbose))
                goto fail_free_mem;
        return;
 
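The problem is that swiotlb_adjust_size() unconditionally overwrote
default_nslabs, which clobbers whatever an earlier swiotlb= on the
command line had set up.  With the patch the command-line size lives in
its own variable, and the adjustment from sev_setup_arch() only applies
when no size was given on the command line.

A minimal userspace model of the intended precedence, with the
constants copied from include/linux/swiotlb.h (this is just the
arithmetic, not the kernel code):

	#include <stdio.h>

	#define IO_TLB_SHIFT        11
	#define IO_TLB_SEGSIZE      128
	#define IO_TLB_DEFAULT_SIZE (64UL << 20)	/* 64MB */

	static unsigned long swiotlb_cmdline_size;	/* 0: no swiotlb= given */

	/* swiotlb=<nslabs>: avoid a tail segment of size < IO_TLB_SEGSIZE */
	static void setup_io_tlb_npages(unsigned long nslabs)
	{
		nslabs = (nslabs + IO_TLB_SEGSIZE - 1) &
			 ~(IO_TLB_SEGSIZE - 1UL);
		swiotlb_cmdline_size = nslabs << IO_TLB_SHIFT;
	}

	/* callers like sev_setup_arch() only win when no swiotlb= was given */
	static void swiotlb_adjust_size(unsigned long size)
	{
		if (!swiotlb_cmdline_size)
			swiotlb_cmdline_size = size;
	}

	static unsigned long swiotlb_size_or_default(void)
	{
		return swiotlb_cmdline_size ?
			swiotlb_cmdline_size : IO_TLB_DEFAULT_SIZE;
	}

	int main(void)
	{
		setup_io_tlb_npages(131072);		/* swiotlb=131072 */
		swiotlb_adjust_size(982UL << 20);	/* SEV adjustment */
		printf("%luMB\n", swiotlb_size_or_default() >> 20); /* 256MB */
		return 0;
	}

Drop the setup_io_tlb_npages() call and it prints 982MB instead, which
matches the two behaviors you are seeing.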