Re: [UNIKRAFT/LWIP PATCH v2] lwip: support for memory pools



Hi Hugo,

This patch looks good, thanks.

Reviewed-by: Felipe Huici <felipe.huici@xxxxxxxxx>

On Thu, Jun 18, 2020 at 3:59 PM Hugo Lefeuvre <hugo.lefeuvre@xxxxxxxxx> wrote:
Add support for memory pools.

There are now two allocation modes (see the sketch after this list):

(1) heap only, malloc/free are used everywhere (even for memp allocations)
(2) memory pools, using lwIP's custom-pools mechanism for all allocations.
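
In lwipopts.h terms, the two modes boil down to the following
switches (a condensed sketch of the hunks further down):

  /* mode (1), CONFIG_LWIP_HEAP: everything goes through malloc/free */
  #define MEM_LIBC_MALLOC 1 /* enable heap */
  #define MEMP_MEM_MALLOC 1 /* pool allocations via malloc */

  /* mode (2), CONFIG_LWIP_POOLS: everything goes through custom pools */
  #define MEM_LIBC_MALLOC 0 /* disable heap */
  #define MEMP_MEM_MALLOC 0 /* pool allocations via pool */
  #define MEM_USE_POOLS   1 /* mem_malloc() served from pools too */
  #define MEMP_USE_CUSTOM_POOLS 1 /* pools declared in lwippools.h */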

The pools are sized to provide good performance for Redis: 1000x256,
1000x512, 1000x1560. Overall this amounts to ~2.3MB, which might be
too much in some cases. In any case, these sizes should be tweaked
manually for performance-critical applications.
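
As an illustration only (the counts below are hypothetical, not
measured defaults), an application dominated by small allocations
could shrink the large pool and grow the small one in
include/lwippools.h:

  #if CONFIG_LWIP_POOLS
  LWIP_MALLOC_MEMPOOL_START
  LWIP_MALLOC_MEMPOOL(4000, 256) /* many small buffers */
  LWIP_MALLOC_MEMPOOL(500, 512)
  LWIP_MALLOC_MEMPOOL(250, 1560) /* few full-frame buffers */
  LWIP_MALLOC_MEMPOOL_END
  #endif

This works out to ~1.7MB instead of ~2.3MB, and since
MEM_USE_POOLS_TRY_BIGGER_POOL is set, a request is served from the
next larger pool when the best-fit pool is exhausted.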

Increase the defaults for the maximum number of simultaneous TCP
connections and listeners (previously 5, now 64), and expose both
via menuconfig.
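
With the new defaults, the relevant lwipopts.h macros expand roughly
as follows (illustrative expansion of the hunks further down):

  #define MEMP_NUM_TCP_PCB        64 /* CONFIG_LWIP_NUM_TCPCON */
  #define MEMP_NUM_TCP_PCB_LISTEN 64 /* CONFIG_LWIP_NUM_TCPLISTENERS */
  #define MEMP_NUM_NETCONN        64 /* follows CONFIG_LWIP_NUM_TCPCON
                                        when LWIP_TCP is enabled */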

Signed-off-by: Hugo Lefeuvre <hugo.lefeuvre@xxxxxxxxx>
---
 Config.uk           | 12 ++++++++++--
 include/lwipopts.h  | 49 +++++++++++++++++++++++++++----------------------
 include/lwippools.h |  7 +++++++
 3 files changed, 44 insertions(+), 24 deletions(-)
 create mode 100644 include/lwippools.h

diff --git a/Config.uk b/Config.uk
index aaaaae0..3bbb018 100644
--- a/Config.uk
+++ b/Config.uk
@@ -63,8 +63,8 @@ config LWIP_HEAP
        help
                Use default ukalloc allocator for all memory allocation requests

-#config LWIP_POOLS
-#      bool "Memory pools"
+config LWIP_POOLS
+       bool "Memory pools"
 endchoice

 config LWIP_NETIF_EXT_STATUS_CALLBACK
@@ -118,6 +118,14 @@ config LWIP_TCP_KEEPALIVE
 config LWIP_TCP_TIMESTAMPS
        bool "Timestamps"
        default n
+
+config LWIP_NUM_TCPCON
+       int "Maximum number of simultaneous TCP connections"
+       default 64
+
+config LWIP_NUM_TCPLISTENERS
+       int "Maximum number of simultaneous TCP listeners"
+       default 64
 endif

 config LWIP_ICMP
diff --git a/include/lwipopts.h b/include/lwipopts.h
index 148028d..d0ed208 100644
--- a/include/lwipopts.h
+++ b/include/lwipopts.h
@@ -16,27 +16,29 @@
 /**
  * Memory mode
  */
-/* provide malloc/free by Unikraft */
 #if CONFIG_LWIP_HEAP
   /* Only use malloc/free for lwIP.
    * Every allocation is done by the heap.
    * Note: This setting results in the smallest binary
    *       size but leads to heavy malloc/free usage during
-   *       network processing.
+   *       network processing and subsequent performance decrease.
    */
   #define MEM_LIBC_MALLOC 1 /* enable heap */
   #define MEMP_MEM_MALLOC 1 /* pool allocations via malloc */
 #elif CONFIG_LWIP_POOLS
-  /* Pools are used for pool allocations and the heap
-   * is used for all the rest of allocations.
-   * Note: Per design, lwIP allocates outgoing packet buffers
-   *       from heap (via PBUF_RAM) and incoming from pools (via PBUF_POOL)
-   *       CONFIG_LWIP_PBUF_POOL_SIZE defines the pool size for PBUF_POOL
-   *       allocations
+  /* Pools are used for all allocations.
   * Note: lwIP allocates pools in the data segment
    */
-  #define MEM_LIBC_MALLOC 1 /* enable heap */
-  #define MEMP_MEM_MALLOC 0 /* pool allocations still via pool */
+  #define MEM_LIBC_MALLOC 0 /* disable heap */
+  #define MEMP_MEM_MALLOC 0 /* pool allocations via pool (default) */
+
+  /* When mem_malloc is called, an element of the smallest pool that can provide
+   * the length needed is returned.
+   */
+  #define MEM_USE_POOLS   1
+  #define MEMP_USE_CUSTOM_POOLS 1
+  #define MEM_USE_POOLS_TRY_BIGGER_POOL 1 /* take a bigger pool if necessary */
+  #define MEMP_SEPARATE_POOLS 1
 #else
  #error Configuration error!
 #endif /* CONFIG_LWIP_HEAP / CONFIG_LWIP_POOLS */
@@ -55,15 +57,6 @@ void sys_free(void *ptr);
 #define mem_clib_free     sys_free
 #endif /* MEM_LIBC_MALLOC */

-#if MEM_USE_POOLS
-/*
- * Use lwIP's pools
- */
-#define MEMP_USE_CUSTOM_POOLS 0
-/* for each pool use a separate array in data segment */
-#define MEMP_SEPARATE_POOLS 1
-#endif /* MEM_USE_POOLS */
-
 /**
  * Operation mode (threaded, mainloop)
  */
@@ -171,14 +164,15 @@ void sys_free(void *ptr);
 #define TCP_SND_BUF (TCP_WND + (2 * TCP_MSS))
 #endif /* CONFIG_LWIP_WND_SCALE */

+#define MEMP_NUM_TCP_PCB CONFIG_LWIP_NUM_TCPCON /* max num of sim. TCP connections */
+#define MEMP_NUM_TCP_PCB_LISTEN CONFIG_LWIP_NUM_TCPLISTENERS /* max num of sim. TCP listeners */
+
 #define TCP_SNDLOWAT (4 * TCP_MSS)
 #define TCP_SND_QUEUELEN (2 * (TCP_SND_BUF) / (TCP_MSS))
 #define TCP_QUEUE_OOSEQ 4
-#define MEMP_NUM_TCP_SEG (MEMP_NUM_TCP_PCB * ((TCP_SND_QUEUELEN) / 5))
+#define MEMP_NUM_TCP_SEG ((MEMP_NUM_TCP_PCB) * ((TCP_SND_QUEUELEN) / 5))
 #define MEMP_NUM_FRAG_PBUF 32

-#define MEMP_NUM_TCP_PCB CONFIG_LWIP_NUM_TCPCON /* max num of sim. TCP connections */
-#define MEMP_NUM_TCP_PCB_LISTEN 32 /* max num of sim. TCP listeners */
 #endif /* LWIP_TCP */

 /**
@@ -249,6 +243,10 @@ void sys_free(void *ptr);
 #ifndef PBUF_POOL_SIZE
 #define PBUF_POOL_SIZE ((TCP_WND + TCP_MSS - 1) / TCP_MSS)
 #endif
+#ifndef PBUF_POOL_BUFSIZE
+/* smallest PBUF_POOL_BUFSIZE which satisfies TCP_WND < PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers) */
+#define PBUF_POOL_BUFSIZE ((TCP_WND / PBUF_POOL_SIZE) + (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN) + 1)
+#endif
 #ifndef MEMP_NUM_PBUF
 #define MEMP_NUM_PBUF ((MEMP_NUM_TCP_PCB * (TCP_SND_QUEUELEN)) / 2)
 #endif
@@ -286,6 +284,13 @@ void sys_free(void *ptr);
 #define LWIP_NETCONN 0
 #else
 #define LWIP_NETCONN 1
+/* maximum number of struct netconn entries, which limits the maximum
+ * number of open sockets */
+#if LWIP_TCP
+#define MEMP_NUM_NETCONN CONFIG_LWIP_NUM_TCPCON
+#else
+#define MEMP_NUM_NETCONN 64
+#endif /* LWIP_TCP */
 #endif

 /**
diff --git a/include/lwippools.h b/include/lwippools.h
new file mode 100644
index 0000000..3ec8104
--- /dev/null
+++ b/include/lwippools.h
@@ -0,0 +1,7 @@
+#if CONFIG_LWIP_POOLS
+LWIP_MALLOC_MEMPOOL_START
+LWIP_MALLOC_MEMPOOL(1000, 256)
+LWIP_MALLOC_MEMPOOL(1000, 512)
+LWIP_MALLOC_MEMPOOL(1000, 1560)
+LWIP_MALLOC_MEMPOOL_END
+#endif
--
2.7.4



 

