
[Xen-changelog] Fix the skbuff allocator for multi-page buffers.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 7bd1a40ae2bd1895190f7df580afb7ea8426eb50
# Parent  d4f6247b2a1bfa8792782d3951149bcddad53f0b
Fix the skbuff allocator for multi-page buffers.
pci-dma.c still needs fixing to recognise contiguous
multi-page buffers.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r d4f6247b2a1b -r 7bd1a40ae2bd linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c     Wed Aug 17 15:34:58 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c     Wed Aug 17 16:53:30 2005
@@ -5,8 +5,6 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
@@ -14,34 +12,86 @@
 #include <linux/init.h>
 #include <asm/io.h>
 #include <asm/page.h>
-
-EXPORT_SYMBOL(__dev_alloc_skb);
+#include <asm-xen/hypervisor.h>
 
 /* Referenced in netback.c. */
 /*static*/ kmem_cache_t *skbuff_cachep;
 
-/* Size must be cacheline-aligned (alloc_skb uses SKB_DATA_ALIGN). */
-#define XEN_SKB_SIZE \
-    ((PAGE_SIZE - sizeof(struct skb_shared_info)) & ~(SMP_CACHE_BYTES - 1))
+#define MAX_SKBUFF_ORDER 2
+static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
 
 struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
 {
-    struct sk_buff *skb;
-    skb = alloc_skb_from_cache(skbuff_cachep, length + 16, gfp_mask);
-    if ( likely(skb != NULL) )
-        skb_reserve(skb, 16);
-    return skb;
+       struct sk_buff *skb;
+       int order;
+
+       length = SKB_DATA_ALIGN(length + 16);
+       order = get_order(length + sizeof(struct skb_shared_info));
+       if (order > MAX_SKBUFF_ORDER) {
+               printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
+                      "Increase MAX_SKBUFF_ORDER.\n", order);
+               return NULL;
+       }
+
+       skb = alloc_skb_from_cache(
+               skbuff_order_cachep[order], length, gfp_mask);
+       if (skb != NULL)
+               skb_reserve(skb, 16);
+
+       return skb;
 }
 
 static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
 {
-    scrub_pages(buf, 1);
+       int order = 0;
+
+       while (skbuff_order_cachep[order] != cachep)
+               order++;
+
+       if (order != 0)
+               xen_create_contiguous_region((unsigned long)buf, order);
+
+       scrub_pages(buf, 1 << order);
+}
+
+static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
+{
+       int order = 0;
+
+       while (skbuff_order_cachep[order] != cachep)
+               order++;
+
+       if (order != 0)
+               xen_destroy_contiguous_region((unsigned long)buf, order);
 }
 
 static int __init skbuff_init(void)
 {
-    skbuff_cachep = kmem_cache_create(
-        "xen-skb", PAGE_SIZE, PAGE_SIZE, 0, skbuff_ctor, NULL);
-    return 0;
+       static char name[MAX_SKBUFF_ORDER + 1][20];
+       unsigned long size;
+       int order;
+
+       for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
+               size = PAGE_SIZE << order;
+               sprintf(name[order], "xen-skb-%lu", size);
+               skbuff_order_cachep[order] = kmem_cache_create(
+                       name[order], size, size, 0, skbuff_ctor, skbuff_dtor);
+       }
+
+       skbuff_cachep = skbuff_order_cachep[0];
+
+       return 0;
 }
 __initcall(skbuff_init);
+
+EXPORT_SYMBOL(__dev_alloc_skb);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
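
Not part of the patch: a minimal userspace sketch of the order
calculation that __dev_alloc_skb performs above. PAGE_SIZE,
SMP_CACHE_BYTES and the size of struct skb_shared_info are
illustrative stand-ins here, not the kernel's definitions.

#include <stdio.h>

#define PAGE_SIZE        4096UL
#define SMP_CACHE_BYTES  64UL
#define SHARED_INFO_SIZE 128UL  /* stand-in for sizeof(struct skb_shared_info) */
#define MAX_SKBUFF_ORDER 2

/* Round up to the next cacheline boundary, as SKB_DATA_ALIGN does. */
static unsigned long skb_data_align(unsigned long x)
{
	return (x + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1);
}

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static int get_order(unsigned long size)
{
	int order = 0;
	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long lengths[] = { 1500, 4000, 9000, 20000 };
	int i;

	for (i = 0; i < 4; i++) {
		/* 16 bytes of headroom, then cacheline alignment. */
		unsigned long len = skb_data_align(lengths[i] + 16);
		int order = get_order(len + SHARED_INFO_SIZE);

		if (order > MAX_SKBUFF_ORDER)
			printf("length %lu: order %d exceeds MAX_SKBUFF_ORDER\n",
			       lengths[i], order);
		else
			printf("length %lu -> xen-skb-%lu (order %d)\n",
			       lengths[i], PAGE_SIZE << order, order);
	}
	return 0;
}

For a typical 1500-byte Ethernet frame this resolves to the order-0
cache, so the common case behaves exactly as the old single-page
allocator did; only larger requests reach the higher-order caches,
whose buffers the constructor makes machine-contiguous via
xen_create_contiguous_region().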
