
[Xen-changelog] [xen staging] xen/common: Introduce _xrealloc function



commit 50879e805edb363c3cfa860af86b4b854be53641
Author:     Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
AuthorDate: Thu Sep 26 14:20:29 2019 +0300
Commit:     Julien Grall <julien.grall@xxxxxxx>
CommitDate: Thu Sep 26 14:34:36 2019 +0100

    xen/common: Introduce _xrealloc function
    
    This patch introduces a type-unsafe function which, besides
    re-allocation, handles the following corner cases:
    1. if the requested size is zero, it behaves like xfree
    2. if the incoming pointer is not valid (NULL or ZERO_BLOCK_PTR),
       it behaves like xmalloc
    
    If both the pointer and the size are valid, the function re-allocates
    and copies only when the requested size and alignment do not fit in
    the already allocated space.
    
    A subsequent patch will add type-safe helper macros.
    
    Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    [julien: cosmetic changes]
    Acked-by: Julien Grall <julien.grall@xxxxxxx>
    CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    CC: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
    CC: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
    CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
    CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
    CC: Tim Deegan <tim@xxxxxxx>
    CC: Wei Liu <wl@xxxxxxx>
    CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
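A minimal usage sketch of the behaviour described above, assuming a
hypothetical grow_buffer() helper; the helper name, its error handling
and the cache-line alignment are illustrative only and not part of this
patch:

    #include <xen/cache.h>
    #include <xen/errno.h>
    #include <xen/xmalloc.h>

    /* Grow (or create) a buffer while keeping cache-line alignment. */
    static int grow_buffer(void **buf, unsigned long new_size)
    {
        /*
         * _xrealloc() behaves like _xmalloc() when *buf is NULL or
         * ZERO_BLOCK_PTR, and like xfree() when new_size is zero
         * (returning ZERO_BLOCK_PTR in that case).
         */
        void *p = _xrealloc(*buf, new_size, SMP_CACHE_BYTES);

        if ( !p && new_size )
            return -ENOMEM; /* The original allocation is left untouched. */

        *buf = p;
        return 0;
    }
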
 xen/common/xmalloc_tlsf.c | 112 ++++++++++++++++++++++++++++++++++++++--------
 xen/include/xen/xmalloc.h |   1 +
 2 files changed, 95 insertions(+), 18 deletions(-)

diff --git a/xen/common/xmalloc_tlsf.c b/xen/common/xmalloc_tlsf.c
index 1e8d72dea2..0b92a7a7a3 100644
--- a/xen/common/xmalloc_tlsf.c
+++ b/xen/common/xmalloc_tlsf.c
@@ -549,10 +549,40 @@ static void tlsf_init(void)
  * xmalloc()
  */
 
+static void *strip_padding(void *p)
+{
+    const struct bhdr *b = p - BHDR_OVERHEAD;
+
+    if ( b->size & FREE_BLOCK )
+    {
+        p -= b->size & ~FREE_BLOCK;
+        b = p - BHDR_OVERHEAD;
+        ASSERT(!(b->size & FREE_BLOCK));
+    }
+
+    return p;
+}
+
+static void *add_padding(void *p, unsigned long align)
+{
+    unsigned int pad;
+
+    if ( (pad = -(long)p & (align - 1)) != 0 )
+    {
+        void *q = p + pad;
+        struct bhdr *b = q - BHDR_OVERHEAD;
+
+        ASSERT(q > p);
+        b->size = pad | FREE_BLOCK;
+        p = q;
+    }
+
+    return p;
+}
+
 void *_xmalloc(unsigned long size, unsigned long align)
 {
     void *p = NULL;
-    u32 pad;
 
     ASSERT(!in_irq());
 
@@ -573,14 +603,7 @@ void *_xmalloc(unsigned long size, unsigned long align)
         return xmalloc_whole_pages(size - align + MEM_ALIGN, align);
 
     /* Add alignment padding. */
-    if ( (pad = -(long)p & (align - 1)) != 0 )
-    {
-        char *q = (char *)p + pad;
-        struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);
-        ASSERT(q > (char *)p);
-        b->size = pad | FREE_BLOCK;
-        p = q;
-    }
+    p = add_padding(p, align);
 
     ASSERT(((unsigned long)p & (align - 1)) == 0);
     return p;
@@ -593,10 +616,69 @@ void *_xzalloc(unsigned long size, unsigned long align)
     return p ? memset(p, 0, size) : p;
 }
 
-void xfree(void *p)
+void *_xrealloc(void *ptr, unsigned long size, unsigned long align)
 {
-    struct bhdr *b;
+    unsigned long curr_size;
+    void *p;
+
+    if ( !size )
+    {
+        xfree(ptr);
+        return ZERO_BLOCK_PTR;
+    }
+
+    if ( ptr == NULL || ptr == ZERO_BLOCK_PTR )
+        return _xmalloc(size, align);
+
+    ASSERT(!(align & (align - 1)));
+    if ( align < MEM_ALIGN )
+        align = MEM_ALIGN;
+
+    if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
+    {
+        curr_size = (unsigned long)PFN_ORDER(virt_to_page(ptr)) << PAGE_SHIFT;
+
+        if ( size <= curr_size && !((unsigned long)ptr & (align - 1)) )
+            return ptr;
+    }
+    else
+    {
+        unsigned long tmp_size = size + align - MEM_ALIGN;
+        const struct bhdr *b;
+
+        if ( tmp_size < PAGE_SIZE )
+            tmp_size = (tmp_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE :
+                ROUNDUP_SIZE(tmp_size);
+
+        /* Strip alignment padding. */
+        p = strip_padding(ptr);
+
+        b = p - BHDR_OVERHEAD;
+        curr_size = b->size & BLOCK_SIZE_MASK;
+
+        if ( tmp_size <= curr_size )
+        {
+            /* Add alignment padding. */
+            p = add_padding(p, align);
 
+            ASSERT(!((unsigned long)p & (align - 1)));
+
+            return p;
+        }
+    }
+
+    p = _xmalloc(size, align);
+    if ( p )
+    {
+        memcpy(p, ptr, min(curr_size, size));
+        xfree(ptr);
+    }
+
+    return p;
+}
+
+void xfree(void *p)
+{
     if ( p == NULL || p == ZERO_BLOCK_PTR )
         return;
 
@@ -621,13 +703,7 @@ void xfree(void *p)
     }
 
     /* Strip alignment padding. */
-    b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
-    if ( b->size & FREE_BLOCK )
-    {
-        p = (char *)p - (b->size & ~FREE_BLOCK);
-        b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
-        ASSERT(!(b->size & FREE_BLOCK));
-    }
+    p = strip_padding(p);
 
     xmem_pool_free(p, xenpool);
 }
diff --git a/xen/include/xen/xmalloc.h b/xen/include/xen/xmalloc.h
index f075d2da91..831152f895 100644
--- a/xen/include/xen/xmalloc.h
+++ b/xen/include/xen/xmalloc.h
@@ -51,6 +51,7 @@ extern void xfree(void *);
 /* Underlying functions */
 extern void *_xmalloc(unsigned long size, unsigned long align);
 extern void *_xzalloc(unsigned long size, unsigned long align);
+extern void *_xrealloc(void *ptr, unsigned long size, unsigned long align);
 
 static inline void *_xmalloc_array(
     unsigned long size, unsigned long align, unsigned long num)
--
generated by git-patchbot for /home/xen/git/xen.git#staging
