
Re: [UNIKRAFT PATCH v5 5/5] lib/ukallocpool: Batched allocation



All good.

Reviewed-by: Razvan Deaconescu <razvan.deaconescu@xxxxxxxxx>

Simon Kuenzer <simon.kuenzer@xxxxxxxxx> writes:
> Introduce low-overhead interfaces for batched allocations and batched
> freeing of objects.
>
> Signed-off-by: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>
> ---
>  lib/ukallocpool/exportsyms.uk          |  2 ++
>  lib/ukallocpool/include/uk/allocpool.h | 30 ++++++++++++++++++++++++++
>  lib/ukallocpool/pool.c                 | 29 +++++++++++++++++++++++++
>  3 files changed, 61 insertions(+)
>
> diff --git a/lib/ukallocpool/exportsyms.uk b/lib/ukallocpool/exportsyms.uk
> index 68c16ebe..0f4c3de4 100644
> --- a/lib/ukallocpool/exportsyms.uk
> +++ b/lib/ukallocpool/exportsyms.uk
> @@ -5,5 +5,7 @@ uk_allocpool_reqmem
>  uk_allocpool_availcount
>  uk_allocpool_objlen
>  uk_allocpool_take
> +uk_allocpool_take_batch
>  uk_allocpool_return
> +uk_allocpool_return_batch
>  uk_allocpool2ukalloc
> diff --git a/lib/ukallocpool/include/uk/allocpool.h b/lib/ukallocpool/include/uk/allocpool.h
> index 1f6747f1..55bccb86 100644
> --- a/lib/ukallocpool/include/uk/allocpool.h
> +++ b/lib/ukallocpool/include/uk/allocpool.h
> @@ -157,6 +157,22 @@ size_t uk_allocpool_objlen(struct uk_allocpool *p);
>   */
>  void *uk_allocpool_take(struct uk_allocpool *p);
>  
> +/**
> + * Get multiple objects from a pool.
> + *
> + * @param p
> + *  Pointer to memory pool.
> + * @param obj
> + *  Pointer to an array that will be filled with pointers to the
> + *  objects allocated from the pool.
> + * @param count
> + *  Maximum number of objects that should be taken from the pool.
> + * @return
> + *  Number of successfully allocated objects placed in the given array.
> + */
> +unsigned int uk_allocpool_take_batch(struct uk_allocpool *p,
> +                                  void *obj[], unsigned int count);
> +
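
As a side note for readers of the archive, here is a minimal usage
sketch of the take path (purely illustrative; `pool' stands for a pool
handle obtained earlier from the pool creation API, and the batch size
of 32 is arbitrary):

    void *objs[32];
    unsigned int filled;

    /* Try to take up to 32 objects from the pool in one call. */
    filled = uk_allocpool_take_batch(pool, objs, 32);

    /* `filled' may be less than 32 if the pool ran short of free
     * objects; only objs[0] .. objs[filled - 1] are valid. */
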
>  /**
>   * Return one object back to a pool.
>   * HINT: It is recommended to use this call instead of uk_free() whenever
> @@ -169,6 +185,20 @@ void *uk_allocpool_take(struct uk_allocpool *p);
>   */
>  void uk_allocpool_return(struct uk_allocpool *p, void *obj);
>  
> +/**
> + * Return multiple objects to a pool.
> + *
> + * @param p
> + *  Pointer to memory pool.
> + * @param obj
> + *  Pointer to an array with pointers to the objects that
> + *  should be returned.
> + * @param count
> + *  Number of objects in the array.
> + */
> +void uk_allocpool_return_batch(struct uk_allocpool *p,
> +                            void *obj[], unsigned int count);
> +
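
...and the matching return path, continuing the sketch above:

    /* Hand all previously taken objects back in one call. Every
     * pointer in objs[] must originate from this pool. */
    uk_allocpool_return_batch(pool, objs, filled);
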
>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/ukallocpool/pool.c b/lib/ukallocpool/pool.c
> index a7c3acf0..390f6b92 100644
> --- a/lib/ukallocpool/pool.c
> +++ b/lib/ukallocpool/pool.c
> @@ -172,6 +172,23 @@ void *uk_allocpool_take(struct uk_allocpool *p)
>       return _take_free_obj(p);
>  }
>  
> +unsigned int uk_allocpool_take_batch(struct uk_allocpool *p,
> +                                  void *obj[], unsigned int count)
> +{
> +     unsigned int i;
> +
> +     UK_ASSERT(p);
> +     UK_ASSERT(obj);
> +
> +     for (i = 0; i < count; ++i) {
> +             if (unlikely(uk_list_empty(&p->free_obj)))
> +                     break;
> +             obj[i] = _take_free_obj(p);
> +     }
> +
> +     return i;
> +}
> +
>  void uk_allocpool_return(struct uk_allocpool *p, void *obj)
>  {
>       UK_ASSERT(p);
> @@ -179,6 +196,18 @@ void uk_allocpool_return(struct uk_allocpool *p, void *obj)
>       _prepend_free_obj(p, obj);
>  }
>  
> +void uk_allocpool_return_batch(struct uk_allocpool *p,
> +                            void *obj[], unsigned int count)
> +{
> +     unsigned int i;
> +
> +     UK_ASSERT(p);
> +     UK_ASSERT(obj);
> +
> +     for (i = 0; i < count; ++i)
> +             _prepend_free_obj(p, obj[i]);
> +}
> +
>  #if CONFIG_LIBUKALLOC_IFSTATS
>  static ssize_t pool_availmem(struct uk_alloc *a)
>  {