
Re: [UNIKRAFT PATCH v3 3/5] lib/ukring: Adapt {ring.c, ring.h} to Unikraft



Hi Alexander,

I'm going to fix the uk_preempt_*() calls when upstreaming: the patch
currently has the two calls swapped, while the correct mapping from the
FreeBSD primitives is:

critical_enter -> uk_preempt_disable
critical_exit  -> uk_preempt_enable
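
As a rough sketch of what that means for e.g. buf_ring_enqueue()
(identifiers as in the patch below): preemption must be disabled across
the lock-free update and re-enabled on every exit path, including the
early -ENOBUFS return:

  uk_preempt_disable();                    /* was: critical_enter() */
  do {
          prod_head = br->br_prod_head;
          prod_next = (prod_head + 1) & br->br_prod_mask;
          /* full-ring check goes here; call uk_preempt_enable()
           * before the early return */
  } while (!ukarch_compare_exchange_sync(&br->br_prod_head, prod_head,
                                         prod_next));
  br->br_ring[prod_head] = buf;
  /* wait for other producers to complete, then publish prod_next */
  uk_preempt_enable();                     /* was: critical_exit() */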

Reviewed-by: Costin Lupu <costin.lupu@xxxxxxxxx>

On 7/23/20 3:49 PM, Alexander Jung wrote:
> This includes Unikraft-centric headers and types.
> 
> Signed-off-by: Alexander Jung <alexander.jung@xxxxxxxxx>
> ---
>  lib/ukring/include/uk/ring.h | 101 +++++++++++++++++++++++--------------------
>  lib/ukring/ring.c            |  37 ++++++++--------
>  2 files changed, 72 insertions(+), 66 deletions(-)
> 
> diff --git a/lib/ukring/include/uk/ring.h b/lib/ukring/include/uk/ring.h
> index abc95fe..6bc789d 100644
> --- a/lib/ukring/include/uk/ring.h
> +++ b/lib/ukring/include/uk/ring.h
> @@ -32,12 +32,16 @@
>  #ifndef _SYS_BUF_RING_H_
>  #define _SYS_BUF_RING_H_
>  
> -#include <machine/cpu.h>
> +#include <errno.h>
> +#include <uk/mutex.h>
> +#include <uk/print.h>
> +#include <uk/config.h>
> +#include <uk/assert.h>
> +#include <uk/plat/lcpu.h>
> +#include <uk/arch/atomic.h>
> +#include <uk/essentials.h>
> +#include <uk/preempt.h>
>  
> -#ifdef DEBUG_BUFRING
> -#include <sys/lock.h>
> -#include <sys/mutex.h>
> -#endif
>  
>  
>  struct buf_ring {
> @@ -50,8 +54,8 @@ struct buf_ring {
>       volatile uint32_t br_cons_tail;
>       int               br_cons_size;
>       int               br_cons_mask;
> -#ifdef DEBUG_BUFRING
> -     struct mtx       *br_lock;
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
> +     struct uk_mutex  *br_lock;
>  #endif
>       void             *br_ring[0] __aligned(CACHE_LINE_SIZE);
>  };
> @@ -66,7 +70,7 @@ buf_ring_enqueue(struct buf_ring *br, void *buf)
>  {
>       uint32_t prod_head, prod_next, cons_tail;
>  
> -#ifdef DEBUG_BUFRING
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
>       int i;
>  
>       /*
> @@ -77,11 +81,11 @@ buf_ring_enqueue(struct buf_ring *br, void *buf)
>       for (i = br->br_cons_head; i != br->br_prod_head;
>                        i = ((i + 1) & br->br_cons_mask))
>               if (br->br_ring[i] == buf)
> -                     panic("buf=%p already enqueue at %d prod=%d cons=%d",
> +                     UK_CRASH("buf=%p already enqueue at %d prod=%d cons=%d",
>                                       buf, i, br->br_prod_tail, br->br_cons_tail);
>  #endif
>  
> -     critical_enter();
> +     uk_preempt_enable();
>  
>       do {
>               prod_head = br->br_prod_head;
> @@ -92,16 +96,17 @@ buf_ring_enqueue(struct buf_ring *br, void *buf)
>                       rmb();
>                       if (prod_head == br->br_prod_head && cons_tail == br->br_cons_tail) {
>                               br->br_drops++;
> -                             critical_exit();
> +                             uk_preempt_disable();
>                               return -ENOBUFS;
>                       }
>                       continue;
>               }
> -     } while (!atomic_cmpset_acq_int(&br->br_prod_head, prod_head, prod_next));
> +     } while (!ukarch_compare_exchange_sync(&br->br_prod_head, prod_head,
> +                                     prod_next));
>  
> -#ifdef DEBUG_BUFRING
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
>       if (br->br_ring[prod_head] != NULL)
> -             panic("dangling value in enqueue");
> +             UK_CRASH("dangling value in enqueue");
>  #endif
>  
>       br->br_ring[prod_head] = buf;
> @@ -112,10 +117,10 @@ buf_ring_enqueue(struct buf_ring *br, void *buf)
>        * to complete 
>        */
>       while (br->br_prod_tail != prod_head)
> -             cpu_spinwait();
> +             ukarch_spinwait();
>  
> -     atomic_store_rel_int(&br->br_prod_tail, prod_next);
> -     critical_exit();
> +     ukarch_store_n(&br->br_prod_tail, prod_next);
> +     uk_preempt_disable();
>  
>       return 0;
>  }
> @@ -130,21 +135,22 @@ buf_ring_dequeue_mc(struct buf_ring *br)
>       uint32_t cons_head, cons_next;
>       void *buf;
>  
> -     critical_enter();
> +     uk_preempt_enable();
>  
>       do {
>               cons_head = br->br_cons_head;
>               cons_next = (cons_head + 1) & br->br_cons_mask;
>  
>               if (cons_head == br->br_prod_tail) {
> -                     critical_exit();
> +                     uk_preempt_disable();
>                       return NULL;
>               }
> -     } while (!atomic_cmpset_acq_int(&br->br_cons_head, cons_head, cons_next));
> +     } while (!ukarch_compare_exchange_sync(&br->br_cons_head, cons_head,
> +                                     cons_next));
>  
>       buf = br->br_ring[cons_head];
>  
> -#ifdef DEBUG_BUFRING
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
>       br->br_ring[cons_head] = NULL;
>  #endif
>  
> @@ -154,10 +160,10 @@ buf_ring_dequeue_mc(struct buf_ring *br)
>        * to complete
>        */
>       while (br->br_cons_tail != cons_head)
> -             cpu_spinwait();
> +             ukarch_spinwait();
>  
> -     atomic_store_rel_int(&br->br_cons_tail, cons_next);
> -     critical_exit();
> +     ukarch_store_n(&br->br_cons_tail, cons_next);
> +     uk_preempt_disable();
>  
>       return buf;
>  }
> @@ -201,12 +207,12 @@ buf_ring_dequeue_sc(struct buf_ring *br)
>        *
>        * <1> Load (on core 1) from br->br_ring[cons_head] can be reordered (speculative readed) by CPU.
>        */
> -#if defined(__arm__) || defined(__aarch64__)
> -     cons_head = atomic_load_acq_32(&br->br_cons_head);
> +#if defined(CONFIG_ARCH_ARM_32) || defined(CONFIG_ARCH_ARM_64)
> +     cons_head = ukarch_load_n(&br->br_cons_head);
>  #else
>       cons_head = br->br_cons_head;
>  #endif
> -     prod_tail = atomic_load_acq_32(&br->br_prod_tail);
> +     prod_tail = ukarch_load_n(&br->br_prod_tail);
>  
>       cons_next = (cons_head + 1) & br->br_cons_mask;
>  #ifdef PREFETCH_DEFINED
> @@ -227,14 +233,14 @@ buf_ring_dequeue_sc(struct buf_ring *br)
>       br->br_cons_head = cons_next;
>       buf = br->br_ring[cons_head];
>  
> -#ifdef DEBUG_BUFRING
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
>       br->br_ring[cons_head] = NULL;
>  
> -     if (!mtx_owned(br->br_lock))
> -             panic("lock not held on single consumer dequeue");
> +     if (!uk_mutex_is_locked(br->br_lock))
> +             UK_CRASH("lock not held on single consumer dequeue: %d", br->br_lock->lock_count);
>  
>       if (br->br_cons_tail != cons_head)
> -             panic("inconsistent list cons_tail=%d cons_head=%d",
> +             UK_CRASH("inconsistent list cons_tail=%d cons_head=%d",
>                               br->br_cons_tail, cons_head);
>  #endif
>  
> @@ -262,7 +268,7 @@ buf_ring_advance_sc(struct buf_ring *br)
>  
>       br->br_cons_head = cons_next;
>  
> -#ifdef DEBUG_BUFRING
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
>       br->br_ring[cons_head] = NULL;
>  #endif
>  
> @@ -288,8 +294,8 @@ buf_ring_advance_sc(struct buf_ring *br)
>  static __inline void
>  buf_ring_putback_sc(struct buf_ring *br, void *new)
>  {
> -     KASSERT(br->br_cons_head != br->br_prod_tail,
> -             ("Buf-Ring has none in putback"));
> +     /* Buffer ring has none in putback */
> +     UK_ASSERT(br->br_cons_head != br->br_prod_tail);
>       br->br_ring[br->br_cons_head] = new;
>  }
>  
> @@ -301,9 +307,9 @@ buf_ring_putback_sc(struct buf_ring *br, void *new)
>  static __inline void *
>  buf_ring_peek(struct buf_ring *br)
>  {
> -#ifdef DEBUG_BUFRING
> -     if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
> -             panic("lock not held on single consumer dequeue");
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
> +     if (!uk_mutex_is_locked(br->br_lock))
> +             UK_CRASH("lock not held on single consumer dequeue");
>  #endif
>  
>       /*
> @@ -321,17 +327,17 @@ buf_ring_peek(struct buf_ring *br)
>  static __inline void *
>  buf_ring_peek_clear_sc(struct buf_ring *br)
>  {
> -#ifdef DEBUG_BUFRING
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
>       void *ret;
>  
> -     if (!mtx_owned(br->br_lock))
> -             panic("lock not held on single consumer dequeue");
> +     if (!uk_mutex_is_locked(br->br_lock))
> +             UK_CRASH("lock not held on single consumer dequeue");
>  #endif
>  
>       if (br->br_cons_head == br->br_prod_tail)
>               return NULL;
>  
> -#if defined(__arm__) || defined(__aarch64__)
> +#if defined(CONFIG_ARCH_ARM_32) || defined(CONFIG_ARCH_ARM_64)
>       /*
>        * The barrier is required there on ARM and ARM64 to ensure, that
>        * br->br_ring[br->br_cons_head] will not be fetched before the above
> @@ -342,10 +348,11 @@ buf_ring_peek_clear_sc(struct buf_ring *br)
>        * conditional check will be true, so we will return previously fetched
>        * (and invalid) buffer.
>        */
> -     atomic_thread_fence_acq();
> +     #error "unsupported: atomic_thread_fence_acq()"
> +     /* atomic_thread_fence_acq(); */
>  #endif
>  
> -#ifdef DEBUG_BUFRING
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
>       /*
>        * Single consumer, i.e. cons_head will not move while we are
>        * running, so atomic_swap_ptr() is not necessary here.
> @@ -378,8 +385,8 @@ buf_ring_count(struct buf_ring *br)
>                       & br->br_prod_mask;
>  }
>  
> -struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
> -             struct mtx *);
> -void buf_ring_free(struct buf_ring *br, struct malloc_type *type);
> +struct buf_ring *buf_ring_alloc(int count, struct uk_alloc *a, int flags,
> +             struct uk_mutex *lock);
> +void buf_ring_free(struct buf_ring *br, struct uk_alloc *a);
>  
> -#endif
> \ No newline at end of file
> +#endif
> diff --git a/lib/ukring/ring.c b/lib/ukring/ring.c
> index e327bd7..2badb79 100644
> --- a/lib/ukring/ring.c
> +++ b/lib/ukring/ring.c
> @@ -26,30 +26,29 @@
>   * SUCH DAMAGE.
>   */
>  
> -#include <sys/cdefs.h>
> -__FBSDID("$FreeBSD$");
> -
> -#include <sys/param.h>
> -#include <sys/systm.h>
> -#include <sys/kernel.h>
> -#include <sys/malloc.h>
> -#include <sys/ktr.h>
> -#include <sys/buf_ring.h>
> +#include <uk/ring.h>
> +#include <uk/assert.h>
> +#include <uk/alloc.h>
> +#include <uk/mutex.h>
> +#include <uk/config.h>
> +#include <uk/print.h>
> +#include <uk/essentials.h>
>  
>  struct buf_ring *
> -buf_ring_alloc(int count, struct malloc_type *type, int flags, struct mtx *lock)
> +buf_ring_alloc(int count, struct uk_alloc *a, int flags, struct uk_mutex *lock)
>  {
>       struct buf_ring *br;
>  
> -     KASSERT(powerof2(count), ("buf ring must be size power of 2"));
> -
> -     br = malloc(sizeof(struct buf_ring) + count*sizeof(caddr_t),
> -                     type, flags|M_ZERO);
> +     /* buf ring must be size power of 2 */
> +     UK_ASSERT(POWER_OF_2(count));
>  
> -     if (br == NULL)
> +     br = uk_malloc(a, sizeof(struct buf_ring) + count * sizeof(caddr_t));
> +     if (br == NULL) {
> +             uk_pr_err("could not allocate ring\n");
>               return NULL;
> +     }
>  
> -#ifdef DEBUG_BUFRING
> +#ifdef CONFIG_LIBUKRING_DEBUG_BUFRING
>       br->br_lock = lock;
>  #endif
>  
> @@ -62,7 +61,7 @@ buf_ring_alloc(int count, struct malloc_type *type, int flags, struct mtx *lock)
>  }
>  
>  void
> -buf_ring_free(struct buf_ring *br, struct malloc_type *type)
> +buf_ring_free(struct buf_ring *br, struct uk_alloc *a)
>  {
> -     free(br, type);
> -}
> \ No newline at end of file
> +     uk_free(a, br);
> +}
> 
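
As a side note for future users of the library, here is a minimal,
hypothetical usage sketch against the API as adapted by this patch. The
function name, the count of 64 and the use of the default allocator are
illustrative only, and the mutex argument (NULL here) is only consulted
by the CONFIG_LIBUKRING_DEBUG_BUFRING build:

  #include <errno.h>
  #include <uk/ring.h>
  #include <uk/alloc.h>
  #include <uk/assert.h>
  #include <uk/print.h>

  static int ring_smoke_test(void *item)
  {
          struct buf_ring *br;
          void *obj;

          /* count must be a power of 2 (asserted by buf_ring_alloc()) */
          br = buf_ring_alloc(64, uk_alloc_get_default(), 0, NULL);
          if (br == NULL)
                  return -ENOMEM;

          if (buf_ring_enqueue(br, item) == -ENOBUFS)
                  uk_pr_warn("ring full\n");

          obj = buf_ring_dequeue_mc(br);  /* multi-consumer dequeue */
          UK_ASSERT(obj == item);

          buf_ring_free(br, uk_alloc_get_default());

          return 0;
  }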