
Re: [Xen-devel] [PATCH 3/4] Support accelerated network plugin modules



On Tue, May 08, 2007 at 10:55:09AM +0100, Kieran Mansley wrote:
> Add hooks in the netfront driver to allow an accelerated plugin module
> to attach and provide a fast path for network traffic in the presence of
> a virtualisable NIC.
> 
> Signed-off-by: Kieran Mansley <kmansley@xxxxxxxxxxxxxx>

> Frontend net driver acceleration
> 
> diff -r 325afaed01ff linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
> --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c    Tue Apr 17 
> 09:07:31 2007 +0100
> +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c    Tue Apr 17 
> 09:13:05 2007 +0100
> @@ -3,6 +3,7 @@
>   *
>   * Copyright (c) 2002-2005, K A Fraser
>   * Copyright (c) 2005, XenSource Ltd
> + * Copyright (C) 2007 Solarflare Communications, Inc.
>   *
>   * This program is free software; you can redistribute it and/or
>   * modify it under the terms of the GNU General Public License version 2
> @@ -67,6 +68,8 @@
>  #include <xen/platform-compat.h>
>  #endif
>  
> +#include "netfront.h"
> +
>  /*
>   * Mutually-exclusive module options to select receive data path:
>   *  rx_copy : Packets are copied by network backend into local memory
> @@ -137,57 +140,6 @@ static inline int netif_needs_gso(struct
>  
>  #define GRANT_INVALID_REF    0
>  
> -#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
> -#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
> -
> -struct netfront_info {
> -     struct list_head list;
> -     struct net_device *netdev;
> -
> -     struct net_device_stats stats;
> -
> -     struct netif_tx_front_ring tx;
> -     struct netif_rx_front_ring rx;
> -
> -     spinlock_t   tx_lock;
> -     spinlock_t   rx_lock;
> -
> -     unsigned int irq;
> -     unsigned int copying_receiver;
> -     unsigned int carrier;
> -
> -     /* Receive-ring batched refills. */
> -#define RX_MIN_TARGET 8
> -#define RX_DFL_MIN_TARGET 64
> -#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
> -     unsigned rx_min_target, rx_max_target, rx_target;
> -     struct sk_buff_head rx_batch;
> -
> -     struct timer_list rx_refill_timer;
> -
> -     /*
> -      * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
> -      * is an index into a chain of free entries.
> -      */
> -     struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
> -     struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
> -
> -#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
> -     grant_ref_t gref_tx_head;
> -     grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
> -     grant_ref_t gref_rx_head;
> -     grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
> -
> -     struct xenbus_device *xbdev;
> -     int tx_ring_ref;
> -     int rx_ring_ref;
> -     u8 mac[ETH_ALEN];
> -
> -     unsigned long rx_pfn_array[NET_RX_RING_SIZE];
> -     struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
> -     struct mmu_update rx_mmu[NET_RX_RING_SIZE];
> -};
> -
>  struct netfront_rx_info {
>       struct netif_rx_response rx;
>       struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
> @@ -271,6 +223,275 @@ static void xennet_sysfs_delif(struct ne
>  #define xennet_sysfs_delif(dev) do { } while(0)
>  #endif
>  
> +static struct netfront_accelerator *accelerators = NULL;
> +static spinlock_t accelerators_lock;
> +
> +static int match_accelerator(const char *frontend, 
> +                             struct netfront_accelerator *accelerator)
> +{
> +        return strcmp(frontend, accelerator->frontend) == 0;
> +}
> +
> +
> +static void add_accelerator_vif(struct netfront_accelerator *accelerator,
> +                                struct netfront_info *np,
> +                                struct xenbus_device *dev)
> +{
> +        unsigned flags;
> +        spin_lock_irqsave(&np->accelerator_lock, flags);
> +        np->accelerator = accelerator;
> +        np->accel_vif_state.np = np;
> +        np->accel_vif_state.dev = dev;
> +        np->accel_vif_state.hooks = accelerator->hooks;
> +        np->accel_vif_state.next = accelerator->vif_states;
> +        accelerator->vif_states = &np->accel_vif_state;
> +        spin_unlock_irqrestore(&np->accelerator_lock, flags);
> +}

Is there a reason not to use the standard Linux list.h here rather than
implementing your own linked list?
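
Something along these lines (untested sketch; the list head and 'link' member
names are just illustrative) would let you drop the hand-rolled next pointers:

#include <linux/list.h>

struct netfront_accelerator {
        struct list_head link;       /* entry in the global list of accelerators */
        struct list_head vif_states; /* per-accelerator list of vif states */
        char *frontend;
        struct netfront_accel_hooks *hooks;
};

static LIST_HEAD(accelerators_list);

/* Caller holds accelerators_lock */
static struct netfront_accelerator *find_accelerator(const char *frontend)
{
        struct netfront_accelerator *accelerator;

        list_for_each_entry(accelerator, &accelerators_list, link) {
                if (match_accelerator(frontend, accelerator))
                        return accelerator;
        }
        return NULL;
}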

> +static int init_accelerator(struct netfront_info *np, struct xenbus_device 
> *dev, 
> +                            const char *frontend)
> +{
> +        struct netfront_accelerator *accelerator = 
> +                kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL);
> +
> +        if(!accelerator){
> +                DPRINTK("%s: no memory for accelerator", __FUNCTION__);
> +                return -ENOMEM;
> +        }
> +
> +        accelerator->frontend = kmalloc(strlen(frontend), GFP_KERNEL);
> +        if(!accelerator->frontend){
> +                DPRINTK("%s: no memory for accelerator", __FUNCTION__);
> +                kfree(accelerator);
> +                return -ENOMEM;
> +        }
> +        strcpy(accelerator->frontend, frontend);

You're not mallocing enough space for the terminating NUL. Use strlcpy?
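
For example (untested), either allocate the extra byte, or use kstrdup()
which does the allocation and copy in one go:

        /* Option 1: allocate space for the terminating NUL as well */
        accelerator->frontend = kmalloc(strlen(frontend) + 1, GFP_KERNEL);
        if (!accelerator->frontend) {
                kfree(accelerator);
                return -ENOMEM;
        }
        strlcpy(accelerator->frontend, frontend, strlen(frontend) + 1);

        /* Option 2: kstrdup() allocates and copies, NUL included */
        accelerator->frontend = kstrdup(frontend, GFP_KERNEL);
        if (!accelerator->frontend) {
                kfree(accelerator);
                return -ENOMEM;
        }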

> +
> +        accelerator->vif_states = NULL;
> +        accelerator->hooks = NULL;
> +
> +        accelerator->next = accelerators;
> +        
> +        accelerators = accelerator;

Shouldn't the list be protected here? Also, better naming would be
appreciated - it's hard to differentiate between 'accelerator' and
'accelerators' at first glance.

> +
> +        if(np){
> +                add_accelerator_vif(accelerator, np, dev);
> +        }

Coding style - no braces for a single line
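
i.e.:

        if (np)
                add_accelerator_vif(accelerator, np, dev);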

> +        return 0;
> +}                                        
> +
> +
> +static int netfront_load_accelerator(struct netfront_info *np, 
> +                                     struct xenbus_device *dev, 
> +                                     const char *frontend)
> +{
> +        struct netfront_accelerator *accelerator = accelerators;
> +        int rc;
> +        unsigned flags;
> +
> +        spin_lock_irqsave(&accelerators_lock, flags);
> +
> +        /* Look at list of loaded accelerators to see if the requested
> +           one is already there */
> +        while(accelerator != NULL){
> +                if(match_accelerator(frontend, accelerator)){
> +                        /* Already know about it, already loaded, but
> +                           these details weren't known at the time */
> +                        if(accelerator->hooks == NULL)
> +                                DPRINTK("%s: no hooks set", __FUNCTION__);
> +                        else
> +                                accelerator->hooks->new_device(np->netdev, dev);

It would be better to call the hook without holding the lock.
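
Rough idea (untested sketch), inside the match_accelerator() branch: grab
what you need while the lock is held, drop it, and only then call into the
plugin:

                        struct netfront_accel_hooks *hooks = accelerator->hooks;

                        /* Register the vif while still under the lock... */
                        add_accelerator_vif(accelerator, np, dev);
                        spin_unlock_irqrestore(&accelerators_lock, flags);

                        /* ...but only call into the plugin once the lock is dropped */
                        if (hooks)
                                hooks->new_device(np->netdev, dev);
                        else
                                DPRINTK("%s: no hooks set\n", __FUNCTION__);
                        return 0;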

> +                        /* Tell accelerator about this frontend device */
> +                        add_accelerator_vif(accelerator, np, dev);
> +                        spin_unlock_irqrestore(&accelerators_lock, flags);
> +                        return 0;
> +                }
> +
> +                accelerator = accelerator->next;
> +        }
> +
> +        /* Couldn't find it, so create a new one and load the module */
> +        if((rc = init_accelerator(np, dev, frontend)) < 0) {
> +                spin_unlock_irqrestore(&accelerators_lock, flags);
> +                return rc;
> +        }

Coding style - 'if ((' - separate the assignment and the test.
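
i.e.:

        rc = init_accelerator(np, dev, frontend);
        if (rc < 0) {
                spin_unlock_irqrestore(&accelerators_lock, flags);
                return rc;
        }
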
> +
> +        spin_unlock_irqrestore(&accelerators_lock, flags);
> +
> +        DPRINTK("%s: loading module %s\n", __FUNCTION__, frontend);
> +
> +        /* load acceleration module */
> +        request_module("%s", frontend);
> +
> +        /* Module should now call netfront_accelerator_loaded() once
> +           it's up and running, and we can continue from there */
> +
> +        return 0;
> +}
> +
> +
> +static void accelerator_set_hooks(struct netfront_accelerator *accelerator,
> +                                  struct netfront_accel_hooks *hooks)
> +{
> +        struct netfront_accel_vif_state *accel_vif_state;
> +        unsigned flags;
> +
> +        DPRINTK("%s: %p %p\n", __FUNCTION__, accelerator, hooks);
> +
> +        accelerator->hooks = hooks;
> +
> +        accel_vif_state = accelerator->vif_states;
> +        while(accel_vif_state != NULL) {
> +                struct netfront_info *np = accel_vif_state->np;
> +                hooks->new_device(np->netdev, accel_vif_state->dev);
> +                spin_lock_irqsave(&np->accelerator_lock, flags);

This is called with the accelerators_lock held (from
netfront_accelerator_loaded) - is the lock ordering guaranteed?

> +                accel_vif_state->hooks = hooks;
> +                spin_unlock_irqrestore(&np->accelerator_lock, flags);
> +
> +                accel_vif_state = accel_vif_state->next;
> +        }
> +}
> +
> +
> +/* Called by the accelerator once it's ready for action */
> +int netfront_accelerator_loaded(const char *frontend, 
> +                                struct netfront_accel_hooks *hooks)
> +{
> +        /* Tell accelerator about the frontend device */
> +        struct netfront_accelerator *accelerator = accelerators;
> +        unsigned flags;
> +
> +        spin_lock_irqsave(&accelerators_lock, flags);
> +
> +        /* Look through list of accelerators to see if it has already
> +           been requested */
> +        while(accelerator != NULL){
> +                if(match_accelerator(frontend, accelerator)){
> +                        accelerator_set_hooks(accelerator, hooks);
> +                        spin_unlock_irqrestore(&accelerators_lock, flags);
> +                        return 0;
> +                }
> +
> +                accelerator = accelerator->next;
> +        }
> +
> +        /* If it wasn't in the list, add it now so that when it is
> +           requested the caller will find it */
> +        if(accelerator == NULL){
> +                DPRINTK("%s: Couldn't find matching accelerator (%s)\n",
> +                        __FUNCTION__, frontend);
> +                init_accelerator(NULL, NULL, frontend);
> +        }
> +
> +        spin_unlock_irqrestore(&accelerators_lock, flags);
> +
> +        return 0;
> +}
> +EXPORT_SYMBOL_GPL(netfront_accelerator_loaded);
> +
> +
> +void accelerator_disconnect_vif(struct netfront_accel_vif_state *vif_state)
> +{
> +        struct netfront_info *np = vif_state->np;
> +        unsigned flags;
> +
> +        if(np) {
> +                /* Spin lock doesn't protect poll, so
> +                   make sure there's none of those
> +                   going on */
> +                netif_poll_disable(np->netdev);
> +                /* Likewise for xmit */
> +                netif_tx_disable(np->netdev);
> +                
> +                spin_lock_irqsave(&np->accelerator_lock, flags);
> +                np->accel_vif_state.hooks = NULL;
> +                spin_unlock_irqrestore(&np->accelerator_lock, flags);
> +                
> +                netif_wake_queue(np->netdev);
> +                netif_poll_enable(np->netdev);
> +        }
> +       
> +}
> +
> +
> +void netfront_accelerator_unloaded(const char *frontend)
> +{
> +        /* Tell accelerator about the frontend device */
> +        struct netfront_accelerator *accelerator = accelerators;
> +        struct netfront_accelerator *prev = NULL;
> +        unsigned flags;
> +
> +        spin_lock_irqsave(&accelerators_lock, flags);
> +
> +        while(accelerator != NULL){
> +                if(match_accelerator(frontend, accelerator)){
> +                        struct netfront_accel_vif_state *vif_state 
> +                                = accelerator->vif_states;
> +
> +                        accelerator->hooks = NULL;
> +
> +                        spin_unlock_irqrestore(&accelerators_lock, flags);
> +                                
> +                        while(vif_state != NULL){
> +                                accelerator_disconnect_vif(vif_state);
> +                                vif_state = vif_state->next;
> +                        }
> +                        return;
> +                }
> +
> +                prev = accelerator;

accelerator is never advanced to the next entry here??? If accelerator is
not NULL and match_accelerator() fails, we'll go into an infinite loop. I
strongly recommend you use the standard list macros
(list_for_each...).

> +        }
> +        spin_unlock_irqrestore(&accelerators_lock, flags);
> +}
> +EXPORT_SYMBOL_GPL(netfront_accelerator_unloaded);
> +
> +
> +#define NETFRONT_CALL_ACCELERATOR_HOOK(_np, _hook, _args...)            \
> +        do {                                                            \
> +                if((_np)->accelerator && (_np)->accel_vif_state.hooks)  \
> +                        (_np)->accel_vif_state.hooks->_hook(_args);     \
> +        } while(0)
> +
> +
> +#define NETFRONT_LOCK_AND_CALL_ACCELERATOR_HOOK(_np, _hook, _args...)   \
> +        do {                                                            \
> +                unsigned _flags;                                        \
> +                spin_lock_irqsave(&(_np)->accelerator_lock, _flags);    \
> +                if((_np)->accelerator && (_np)->accel_vif_state.hooks)  \
> +                        (_np)->accel_vif_state.hooks->_hook(_args);     \
> +                spin_unlock_irqrestore(&(_np)->accelerator_lock, _flags); \
> +        } while(0)

Please get rid of these macros - it's not a lot of code to duplicate, and
open-coding it makes it obvious what's going on.
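
E.g., open-coded at a call site (sketch; 'some_hook' is just a stand-in for
whichever hook is being invoked there):

        unsigned long flags;

        spin_lock_irqsave(&np->accelerator_lock, flags);
        if (np->accelerator && np->accel_vif_state.hooks)
                np->accel_vif_state.hooks->some_hook(np->netdev);  /* placeholder hook */
        spin_unlock_irqrestore(&np->accelerator_lock, flags);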

> +
> +
> +int netfront_schedule_poll(struct net_device *dev)
> +{
> +        /* TODO do we need to protect this with any netfront locks? */
> +        netif_rx_schedule(dev);
> +        return 0;
> +}
> +EXPORT_SYMBOL_GPL(netfront_schedule_poll);
> +
> +
> +int netfront_stop_queue(struct net_device *dev)
> +{
> +        netif_stop_queue(dev);
> +        return 0;
> +}
> +EXPORT_SYMBOL_GPL(netfront_stop_queue);
> +
> +
> +int netfront_wake_queue(struct net_device *dev)
> +{
> +        netif_wake_queue(dev);
> +        return 0;
> +}
> +EXPORT_SYMBOL_GPL(netfront_wake_queue);

These look like pointless wrappers *if* we don't need to do anything
netfront-specific in them.

I'll review the rest once you repost the patches.

Cheers,
Muli


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

