
Re: [Xen-devel] [PATCH v1 04/13] xen/pvcalls: implement connect command



On 22/07/17 02:11, Stefano Stabellini wrote:
> Send PVCALLS_CONNECT to the backend. Allocate a new ring and evtchn for
> the active socket.
> 
> Introduce a data structure to keep track of sockets. Introduce a
> waitqueue to allow the frontend to wait on data coming from the backend
> on the active socket (recvmsg command).
> 
> Two mutexes (one for reads and one for writes) will be used to protect
> the active socket in and out rings from concurrent accesses.
> 
> sock->sk->sk_send_head is not used for ip sockets: reuse the field to
> store a pointer to the struct sock_mapping corresponding to the socket.
> This way, we can easily get the struct sock_mapping from the struct
> socket.
> 
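
Just to spell out the trick for other readers: if I understand it
correctly, later patches can then recover the mapping from a plain
struct socket with something along these lines (my sketch, not part of
this patch, untested):

	struct sock_mapping *map =
		(struct sock_mapping *)READ_ONCE(sock->sk->sk_send_head);
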
> Convert the struct socket pointer into an uint64_t and use it as id for
> the new socket to pass to the backend.
> 
> Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
> CC: boris.ostrovsky@xxxxxxxxxx
> CC: jgross@xxxxxxxx
> ---
>  drivers/xen/pvcalls-front.c | 153 ++++++++++++++++++++++++++++++++++++++++++++
>  drivers/xen/pvcalls-front.h |   2 +
>  2 files changed, 155 insertions(+)
> 
> diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
> index 7933c73..0d305e0 100644
> --- a/drivers/xen/pvcalls-front.c
> +++ b/drivers/xen/pvcalls-front.c
> @@ -13,6 +13,8 @@
>   */
>  
>  #include <linux/module.h>
> +#include <linux/net.h>
> +#include <linux/socket.h>
>  
>  #include <xen/events.h>
>  #include <xen/grant_table.h>
> @@ -20,6 +22,8 @@
>  #include <xen/xenbus.h>
>  #include <xen/interface/io/pvcalls.h>
>  
> +#include <net/sock.h>
> +
>  #define PVCALLS_INVALID_ID (UINT_MAX)
>  #define RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
>  #define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
> @@ -38,6 +42,24 @@ struct pvcalls_bedata {
>  };
>  struct xenbus_device *pvcalls_front_dev;
>  
> +struct sock_mapping {
> +     bool active_socket;
> +     struct list_head list;
> +     struct socket *sock;
> +     union {
> +             struct {
> +                     int irq;
> +                     grant_ref_t ref;
> +                     struct pvcalls_data_intf *ring;
> +                     struct pvcalls_data data;
> +                     struct mutex in_mutex;
> +                     struct mutex out_mutex;
> +
> +                     wait_queue_head_t inflight_conn_req;
> +             } active;
> +     };
> +};
> +
>  static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
>  {
>       struct xenbus_device *dev = dev_id;
> @@ -80,6 +102,18 @@ static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
>       return IRQ_HANDLED;
>  }
>  
> +static irqreturn_t pvcalls_front_conn_handler(int irq, void *sock_map)
> +{
> +     struct sock_mapping *map = sock_map;
> +
> +     if (map == NULL)
> +             return IRQ_HANDLED;
> +
> +     wake_up_interruptible(&map->active.inflight_conn_req);
> +
> +     return IRQ_HANDLED;
> +}
> +
>  int pvcalls_front_socket(struct socket *sock)
>  {
>       struct pvcalls_bedata *bedata;
> @@ -134,6 +168,125 @@ int pvcalls_front_socket(struct socket *sock)
>       return ret;
>  }
>  
> +static struct sock_mapping *create_active(int *evtchn)
> +{
> +     struct sock_mapping *map = NULL;
> +     void *bytes;
> +     int ret, irq = -1, i;
> +
> +     map = kzalloc(sizeof(*map), GFP_KERNEL);
> +     if (map == NULL)
> +             return NULL;
> +
> +     init_waitqueue_head(&map->active.inflight_conn_req);
> +
> +     map->active.ring = (struct pvcalls_data_intf *)
> +             __get_free_page(GFP_KERNEL | __GFP_ZERO);
> +     if (map->active.ring == NULL)
> +             goto out_error;
> +     memset(map->active.ring, 0, XEN_PAGE_SIZE);
> +     map->active.ring->ring_order = RING_ORDER;
> +     bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
> +                                     map->active.ring->ring_order);
> +     if (bytes == NULL)
> +             goto out_error;
> +     for (i = 0; i < (1 << map->active.ring->ring_order); i++)
> +             map->active.ring->ref[i] = gnttab_grant_foreign_access(
> +                     pvcalls_front_dev->otherend_id,
> +                     pfn_to_gfn(virt_to_pfn(bytes) + i), 0);
> +
> +     map->active.ref = gnttab_grant_foreign_access(
> +             pvcalls_front_dev->otherend_id,
> +             pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
> +
> +     ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
> +     if (ret)
> +             goto out_error;

You are leaking bytes here in case of error: if xenbus_alloc_evtchn()
fails, you take the error path before map->active.data.in has been set,
so the pages allocated for bytes are never freed.
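
One way to plug it would be to assign the data pointers before the call
that can fail, so the error path can see them, e.g. (just a sketch,
untested):

	/* set these before xenbus_alloc_evtchn() so out_error can clean up */
	map->active.data.in = bytes;
	map->active.data.out = bytes +
		XEN_FLEX_RING_SIZE(map->active.ring->ring_order);

	ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
	if (ret)
		goto out_error;

Note that bytes and the ring page come from __get_free_pages(), so the
error path should free them with free_pages()/free_page() rather than
kfree(), too.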

> +     map->active.data.in = bytes;
> +     map->active.data.out = bytes +
> +             XEN_FLEX_RING_SIZE(map->active.ring->ring_order);
> +     irq = bind_evtchn_to_irqhandler(*evtchn, pvcalls_front_conn_handler,
> +                                     0, "pvcalls-frontend", map);
> +     if (irq < 0)
> +             goto out_error;
> +
> +     map->active.irq = irq;
> +     map->active_socket = true;
> +     mutex_init(&map->active.in_mutex);
> +     mutex_init(&map->active.out_mutex);
> +
> +     return map;
> +
> +out_error:
> +     if (irq >= 0)
> +             unbind_from_irqhandler(irq, map);
> +     else if (*evtchn >= 0)
> +             xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
> +     kfree(map->active.data.in);
> +     kfree(map->active.ring);
> +     kfree(map);
> +     return NULL;
> +}
> +
> +int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
> +                             int addr_len, int flags)
> +{
> +     struct pvcalls_bedata *bedata;
> +     struct sock_mapping *map = NULL;
> +     struct xen_pvcalls_request *req;
> +     int notify, req_id, ret, evtchn;
> +
> +     if (!pvcalls_front_dev)
> +             return -ENETUNREACH;
> +     if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
> +             return -ENOTSUPP;
> +
> +     bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
> +
> +     spin_lock(&bedata->pvcallss_lock);
> +     req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
> +     BUG_ON(req_id >= PVCALLS_NR_REQ_PER_RING);
> +     if (RING_FULL(&bedata->ring) ||
> +         READ_ONCE(bedata->rsp[req_id].req_id) != PVCALLS_INVALID_ID) {
> +             spin_unlock(&bedata->pvcallss_lock);
> +             return -EAGAIN;
> +     }
> +
> +     map = create_active(&evtchn);
> +     if (!map)
> +         return -ENOMEM;

spin_unlock()? You return here with bedata->pvcallss_lock still held;
this error path needs to drop the lock before returning.
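
Something like this (untested):

	map = create_active(&evtchn);
	if (!map) {
		spin_unlock(&bedata->pvcallss_lock);
		return -ENOMEM;
	}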


Juergen

> +
> +     req = RING_GET_REQUEST(&bedata->ring, req_id);
> +     req->req_id = req_id;
> +     req->cmd = PVCALLS_CONNECT;
> +     req->u.connect.id = (uint64_t)sock;
> +     memcpy(req->u.connect.addr, addr, sizeof(*addr));
> +     req->u.connect.len = addr_len;
> +     req->u.connect.flags = flags;
> +     req->u.connect.ref = map->active.ref;
> +     req->u.connect.evtchn = evtchn;
> +     
> +     list_add_tail(&map->list, &bedata->socket_mappings);
> +     map->sock = sock;
> +     WRITE_ONCE(sock->sk->sk_send_head, (void *)map);
> +
> +     bedata->ring.req_prod_pvt++;
> +     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
> +     spin_unlock(&bedata->pvcallss_lock);
> +
> +     if (notify)
> +             notify_remote_via_irq(bedata->irq);
> +
> +     wait_event(bedata->inflight_req,
> +                READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
> +
> +     ret = bedata->rsp[req_id].ret;
> +     /* read ret, then set this rsp slot to be reused */
> +     smp_mb();
> +     WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
> +     return ret;
> +}
> +
>  static const struct xenbus_device_id pvcalls_front_ids[] = {
>       { "pvcalls" },
>       { "" }
> diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
> index b7dabed..63b0417 100644
> --- a/drivers/xen/pvcalls-front.h
> +++ b/drivers/xen/pvcalls-front.h
> @@ -4,5 +4,7 @@
>  #include <linux/net.h>
>  
>  int pvcalls_front_socket(struct socket *sock);
> +int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
> +                       int addr_len, int flags);
>  
>  #endif
> 

