[Xen-devel] [PATCH v6 09/18] xen/pvcalls: implement bind command



Allocate a socket. Track each allocated passive socket with a new data
structure named sockpass_mapping. It contains an unbound workqueue used
to schedule delayed work for the accept and poll commands, and a
reqcopy field that stores a copy of a request while the delayed work
runs. Reads and writes to reqcopy are protected by the "copy_lock"
spinlock. Initialize the workqueue in pvcalls_back_bind.
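
As a minimal sketch of the intended reqcopy/copy_lock discipline (none
of this code is part of this patch, and the helper name
sketch_accept_work is made up): check the stashed command under the
lock, do the slow work outside it, then clear reqcopy.cmd so the next
request can be stashed:

/*
 * Sketch only: a later patch provides the real __pvcalls_back_accept.
 */
static void sketch_accept_work(struct work_struct *work)
{
	struct sockpass_mapping *map = container_of(work,
		struct sockpass_mapping, register_work);
	unsigned long flags;

	spin_lock_irqsave(&map->copy_lock, flags);
	if (map->reqcopy.cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&map->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&map->copy_lock, flags);

	/* ... accept the pending connection, write the response ... */

	map->reqcopy.cmd = 0;	/* ready for the next stashed request */
}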

Implement the bind command with inet_bind.
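
For context, this is roughly what the frontend is expected to put on
the ring for a bind (a hedged sketch: PVCALLS_BIND and the u.bind
fields come from the pvcalls protocol header, while req, bedata,
req_id, map, addr and addr_len are hypothetical frontend-side
variables):

	/* hypothetical frontend side: fill in a bind request */
	req = RING_GET_REQUEST(&bedata->ring, req_id);
	req->req_id = req_id;
	req->cmd = PVCALLS_BIND;
	req->u.bind.id = (uintptr_t)map;	/* opaque guest-side id */
	memcpy(req->u.bind.addr, addr, addr_len);
	req->u.bind.len = addr_len;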

The pvcalls_pass_sk_data_ready event handler is only a stub here; it
will be filled in by a later patch.
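
Once filled in, it is expected to look roughly like the following
(a sketch assuming the stash-and-kick pattern described above, not the
actual code of the later patch):

static void sketch_pass_sk_data_ready(struct sock *sk)
{
	struct sockpass_mapping *map = sk->sk_user_data;
	unsigned long flags;

	if (map == NULL)
		return;

	/* called from softirq context: check the stash under copy_lock */
	spin_lock_irqsave(&map->copy_lock, flags);
	if (map->reqcopy.cmd == PVCALLS_ACCEPT)
		/* a blocked accept is pending: complete it from the wq */
		queue_work(map->wq, &map->register_work);
	spin_unlock_irqrestore(&map->copy_lock, flags);
}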

Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
CC: boris.ostrovsky@xxxxxxxxxx
CC: jgross@xxxxxxxx
---
 drivers/xen/pvcalls-back.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+)

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 1bc2620..dae91fb 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -78,6 +78,18 @@ struct sock_mapping {
        struct pvcalls_ioworker ioworker;
 };
 
+struct sockpass_mapping {
+       struct list_head list;
+       struct pvcalls_fedata *fedata;
+       struct socket *sock;
+       uint64_t id;
+       struct xen_pvcalls_request reqcopy;
+       spinlock_t copy_lock;
+       struct workqueue_struct *wq;
+       struct work_struct register_work;
+       void (*saved_data_ready)(struct sock *sk);
+};
+
 static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
 static int pvcalls_back_release_active(struct xenbus_device *dev,
                                       struct pvcalls_fedata *fedata,
@@ -263,9 +275,84 @@ static int pvcalls_back_release(struct xenbus_device *dev,
        return 0;
 }
 
+static void __pvcalls_back_accept(struct work_struct *work)
+{
+}
+
+static void pvcalls_pass_sk_data_ready(struct sock *sock)
+{
+}
+
 static int pvcalls_back_bind(struct xenbus_device *dev,
                             struct xen_pvcalls_request *req)
 {
+       struct pvcalls_fedata *fedata;
+       int ret, err;
+       struct socket *sock;
+       struct sockpass_mapping *map;
+       struct xen_pvcalls_response *rsp;
+
+       fedata = dev_get_drvdata(&dev->dev);
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (map == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       INIT_WORK(&map->register_work, __pvcalls_back_accept);
+       spin_lock_init(&map->copy_lock);
+       map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
+       if (!map->wq) {
+               ret = -ENOMEM;
+               kfree(map);
+               goto out;
+       }
+
+       ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
+       if (ret < 0) {
+               destroy_workqueue(map->wq);
+               kfree(map);
+               goto out;
+       }
+
+       ret = inet_bind(sock, (struct sockaddr *)&req->u.bind.addr,
+                       req->u.bind.len);
+       if (ret < 0) {
+               sock_release(sock);
+               destroy_workqueue(map->wq);
+               kfree(map);
+               goto out;
+       }
+
+       map->fedata = fedata;
+       map->sock = sock;
+       map->id = req->u.bind.id;
+
+       down(&fedata->socket_lock);
+       err = radix_tree_insert(&fedata->socketpass_mappings, map->id,
+                               map);
+       up(&fedata->socket_lock);
+       if (err) {
+               ret = err;
+               sock_release(sock);
+               destroy_workqueue(map->wq);
+               kfree(map);
+               goto out;
+       }
+
+       write_lock_bh(&sock->sk->sk_callback_lock);
+       map->saved_data_ready = sock->sk->sk_data_ready;
+       sock->sk->sk_user_data = map;
+       sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
+       write_unlock_bh(&sock->sk->sk_callback_lock);
+
+out:
+       rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+       rsp->req_id = req->req_id;
+       rsp->cmd = req->cmd;
+       rsp->u.bind.id = req->u.bind.id;
+       rsp->ret = ret;
        return 0;
 }
 
-- 
1.9.1

