Re: [Xen-devel] [PATCH v6 09/18] xen/pvcalls: implement bind command
On Tue, 4 Jul 2017, Juergen Gross wrote:
> On 03/07/17 23:08, Stefano Stabellini wrote:
> > Allocate a socket. Track the allocated passive sockets with a new data
> > structure named sockpass_mapping. It contains an unbound workqueue to
> > schedule delayed work for the accept and poll commands. It also has a
> > reqcopy field to be used to store a copy of a request for delayed work.
> > Reads/writes to it are protected by a lock (the "copy_lock" spinlock).
> > Initialize the workqueue in pvcalls_back_bind.
> >
> > Implement the bind command with inet_bind.
> >
> > The pass_sk_data_ready event handler will be added later.
> >
> > Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
> > CC: boris.ostrovsky@xxxxxxxxxx
> > CC: jgross@xxxxxxxx
> > ---
> >  drivers/xen/pvcalls-back.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 87 insertions(+)
> >
> > diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
> > index 1bc2620..dae91fb 100644
> > --- a/drivers/xen/pvcalls-back.c
> > +++ b/drivers/xen/pvcalls-back.c
> > @@ -78,6 +78,18 @@ struct sock_mapping {
> >  	struct pvcalls_ioworker ioworker;
> >  };
> >
> > +struct sockpass_mapping {
> > +	struct list_head list;
> > +	struct pvcalls_fedata *fedata;
> > +	struct socket *sock;
> > +	uint64_t id;
> > +	struct xen_pvcalls_request reqcopy;
> > +	spinlock_t copy_lock;
> > +	struct workqueue_struct *wq;
> > +	struct work_struct register_work;
> > +	void (*saved_data_ready)(struct sock *sk);
> > +};
> > +
> >  static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
> >  static int pvcalls_back_release_active(struct xenbus_device *dev,
> >  				       struct pvcalls_fedata *fedata,
> > @@ -263,9 +275,84 @@ static int pvcalls_back_release(struct xenbus_device *dev,
> >  	return 0;
> >  }
> >
> > +static void __pvcalls_back_accept(struct work_struct *work)
> > +{
> > +}
> > +
> > +static void pvcalls_pass_sk_data_ready(struct sock *sock)
> > +{
> > +}
> > +
> >  static int pvcalls_back_bind(struct xenbus_device *dev,
> >  			     struct xen_pvcalls_request *req)
> >  {
> > +	struct pvcalls_fedata *fedata;
> > +	int ret, err;
> > +	struct socket *sock;
> 
> Get rid of sock, ...

OK

> > +	struct sockpass_mapping *map;
> > +	struct xen_pvcalls_response *rsp;
> > +
> > +	fedata = dev_get_drvdata(&dev->dev);
> > +
> > +	map = kzalloc(sizeof(*map), GFP_KERNEL);
> > +	if (map == NULL) {
> > +		ret = -ENOMEM;
> > +		goto out;
> > +	}
> > +
> > +	INIT_WORK(&map->register_work, __pvcalls_back_accept);
> > +	spin_lock_init(&map->copy_lock);
> > +	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
> > +	if (!map->wq) {
> > +		ret = -ENOMEM;
> > +		kfree(map);
> 
> Move kfree(map) to the exit path, ...

OK

> > +		goto out;
> > +	}
> > +
> > +	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
> 
> Use &map->sock here, ...

OK

> > +	if (ret < 0) {
> > +		destroy_workqueue(map->wq);
> 
> Move destroy_workqueue() to the exit path, ...

OK

> > +		kfree(map);
> > +		goto out;
> > +	}
> > +
> > +	ret = inet_bind(sock, (struct sockaddr *)&req->u.bind.addr,
> > +			req->u.bind.len);
> > +	if (ret < 0) {
> > +		sock_release(sock);
> 
> and sock_release(), too, ...

OK

> > +		destroy_workqueue(map->wq);
> > +		kfree(map);
> > +		goto out;
> > +	}
> > +
> > +	map->fedata = fedata;
> > +	map->sock = sock;
> > +	map->id = req->u.bind.id;
> > +
> > +	down(&fedata->socket_lock);
> > +	err = radix_tree_insert(&fedata->socketpass_mappings, map->id,
> > +				map);
> 
> Use ret instead of err?

OK

> > +	up(&fedata->socket_lock);
> > +	if (err) {
> > +		ret = err;
> > +		sock_release(sock);
> > +		destroy_workqueue(map->wq);
> > +		kfree(map);
> > +		goto out;
> > +	}
> > +
> > +	write_lock_bh(&sock->sk->sk_callback_lock);
> > +	map->saved_data_ready = sock->sk->sk_data_ready;
> > +	sock->sk->sk_user_data = map;
> > +	sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
> > +	write_unlock_bh(&sock->sk->sk_callback_lock);
> > +
> > +out:
> > +	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
> > +	rsp->req_id = req->req_id;
> > +	rsp->cmd = req->cmd;
> > +	rsp->u.bind.id = req->u.bind.id;
> > +	rsp->ret = ret;
> 
> ... have a common error exit handling:
> 
> +	if (ret) {
> +		if (map && map->sock)
> +			sock_release(map->sock);
> +		if (map && map->wq)
> +			destroy_workqueue(map->wq);
> +		kfree(map);
> +	}

All good suggestions, I'll make all changes.
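
Putting the agreed changes together -- dropping the local sock, creating into
&map->sock, switching err to ret, and funneling all cleanup through the common
error exit -- pvcalls_back_bind would look roughly like the sketch below. This
is a reconstruction from the review above, not the actual v7 patch:

static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map = NULL;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	/* Create directly into map->sock so the exit path can release it. */
	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	/* Common error exit: undo whatever was set up before failing. */
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}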
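
The reqcopy/copy_lock scheme from the commit message is easiest to see from the
consumer side. Below is a hypothetical sketch of how the delayed work running
on map->wq might read the stashed request; the real body of
__pvcalls_back_accept is only added by a later patch in the series, so treat
this as an illustration of the locking pattern, not that patch's code:

/* Hypothetical sketch: consuming the request stashed in map->reqcopy. */
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *map = container_of(work,
			struct sockpass_mapping, register_work);
	struct xen_pvcalls_request *req;
	unsigned long flags;

	/* Take copy_lock to read the stashed request safely. */
	spin_lock_irqsave(&map->copy_lock, flags);
	req = &map->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		/* No accept request pending; nothing to do. */
		spin_unlock_irqrestore(&map->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&map->copy_lock, flags);

	/* ... perform the actual accept and write the response ... */

	/* Mark the reqcopy slot as free again. */
	spin_lock_irqsave(&map->copy_lock, flags);
	map->reqcopy.cmd = 0;
	spin_unlock_irqrestore(&map->copy_lock, flags);
}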