[Xen-devel] [PATCH v4] pvcalls-front: Avoid get_free_pages(GFP_KERNEL) under spinlock
The problem is that we call this with a spin lock held. The call tree is:

    pvcalls_front_accept() holds bedata->socket_lock.
        -> create_active()
            -> __get_free_pages() uses GFP_KERNEL

create_active() is called from pvcalls_front_connect() and pvcalls_front_accept(), both with bedata->socket_lock held. The allocation is not allowed to sleep in that context, so GFP_KERNEL is not sufficient.

This issue was detected by using the Coccinelle software.

v2: Add a function that does the allocations, call it outside the lock, and pass the allocated data to create_active().
v3: Use the matching deallocators, i.e. free_page() and free_pages(), respectively.
v4: Pre-populate map (struct sock_mapping) rather than introducing one more new struct.

Suggested-by: Juergen Gross <jgross@xxxxxxxx>
Suggested-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Suggested-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Signed-off-by: Wen Yang <wen.yang99@xxxxxxxxxx>
CC: Julia Lawall <julia.lawall@xxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Juergen Gross <jgross@xxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: xen-devel@xxxxxxxxxxxxxxxxxxxx
CC: linux-kernel@xxxxxxxxxxxxxxx
---
(For illustration, a standalone sketch of the allocate-before-locking ordering is appended after the patch.)

 drivers/xen/pvcalls-front.c | 57 ++++++++++++++++++++++++++++++-------
 1 file changed, 46 insertions(+), 11 deletions(-)

diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 77224d8f3e6f..555c9abdf58f 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -335,13 +335,18 @@ int pvcalls_front_socket(struct socket *sock)
 	return ret;
 }
 
-static int create_active(struct sock_mapping *map, int *evtchn)
+static void free_active_ring(struct sock_mapping *map)
 {
-	void *bytes;
-	int ret = -ENOMEM, irq = -1, i;
+	if (!map)
+		return;
+	free_pages((unsigned long)map->active.data.in,
+			map->active.ring->ring_order);
+	free_page((unsigned long)map->active.ring);
+}
 
-	*evtchn = -1;
-	init_waitqueue_head(&map->active.inflight_conn_req);
+static int alloc_active_ring(struct sock_mapping *map)
+{
+	void *bytes;
 
 	map->active.ring = (struct pvcalls_data_intf *)
 		__get_free_page(GFP_KERNEL | __GFP_ZERO);
@@ -352,6 +357,26 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 					PVCALLS_RING_ORDER);
 	if (bytes == NULL)
 		goto out_error;
+	map->active.data.in = bytes;
+	map->active.data.out = bytes +
+		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+	return 0;
+
+out_error:
+	free_active_ring(map);
+	return -ENOMEM;
+}
+
+static int create_active(struct sock_mapping *map, int *evtchn)
+{
+	void *bytes;
+	int ret = -ENOMEM, irq = -1, i;
+
+	*evtchn = -1;
+	init_waitqueue_head(&map->active.inflight_conn_req);
+
+	bytes = map->active.data.in;
 	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
 		map->active.ring->ref[i] = gnttab_grant_foreign_access(
 			pvcalls_front_dev->otherend_id,
@@ -361,10 +386,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 		pvcalls_front_dev->otherend_id,
 		pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
 
-	map->active.data.in = bytes;
-	map->active.data.out = bytes +
-		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
 	ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
 	if (ret)
 		goto out_error;
@@ -385,8 +406,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 out_error:
 	if (*evtchn >= 0)
 		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
-	free_page((unsigned long)map->active.ring);
+	free_active_ring(map);
 	return ret;
 }
 
@@ -406,11 +426,17 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
 		return PTR_ERR(map);
 
 	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+	ret = alloc_active_ring(map);
+	if (ret < 0) {
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
 
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
@@ -780,12 +806,20 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		}
 	}
 
+	ret = alloc_active_ring(map);
+	if (ret < 0) {
+		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+			  (void *)&map->passive.flags);
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
@@ -794,6 +828,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return -ENOMEM;
 	}
-- 
2.19.1
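For readers who want the locking rule in isolation: below is a minimal, self-contained sketch of the ordering this version enforces, i.e. do the GFP_KERNEL allocations first, take the spinlock only afterwards, and free the pre-allocated pages on any error path hit under the lock. This is not the pvcalls code itself; struct demo_ring, the demo_* helpers and DEMO_RING_ORDER are illustrative stand-ins for the active ring fields of struct sock_mapping, alloc_active_ring()/free_active_ring() and PVCALLS_RING_ORDER.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_RING_ORDER 2

struct demo_ring {
	void *ctrl;	/* one zeroed page, like map->active.ring */
	void *data;	/* 1 << DEMO_RING_ORDER zeroed pages, like data.in/out */
};

static void demo_free_ring(struct demo_ring *r)
{
	free_pages((unsigned long)r->data, DEMO_RING_ORDER);
	free_page((unsigned long)r->ctrl);
	r->data = NULL;
	r->ctrl = NULL;
}

/* May sleep, so it must run before any spinlock is taken. */
static int demo_alloc_ring(struct demo_ring *r)
{
	r->ctrl = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!r->ctrl)
		return -ENOMEM;

	r->data = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					   DEMO_RING_ORDER);
	if (!r->data) {
		free_page((unsigned long)r->ctrl);
		r->ctrl = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* Caller pattern: allocate while sleeping is still allowed, then lock. */
static int demo_setup(struct demo_ring *r, spinlock_t *lock, bool fail_under_lock)
{
	int ret;

	ret = demo_alloc_ring(r);	/* no lock held yet, GFP_KERNEL is fine */
	if (ret < 0)
		return ret;

	spin_lock(lock);		/* atomic context from here on */
	if (fail_under_lock) {		/* stands in for get_request() failing */
		spin_unlock(lock);
		demo_free_ring(r);	/* undo the pre-allocation */
		return -EAGAIN;
	}
	/* ... hand r->ctrl / r->data on to the rest of the setup ... */
	spin_unlock(lock);
	return 0;
}

The same shape appears twice in the patch above: both pvcalls_front_connect() and pvcalls_front_accept() call alloc_active_ring() before spin_lock(&bedata->socket_lock) and call free_active_ring() on the error paths taken after the lock.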