
[Xen-devel] [RFC PATCH 43/60] hyper_dmabuf: fix memory leaks in various places



Make sure to free allocated buffers on error paths before returning, to prevent memory leaks.
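
As a rough illustration of the unwinding this patch relies on (a userspace sketch only, with hypothetical names such as export_pages/page_info and plain malloc()/free() standing in for the driver's kmalloc()/kfree()), each failure below jumps to a label that frees exactly what was allocated before it:

#include <errno.h>
#include <stdlib.h>

/* Illustrative analog of the driver's page bookkeeping. */
struct page_info {
        void **pages;
        size_t count;
};

static int export_pages(size_t count, struct page_info **out)
{
        struct page_info *info;
        void *req;                      /* stands in for the request message */
        int ret;

        info = malloc(sizeof(*info));
        if (!info)
                return -ENOMEM;

        info->pages = calloc(count, sizeof(*info->pages));
        if (!info->pages) {
                ret = -ENOMEM;
                goto fail_pages;        /* only info exists so far */
        }
        info->count = count;

        req = malloc(64);
        if (!req) {
                ret = -ENOMEM;
                goto fail_req;          /* pages and info both exist */
        }

        /*
         * A real send/map step would go here; on failure it would do
         * free(req); ret = -EIO; goto fail_req; so nothing leaks.
         */

        free(req);                      /* only needed transiently */
        *out = info;
        return 0;

fail_req:
        free(info->pages);
fail_pages:
        free(info);
        return ret;
}

int main(void)
{
        struct page_info *info = NULL;

        if (export_pages(16, &info))
                return EXIT_FAILURE;

        free(info->pages);
        free(info);
        return EXIT_SUCCESS;
}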

Signed-off-by: Dongwon Kim <dongwon.kim@xxxxxxxxx>
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c      | 19 +++++++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c        |  9 +++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c        |  6 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   |  4 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 52 +++++++++++++++++++---
 5 files changed, 78 insertions(+), 12 deletions(-)
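
One more sketch (again hypothetical and userspace-only: ring_full() stands in for RING_FULL(), usleep() for usleep_range()) of the bounded wait that the last hunk adds to hyper_dmabuf_xen_send_req(), so a persistently full ring eventually fails with -EIO instead of spinning forever:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Pretend the ring drains after a few polls; a stand-in for RING_FULL(). */
static bool ring_full(int *busy_polls)
{
        return (*busy_polls)-- > 0;
}

/* Poll for a free slot, giving up after max_tries attempts. */
static int wait_for_ring_slot(int max_tries)
{
        int busy_polls = 5;
        int timeout = max_tries;

        while (ring_full(&busy_polls)) {
                if (timeout == 0) {
                        fprintf(stderr, "timed out waiting for ring space\n");
                        return -EIO;
                }
                usleep(100);            /* usleep_range(100, 120) in the driver */
                timeout--;
        }

        return 0;
}

int main(void)
{
        return wait_for_ring_slot(1000) ? EXIT_FAILURE : EXIT_SUCCESS;
}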

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 283fe5a..3215003 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -282,6 +282,7 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 
        /* free msg */
        kfree(req);
+
        /* free page_info */
        kfree(page_info->pages);
        kfree(page_info);
@@ -298,6 +299,10 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 fail_map_req:
        hyper_dmabuf_remove_exported(sgt_info->hid);
 
+       /* free page_info */
+       kfree(page_info->pages);
+       kfree(page_info);
+
 fail_export:
        kfree(sgt_info->va_vmapped);
 
@@ -433,6 +438,13 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
 
                        sgt_info->num_importers--;
                        req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+                       if (!req) {
+                               dev_err(hyper_dmabuf_private.device,
+                                       "No more space left\n");
+                               return -ENOMEM;
+                       }
+
                        hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD_FAILED, &operands[0]);
                        ops->send_req(HYPER_DMABUF_DOM_ID(sgt_info->hid), req, false);
                        kfree(req);
@@ -681,16 +693,19 @@ long hyper_dmabuf_ioctl(struct file *filp,
 
        if (copy_from_user(kdata, (void __user *)param, _IOC_SIZE(cmd)) != 0) {
                dev_err(hyper_dmabuf_private.device, "failed to copy from user arguments\n");
-               return -EFAULT;
+               ret = -EFAULT;
+               goto ioctl_error;
        }
 
        ret = func(filp, kdata);
 
        if (copy_to_user((void __user *)param, kdata, _IOC_SIZE(cmd)) != 0) {
                dev_err(hyper_dmabuf_private.device, "failed to copy to user arguments\n");
-               return -EFAULT;
+               ret = -EFAULT;
+               goto ioctl_error;
        }
 
+ioctl_error:
        kfree(kdata);
 
        return ret;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index c516df8..46cf9a4 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -191,8 +191,7 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
        struct hyper_dmabuf_req *temp_req;
        struct hyper_dmabuf_imported_sgt_info *sgt_info;
        struct hyper_dmabuf_sgt_info *exp_sgt_info;
-       hyper_dmabuf_id_t hid = {req->operands[0], /* hid.id */
-                              {req->operands[1], req->operands[2], req->operands[3]}}; /* hid.rng_key */
+       hyper_dmabuf_id_t hid;
        int ret;
 
        if (!req) {
@@ -200,6 +199,11 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
                return -EINVAL;
        }
 
+       hid.id = req->operands[0];
+       hid.rng_key[0] = req->operands[1];
+       hid.rng_key[1] = req->operands[2];
+       hid.rng_key[2] = req->operands[3];
+
        if ((req->command < HYPER_DMABUF_EXPORT) ||
                (req->command > HYPER_DMABUF_OPS_TO_SOURCE)) {
                dev_err(hyper_dmabuf_private.device, "invalid command\n");
@@ -332,6 +336,7 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
        if (!proc) {
                dev_err(hyper_dmabuf_private.device,
                        "No memory left to be allocated\n");
+               kfree(temp_req);
                return -ENOMEM;
        }
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
index 81cb09f..9313c42 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
@@ -148,9 +148,8 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
        if (!st)
                goto err_free_sg;
 
-        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir))
                 goto err_free_sg;
-        }
 
        ret = hyper_dmabuf_sync_request(sgt_info->hid,
                                        HYPER_DMABUF_OPS_MAP);
@@ -171,6 +170,9 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
                kfree(st);
        }
 
+       kfree(page_info->pages);
+       kfree(page_info);
+
        return NULL;
 }
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
index c2d013a..dd17d26 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
@@ -89,8 +89,10 @@ struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
                return NULL;
 
        pinfo->pages = kmalloc(sizeof(struct page *)*hyper_dmabuf_get_num_pgs(sgt), GFP_KERNEL);
-       if (!pinfo->pages)
+       if (!pinfo->pages) {
+               kfree(pinfo);
                return NULL;
+       }
 
        sgl = sgt->sgl;
 
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index 43dd3b6..9689346 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -229,9 +229,16 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 
        ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
 
+       if (!ring_info) {
+               dev_err(hyper_dmabuf_private.device,
+                       "No more space left\n");
+               return -ENOMEM;
+       }
+
        /* from exporter to importer */
        shared_ring = (void *)__get_free_pages(GFP_KERNEL, 1);
        if (shared_ring == 0) {
+               kfree(ring_info);
                return -ENOMEM;
        }
 
@@ -246,6 +253,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
                                                           0);
        if (ring_info->gref_ring < 0) {
                /* fail to get gref */
+               kfree(ring_info);
                return -EFAULT;
        }
 
@@ -256,6 +264,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
        if (ret) {
                dev_err(hyper_dmabuf_private.device,
                        "Cannot allocate event channel\n");
+               kfree(ring_info);
                return -EIO;
        }
 
@@ -271,6 +280,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
                HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
                gnttab_end_foreign_access(ring_info->gref_ring, 0,
                                        virt_to_mfn(shared_ring));
+               kfree(ring_info);
                return -EIO;
        }
 
@@ -299,6 +309,14 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
         */
        ring_info->watch.callback = remote_dom_exporter_watch_cb;
        ring_info->watch.node = (const char*) kmalloc(sizeof(char) * 255, GFP_KERNEL);
+
+       if (!ring_info->watch.node) {
+               dev_err(hyper_dmabuf_private.device,
+                       "No more space left\n");
+               kfree(ring_info);
+               return -ENOMEM;
+       }
+
        sprintf((char*)ring_info->watch.node,
                "/local/domain/%d/data/hyper_dmabuf/%d/port",
                domid, hyper_dmabuf_xen_get_domid());
@@ -392,8 +410,16 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 
        map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL);
 
+       if (!map_ops) {
+               dev_err(hyper_dmabuf_private.device,
+                       "No memory left to be allocated\n");
+               ret = -ENOMEM;
+               goto fail_no_map_ops;
+       }
+
        if (gnttab_alloc_pages(1, &shared_ring)) {
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto fail_others;
        }
 
        gnttab_set_map_op(&map_ops[0], (unsigned long)pfn_to_kaddr(page_to_pfn(shared_ring)),
@@ -405,12 +431,14 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
        ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device, "Cannot map ring\n");
-               return -EFAULT;
+               ret = -EFAULT;
+               goto fail_others;
        }
 
        if (map_ops[0].status) {
                dev_err(hyper_dmabuf_private.device, "Ring mapping failed\n");
-               return -EFAULT;
+               ret = -EFAULT;
+               goto fail_others;
        } else {
                ring_info->unmap_op.handle = map_ops[0].handle;
        }
@@ -424,7 +452,8 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
        ret = bind_interdomain_evtchn_to_irq(domid, rx_port);
 
        if (ret < 0) {
-               return -EIO;
+               ret = -EIO;
+               goto fail_others;
        }
 
        ring_info->irq = ret;
@@ -445,6 +474,12 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
                          back_ring_isr, 0,
                          NULL, (void*)ring_info);
 
+fail_others:
+       kfree(map_ops);
+
+fail_no_map_ops:
+       kfree(ring_info);
+
        return ret;
 }
 
@@ -520,15 +555,22 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
                return -ENOENT;
        }
 
-
        mutex_lock(&ring_info->lock);
 
        ring = &ring_info->ring_front;
 
        while (RING_FULL(ring)) {
+               if (timeout == 0) {
+                       dev_err(hyper_dmabuf_private.device,
+                               "Timeout while waiting for an entry in the ring\n");
+                       return -EIO;
+               }
                usleep_range(100, 120);
+               timeout--;
        }
 
+       timeout = 1000;
+
        new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
        if (!new_req) {
                mutex_unlock(&ring_info->lock);
-- 
2.7.4

