
[Xen-devel] [RFC PATCH 50/60] hyper_dmabuf: fix style errors and warnings caught by checkpatch.pl



Fix all coding-style problems reported by checkpatch.pl.
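
Most of the changes below are mechanical transformations of the kind sketched here. The snippet is illustrative only (example_ctx and example_log are made-up names, not code from this driver); it shows the usual before/after shape of a checkpatch fix: the '*' binds to the name, no space after '(', and lines longer than 80 columns are wrapped at the argument list.

  #include <linux/printk.h>

  struct example_ctx {            /* made-up type, for illustration only */
          int id;
  };

  /* before (would trip checkpatch):
   *   static void example_log(struct example_ctx* ctx, void *buf, int len)
   *   {
   *           printk( KERN_ERR "example_log called with ctx=%p buf=%p len=%d\n", ctx, buf, len);
   *   }
   */

  /* after: '*' next to the name, no space after '(', long line wrapped */
  static void example_log(struct example_ctx *ctx, void *buf, int len)
  {
          printk(KERN_ERR "example_log ctx=%p buf=%p len=%d\n",
                 ctx, buf, len);
  }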

Signed-off-by: Dongwon Kim <dongwon.kim@xxxxxxxxx>
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c        |  53 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h        |   6 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c      |  12 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c         |  24 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h         |   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c      | 308 +++++++++++----------
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h      |   5 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c        | 132 ++++-----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h        |   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c        |  58 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c      | 236 ++++++++--------
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c    |  81 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   |  15 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h   |   2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h     |  78 ++++--
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 154 +++++------
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  21 +-
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |  21 +-
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h  |  16 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h    |  19 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c    | 128 +++++----
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h    |  15 +-
 include/uapi/xen/hyper_dmabuf.h                    |  26 +-
 23 files changed, 739 insertions(+), 679 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 525ee78..023d7f4 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -44,7 +44,6 @@
 
 #ifdef CONFIG_HYPER_DMABUF_XEN
 #include "xen/hyper_dmabuf_xen_drv.h"
-extern struct hyper_dmabuf_backend_ops xen_backend_ops;
 #endif
 
 MODULE_LICENSE("GPL and additional rights");
@@ -52,14 +51,11 @@ MODULE_AUTHOR("Intel Corporation");
 
 struct hyper_dmabuf_private *hy_drv_priv;
 
-long hyper_dmabuf_ioctl(struct file *filp,
-                       unsigned int cmd, unsigned long param);
-
-static void hyper_dmabuf_force_free(struct exported_sgt_info* exported,
-                                   void *attr)
+static void hyper_dmabuf_force_free(struct exported_sgt_info *exported,
+                                   void *attr)
 {
        struct ioctl_hyper_dmabuf_unexport unexport_attr;
-       struct file *filp = (struct file*) attr;
+       struct file *filp = (struct file *)attr;
 
        if (!filp || !exported)
                return;
@@ -97,7 +93,8 @@ int hyper_dmabuf_release(struct inode *inode, struct file *filp)
 
 #ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
 
-unsigned int hyper_dmabuf_event_poll(struct file *filp, struct poll_table_struct *wait)
+unsigned int hyper_dmabuf_event_poll(struct file *filp,
+                                    struct poll_table_struct *wait)
 {
        unsigned int mask = 0;
 
@@ -153,15 +150,17 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
 
                        mutex_unlock(&hy_drv_priv->event_read_lock);
                        ret = wait_event_interruptible(hy_drv_priv->event_wait,
-                                                      !list_empty(&hy_drv_priv->event_list));
+                                 !list_empty(&hy_drv_priv->event_list));
 
                        if (ret == 0)
-                               ret = mutex_lock_interruptible(&hy_drv_priv->event_read_lock);
+                               ret = mutex_lock_interruptible(
+                                       &hy_drv_priv->event_read_lock);
 
                        if (ret)
                                return ret;
                } else {
-                       unsigned length = (sizeof(struct hyper_dmabuf_event_hdr) + e->event_data.hdr.size);
+                       unsigned int length = (sizeof(e->event_data.hdr) +
+                                                     e->event_data.hdr.size);
 
                        if (length > count - ret) {
 put_back_event:
@@ -172,20 +171,22 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
                        }
 
                        if (copy_to_user(buffer + ret, &e->event_data.hdr,
-                                        sizeof(struct hyper_dmabuf_event_hdr))) {
+                                        sizeof(e->event_data.hdr))) {
                                if (ret == 0)
                                        ret = -EFAULT;
 
                                goto put_back_event;
                        }
 
-                       ret += sizeof(struct hyper_dmabuf_event_hdr);
+                       ret += sizeof(e->event_data.hdr);
 
-                       if (copy_to_user(buffer + ret, e->event_data.data, e->event_data.hdr.size)) {
+                       if (copy_to_user(buffer + ret, e->event_data.data,
+                                        e->event_data.hdr.size)) {
                                /* error while copying void *data */
 
                                struct hyper_dmabuf_event_hdr dummy_hdr = {0};
-                               ret -= sizeof(struct hyper_dmabuf_event_hdr);
+
+                               ret -= sizeof(e->event_data.hdr);
 
                                /* nullifying hdr of the event in user buffer */
                                if (copy_to_user(buffer + ret, &dummy_hdr,
@@ -212,8 +213,7 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
 
 #endif
 
-static struct file_operations hyper_dmabuf_driver_fops =
-{
+static const struct file_operations hyper_dmabuf_driver_fops = {
        .owner = THIS_MODULE,
        .open = hyper_dmabuf_open,
        .release = hyper_dmabuf_release,
@@ -246,7 +246,7 @@ int register_device(void)
 
        hy_drv_priv->dev = hyper_dmabuf_miscdev.this_device;
 
-       /* TODO: Check if there is a different way to initialize dma mask nicely */
+       /* TODO: Check if there is a different way to initialize dma mask */
        dma_coerce_mask_and_coherent(hy_drv_priv->dev, DMA_BIT_MASK(64));
 
        return ret;
@@ -264,32 +264,30 @@ static int __init hyper_dmabuf_drv_init(void)
 {
        int ret = 0;
 
-       printk( KERN_NOTICE "hyper_dmabuf_starting: Initialization started\n");
+       printk(KERN_NOTICE "hyper_dmabuf_starting: Initialization started\n");
 
        hy_drv_priv = kcalloc(1, sizeof(struct hyper_dmabuf_private),
                              GFP_KERNEL);
 
        if (!hy_drv_priv) {
-               printk( KERN_ERR "hyper_dmabuf: Failed to create drv\n");
+               printk(KERN_ERR "hyper_dmabuf: Failed to create drv\n");
                return -1;
        }
 
        ret = register_device();
-       if (ret < 0) {
+       if (ret < 0)
                return ret;
-       }
 
 /* currently only supports XEN hypervisor */
-
 #ifdef CONFIG_HYPER_DMABUF_XEN
        hy_drv_priv->backend_ops = &xen_backend_ops;
 #else
        hy_drv_priv->backend_ops = NULL;
-       printk( KERN_ERR "hyper_dmabuf drv currently supports XEN only.\n");
+       printk(KERN_ERR "hyper_dmabuf drv currently supports XEN only.\n");
 #endif
 
        if (hy_drv_priv->backend_ops == NULL) {
-               printk( KERN_ERR "Hyper_dmabuf: failed to be loaded - no backend found\n");
+               printk(KERN_ERR "Hyper_dmabuf: no backend found\n");
                return -1;
        }
 
@@ -385,10 +383,7 @@ static void hyper_dmabuf_drv_exit(void)
        dev_info(hy_drv_priv->dev,
                 "hyper_dmabuf driver: Exiting\n");
 
-       if (hy_drv_priv) {
-               kfree(hy_drv_priv);
-               hy_drv_priv = NULL;
-       }
+       kfree(hy_drv_priv);
 
        unregister_device();
 }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 2ead41b..049c694 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -36,7 +36,7 @@ struct hyper_dmabuf_event {
 };
 
 struct hyper_dmabuf_private {
-        struct device *dev;
+       struct device *dev;
 
        /* VM(domain) id of current VM instance */
        int domid;
@@ -57,8 +57,8 @@ struct hyper_dmabuf_private {
        /* flag that shows whether backend is initialized */
        bool initialized;
 
-        wait_queue_head_t event_wait;
-        struct list_head event_list;
+       wait_queue_head_t event_wait;
+       struct list_head event_list;
 
        spinlock_t event_lock;
        struct mutex event_read_lock;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
index 0498cda..a4945af 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
@@ -44,7 +44,8 @@ static void hyper_dmabuf_send_event_locked(struct hyper_dmabuf_event *e)
        assert_spin_locked(&hy_drv_priv->event_lock);
 
        /* check current number of event then if it hits the max num allowed
-        * then remove the oldest event in the list */
+        * then remove the oldest event in the list
+        */
        if (hy_drv_priv->pending > MAX_DEPTH_EVENT_QUEUE - 1) {
                oldest = list_first_entry(&hy_drv_priv->event_list,
                                struct hyper_dmabuf_event, link);
@@ -61,7 +62,7 @@ static void hyper_dmabuf_send_event_locked(struct hyper_dmabuf_event *e)
        wake_up_interruptible(&hy_drv_priv->event_wait);
 }
 
-void hyper_dmabuf_events_release()
+void hyper_dmabuf_events_release(void)
 {
        struct hyper_dmabuf_event *e, *et;
        unsigned long irqflags;
@@ -100,15 +101,12 @@ int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid)
 
        e = kzalloc(sizeof(*e), GFP_KERNEL);
 
-       if (!e) {
-               dev_err(hy_drv_priv->dev,
-                       "no space left\n");
+       if (!e)
                return -ENOMEM;
-       }
 
        e->event_data.hdr.event_type = HYPER_DMABUF_NEW_IMPORT;
        e->event_data.hdr.hid = hid;
-       e->event_data.data = (void*)imported->priv;
+       e->event_data.data = (void *)imported->priv;
        e->event_data.hdr.size = imported->sz_priv;
 
        spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
index e2466c7..312dea5 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -40,11 +40,8 @@ void store_reusable_hid(hyper_dmabuf_id_t hid)
 
        new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL);
 
-       if (!new_reusable) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
+       if (!new_reusable)
                return;
-       }
 
        new_reusable->hid = hid;
 
@@ -54,7 +51,7 @@ void store_reusable_hid(hyper_dmabuf_id_t hid)
 static hyper_dmabuf_id_t retrieve_reusable_hid(void)
 {
        struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
-       hyper_dmabuf_id_t hid = {-1, {0,0,0}};
+       hyper_dmabuf_id_t hid = {-1, {0, 0, 0} };
 
        /* check there is reusable id */
        if (!list_empty(&reusable_head->list)) {
@@ -92,7 +89,7 @@ void destroy_reusable_list(void)
 
 hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
 {
-       static int count = 0;
+       static int count;
        hyper_dmabuf_id_t hid;
        struct list_reusable_id *reusable_head;
 
@@ -100,13 +97,11 @@ hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
        if (count == 0) {
                reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL);
 
-               if (!reusable_head) {
-                       dev_err(hy_drv_priv->dev,
-                               "No memory left to be allocated\n");
-                       return (hyper_dmabuf_id_t){-1, {0,0,0}};
-               }
+               if (!reusable_head)
+                       return (hyper_dmabuf_id_t){-1, {0, 0, 0} };
 
-               reusable_head->hid.id = -1; /* list head has an invalid count */
+               /* list head has an invalid count */
+               reusable_head->hid.id = -1;
                INIT_LIST_HEAD(&reusable_head->list);
                hy_drv_priv->id_queue = reusable_head;
        }
@@ -116,9 +111,8 @@ hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
        /*creating a new H-ID only if nothing in the reusable id queue
         * and count is less than maximum allowed
         */
-       if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX) {
+       if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX)
                hid.id = HYPER_DMABUF_ID_CREATE(hy_drv_priv->domid, count++);
-       }
 
        /* random data embedded in the id for security */
        get_random_bytes(&hid.rng_key[0], 12);
@@ -131,7 +125,7 @@ bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2)
        int i;
 
        /* compare keys */
-       for (i=0; i<3; i++) {
+       for (i = 0; i < 3; i++) {
                if (hid1.rng_key[i] != hid2.rng_key[i])
                        return false;
        }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h
index a3336d9..61c4fb3 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h
@@ -26,10 +26,10 @@
 #define __HYPER_DMABUF_ID_H__
 
 #define HYPER_DMABUF_ID_CREATE(domid, cnt) \
-        ((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
+       ((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
 
 #define HYPER_DMABUF_DOM_ID(hid) \
-        (((hid.id) >> 24) & 0xFF)
+       (((hid.id) >> 24) & 0xFF)
 
 /* currently maximum number of buffers shared
  * at any given moment is limited to 1000
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index b328df7..f9040ed 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -91,7 +91,7 @@ static int hyper_dmabuf_send_export_msg(struct exported_sgt_info *exported,
        /* now create request for importer via ring */
        op[0] = exported->hid.id;
 
-       for (i=0; i<3; i++)
+       for (i = 0; i < 3; i++)
                op[i+1] = exported->hid.rng_key[i];
 
        if (pg_info) {
@@ -113,10 +113,8 @@ static int hyper_dmabuf_send_export_msg(struct exported_sgt_info *exported,
 
        req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
-       if(!req) {
-               dev_err(hy_drv_priv->dev, "no more space left\n");
+       if (!req)
                return -1;
-       }
 
        /* composing a message to the importer */
        hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]);
@@ -161,69 +159,71 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
                                             export_remote_attr->remote_domain);
        if (hid.id != -1) {
                exported = hyper_dmabuf_find_exported(hid);
-               if (exported != NULL) {
-                       if (exported->valid) {
-                               /*
-                                * Check if unexport is already scheduled for that buffer,
-                                * if so try to cancel it. If that will fail, buffer needs
-                                * to be reexport once again.
-                                */
-                               if (exported->unexport_sched) {
-                                       if (!cancel_delayed_work_sync(&exported->unexport)) {
-                                               dma_buf_put(dma_buf);
-                                               goto reexport;
-                                       }
-                                       exported->unexport_sched = false;
-                               }
-
-                               /* if there's any change in size of private data.
-                                * we reallocate space for private data with new size */
-                               if (export_remote_attr->sz_priv != exported->sz_priv) {
-                                       kfree(exported->priv);
-
-                                       /* truncating size */
-                                       if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA) {
-                                               exported->sz_priv = MAX_SIZE_PRIV_DATA;
-                                       } else {
-                                               exported->sz_priv = export_remote_attr->sz_priv;
-                                       }
-
-                                       exported->priv = kcalloc(1, exported->sz_priv, GFP_KERNEL);
-
-                                       if(!exported->priv) {
-                                               dev_err(hy_drv_priv->dev,
-                                                       "no more space left for priv\n");
-                                               hyper_dmabuf_remove_exported(exported->hid);
-                                               hyper_dmabuf_cleanup_sgt_info(exported, true);
-                                               kfree(exported);
-                                               dma_buf_put(dma_buf);
-                                               return -ENOMEM;
-                                       }
-                               }
-
-                               /* update private data in sgt_info with new ones */
-                               ret = copy_from_user(exported->priv, export_remote_attr->priv,
-                                                    exported->sz_priv);
-                               if (ret) {
-                                       dev_err(hy_drv_priv->dev,
-                                               "Failed to load a new private data\n");
-                                       ret = -EINVAL;
-                               } else {
-                                       /* send an export msg for updating priv in importer */
-                                       ret = hyper_dmabuf_send_export_msg(exported, NULL);
-
-                                       if (ret < 0) {
-                                               dev_err(hy_drv_priv->dev,
-                                                       "Failed to send a new private data\n");
-                                               ret = -EBUSY;
-                                       }
-                               }
 
+               if (!exported)
+                       goto reexport;
+
+               if (exported->valid == false)
+                       goto reexport;
+
+               /*
+                * Check if unexport is already scheduled for that buffer,
+                * if so try to cancel it. If that will fail, buffer needs
+                * to be reexport once again.
+                */
+               if (exported->unexport_sched) {
+                       if (!cancel_delayed_work_sync(&exported->unexport)) {
                                dma_buf_put(dma_buf);
-                               export_remote_attr->hid = hid;
-                               return ret;
+                               goto reexport;
                        }
+                       exported->unexport_sched = false;
                }
+
+               /* if there's any change in size of private data.
+                * we reallocate space for private data with new size
+                */
+               if (export_remote_attr->sz_priv != exported->sz_priv) {
+                       kfree(exported->priv);
+
+                       /* truncating size */
+                       if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA)
+                               exported->sz_priv = MAX_SIZE_PRIV_DATA;
+                       else
+                               exported->sz_priv = export_remote_attr->sz_priv;
+
+                       exported->priv = kcalloc(1, exported->sz_priv,
+                                                GFP_KERNEL);
+
+                       if (!exported->priv) {
+                               hyper_dmabuf_remove_exported(exported->hid);
+                               hyper_dmabuf_cleanup_sgt_info(exported, true);
+                               kfree(exported);
+                               dma_buf_put(dma_buf);
+                               return -ENOMEM;
+                       }
+               }
+
+               /* update private data in sgt_info with new ones */
+               ret = copy_from_user(exported->priv, export_remote_attr->priv,
+                                    exported->sz_priv);
+               if (ret) {
+                       dev_err(hy_drv_priv->dev,
+                               "Failed to load a new private data\n");
+                       ret = -EINVAL;
+               } else {
+                       /* send an export msg for updating priv in importer */
+                       ret = hyper_dmabuf_send_export_msg(exported, NULL);
+
+                       if (ret < 0) {
+                               dev_err(hy_drv_priv->dev,
+                                       "Failed to send a new private data\n");
+                               ret = -EBUSY;
+                       }
+               }
+
+               dma_buf_put(dma_buf);
+               export_remote_attr->hid = hid;
+               return ret;
        }
 
 reexport:
@@ -244,25 +244,22 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 
        exported = kcalloc(1, sizeof(*exported), GFP_KERNEL);
 
-       if(!exported) {
-               dev_err(hy_drv_priv->dev, "no more space left\n");
+       if (!exported) {
                ret = -ENOMEM;
                goto fail_sgt_info_creation;
        }
 
        /* possible truncation */
-       if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA) {
+       if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA)
                exported->sz_priv = MAX_SIZE_PRIV_DATA;
-       } else {
+       else
                exported->sz_priv = export_remote_attr->sz_priv;
-       }
 
        /* creating buffer for private data of buffer */
-       if(exported->sz_priv != 0) {
+       if (exported->sz_priv != 0) {
                exported->priv = kcalloc(1, exported->sz_priv, GFP_KERNEL);
 
-               if(!exported->priv) {
-                       dev_err(hy_drv_priv->dev, "no more space left\n");
+               if (!exported->priv) {
                        ret = -ENOMEM;
                        goto fail_priv_creation;
                }
@@ -273,7 +270,7 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
        exported->hid = hyper_dmabuf_get_hid();
 
        /* no more exported dmabuf allowed */
-       if(exported->hid.id == -1) {
+       if (exported->hid.id == -1) {
                dev_err(hy_drv_priv->dev,
                        "exceeds allowed number of dmabuf to be exported\n");
                ret = -ENOMEM;
@@ -286,28 +283,27 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 
        exported->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL);
        if (!exported->active_sgts) {
-               dev_err(hy_drv_priv->dev, "no more space left\n");
                ret = -ENOMEM;
                goto fail_map_active_sgts;
        }
 
-       exported->active_attached = kmalloc(sizeof(struct attachment_list), GFP_KERNEL);
+       exported->active_attached = kmalloc(sizeof(struct attachment_list),
+                                           GFP_KERNEL);
        if (!exported->active_attached) {
-               dev_err(hy_drv_priv->dev, "no more space left\n");
                ret = -ENOMEM;
                goto fail_map_active_attached;
        }
 
-       exported->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list), GFP_KERNEL);
+       exported->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list),
+                                      GFP_KERNEL);
        if (!exported->va_kmapped) {
-               dev_err(hy_drv_priv->dev, "no more space left\n");
                ret = -ENOMEM;
                goto fail_map_va_kmapped;
        }
 
-       exported->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list), GFP_KERNEL);
+       exported->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list),
+                                      GFP_KERNEL);
        if (!exported->va_vmapped) {
-               dev_err(hy_drv_priv->dev, "no more space left\n");
                ret = -ENOMEM;
                goto fail_map_va_vmapped;
        }
@@ -436,31 +432,32 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
        /* send notification for export_fd to exporter */
        op[0] = imported->hid.id;
 
-       for (i=0; i<3; i++)
+       for (i = 0; i < 3; i++)
                op[i+1] = imported->hid.rng_key[i];
 
-       dev_dbg(hy_drv_priv->dev, "Exporting fd of buffer {id:%d key:%d %d %d}\n",
-               imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
-               imported->hid.rng_key[2]);
+       dev_dbg(hy_drv_priv->dev, "Export FD of buffer {id:%d key:%d %d %d}\n",
+               imported->hid.id, imported->hid.rng_key[0],
+               imported->hid.rng_key[1], imported->hid.rng_key[2]);
 
        req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
-       if (!req) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
+       if (!req)
                return -ENOMEM;
-       }
 
        hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD, &op[0]);
 
        ret = ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true);
 
        if (ret < 0) {
-               /* in case of timeout other end eventually will receive request, so we need to undo it */
-               hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED, &op[0]);
+               /* in case of timeout other end eventually will receive request,
+                * so we need to undo it
+                */
+               hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED,
+                                       &op[0]);
                ops->send_req(op[0], req, false);
                kfree(req);
-               dev_err(hy_drv_priv->dev, "Failed to create sgt or notify exporter\n");
+               dev_err(hy_drv_priv->dev,
+                       "Failed to create sgt or notify exporter\n");
                imported->importers--;
                mutex_unlock(&hy_drv_priv->lock);
                return ret;
@@ -471,64 +468,69 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
        if (ret == HYPER_DMABUF_REQ_ERROR) {
                dev_err(hy_drv_priv->dev,
                        "Buffer invalid {id:%d key:%d %d %d}, cannot import\n",
-                       imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
-                       imported->hid.rng_key[2]);
+                       imported->hid.id, imported->hid.rng_key[0],
+                       imported->hid.rng_key[1], imported->hid.rng_key[2]);
 
                imported->importers--;
                mutex_unlock(&hy_drv_priv->lock);
                return -EINVAL;
-       } else {
-               dev_dbg(hy_drv_priv->dev, "Can import buffer {id:%d key:%d %d %d}\n",
-                       imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
-                       imported->hid.rng_key[2]);
-
-               ret = 0;
        }
 
+       ret = 0;
+
+       dev_dbg(hy_drv_priv->dev,
+               "Found buffer gref %d off %d\n",
+               imported->ref_handle, imported->frst_ofst);
+
        dev_dbg(hy_drv_priv->dev,
-                 "%s Found buffer gref %d  off %d last len %d nents %d domain %d\n",
-                 __func__, imported->ref_handle, imported->frst_ofst,
-                 imported->last_len, imported->nents, HYPER_DMABUF_DOM_ID(imported->hid));
+               "last len %d nents %d domain %d\n",
+               imported->last_len, imported->nents,
+               HYPER_DMABUF_DOM_ID(imported->hid));
 
        if (!imported->sgt) {
                dev_dbg(hy_drv_priv->dev,
-                       "%s buffer {id:%d key:%d %d %d} pages not mapped yet\n", __func__,
-                       imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
-                       imported->hid.rng_key[2]);
+                       "buffer {id:%d key:%d %d %d} pages not mapped yet\n",
+                       imported->hid.id, imported->hid.rng_key[0],
+                       imported->hid.rng_key[1], imported->hid.rng_key[2]);
 
                data_pgs = ops->map_shared_pages(imported->ref_handle,
-                                                  HYPER_DMABUF_DOM_ID(imported->hid),
-                                                  imported->nents,
-                                                  &imported->refs_info);
+                                       HYPER_DMABUF_DOM_ID(imported->hid),
+                                       imported->nents,
+                                       &imported->refs_info);
 
                if (!data_pgs) {
                        dev_err(hy_drv_priv->dev,
-                               "Cannot map pages of buffer {id:%d key:%d %d %d}\n",
-                               imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
+                               "can't map pages hid {id:%d key:%d %d %d}\n",
+                               imported->hid.id, imported->hid.rng_key[0],
+                               imported->hid.rng_key[1],
                                imported->hid.rng_key[2]);
 
                        imported->importers--;
+
                        req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
-                       if (!req) {
-                               dev_err(hy_drv_priv->dev,
-                                       "No more space left\n");
+                       if (!req)
                                return -ENOMEM;
-                       }
 
-                       hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED, &op[0]);
-                       ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, false);
+                       hyper_dmabuf_create_req(req,
+                                               HYPER_DMABUF_EXPORT_FD_FAILED,
+                                               &op[0]);
+                       ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req,
+                                                         false);
                        kfree(req);
                        mutex_unlock(&hy_drv_priv->lock);
                        return -EINVAL;
                }
 
-               imported->sgt = hyper_dmabuf_create_sgt(data_pgs, imported->frst_ofst,
-                                                       imported->last_len, imported->nents);
+               imported->sgt = hyper_dmabuf_create_sgt(data_pgs,
+                                                       imported->frst_ofst,
+                                                       imported->last_len,
+                                                       imported->nents);
 
        }
 
-       export_fd_attr->fd = hyper_dmabuf_export_fd(imported, export_fd_attr->flags);
+       export_fd_attr->fd = hyper_dmabuf_export_fd(imported,
+                                                   export_fd_attr->flags);
 
        if (export_fd_attr->fd < 0) {
                /* fail to get fd */
@@ -566,21 +568,19 @@ static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
 
        req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
-       if (!req) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
+       if (!req)
                return;
-       }
 
        op[0] = exported->hid.id;
 
-       for (i=0; i<3; i++)
+       for (i = 0; i < 3; i++)
                op[i+1] = exported->hid.rng_key[i];
 
        hyper_dmabuf_create_req(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &op[0]);
 
        /* Now send unexport request to remote domain, marking
-        * that buffer should not be used anymore */
+        * that buffer should not be used anymore
+        */
        ret = ops->send_req(exported->rdomid, req, true);
        if (ret < 0) {
                dev_err(hy_drv_priv->dev,
@@ -589,12 +589,10 @@ static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
                        exported->hid.rng_key[1], exported->hid.rng_key[2]);
        }
 
-       /* free msg */
        kfree(req);
        exported->unexport_sched = false;
 
-       /*
-        * Immediately clean-up if it has never been exported by importer
+       /* Immediately clean-up if it has never been exported by importer
         * (so no SGT is constructed on importer).
         * clean it up later in remote sync when final release ops
         * is called (importer does this only when there's no
@@ -669,25 +667,31 @@ static int hyper_dmabuf_query_ioctl(struct file *filp, void *data)
                exported = hyper_dmabuf_find_exported(query_attr->hid);
                if (exported) {
                        ret = hyper_dmabuf_query_exported(exported,
-                                                         query_attr->item, &query_attr->info);
+                                                         query_attr->item,
+                                                         &query_attr->info);
                } else {
                        dev_err(hy_drv_priv->dev,
-                               "DMA BUF {id:%d key:%d %d %d} not in the export list\n",
-                               query_attr->hid.id, query_attr->hid.rng_key[0],
-                               query_attr->hid.rng_key[1], query_attr->hid.rng_key[2]);
+                               "hid {id:%d key:%d %d %d} not in exp list\n",
+                               query_attr->hid.id,
+                               query_attr->hid.rng_key[0],
+                               query_attr->hid.rng_key[1],
+                               query_attr->hid.rng_key[2]);
                        return -ENOENT;
                }
        } else {
                /* query for imported dmabuf */
                imported = hyper_dmabuf_find_imported(query_attr->hid);
                if (imported) {
-                       ret = hyper_dmabuf_query_imported(imported, query_attr->item,
+                       ret = hyper_dmabuf_query_imported(imported,
+                                                         query_attr->item,
                                                          &query_attr->info);
                } else {
                        dev_err(hy_drv_priv->dev,
-                               "DMA BUF {id:%d key:%d %d %d} not in the imported list\n",
-                               query_attr->hid.id, query_attr->hid.rng_key[0],
-                               query_attr->hid.rng_key[1], query_attr->hid.rng_key[2]);
+                               "hid {id:%d key:%d %d %d} not in imp list\n",
+                               query_attr->hid.id,
+                               query_attr->hid.rng_key[0],
+                               query_attr->hid.rng_key[1],
+                               query_attr->hid.rng_key[2]);
                        return -ENOENT;
                }
        }
@@ -696,12 +700,18 @@ static int hyper_dmabuf_query_ioctl(struct file *filp, void *data)
 }
 
 const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = {
-       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP, hyper_dmabuf_tx_ch_setup_ioctl, 0),
-       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP, hyper_dmabuf_rx_ch_setup_ioctl, 0),
-       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE, hyper_dmabuf_export_remote_ioctl, 0),
-       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD, hyper_dmabuf_export_fd_ioctl, 0),
-       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT, hyper_dmabuf_unexport_ioctl, 0),
-       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY, hyper_dmabuf_query_ioctl, 0),
+       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP,
+                              hyper_dmabuf_tx_ch_setup_ioctl, 0),
+       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP,
+                              hyper_dmabuf_rx_ch_setup_ioctl, 0),
+       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE,
+                              hyper_dmabuf_export_remote_ioctl, 0),
+       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD,
+                              hyper_dmabuf_export_fd_ioctl, 0),
+       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT,
+                              hyper_dmabuf_unexport_ioctl, 0),
+       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY,
+                              hyper_dmabuf_query_ioctl, 0),
 };
 
 long hyper_dmabuf_ioctl(struct file *filp,
@@ -728,21 +738,23 @@ long hyper_dmabuf_ioctl(struct file *filp,
        }
 
        kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
-       if (!kdata) {
-               dev_err(hy_drv_priv->dev, "no memory\n");
+       if (!kdata)
                return -ENOMEM;
-       }
 
-       if (copy_from_user(kdata, (void __user *)param, _IOC_SIZE(cmd)) != 0) {
-               dev_err(hy_drv_priv->dev, "failed to copy from user arguments\n");
+       if (copy_from_user(kdata, (void __user *)param,
+                          _IOC_SIZE(cmd)) != 0) {
+               dev_err(hy_drv_priv->dev,
+                       "failed to copy from user arguments\n");
                ret = -EFAULT;
                goto ioctl_error;
        }
 
        ret = func(filp, kdata);
 
-       if (copy_to_user((void __user *)param, kdata, _IOC_SIZE(cmd)) != 0) {
-               dev_err(hy_drv_priv->dev, "failed to copy to user arguments\n");
+       if (copy_to_user((void __user *)param, kdata,
+                        _IOC_SIZE(cmd)) != 0) {
+               dev_err(hy_drv_priv->dev,
+                       "failed to copy to user arguments\n");
                ret = -EFAULT;
                goto ioctl_error;
        }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
index 3e9470a..5991a87 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
@@ -34,7 +34,7 @@ struct hyper_dmabuf_ioctl_desc {
        const char *name;
 };
 
-#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags)   \
+#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags)   \
        [_IOC_NR(ioctl)] = {                            \
                        .cmd = ioctl,                   \
                        .func = _func,                  \
@@ -42,6 +42,9 @@ struct hyper_dmabuf_ioctl_desc {
                        .name = #ioctl                  \
        }
 
+long hyper_dmabuf_ioctl(struct file *filp,
+                       unsigned int cmd, unsigned long param);
+
 int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data);
 
 #endif //__HYPER_DMABUF_IOCTL_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index 907f76e..fbbcc39 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -52,18 +52,19 @@ void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
        req->stat = HYPER_DMABUF_REQ_NOT_RESPONDED;
        req->cmd = cmd;
 
-       switch(cmd) {
+       switch (cmd) {
        /* as exporter, commands to importer */
        case HYPER_DMABUF_EXPORT:
                /* exporting pages for dmabuf */
                /* command : HYPER_DMABUF_EXPORT,
-                * op0~3 : hyper_dmabuf_id
+                * op0~op3 : hyper_dmabuf_id
                 * op4 : number of pages to be shared
                 * op5 : offset of data in the first page
                 * op6 : length of data in the last page
                 * op7 : top-level reference number for shared pages
                 * op8 : size of private data (from op9)
-                * op9 ~ : Driver-specific private data (e.g. graphic buffer's meta info)
+                * op9 ~ : Driver-specific private data
+                *         (e.g. graphic buffer's meta info)
                 */
 
                memcpy(&req->op[0], &op[0], 9 * sizeof(int) + op[8]);
@@ -72,34 +73,39 @@ void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
        case HYPER_DMABUF_NOTIFY_UNEXPORT:
                /* destroy sg_list for hyper_dmabuf_id on remote side */
                /* command : DMABUF_DESTROY,
-                * op0~3 : hyper_dmabuf_id_t hid
+                * op0~op3 : hyper_dmabuf_id_t hid
                 */
 
-               for (i=0; i < 4; i++)
+               for (i = 0; i < 4; i++)
                        req->op[i] = op[i];
                break;
 
        case HYPER_DMABUF_EXPORT_FD:
        case HYPER_DMABUF_EXPORT_FD_FAILED:
-               /* dmabuf fd is being created on imported side or importing failed */
-               /* command : HYPER_DMABUF_EXPORT_FD or HYPER_DMABUF_EXPORT_FD_FAILED,
-                * op0~3 : hyper_dmabuf_id
+               /* dmabuf fd is being created on imported side or importing
+                * failed
+                *
+                * command : HYPER_DMABUF_EXPORT_FD or
+                *           HYPER_DMABUF_EXPORT_FD_FAILED,
+                * op0~op3 : hyper_dmabuf_id
                 */
 
-               for (i=0; i < 4; i++)
+               for (i = 0; i < 4; i++)
                        req->op[i] = op[i];
                break;
 
        case HYPER_DMABUF_OPS_TO_REMOTE:
-               /* notifying dmabuf map/unmap to importer (probably not needed) */
-               /* for dmabuf synchronization */
+               /* notifying dmabuf map/unmap to importer (probably not needed)
+                * for dmabuf synchronization
+                */
                break;
 
-       /* as importer, command to exporter */
        case HYPER_DMABUF_OPS_TO_SOURCE:
-               /* notifying dmabuf map/unmap to exporter, map will make the driver to do shadow mapping
-               * or unmapping for synchronization with original exporter (e.g. i915) */
-               /* command : DMABUF_OPS_TO_SOURCE.
+               /* notifying dmabuf map/unmap to exporter, map will make
+                * the driver to do shadow mapping or unmapping for
+                * synchronization with original exporter (e.g. i915)
+                *
+                * command : DMABUF_OPS_TO_SOURCE.
                 * op0~3 : hyper_dmabuf_id
                 * op4 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
                 */
@@ -116,7 +122,8 @@ void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
 static void cmd_process_work(struct work_struct *work)
 {
        struct imported_sgt_info *imported;
-       struct cmd_process *proc = container_of(work, struct cmd_process, work);
+       struct cmd_process *proc = container_of(work,
+                                               struct cmd_process, work);
        struct hyper_dmabuf_req *req;
        int domid;
        int i;
@@ -128,40 +135,42 @@ static void cmd_process_work(struct work_struct *work)
        case HYPER_DMABUF_EXPORT:
                /* exporting pages for dmabuf */
                /* command : HYPER_DMABUF_EXPORT,
-                * op0~3 : hyper_dmabuf_id
+                * op0~op3 : hyper_dmabuf_id
                 * op4 : number of pages to be shared
                 * op5 : offset of data in the first page
                 * op6 : length of data in the last page
                 * op7 : top-level reference number for shared pages
                 * op8 : size of private data (from op9)
-                * op9 ~ : Driver-specific private data (e.g. graphic buffer's meta info)
+                * op9 ~ : Driver-specific private data
+                *         (e.g. graphic buffer's meta info)
                 */
 
-               /* if nents == 0, it means it is a message only for priv synchronization
-                * for existing imported_sgt_info so not creating a new one */
+               /* if nents == 0, it means it is a message only for
+                * priv synchronization. for existing imported_sgt_info
+                * so not creating a new one
+                */
                if (req->op[4] == 0) {
                        hyper_dmabuf_id_t exist = {req->op[0],
                                                   {req->op[1], req->op[2],
-                                                  req->op[3]}};
+                                                  req->op[3] } };
 
                        imported = hyper_dmabuf_find_imported(exist);
 
                        if (!imported) {
                                dev_err(hy_drv_priv->dev,
-                                       "Can't find imported sgt_info from IMPORT_LIST\n");
+                                       "Can't find imported sgt_info\n");
                                break;
                        }
 
                        /* if size of new private data is different,
-                        * we reallocate it. */
+                        * we reallocate it.
+                        */
                        if (imported->sz_priv != req->op[8]) {
                                kfree(imported->priv);
                                imported->sz_priv = req->op[8];
-                               imported->priv = kcalloc(1, req->op[8], GFP_KERNEL);
+                               imported->priv = kcalloc(1, req->op[8],
+                                                        GFP_KERNEL);
                                if (!imported->priv) {
-                                       dev_err(hy_drv_priv->dev,
-                                               "Fail to allocate priv\n");
-
                                        /* set it invalid */
                                        imported->valid = 0;
                                        break;
@@ -181,26 +190,20 @@ static void cmd_process_work(struct work_struct *work)
 
                imported = kcalloc(1, sizeof(*imported), GFP_KERNEL);
 
-               if (!imported) {
-                       dev_err(hy_drv_priv->dev,
-                               "No memory left to be allocated\n");
+               if (!imported)
                        break;
-               }
 
                imported->sz_priv = req->op[8];
                imported->priv = kcalloc(1, req->op[8], GFP_KERNEL);
 
                if (!imported->priv) {
-                       dev_err(hy_drv_priv->dev,
-                               "Fail to allocate priv\n");
-
                        kfree(imported);
                        break;
                }
 
                imported->hid.id = req->op[0];
 
-               for (i=0; i<3; i++)
+               for (i = 0; i < 3; i++)
                        imported->hid.rng_key[i] = req->op[i+1];
 
                imported->nents = req->op[4];
@@ -230,13 +233,13 @@ static void cmd_process_work(struct work_struct *work)
                break;
 
        case HYPER_DMABUF_OPS_TO_REMOTE:
-               /* notifying dmabuf map/unmap to importer (probably not needed) */
-               /* for dmabuf synchronization */
+               /* notifying dmabuf map/unmap to importer
+                * (probably not needed) for dmabuf synchronization
+                */
                break;
 
        default:
                /* shouldn't get here */
-               /* no matched command, nothing to do.. just return error */
                break;
        }
 
@@ -280,20 +283,22 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
                 * op0~3 : hyper_dmabuf_id
                 */
                dev_dbg(hy_drv_priv->dev,
-                       "%s: processing HYPER_DMABUF_NOTIFY_UNEXPORT\n", __func__);
+                       "processing HYPER_DMABUF_NOTIFY_UNEXPORT\n");
 
                imported = hyper_dmabuf_find_imported(hid);
 
                if (imported) {
                        /* if anything is still using dma_buf */
                        if (imported->importers) {
-                               /*
-                                * Buffer is still in  use, just mark that it should
-                                * not be allowed to export its fd anymore.
+                               /* Buffer is still in  use, just mark that
+                                * it should not be allowed to export its fd
+                                * anymore.
                                 */
                                imported->valid = false;
                        } else {
-                               /* No one is using buffer, remove it from imported list */
+                               /* No one is using buffer, remove it from
+                                * imported list
+                                */
                                hyper_dmabuf_remove_imported(hid);
                                kfree(imported);
                        }
@@ -306,10 +311,12 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
 
        /* dma buf remote synchronization */
        if (req->cmd == HYPER_DMABUF_OPS_TO_SOURCE) {
-               /* notifying dmabuf map/unmap to exporter, map will make the driver to do shadow mapping
-                * or unmapping for synchronization with original exporter (e.g. i915) */
-
-               /* command : DMABUF_OPS_TO_SOURCE.
+               /* notifying dmabuf map/unmap to exporter, map will
+                * make the driver to do shadow mapping
+                * or unmapping for synchronization with original
+                * exporter (e.g. i915)
+                *
+                * command : DMABUF_OPS_TO_SOURCE.
                 * op0~3 : hyper_dmabuf_id
                 * op1 : enum hyper_dmabuf_ops {....}
                 */
@@ -330,27 +337,30 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
        if (req->cmd == HYPER_DMABUF_EXPORT_FD) {
                /* find a corresponding SGT for the id */
                dev_dbg(hy_drv_priv->dev,
-                       "Processing HYPER_DMABUF_EXPORT_FD for buffer {id:%d key:%d %d %d}\n",
+                       "HYPER_DMABUF_EXPORT_FD for {id:%d key:%d %d %d}\n",
                        hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
 
                exported = hyper_dmabuf_find_exported(hid);
 
                if (!exported) {
                        dev_err(hy_drv_priv->dev,
-                               "critical err: requested sgt_info can't be found for buffer {id:%d key:%d %d %d}\n",
-                               hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+                               "buffer {id:%d key:%d %d %d} not found\n",
+                               hid.id, hid.rng_key[0], hid.rng_key[1],
+                               hid.rng_key[2]);
 
                        req->stat = HYPER_DMABUF_REQ_ERROR;
                } else if (!exported->valid) {
                        dev_dbg(hy_drv_priv->dev,
-                               "Buffer no longer valid - cannot export fd for buffer {id:%d key:%d %d %d}\n",
-                               hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+                               "Buffer no longer valid {id:%d key:%d %d %d}\n",
+                               hid.id, hid.rng_key[0], hid.rng_key[1],
+                               hid.rng_key[2]);
 
                        req->stat = HYPER_DMABUF_REQ_ERROR;
                } else {
                        dev_dbg(hy_drv_priv->dev,
-                               "Buffer still valid - can export fd for buffer {id:%d key:%d %d %d}\n",
-                               hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+                               "Buffer still valid {id:%d key:%d %d %d}\n",
+                               hid.id, hid.rng_key[0], hid.rng_key[1],
+                               hid.rng_key[2]);
 
                        exported->active++;
                        req->stat = HYPER_DMABUF_REQ_PROCESSED;
@@ -360,15 +370,16 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
 
        if (req->cmd == HYPER_DMABUF_EXPORT_FD_FAILED) {
                dev_dbg(hy_drv_priv->dev,
-                       "Processing HYPER_DMABUF_EXPORT_FD_FAILED for buffer {id:%d key:%d %d %d}\n",
+                       "HYPER_DMABUF_EXPORT_FD_FAILED for {id:%d key:%d %d %d}\n",
                        hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
 
                exported = hyper_dmabuf_find_exported(hid);
 
                if (!exported) {
                        dev_err(hy_drv_priv->dev,
-                               "critical err: requested sgt_info can't be found for buffer {id:%d key:%d %d %d}\n",
-                               hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+                               "buffer {id:%d key:%d %d %d} not found\n",
+                               hid.id, hid.rng_key[0], hid.rng_key[1],
+                               hid.rng_key[2]);
 
                        req->stat = HYPER_DMABUF_REQ_ERROR;
                } else {
@@ -382,19 +393,14 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
                "%s: putting request to workqueue\n", __func__);
        temp_req = kmalloc(sizeof(*temp_req), GFP_KERNEL);
 
-       if (!temp_req) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
+       if (!temp_req)
                return -ENOMEM;
-       }
 
        memcpy(temp_req, req, sizeof(*temp_req));
 
        proc = kcalloc(1, sizeof(struct cmd_process), GFP_KERNEL);
 
        if (!proc) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
                kfree(temp_req);
                return -ENOMEM;
        }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
index 7c694ec..9c8a76b 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
@@ -79,7 +79,9 @@ void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
                                 enum hyper_dmabuf_command command,
                                 int *operands);
 
-/* parse incoming request packet (or response) and take appropriate actions for those */
+/* parse incoming request packet (or response) and take
+ * appropriate actions for those
+ */
 int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req);
 
 #endif // __HYPER_DMABUF_MSG_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
index 7e73170..03fdd30 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
@@ -53,18 +53,15 @@ static int hyper_dmabuf_sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
 
        op[0] = hid.id;
 
-       for (i=0; i<3; i++)
+       for (i = 0; i < 3; i++)
                op[i+1] = hid.rng_key[i];
 
        op[4] = dmabuf_ops;
 
        req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
-       if (!req) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
+       if (!req)
                return -ENOMEM;
-       }
 
        hyper_dmabuf_create_req(req, HYPER_DMABUF_OPS_TO_SOURCE, &op[0]);
 
@@ -81,8 +78,8 @@ static int hyper_dmabuf_sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
        return ret;
 }
 
-static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf,
-                                  struct device* dev,
+static int hyper_dmabuf_ops_attach(struct dma_buf *dmabuf,
+                                  struct device *dev,
                                   struct dma_buf_attachment *attach)
 {
        struct imported_sgt_info *imported;
@@ -99,7 +96,7 @@ static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf,
        return ret;
 }
 
-static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf,
+static void hyper_dmabuf_ops_detach(struct dma_buf *dmabuf,
                                    struct dma_buf_attachment *attach)
 {
        struct imported_sgt_info *imported;
@@ -114,8 +111,9 @@ static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf,
                                        HYPER_DMABUF_OPS_DETACH);
 }
 
-static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachment,
-                                            enum dma_data_direction dir)
+static struct sg_table *hyper_dmabuf_ops_map(
+                               struct dma_buf_attachment *attachment,
+                               enum dma_data_direction dir)
 {
        struct sg_table *st;
        struct imported_sgt_info *imported;
@@ -130,9 +128,8 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
        /* extract pages from sgt */
        pg_info = hyper_dmabuf_ext_pgs(imported->sgt);
 
-       if (!pg_info) {
+       if (!pg_info)
                return NULL;
-       }
 
        /* create a new sg_table with extracted pages */
        st = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst,
@@ -140,8 +137,8 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
        if (!st)
                goto err_free_sg;
 
-        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir))
-                goto err_free_sg;
+       if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir))
+               goto err_free_sg;
 
        ret = hyper_dmabuf_sync_request(imported->hid,
                                        HYPER_DMABUF_OPS_MAP);
@@ -196,9 +193,8 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
 
        imported = (struct imported_sgt_info *)dma_buf->priv;
 
-       if (!dmabuf_refcount(imported->dma_buf)) {
+       if (!dmabuf_refcount(imported->dma_buf))
                imported->dma_buf = NULL;
-       }
 
        imported->importers--;
 
@@ -219,8 +215,9 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
                                        HYPER_DMABUF_OPS_RELEASE);
 
        /*
-        * Check if buffer is still valid and if not remove it from imported list.
-        * That has to be done after sending sync request
+        * Check if buffer is still valid and if not remove it
+        * from imported list. That has to be done after sending
+        * sync request
         */
        if (finish) {
                hyper_dmabuf_remove_imported(imported->hid);
@@ -228,7 +225,8 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
        }
 }
 
-static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
+static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf,
+                                            enum dma_data_direction dir)
 {
        struct imported_sgt_info *imported;
        int ret;
@@ -244,7 +242,8 @@ static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum dma_da
        return ret;
 }
 
-static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
+static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf,
+                                          enum dma_data_direction dir)
 {
        struct imported_sgt_info *imported;
        int ret;
@@ -260,7 +259,8 @@ static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, enum dma_data
        return 0;
 }
 
-static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
+static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf,
+                                         unsigned long pgnum)
 {
        struct imported_sgt_info *imported;
        int ret;
@@ -273,10 +273,12 @@ static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, unsigned long
        ret = hyper_dmabuf_sync_request(imported->hid,
                                        HYPER_DMABUF_OPS_KMAP_ATOMIC);
 
-	return NULL; /* for now NULL.. need to return the address of mapped region */
+       /* TODO: NULL for now. Need to return the addr of mapped region */
+       return NULL;
 }
 
-static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum, void *vaddr)
+static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf,
+                                          unsigned long pgnum, void *vaddr)
 {
        struct imported_sgt_info *imported;
        int ret;
@@ -322,7 +324,8 @@ static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum,
                                        HYPER_DMABUF_OPS_KUNMAP);
 }
 
-static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf,
+                                struct vm_area_struct *vma)
 {
        struct imported_sgt_info *imported;
        int ret;
@@ -374,8 +377,8 @@ static const struct dma_buf_ops hyper_dmabuf_ops = {
        .map_dma_buf = hyper_dmabuf_ops_map,
        .unmap_dma_buf = hyper_dmabuf_ops_unmap,
        .release = hyper_dmabuf_ops_release,
-       .begin_cpu_access = (void*)hyper_dmabuf_ops_begin_cpu_access,
-       .end_cpu_access = (void*)hyper_dmabuf_ops_end_cpu_access,
+       .begin_cpu_access = (void *)hyper_dmabuf_ops_begin_cpu_access,
+       .end_cpu_access = (void *)hyper_dmabuf_ops_end_cpu_access,
        .map_atomic = hyper_dmabuf_ops_kmap_atomic,
        .unmap_atomic = hyper_dmabuf_ops_kunmap_atomic,
        .map = hyper_dmabuf_ops_kmap,
@@ -395,9 +398,8 @@ int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags)
         */
        hyper_dmabuf_export_dma_buf(imported);
 
-       if (imported->dma_buf) {
+       if (imported->dma_buf)
                fd = dma_buf_fd(imported->dma_buf, flags);
-       }
 
        return fd;
 }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c
index 36e888c..1f2f56b 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c
@@ -36,63 +36,63 @@
        ((nents)*PAGE_SIZE - (first_offset) - PAGE_SIZE + (last_len))
 
 int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
-                               int query, unsigned long* info)
+                               int query, unsigned long *info)
 {
-       switch (query)
-       {
-               case HYPER_DMABUF_QUERY_TYPE:
-                       *info = EXPORTED;
-                       break;
-
-               /* exporting domain of this specific dmabuf*/
-               case HYPER_DMABUF_QUERY_EXPORTER:
-                       *info = HYPER_DMABUF_DOM_ID(exported->hid);
-                       break;
-
-               /* importing domain of this specific dmabuf */
-               case HYPER_DMABUF_QUERY_IMPORTER:
-                       *info = exported->rdomid;
-                       break;
-
-               /* size of dmabuf in byte */
-               case HYPER_DMABUF_QUERY_SIZE:
-                       *info = exported->dma_buf->size;
-                       break;
-
-               /* whether the buffer is used by importer */
-               case HYPER_DMABUF_QUERY_BUSY:
-                       *info = (exported->active > 0);
-                       break;
-
-               /* whether the buffer is unexported */
-               case HYPER_DMABUF_QUERY_UNEXPORTED:
-                       *info = !exported->valid;
-                       break;
-
-               /* whether the buffer is scheduled to be unexported */
-               case HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED:
-                       *info = !exported->unexport_sched;
-                       break;
-
-               /* size of private info attached to buffer */
-               case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
-                       *info = exported->sz_priv;
-                       break;
-
-               /* copy private info attached to buffer */
-               case HYPER_DMABUF_QUERY_PRIV_INFO:
-                       if (exported->sz_priv > 0) {
-                               int n;
-                               n = copy_to_user((void __user*) *info,
-                                               exported->priv,
-                                               exported->sz_priv);
-                               if (n != 0)
-                                       return -EINVAL;
-                       }
-                       break;
-
-               default:
-                       return -EINVAL;
+       switch (query) {
+       case HYPER_DMABUF_QUERY_TYPE:
+               *info = EXPORTED;
+               break;
+
+	/* exporting domain of this specific dmabuf */
+       case HYPER_DMABUF_QUERY_EXPORTER:
+               *info = HYPER_DMABUF_DOM_ID(exported->hid);
+               break;
+
+       /* importing domain of this specific dmabuf */
+       case HYPER_DMABUF_QUERY_IMPORTER:
+               *info = exported->rdomid;
+               break;
+
+       /* size of dmabuf in byte */
+       case HYPER_DMABUF_QUERY_SIZE:
+               *info = exported->dma_buf->size;
+               break;
+
+       /* whether the buffer is used by importer */
+       case HYPER_DMABUF_QUERY_BUSY:
+               *info = (exported->active > 0);
+               break;
+
+       /* whether the buffer is unexported */
+       case HYPER_DMABUF_QUERY_UNEXPORTED:
+               *info = !exported->valid;
+               break;
+
+       /* whether the buffer is scheduled to be unexported */
+       case HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED:
+               *info = !exported->unexport_sched;
+               break;
+
+       /* size of private info attached to buffer */
+       case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
+               *info = exported->sz_priv;
+               break;
+
+       /* copy private info attached to buffer */
+       case HYPER_DMABUF_QUERY_PRIV_INFO:
+               if (exported->sz_priv > 0) {
+                       int n;
+
+                       n = copy_to_user((void __user *) *info,
+                                       exported->priv,
+                                       exported->sz_priv);
+                       if (n != 0)
+                               return -EINVAL;
+               }
+               break;
+
+       default:
+               return -EINVAL;
        }
 
        return 0;
@@ -102,66 +102,70 @@ int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
 int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
                                int query, unsigned long *info)
 {
-       switch (query)
-       {
-               case HYPER_DMABUF_QUERY_TYPE:
-                       *info = IMPORTED;
-                       break;
-
-               /* exporting domain of this specific dmabuf*/
-               case HYPER_DMABUF_QUERY_EXPORTER:
-                       *info = HYPER_DMABUF_DOM_ID(imported->hid);
-                       break;
-
-               /* importing domain of this specific dmabuf */
-               case HYPER_DMABUF_QUERY_IMPORTER:
-                       *info = hy_drv_priv->domid;
-                       break;
-
-               /* size of dmabuf in byte */
-               case HYPER_DMABUF_QUERY_SIZE:
-                       if (imported->dma_buf) {
-				/* if local dma_buf is created (if it's ever mapped),
-                                * retrieve it directly from struct dma_buf *
-                                */
-                               *info = imported->dma_buf->size;
-                       } else {
-				/* calcuate it from given nents, frst_ofst and last_len */
-                               *info = HYPER_DMABUF_SIZE(imported->nents,
-                                                         imported->frst_ofst,
-                                                         imported->last_len);
-                       }
-                       break;
-
-               /* whether the buffer is used or not */
-               case HYPER_DMABUF_QUERY_BUSY:
-                       /* checks if it's used by importer */
-                       *info = (imported->importers > 0);
-                       break;
-
-               /* whether the buffer is unexported */
-               case HYPER_DMABUF_QUERY_UNEXPORTED:
-                       *info = !imported->valid;
-                       break;
-               /* size of private info attached to buffer */
-               case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
-                       *info = imported->sz_priv;
-                       break;
-
-               /* copy private info attached to buffer */
-               case HYPER_DMABUF_QUERY_PRIV_INFO:
-                       if (imported->sz_priv > 0) {
-                               int n;
-                               n = copy_to_user((void __user*) *info,
-                                               imported->priv,
-                                               imported->sz_priv);
-                               if (n != 0)
-                                       return -EINVAL;
-                       }
-                       break;
-
-               default:
-                       return -EINVAL;
+       switch (query) {
+       case HYPER_DMABUF_QUERY_TYPE:
+               *info = IMPORTED;
+               break;
+
+	/* exporting domain of this specific dmabuf */
+       case HYPER_DMABUF_QUERY_EXPORTER:
+               *info = HYPER_DMABUF_DOM_ID(imported->hid);
+               break;
+
+       /* importing domain of this specific dmabuf */
+       case HYPER_DMABUF_QUERY_IMPORTER:
+               *info = hy_drv_priv->domid;
+               break;
+
+       /* size of dmabuf in byte */
+       case HYPER_DMABUF_QUERY_SIZE:
+               if (imported->dma_buf) {
+                       /* if local dma_buf is created (if it's
+                        * ever mapped), retrieve it directly
+                        * from struct dma_buf *
+                        */
+                       *info = imported->dma_buf->size;
+               } else {
+                       /* calculate it from given nents, frst_ofst
+                        * and last_len
+                        */
+                       *info = HYPER_DMABUF_SIZE(imported->nents,
+                                                 imported->frst_ofst,
+                                                 imported->last_len);
+               }
+               break;
+
+       /* whether the buffer is used or not */
+       case HYPER_DMABUF_QUERY_BUSY:
+               /* checks if it's used by importer */
+               *info = (imported->importers > 0);
+               break;
+
+       /* whether the buffer is unexported */
+       case HYPER_DMABUF_QUERY_UNEXPORTED:
+               *info = !imported->valid;
+               break;
+
+       /* size of private info attached to buffer */
+       case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
+               *info = imported->sz_priv;
+               break;
+
+       /* copy private info attached to buffer */
+       case HYPER_DMABUF_QUERY_PRIV_INFO:
+               if (imported->sz_priv > 0) {
+                       int n;
+
+                       n = copy_to_user((void __user *)*info,
+                                       imported->priv,
+                                       imported->sz_priv);
+                       if (n != 0)
+                               return -EINVAL;
+               }
+               break;
+
+       default:
+               return -EINVAL;
        }
 
        return 0;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
index 01ec98c..c9fe040 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -76,11 +76,8 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
        case HYPER_DMABUF_OPS_ATTACH:
                attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL);
 
-               if (!attachl) {
-                       dev_err(hy_drv_priv->dev,
-                               "remote sync::HYPER_DMABUF_OPS_ATTACH\n");
+               if (!attachl)
                        return -ENOMEM;
-               }
 
                attachl->attach = dma_buf_attach(exported->dma_buf,
                                                 hy_drv_priv->dev);
@@ -126,13 +123,11 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 
                sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
 
-               if (!sgtl) {
-                       dev_err(hy_drv_priv->dev,
-                               "remote sync::HYPER_DMABUF_OPS_MAP\n");
+               if (!sgtl)
                        return -ENOMEM;
-               }
 
-		sgtl->sgt = dma_buf_map_attachment(attachl->attach, DMA_BIDIRECTIONAL);
+               sgtl->sgt = dma_buf_map_attachment(attachl->attach,
+                                                  DMA_BIDIRECTIONAL);
                if (!sgtl->sgt) {
                        kfree(sgtl);
                        dev_err(hy_drv_priv->dev,
@@ -148,7 +143,7 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
                        dev_err(hy_drv_priv->dev,
                                "remote sync::HYPER_DMABUF_OPS_UNMAP\n");
                        dev_err(hy_drv_priv->dev,
-				"no more SGT or attachment left to be unmapped\n");
+                               "no SGT or attach left to be unmapped\n");
                        return -EFAULT;
                }
 
@@ -165,23 +160,28 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 
        case HYPER_DMABUF_OPS_RELEASE:
                dev_dbg(hy_drv_priv->dev,
-			"Buffer {id:%d key:%d %d %d} released, references left: %d\n",
-			 exported->hid.id, exported->hid.rng_key[0], exported->hid.rng_key[1],
-                        exported->hid.rng_key[2], exported->active - 1);
+                       "{id:%d key:%d %d %d} released, ref left: %d\n",
+                        exported->hid.id, exported->hid.rng_key[0],
+                        exported->hid.rng_key[1], exported->hid.rng_key[2],
+                        exported->active - 1);
+
+               exported->active--;
 
-                exported->active--;
-		/* If there are still importers just break, if no then continue with final cleanup */
+               /* If there are still importers just break, if no then
+                * continue with final cleanup
+                */
                if (exported->active)
                        break;
 
-               /*
-		 * Importer just released buffer fd, check if there is any other importer still using it.
-		 * If not and buffer was unexported, clean up shared data and remove that buffer.
+               /* Importer just released buffer fd, check if there is
+                * any other importer still using it.
+                * If not and buffer was unexported, clean up shared
+                * data and remove that buffer.
                 */
                dev_dbg(hy_drv_priv->dev,
                        "Buffer {id:%d key:%d %d %d} final released\n",
-			exported->hid.id, exported->hid.rng_key[0], exported->hid.rng_key[1],
-                       exported->hid.rng_key[2]);
+                       exported->hid.id, exported->hid.rng_key[0],
+                       exported->hid.rng_key[1], exported->hid.rng_key[2]);
 
                if (!exported->valid && !exported->active &&
                    !exported->unexport_sched) {
@@ -195,19 +195,21 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
                break;
 
        case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
-		ret = dma_buf_begin_cpu_access(exported->dma_buf, DMA_BIDIRECTIONAL);
+               ret = dma_buf_begin_cpu_access(exported->dma_buf,
+                                              DMA_BIDIRECTIONAL);
                if (ret) {
                        dev_err(hy_drv_priv->dev,
-				"remote sync::HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
+                               "HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
                        return ret;
                }
                break;
 
        case HYPER_DMABUF_OPS_END_CPU_ACCESS:
-		ret = dma_buf_end_cpu_access(exported->dma_buf, DMA_BIDIRECTIONAL);
+               ret = dma_buf_end_cpu_access(exported->dma_buf,
+                                            DMA_BIDIRECTIONAL);
                if (ret) {
                        dev_err(hy_drv_priv->dev,
-				"remote sync::HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
+                               "HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
                        return ret;
                }
                break;
@@ -215,22 +217,21 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
        case HYPER_DMABUF_OPS_KMAP_ATOMIC:
        case HYPER_DMABUF_OPS_KMAP:
                va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL);
-               if (!va_kmapl) {
-                       dev_err(hy_drv_priv->dev,
-				"remote sync::HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+               if (!va_kmapl)
                        return -ENOMEM;
-               }
 
                /* dummy kmapping of 1 page */
                if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC)
-			va_kmapl->vaddr = dma_buf_kmap_atomic(exported->dma_buf, 1);
+                       va_kmapl->vaddr = dma_buf_kmap_atomic(
+                                               exported->dma_buf, 1);
                else
-                       va_kmapl->vaddr = dma_buf_kmap(exported->dma_buf, 1);
+                       va_kmapl->vaddr = dma_buf_kmap(
+                                               exported->dma_buf, 1);
 
                if (!va_kmapl->vaddr) {
                        kfree(va_kmapl);
                        dev_err(hy_drv_priv->dev,
-				"remote sync::HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+                               "HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
                        return -ENOMEM;
                }
                list_add(&va_kmapl->list, &exported->va_kmapped->list);
@@ -240,7 +241,7 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
        case HYPER_DMABUF_OPS_KUNMAP:
                if (list_empty(&exported->va_kmapped->list)) {
                        dev_err(hy_drv_priv->dev,
-				"remote sync::HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+                               "HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
                        dev_err(hy_drv_priv->dev,
                                "no more dmabuf VA to be freed\n");
                        return -EFAULT;
@@ -250,15 +251,17 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
                                            struct kmap_vaddr_list, list);
                if (!va_kmapl->vaddr) {
                        dev_err(hy_drv_priv->dev,
-				"remote sync::HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+                               "HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
                        return PTR_ERR(va_kmapl->vaddr);
                }
 
                /* unmapping 1 page */
                if (ops == HYPER_DMABUF_OPS_KUNMAP_ATOMIC)
-			dma_buf_kunmap_atomic(exported->dma_buf, 1, va_kmapl->vaddr);
+                       dma_buf_kunmap_atomic(exported->dma_buf,
+                                             1, va_kmapl->vaddr);
                else
-                       dma_buf_kunmap(exported->dma_buf, 1, va_kmapl->vaddr);
+                       dma_buf_kunmap(exported->dma_buf,
+                                      1, va_kmapl->vaddr);
 
                list_del(&va_kmapl->list);
                kfree(va_kmapl);
@@ -266,7 +269,8 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 
        case HYPER_DMABUF_OPS_MMAP:
                /* currently not supported: looking for a way to create
-                * a dummy vma */
+                * a dummy vma
+                */
                dev_warn(hy_drv_priv->dev,
                         "remote sync::sychronized mmap is not supported\n");
                break;
@@ -274,11 +278,8 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
        case HYPER_DMABUF_OPS_VMAP:
                va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL);
 
-               if (!va_vmapl) {
-                       dev_err(hy_drv_priv->dev,
-                               "remote sync::HYPER_DMABUF_OPS_VMAP\n");
+               if (!va_vmapl)
                        return -ENOMEM;
-               }
 
                /* dummy vmapping */
                va_vmapl->vaddr = dma_buf_vmap(exported->dma_buf);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
index 315c354..e9299e5 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
@@ -89,9 +89,8 @@ struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
        if (!pg_info)
                return NULL;
 
-       pg_info->pgs = kmalloc(sizeof(struct page *) *
-                              hyper_dmabuf_get_num_pgs(sgt),
-                              GFP_KERNEL);
+       pg_info->pgs = kmalloc_array(hyper_dmabuf_get_num_pgs(sgt),
+                                    sizeof(struct page *), GFP_KERNEL);
 
        if (!pg_info->pgs) {
                kfree(pg_info);
@@ -137,17 +136,17 @@ struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
 }
 
 /* create sg_table with given pages and other parameters */
-struct sg_table* hyper_dmabuf_create_sgt(struct page **pgs,
-                                        int frst_ofst, int last_len, int nents)
+struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs,
+                                        int frst_ofst, int last_len,
+                                        int nents)
 {
        struct sg_table *sgt;
        struct scatterlist *sgl;
        int i, ret;
 
        sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!sgt) {
+       if (!sgt)
                return NULL;
-       }
 
        ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
        if (ret) {
@@ -163,7 +162,7 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page **pgs,
 
        sg_set_page(sgl, pgs[0], PAGE_SIZE-frst_ofst, frst_ofst);
 
-       for (i=1; i<nents-1; i++) {
+       for (i = 1; i < nents-1; i++) {
                sgl = sg_next(sgl);
                sg_set_page(sgl, pgs[i], PAGE_SIZE, 0);
        }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
index 930bade..152f78c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
@@ -31,7 +31,7 @@ int dmabuf_refcount(struct dma_buf *dma_buf);
 struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt);
 
 /* create sg_table with given pages and other parameters */
-struct sg_table* hyper_dmabuf_create_sgt(struct page **pgs,
+struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs,
                                         int frst_ofst, int last_len,
                                         int nents);
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
index 8a612d1..a11f804 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
@@ -51,67 +51,91 @@ struct vmap_vaddr_list {
 
 /* Exporter builds pages_info before sharing pages */
 struct pages_info {
-        int frst_ofst; /* offset of data in the first page */
-        int last_len; /* length of data in the last page */
-        int nents; /* # of pages */
-        struct page **pgs; /* pages that contains reference numbers of shared pages*/
+       int frst_ofst;
+       int last_len;
+       int nents;
+       struct page **pgs;
 };
 
 
 /* Exporter stores references to sgt in a hash table
- * Exporter keeps these references for synchronization and tracking purposes
+ * Exporter keeps these references for synchronization
+ * and tracking purposes
  */
 struct exported_sgt_info {
-        hyper_dmabuf_id_t hid; /* unique id to reference dmabuf in remote domain */
-       int rdomid; /* domain importing this sgt */
+       hyper_dmabuf_id_t hid;
+
+       /* VM ID of importer */
+       int rdomid;
 
-       struct dma_buf *dma_buf; /* needed to store this for freeing it later */
+       struct dma_buf *dma_buf;
        int nents;
 
-       /* list of remote activities on dma_buf */
+       /* list for tracking activities on dma_buf */
        struct sgt_list *active_sgts;
        struct attachment_list *active_attached;
        struct kmap_vaddr_list *va_kmapped;
        struct vmap_vaddr_list *va_vmapped;
 
-	bool valid; /* set to 0 once unexported. Needed to prevent further mapping by importer */
-       int active; /* locally shared on importer's side */
-       void *refs_info; /* hypervisor-specific info for the references */
+       /* set to 0 when unexported. Importer doesn't
+        * do a new mapping of buffer if valid == false
+        */
+       bool valid;
+
+       /* active == true if the buffer is actively used
+        * (mapped) by importer
+        */
+       int active;
+
+       /* hypervisor specific reference data for shared pages */
+       void *refs_info;
+
        struct delayed_work unexport;
        bool unexport_sched;
 
-       /* owner of buffer
-        * TODO: that is naiive as buffer may be reused by
-        * another userspace app, so here list of struct file should be kept
-        * and emergency unexport should be executed only after last of buffer
-        * uses releases hyper_dmabuf device
+       /* list for file pointers associated with all user space
+        * applications that have exported this same buffer to
+        * another VM. This needs to be tracked to know whether
+        * the buffer can be completely freed.
         */
        struct file *filp;
 
+       /* size of private */
        size_t sz_priv;
-       char *priv; /* device specific info (e.g. image's meta info?) */
+
+       /* private data associated with the exported buffer */
+       char *priv;
 };
 
-/* Importer store references (before mapping) on shared pages
- * Importer store these references in the table and map it in
- * its own memory map once userspace asks for reference for the buffer */
+/* imported_sgt_info contains information about imported DMA_BUF
+ * this info is kept in IMPORT list and asynchronously retrieved and
+ * used to map DMA_BUF on importer VM's side upon export fd ioctl
+ * request from user-space
+ */
+
 struct imported_sgt_info {
        hyper_dmabuf_id_t hid; /* unique id for shared dmabuf imported */
 
-	int ref_handle; /* reference number of top level addressing page of shared pages */
-       int frst_ofst;  /* start offset in first shared page */
-       int last_len;   /* length of data in the last shared page */
-       int nents;      /* number of pages to be shared */
+       /* hypervisor-specific handle to pages */
+       int ref_handle;
+
+       /* offset and size info of DMA_BUF */
+       int frst_ofst;
+       int last_len;
+       int nents;
 
        struct dma_buf *dma_buf;
-       struct sg_table *sgt; /* sgt pointer after importing buffer */
+       struct sg_table *sgt;
 
        void *refs_info;
        bool valid;
        int importers;
 
+       /* size of private */
        size_t sz_priv;
-       char *priv; /* device specific info (e.g. image's meta info?) */
+
+       /* private data associated with the exported buffer */
+       char *priv;
 };
 
 #endif /* __HYPER_DMABUF_STRUCT_H__ */
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index f70b4ea..05f3521 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -41,12 +41,10 @@
 #include "hyper_dmabuf_xen_comm_list.h"
 #include "../hyper_dmabuf_drv.h"
 
-static int export_req_id = 0;
+static int export_req_id;
 
 struct hyper_dmabuf_req req_pending = {0};
 
-extern int xenstored_ready;
-
 static void xen_get_domid_delayed(struct work_struct *unused);
 static void xen_init_comm_env_delayed(struct work_struct *unused);
 
@@ -160,15 +158,16 @@ void xen_get_domid_delayed(struct work_struct *unused)
        int domid, ret;
 
        /* scheduling another if driver is still running
-        * and xenstore has not been initialized */
+        * and xenstore has not been initialized
+        */
        if (likely(xenstored_ready == 0)) {
                dev_dbg(hy_drv_priv->dev,
-			"Xenstore is not quite ready yet. Will retry it in 500ms\n");
+                       "Xenstore is not ready yet. Will retry in 500ms\n");
                schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
        } else {
-               xenbus_transaction_start(&xbt);
+               xenbus_transaction_start(&xbt);
 
-               ret = xenbus_scanf(xbt, "domid","", "%d", &domid);
+               ret = xenbus_scanf(xbt, "domid", "", "%d", &domid);
 
                if (ret <= 0)
                        domid = -1;
@@ -176,14 +175,17 @@ void xen_get_domid_delayed(struct work_struct *unused)
                xenbus_transaction_end(xbt, 0);
 
                /* try again since -1 is an invalid id for domain
-                * (but only if driver is still running) */
+                * (but only if driver is still running)
+                */
                if (unlikely(domid == -1)) {
                        dev_dbg(hy_drv_priv->dev,
                                "domid==-1 is invalid. Will retry it in 
500ms\n");
-                       schedule_delayed_work(&get_vm_id_work, 
msecs_to_jiffies(500));
+                       schedule_delayed_work(&get_vm_id_work,
+                                             msecs_to_jiffies(500));
                } else {
                        dev_info(hy_drv_priv->dev,
-				"Successfully retrieved domid from Xenstore:%d\n", domid);
+				 "Successfully retrieved domid from Xenstore:%d\n",
+                                domid);
                        hy_drv_priv->domid = domid;
                }
        }
@@ -199,21 +201,20 @@ int hyper_dmabuf_xen_get_domid(void)
                return -1;
        }
 
-        xenbus_transaction_start(&xbt);
+       xenbus_transaction_start(&xbt);
 
-        if (!xenbus_scanf(xbt, "domid","", "%d", &domid)) {
+       if (!xenbus_scanf(xbt, "domid", "", "%d", &domid))
                domid = -1;
-        }
 
-        xenbus_transaction_end(xbt, 0);
+       xenbus_transaction_end(xbt, 0);
 
        return domid;
 }
 
 static int xen_comm_next_req_id(void)
 {
-        export_req_id++;
-        return export_req_id;
+       export_req_id++;
+       return export_req_id;
 }
 
 /* For now cache latast rings as global variables TODO: keep them in list*/
@@ -236,19 +237,18 @@ static irqreturn_t back_ring_isr(int irq, void *info);
 static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch,
                                         const char *path, const char *token)
 {
-       int rdom,ret;
+       int rdom, ret;
        uint32_t grefid, port;
        struct xen_comm_rx_ring_info *ring_info;
 
        /* Check which domain has changed its exporter rings */
        ret = sscanf(watch->node, "/local/domain/%d/", &rdom);
-       if (ret <= 0) {
+       if (ret <= 0)
                return;
-       }
 
        /* Check if we have importer ring for given remote domain already
-        * created */
-
+        * created
+        */
        ring_info = xen_comm_find_rx_ring(rdom);
 
        /* Try to query remote domain exporter ring details - if
@@ -298,11 +298,8 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 
        ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
 
-       if (!ring_info) {
-               dev_err(hy_drv_priv->dev,
-                       "No more spae left\n");
+       if (!ring_info)
                return -ENOMEM;
-       }
 
        /* from exporter to importer */
        shared_ring = (void *)__get_free_pages(GFP_KERNEL, 1);
@@ -318,8 +315,8 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
        FRONT_RING_INIT(&(ring_info->ring_front), sring, PAGE_SIZE);
 
        ring_info->gref_ring = gnttab_grant_foreign_access(domid,
-                                                          virt_to_mfn(shared_ring),
-                                                          0);
+                                               virt_to_mfn(shared_ring),
+                                               0);
        if (ring_info->gref_ring < 0) {
                /* fail to get gref */
                kfree(ring_info);
@@ -340,7 +337,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
        /* setting up interrupt */
        ret = bind_evtchn_to_irqhandler(alloc_unbound.port,
                                        front_ring_isr, 0,
-                                       NULL, (void*) ring_info);
+                                       NULL, (void *) ring_info);
 
        if (ret < 0) {
                dev_err(hy_drv_priv->dev,
@@ -368,25 +365,24 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 
        ret = xen_comm_add_tx_ring(ring_info);
 
-       ret = xen_comm_expose_ring_details(hyper_dmabuf_xen_get_domid(), domid,
-                                          ring_info->gref_ring, ring_info->port);
+       ret = xen_comm_expose_ring_details(hyper_dmabuf_xen_get_domid(),
+                                          domid,
+                                          ring_info->gref_ring,
+                                          ring_info->port);
 
-       /*
-        * Register watch for remote domain exporter ring.
+       /* Register watch for remote domain exporter ring.
         * When remote domain will setup its exporter ring,
         * we will automatically connect our importer ring to it.
         */
        ring_info->watch.callback = remote_dom_exporter_watch_cb;
-	ring_info->watch.node = (const char*) kmalloc(sizeof(char) * 255, GFP_KERNEL);
+       ring_info->watch.node = kmalloc(255, GFP_KERNEL);
 
        if (!ring_info->watch.node) {
-               dev_err(hy_drv_priv->dev,
-                       "No more space left\n");
                kfree(ring_info);
                return -ENOMEM;
        }
 
-       sprintf((char*)ring_info->watch.node,
+       sprintf((char *)ring_info->watch.node,
                "/local/domain/%d/data/hyper_dmabuf/%d/port",
                domid, hyper_dmabuf_xen_get_domid());
 
@@ -404,9 +400,8 @@ void hyper_dmabuf_xen_cleanup_tx_rbuf(int domid)
        /* check if we at all have exporter ring for given rdomain */
        ring_info = xen_comm_find_tx_ring(domid);
 
-       if (!ring_info) {
+       if (!ring_info)
                return;
-       }
 
        xen_comm_remove_tx_ring(domid);
 
@@ -416,7 +411,7 @@ void hyper_dmabuf_xen_cleanup_tx_rbuf(int domid)
        /* No need to close communication channel, will be done by
         * this function
         */
-       unbind_from_irqhandler(ring_info->irq, (void*) ring_info);
+       unbind_from_irqhandler(ring_info->irq, (void *) ring_info);
 
        /* No need to free sring page, will be freed by this function
         * when other side will end its access
@@ -430,7 +425,8 @@ void hyper_dmabuf_xen_cleanup_tx_rbuf(int domid)
        if (!rx_ring_info)
                return;
 
-	BACK_RING_INIT(&(rx_ring_info->ring_back), rx_ring_info->ring_back.sring,
+       BACK_RING_INIT(&(rx_ring_info->ring_back),
+                      rx_ring_info->ring_back.sring,
                       PAGE_SIZE);
 }
 
@@ -473,11 +469,8 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 
        ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
 
-       if (!ring_info) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
+       if (!ring_info)
                return -ENOMEM;
-       }
 
        ring_info->sdomain = domid;
        ring_info->evtchn = rx_port;
@@ -485,8 +478,6 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
        map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL);
 
        if (!map_ops) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
                ret = -ENOMEM;
                goto fail_no_map_ops;
        }
@@ -497,11 +488,13 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
        }
 
        gnttab_set_map_op(&map_ops[0],
-                         (unsigned long)pfn_to_kaddr(page_to_pfn(shared_ring)),
+                         (unsigned long)pfn_to_kaddr(
+                                       page_to_pfn(shared_ring)),
                          GNTMAP_host_map, rx_gref, domid);
 
        gnttab_set_unmap_op(&ring_info->unmap_op,
-                           (unsigned long)pfn_to_kaddr(page_to_pfn(shared_ring)),
+                           (unsigned long)pfn_to_kaddr(
+                                       page_to_pfn(shared_ring)),
                            GNTMAP_host_map, -1);
 
        ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1);
@@ -542,13 +535,12 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
        ret = xen_comm_add_rx_ring(ring_info);
 
        /* Setup communcation channel in opposite direction */
-       if (!xen_comm_find_tx_ring(domid)) {
+       if (!xen_comm_find_tx_ring(domid))
                ret = hyper_dmabuf_xen_init_tx_rbuf(domid);
-       }
 
        ret = request_irq(ring_info->irq,
                          back_ring_isr, 0,
-                         NULL, (void*)ring_info);
+                         NULL, (void *)ring_info);
 
        return ret;
 
@@ -577,7 +569,7 @@ void hyper_dmabuf_xen_cleanup_rx_rbuf(int domid)
        xen_comm_remove_rx_ring(domid);
 
        /* no need to close event channel, will be done by that function */
-       unbind_from_irqhandler(ring_info->irq, (void*)ring_info);
+       unbind_from_irqhandler(ring_info->irq, (void *)ring_info);
 
        /* unmapping shared ring page */
        shared_ring = virt_to_page(ring_info->ring_back.sring);
@@ -636,7 +628,8 @@ static void xen_rx_ch_add_delayed(struct work_struct *unused)
 
                                if (!ret)
                                        dev_info(hy_drv_priv->dev,
-                                                "Finishing up setting up rx channel for domain %d\n", i);
+                                                "Done rx ch init for VM %d\n",
+                                                i);
                        }
                }
 
@@ -654,7 +647,8 @@ void xen_init_comm_env_delayed(struct work_struct *unused)
 
        /* scheduling another work if driver is still running
         * and xenstore hasn't been initialized or dom_id hasn't
-        * been correctly retrieved. */
+        * been correctly retrieved.
+        */
        if (likely(xenstored_ready == 0 ||
            hy_drv_priv->domid == -1)) {
                dev_dbg(hy_drv_priv->dev,
@@ -778,9 +772,8 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req,
        ring->req_prod_pvt++;
 
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
-       if (notify) {
+       if (notify)
                notify_remote_via_irq(ring_info->irq);
-       }
 
        if (wait) {
                while (timeout--) {
@@ -792,24 +785,29 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req,
 
                if (timeout < 0) {
                        mutex_unlock(&ring_info->lock);
-                       dev_err(hy_drv_priv->dev, "request timed-out\n");
+                       dev_err(hy_drv_priv->dev,
+                               "request timed-out\n");
                        return -EBUSY;
                }
 
                mutex_unlock(&ring_info->lock);
                do_gettimeofday(&tv_end);
 
-		/* checking time duration for round-trip of a request for debugging */
+               /* checking time duration for round-trip of a request
+                * for debugging
+                */
                if (tv_end.tv_usec >= tv_start.tv_usec) {
                        tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec;
                        tv_diff.tv_usec = tv_end.tv_usec-tv_start.tv_usec;
                } else {
                        tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec-1;
-			tv_diff.tv_usec = tv_end.tv_usec+1000000-tv_start.tv_usec;
+                       tv_diff.tv_usec = tv_end.tv_usec+1000000-
+                                         tv_start.tv_usec;
                }
 
                if (tv_diff.tv_sec != 0 && tv_diff.tv_usec > 16000)
-			dev_dbg(hy_drv_priv->dev, "send_req:time diff: %ld sec, %ld usec\n",
+                       dev_dbg(hy_drv_priv->dev,
+                               "send_req:time diff: %ld sec, %ld usec\n",
                                tv_diff.tv_sec, tv_diff.tv_usec);
        }
 
@@ -850,23 +848,24 @@ static irqreturn_t back_ring_isr(int irq, void *info)
                        ret = hyper_dmabuf_msg_parse(ring_info->sdomain, &req);
 
                        if (ret > 0) {
-				/* preparing a response for the request and send it to
-                                * the requester
+                               /* preparing a response for the request and
+                                * send it to the requester
                                 */
                                memcpy(&resp, &req, sizeof(resp));
-				memcpy(RING_GET_RESPONSE(ring, ring->rsp_prod_pvt),
+                               memcpy(RING_GET_RESPONSE(ring,
+                                                        ring->rsp_prod_pvt),
                                                         &resp, sizeof(resp));
                                ring->rsp_prod_pvt++;
 
                                dev_dbg(hy_drv_priv->dev,
-					"sending response to exporter for request id:%d\n",
+                                       "responding to exporter for req:%d\n",
                                        resp.resp_id);
 
-				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
+                               RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring,
+                                                                    notify);
 
-                               if (notify) {
+                               if (notify)
                                        notify_remote_via_irq(ring_info->irq);
-                               }
                        }
 
                        RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
@@ -905,41 +904,40 @@ static irqreturn_t front_ring_isr(int irq, void *info)
                        dev_dbg(hy_drv_priv->dev,
                                "getting response from importer\n");
 
-                       if (req_pending.req_id == resp->resp_id) {
+                       if (req_pending.req_id == resp->resp_id)
                                req_pending.stat = resp->stat;
-                       }
 
                        if (resp->stat == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
                                /* parsing response */
                                ret = hyper_dmabuf_msg_parse(ring_info->rdomain,
-                                                       (struct hyper_dmabuf_req *)resp);
+                                       (struct hyper_dmabuf_req *)resp);
 
                                if (ret < 0) {
                                        dev_err(hy_drv_priv->dev,
-						"getting error while parsing response\n");
+                                               "err while parsing resp\n");
                                }
                        } else if (resp->stat == HYPER_DMABUF_REQ_PROCESSED) {
-				/* for debugging dma_buf remote synchronization */
+                               /* for debugging dma_buf remote synch */
                                dev_dbg(hy_drv_priv->dev,
                                        "original request = 0x%x\n", resp->cmd);
                                dev_dbg(hy_drv_priv->dev,
-					"Just got HYPER_DMABUF_REQ_PROCESSED\n");
+                                       "got HYPER_DMABUF_REQ_PROCESSED\n");
                        } else if (resp->stat == HYPER_DMABUF_REQ_ERROR) {
-				/* for debugging dma_buf remote synchronization */
+                               /* for debugging dma_buf remote synch */
                                dev_dbg(hy_drv_priv->dev,
                                        "original request = 0x%x\n", resp->cmd);
                                dev_dbg(hy_drv_priv->dev,
-                                       "Just got HYPER_DMABUF_REQ_ERROR\n");
+                                       "got HYPER_DMABUF_REQ_ERROR\n");
                        }
                }
 
                ring->rsp_cons = i;
 
-               if (i != ring->req_prod_pvt) {
+               if (i != ring->req_prod_pvt)
                        RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do);
-               } else {
+               else
                        ring->sring->rsp_event = i+1;
-               }
+
        } while (more_to_do);
 
        return IRQ_HANDLED;
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
index 80741c1..8e2d1d0 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
@@ -29,23 +29,25 @@
 #include "xen/xenbus.h"
 #include "../hyper_dmabuf_msg.h"
 
+extern int xenstored_ready;
+
 DEFINE_RING_TYPES(xen_comm, struct hyper_dmabuf_req, struct hyper_dmabuf_resp);
 
 struct xen_comm_tx_ring_info {
-        struct xen_comm_front_ring ring_front;
+       struct xen_comm_front_ring ring_front;
        int rdomain;
-        int gref_ring;
-        int irq;
-        int port;
+       int gref_ring;
+       int irq;
+       int port;
        struct mutex lock;
        struct xenbus_watch watch;
 };
 
 struct xen_comm_rx_ring_info {
-        int sdomain;
-        int irq;
-        int evtchn;
-        struct xen_comm_back_ring ring_back;
+       int sdomain;
+       int irq;
+       int evtchn;
+       struct xen_comm_back_ring ring_back;
        struct gnttab_unmap_grant_ref unmap_op;
 };
 
@@ -70,6 +72,7 @@ void hyper_dmabuf_xen_cleanup_rx_rbuf(int domid);
 void hyper_dmabuf_xen_destroy_comm(void);
 
 /* send request to the remote domain */
-int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait);
+int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req,
+                             int wait);
 
 #endif // __HYPER_DMABUF_XEN_COMM_H__
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
index 7a8ec73..343aab3 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
@@ -31,7 +31,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/cdev.h>
-#include <asm/uaccess.h>
 #include <linux/hashtable.h>
 #include <xen/grant_table.h>
 #include "../hyper_dmabuf_drv.h"
@@ -41,7 +40,7 @@
 DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING);
 DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING);
 
-void xen_comm_ring_table_init()
+void xen_comm_ring_table_init(void)
 {
        hash_init(xen_comm_rx_ring_hash);
        hash_init(xen_comm_tx_ring_hash);
@@ -53,11 +52,8 @@ int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info)
 
        info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
-       if (!info_entry) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
+       if (!info_entry)
                return -ENOMEM;
-       }
 
        info_entry->info = ring_info;
 
@@ -73,11 +69,8 @@ int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info)
 
        info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
-       if (!info_entry) {
-               dev_err(hy_drv_priv->dev,
-                       "No memory left to be allocated\n");
+       if (!info_entry)
                return -ENOMEM;
-       }
 
        info_entry->info = ring_info;
 
@@ -93,7 +86,7 @@ struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid)
        int bkt;
 
        hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node)
-               if(info_entry->info->rdomain == domid)
+               if (info_entry->info->rdomain == domid)
                        return info_entry->info;
 
        return NULL;
@@ -105,7 +98,7 @@ struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid)
        int bkt;
 
        hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node)
-               if(info_entry->info->sdomain == domid)
+               if (info_entry->info->sdomain == domid)
                        return info_entry->info;
 
        return NULL;
@@ -117,7 +110,7 @@ int xen_comm_remove_tx_ring(int domid)
        int bkt;
 
        hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node)
-               if(info_entry->info->rdomain == domid) {
+               if (info_entry->info->rdomain == domid) {
                        hash_del(&info_entry->node);
                        kfree(info_entry);
                        return 0;
@@ -132,7 +125,7 @@ int xen_comm_remove_rx_ring(int domid)
        int bkt;
 
        hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node)
-               if(info_entry->info->sdomain == domid) {
+               if (info_entry->info->sdomain == domid) {
                        hash_del(&info_entry->node);
                        kfree(info_entry);
                        return 0;
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
index cde8ade..8502fe7 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
@@ -31,13 +31,13 @@
 #define MAX_ENTRY_RX_RING 7
 
 struct xen_comm_tx_ring_info_entry {
-        struct xen_comm_tx_ring_info *info;
-        struct hlist_node node;
+       struct xen_comm_tx_ring_info *info;
+       struct hlist_node node;
 };
 
 struct xen_comm_rx_ring_info_entry {
-        struct xen_comm_rx_ring_info *info;
-        struct hlist_node node;
+       struct xen_comm_rx_ring_info *info;
+       struct hlist_node node;
 };
 
 void xen_comm_ring_table_init(void);
@@ -54,10 +54,14 @@ struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid);
 
 struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid);
 
-/* iterates over all exporter rings and calls provided function for each of them */
+/* iterates over all exporter rings and calls provided
+ * function for each of them
+ */
 void xen_comm_foreach_tx_ring(void (*func)(int domid));
 
-/* iterates over all importer rings and calls provided function for each of them */
+/* iterates over all importer rings and calls provided
+ * function for each of them
+ */
 void xen_comm_foreach_rx_ring(void (*func)(int domid));
 
 #endif // __HYPER_DMABUF_XEN_COMM_LIST_H__
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
index c5fec24..e5bff09 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
@@ -34,11 +34,20 @@ extern struct hyper_dmabuf_backend_ops xen_backend_ops;
  * when unsharing.
  */
 struct xen_shared_pages_info {
-        grant_ref_t lvl3_gref; /* top level refid */
-        grant_ref_t *lvl3_table; /* page of top level addressing, it contains refids of 2nd level pages */
-        grant_ref_t *lvl2_table; /* table of 2nd level pages, that contains refids to data pages */
-        struct gnttab_unmap_grant_ref* unmap_ops; /* unmap ops for mapped pages */
-        struct page **data_pages; /* data pages to be unmapped */
+       /* top level refid */
+       grant_ref_t lvl3_gref;
+
+       /* page of top level addressing, it contains refids of 2nd lvl pages */
+       grant_ref_t *lvl3_table;
+
+       /* table of 2nd level pages, that contains refids to data pages */
+       grant_ref_t *lvl2_table;
+
+       /* unmap ops for mapped pages */
+       struct gnttab_unmap_grant_ref *unmap_ops;
+
+       /* data pages to be unmapped */
+       struct page **data_pages;
 };
 
 #endif // __HYPER_DMABUF_XEN_COMM_H__
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
index 424417d..a86313a 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -40,19 +40,21 @@
  * Creates 2 level page directory structure for referencing shared pages.
  * Top level page is a single page that contains up to 1024 refids that
  * point to 2nd level pages.
+ *
  * Each 2nd level page contains up to 1024 refids that point to shared
  * data pages.
+ *
  * There will always be one top level page and number of 2nd level pages
  * depends on number of shared data pages.
  *
  *      3rd level page                2nd level pages            Data pages
- * +-------------------------+   ┌>+--------------------+ ┌--->+------------+
- * |2nd level page 0 refid   |---┘ |Data page 0 refid   |-┘    |Data page 0 |
- * |2nd level page 1 refid   |---┐ |Data page 1 refid   |-┐    +------------+
+ * +-------------------------+   ┌>+--------------------+ ┌>+------------+
+ * |2nd level page 0 refid   |---┘ |Data page 0 refid   |-┘ |Data page 0 |
+ * |2nd level page 1 refid   |---┐ |Data page 1 refid   |-┐ +------------+
  * |           ...           |   | |     ....           | |
- * |2nd level page 1023 refid|-┐ | |Data page 1023 refid| └--->+------------+
- * +-------------------------+ | | +--------------------+      |Data page 1 |
- *                             | |                             +------------+
+ * |2nd level page 1023 refid|-┐ | |Data page 1023 refid| └>+------------+
+ * +-------------------------+ | | +--------------------+   |Data page 1 |
+ *                             | |                          +------------+
  *                             | └>+--------------------+
  *                             |   |Data page 1024 refid|
  *                             |   |Data page 1025 refid|
@@ -65,9 +67,8 @@
  *                                 |Data page 1047552 refid|
  *                                 |Data page 1047553 refid|
  *                                 |       ...             |
- *                                 |Data page 1048575 refid|-->+------------------+
- *                                 +-----------------------+   |Data page 1048575 |
- *                                                             +------------------+
+ *                                 |Data page 1048575 refid|
+ *                                 +-----------------------+
  *
  * Using such 2 level structure it is possible to reference up to 4GB of
  * shared data using single refid pointing to top level page.
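
As a side note, the 4GB figure in the diagram above falls straight out of the
page and refid sizes; a rough sketch of the arithmetic, assuming 4 KiB pages
and 4-byte grant references (which is what the driver's REFS_PER_PAGE works
out to):

    /* Illustrative capacity math for the 2 level table:
     *
     *   refids per page      = PAGE_SIZE / sizeof(grant_ref_t)
     *                        = 4096 / 4 = 1024
     *   data pages reachable = 1024 second level pages * 1024 refids each
     *                        = 1048576 pages
     *   shared data          = 1048576 pages * 4 KiB = 4 GiB
     *
     * all of it reachable through the single refid of the top level page.
     */
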
@@ -85,7 +86,7 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
         * Calculate number of pages needed for 2nd level addresing:
         */
        int n_lvl2_grefs = (nents/REFS_PER_PAGE +
-                          ((nents % REFS_PER_PAGE) ? 1: 0));
+                          ((nents % REFS_PER_PAGE) ? 1 : 0));
 
        struct xen_shared_pages_info *sh_pages_info;
        int i;
@@ -95,23 +96,22 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
 
        sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
 
-       if (!sh_pages_info) {
-               dev_err(hy_drv_priv->dev, "No more space left\n");
+       if (!sh_pages_info)
                return -ENOMEM;
-       }
 
        *refs_info = (void *)sh_pages_info;
 
        /* share data pages in readonly mode for security */
-       for (i=0; i<nents; i++) {
+       for (i = 0; i < nents; i++) {
                lvl2_table[i] = gnttab_grant_foreign_access(domid,
                                        pfn_to_mfn(page_to_pfn(pages[i])),
-                                       true /* read-only from remote domain */);
+                                       true /* read only */);
                if (lvl2_table[i] == -ENOSPC) {
-                       dev_err(hy_drv_priv->dev, "No more space left in grant table\n");
+                       dev_err(hy_drv_priv->dev,
+                               "No more space left in grant table\n");
 
                        /* Unshare all already shared pages for lvl2 */
-                       while(i--) {
+                       while (i--) {
                                gnttab_end_foreign_access_ref(lvl2_table[i], 0);
                                gnttab_free_grant_reference(lvl2_table[i]);
                        }
@@ -120,23 +120,26 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
        }
 
        /* Share 2nd level addressing pages in readonly mode*/
-       for (i=0; i< n_lvl2_grefs; i++) {
+       for (i = 0; i < n_lvl2_grefs; i++) {
                lvl3_table[i] = gnttab_grant_foreign_access(domid,
-                                       virt_to_mfn((unsigned long)lvl2_table+i*PAGE_SIZE ),
+                                       virt_to_mfn(
+                                       (unsigned long)lvl2_table+i*PAGE_SIZE),
                                        true);
 
                if (lvl3_table[i] == -ENOSPC) {
-                       dev_err(hy_drv_priv->dev, "No more space left in grant table\n");
+                       dev_err(hy_drv_priv->dev,
+                               "No more space left in grant table\n");
 
                        /* Unshare all already shared pages for lvl3 */
-                       while(i--) {
+                       while (i--) {
                                gnttab_end_foreign_access_ref(lvl3_table[i], 1);
                                gnttab_free_grant_reference(lvl3_table[i]);
                        }
 
                        /* Unshare all pages for lvl2 */
-                       while(nents--) {
-                               gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
+                       while (nents--) {
+                               gnttab_end_foreign_access_ref(
+                                                       lvl2_table[nents], 0);
                                gnttab_free_grant_reference(lvl2_table[nents]);
                        }
 
@@ -150,16 +153,17 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
                        true);
 
        if (lvl3_gref == -ENOSPC) {
-               dev_err(hy_drv_priv->dev, "No more space left in grant table\n");
+               dev_err(hy_drv_priv->dev,
+                       "No more space left in grant table\n");
 
                /* Unshare all pages for lvl3 */
-               while(i--) {
+               while (i--) {
                        gnttab_end_foreign_access_ref(lvl3_table[i], 1);
                        gnttab_free_grant_reference(lvl3_table[i]);
                }
 
                /* Unshare all pages for lvl2 */
-               while(nents--) {
+               while (nents--) {
                        gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
                        gnttab_free_grant_reference(lvl2_table[nents]);
                }
@@ -187,10 +191,11 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
        return -ENOSPC;
 }
 
-int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
+int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents)
+{
        struct xen_shared_pages_info *sh_pages_info;
        int n_lvl2_grefs = (nents/REFS_PER_PAGE +
-                           ((nents % REFS_PER_PAGE) ? 1: 0));
+                           ((nents % REFS_PER_PAGE) ? 1 : 0));
        int i;
 
        dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
@@ -206,28 +211,28 @@ int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
 
        /* End foreign access for data pages, but do not free them */
        for (i = 0; i < nents; i++) {
-               if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i])) {
+               if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i]))
                        dev_warn(hy_drv_priv->dev, "refid not shared !!\n");
-               }
+
                gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0);
                gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]);
        }
 
        /* End foreign access for 2nd level addressing pages */
        for (i = 0; i < n_lvl2_grefs; i++) {
-               if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i])) {
+               if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i]))
                        dev_warn(hy_drv_priv->dev, "refid not shared !!\n");
-               }
-               if (!gnttab_end_foreign_access_ref(sh_pages_info->lvl3_table[i], 1)) {
+
+               if (!gnttab_end_foreign_access_ref(
+                                       sh_pages_info->lvl3_table[i], 1))
                        dev_warn(hy_drv_priv->dev, "refid still in use!!!\n");
-               }
+
                gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]);
        }
 
        /* End foreign access for top level addressing page */
-       if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref)) {
+       if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref))
                dev_warn(hy_drv_priv->dev, "gref not shared !!\n");
-       }
 
        gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1);
        gnttab_free_grant_reference(sh_pages_info->lvl3_gref);
@@ -246,10 +251,11 @@ int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
        return 0;
 }
 
-/*
- * Maps provided top level ref id and then return array of pages containing data refs.
+/* Maps provided top level ref id and then return array of pages
+ * containing data refs.
  */
-struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int nents, void **refs_info)
+struct page **hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid,
+                                               int nents, void **refs_info)
 {
        struct page *lvl3_table_page;
        struct page **lvl2_table_pages;
@@ -280,19 +286,19 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
        sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
        *refs_info = (void *) sh_pages_info;
 
-       lvl2_table_pages = kcalloc(sizeof(struct page*), n_lvl2_grefs,
+       lvl2_table_pages = kcalloc(n_lvl2_grefs, sizeof(struct page *),
                                   GFP_KERNEL);
 
-       data_pages = kcalloc(sizeof(struct page*), nents, GFP_KERNEL);
+       data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
 
-       lvl2_map_ops = kcalloc(sizeof(*lvl2_map_ops), n_lvl2_grefs,
+       lvl2_map_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_map_ops),
                               GFP_KERNEL);
 
-       lvl2_unmap_ops = kcalloc(sizeof(*lvl2_unmap_ops), n_lvl2_grefs,
+       lvl2_unmap_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_unmap_ops),
                                 GFP_KERNEL);
 
-       data_map_ops = kcalloc(sizeof(*data_map_ops), nents, GFP_KERNEL);
-       data_unmap_ops = kcalloc(sizeof(*data_unmap_ops), nents, GFP_KERNEL);
+       data_map_ops = kcalloc(nents, sizeof(*data_map_ops), GFP_KERNEL);
+       data_unmap_ops = kcalloc(nents, sizeof(*data_unmap_ops), GFP_KERNEL);
 
        /* Map top level addressing page */
        if (gnttab_alloc_pages(1, &lvl3_table_page)) {
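
One thing worth calling out in the hunk above: the kcalloc() changes are not
just reflow. kcalloc(n, size, flags) takes the element count first and the
element size second, so the swapped arguments are put back in the documented
order (the old calls still allocated the right number of bytes, since the two
are multiplied, but count-first is the intended usage). A minimal sketch of
the pattern, with an illustrative helper:

    #include <linux/slab.h>
    #include <linux/mm_types.h>

    /* sketch only: element count first, element size second, zeroed array */
    static struct page **alloc_page_array(int nents)
    {
            struct page **pages;

            pages = kcalloc(nents, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return NULL;

            return pages;   /* caller releases with kfree(pages) */
    }
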
@@ -332,7 +338,8 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
        }
 
        for (i = 0; i < n_lvl2_grefs; i++) {
-               lvl2_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i]));
+               lvl2_table = (grant_ref_t *)pfn_to_kaddr(
+                                       page_to_pfn(lvl2_table_pages[i]));
                gnttab_set_map_op(&lvl2_map_ops[i],
                                  (unsigned long)lvl2_table, GNTMAP_host_map |
                                  GNTMAP_readonly,
@@ -348,11 +355,11 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
                dev_err(hy_drv_priv->dev,
                        "xen: cannot unmap top level page\n");
                return NULL;
-       } else {
-               /* Mark that page was unmapped */
-               lvl3_unmap_ops.handle = -1;
        }
 
+       /* Mark that page was unmapped */
+       lvl3_unmap_ops.handle = -1;
+
        if (gnttab_map_refs(lvl2_map_ops, NULL,
                            lvl2_table_pages, n_lvl2_grefs)) {
                dev_err(hy_drv_priv->dev,
@@ -384,19 +391,22 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
                lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i]));
                for (j = 0; j < REFS_PER_PAGE; j++) {
                        gnttab_set_map_op(&data_map_ops[k],
-                               (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+                               (unsigned long)pfn_to_kaddr(
+                                               page_to_pfn(data_pages[k])),
                                GNTMAP_host_map | GNTMAP_readonly,
                                lvl2_table[j], domid);
 
                        gnttab_set_unmap_op(&data_unmap_ops[k],
-                               (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+                               (unsigned long)pfn_to_kaddr(
+                                               page_to_pfn(data_pages[k])),
                                GNTMAP_host_map | GNTMAP_readonly, -1);
                        k++;
                }
        }
 
        /* for grefs in the last lvl2 table page */
-       lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[n_lvl2_grefs - 1]));
+       lvl2_table = pfn_to_kaddr(page_to_pfn(
+                               lvl2_table_pages[n_lvl2_grefs - 1]));
 
        for (j = 0; j < nents_last; j++) {
                gnttab_set_map_op(&data_map_ops[k],
@@ -424,13 +434,12 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
                dev_err(hy_drv_priv->dev,
                        "Cannot unmap 2nd level refs\n");
                return NULL;
-       } else {
-               /* Mark that pages were unmapped */
-               for (i = 0; i < n_lvl2_grefs; i++) {
-                       lvl2_unmap_ops[i].handle = -1;
-               }
        }
 
+       /* Mark that pages were unmapped */
+       for (i = 0; i < n_lvl2_grefs; i++)
+               lvl2_unmap_ops[i].handle = -1;
+
        for (i = 0; i < nents; i++) {
                if (data_map_ops[i].status) {
                        dev_err(hy_drv_priv->dev,
@@ -483,7 +492,8 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
        return NULL;
 }
 
-int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents) {
+int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents)
+{
        struct xen_shared_pages_info *sh_pages_info;
 
        dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
@@ -498,7 +508,7 @@ int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents) {
        }
 
        if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL,
-                             sh_pages_info->data_pages, nents) ) {
+                             sh_pages_info->data_pages, nents)) {
                dev_err(hy_drv_priv->dev, "Cannot unmap data pages\n");
                return -EFAULT;
        }
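
Stepping back from the style fixes for a moment: the exporter-side entry
points in this file pair up as share/unshare. A rough usage sketch, with
pages, rdomid and nents assumed to come from the surrounding export path and
error handling trimmed:

    /* sketch: share nents pages with the remote domain, hand the single top
     * level refid to the importer, and unshare again at unexport time.
     */
    void *refs_info;
    int lvl3_gref;

    lvl3_gref = hyper_dmabuf_xen_share_pages(pages, rdomid, nents, &refs_info);
    if (lvl3_gref < 0)
            return lvl3_gref;       /* e.g. -ENOMEM or -ENOSPC */

    /* ... lvl3_gref travels to the importing domain via the comm ring ... */

    hyper_dmabuf_xen_unshare_pages(&refs_info, nents);
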
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h
index 629ec0f..e7ae731 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h
@@ -25,18 +25,21 @@
 #ifndef __HYPER_DMABUF_XEN_SHM_H__
 #define __HYPER_DMABUF_XEN_SHM_H__
 
-/* This collects all reference numbers for 2nd level shared pages and create a table
- * with those in 1st level shared pages then return reference numbers for this top level
- * table. */
+/* This collects all reference numbers for 2nd level shared pages and
+ * create a table with those in 1st level shared pages then return reference
+ * numbers for this top level table.
+ */
 int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
                                 void **refs_info);
 
 int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents);
 
-/* Maps provided top level ref id and then return array of pages containing data refs.
+/* Maps provided top level ref id and then return array of pages containing
+ * data refs.
  */
-struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int nents,
-                                               void **refs_info);
+struct page **hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid,
+                                                int nents,
+                                                void **refs_info);
 
 int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents);
 
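For symmetry, the importer-side counterpart declared above, again only a
sketch: lvl3_gref, sdomid and nents are assumed to have arrived from the
exporting domain over the comm ring.

    void *refs_info;
    struct page **pages;

    /* map the shared pages using the single top level refid */
    pages = hyper_dmabuf_xen_map_shared_pages(lvl3_gref, sdomid, nents,
                                              &refs_info);
    if (!pages)
            return -EFAULT;

    /* ... wrap pages[0..nents-1] into an sg_table for the local dma-buf ... */

    hyper_dmabuf_xen_unmap_shared_pages(&refs_info, nents);
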
diff --git a/include/uapi/xen/hyper_dmabuf.h b/include/uapi/xen/hyper_dmabuf.h
index e18dd9b..cb25299 100644
--- a/include/uapi/xen/hyper_dmabuf.h
+++ b/include/uapi/xen/hyper_dmabuf.h
@@ -28,8 +28,8 @@
 #define MAX_SIZE_PRIV_DATA 192
 
 typedef struct {
-        int id;
-        int rng_key[3]; /* 12bytes long random number */
+       int id;
+       int rng_key[3]; /* 12bytes long random number */
 } hyper_dmabuf_id_t;
 
 struct hyper_dmabuf_event_hdr {
@@ -115,20 +115,20 @@ struct ioctl_hyper_dmabuf_query {
 /* DMABUF query */
 
 enum hyper_dmabuf_query {
-        HYPER_DMABUF_QUERY_TYPE = 0x10,
-        HYPER_DMABUF_QUERY_EXPORTER,
-        HYPER_DMABUF_QUERY_IMPORTER,
-        HYPER_DMABUF_QUERY_SIZE,
-        HYPER_DMABUF_QUERY_BUSY,
-        HYPER_DMABUF_QUERY_UNEXPORTED,
-        HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED,
-        HYPER_DMABUF_QUERY_PRIV_INFO_SIZE,
-        HYPER_DMABUF_QUERY_PRIV_INFO,
+       HYPER_DMABUF_QUERY_TYPE = 0x10,
+       HYPER_DMABUF_QUERY_EXPORTER,
+       HYPER_DMABUF_QUERY_IMPORTER,
+       HYPER_DMABUF_QUERY_SIZE,
+       HYPER_DMABUF_QUERY_BUSY,
+       HYPER_DMABUF_QUERY_UNEXPORTED,
+       HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED,
+       HYPER_DMABUF_QUERY_PRIV_INFO_SIZE,
+       HYPER_DMABUF_QUERY_PRIV_INFO,
 };
 
 enum hyper_dmabuf_status {
-        EXPORTED= 0x01,
-        IMPORTED,
+       EXPORTED = 0x01,
+       IMPORTED,
 };
 
 #endif //__LINUX_PUBLIC_HYPER_DMABUF_H__
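
A small illustration of the uapi id type above: the 12-byte rng_key
presumably exists so a buffer id cannot simply be guessed, which means a
lookup has to compare the whole structure, not just the id field. The helper
below is illustrative userspace code, not part of the uapi, and the include
path is assumed to be the installed location of this header:

    #include <stdbool.h>
    #include <string.h>
    #include <xen/hyper_dmabuf.h>   /* assumed install path of the uapi header */

    /* illustrative only: the driver keeps its own matching logic in-kernel */
    static bool hid_matches(hyper_dmabuf_id_t a, hyper_dmabuf_id_t b)
    {
            return a.id == b.id &&
                   memcmp(a.rng_key, b.rng_key, sizeof(a.rng_key)) == 0;
    }
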
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

