[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH 16/60] hyper_dmabuf: define hypervisor specific backend API



To allow the hyper_dmabuf driver to be adopted by hypervisors
other than Xen, a "backend" layer is defined and separated out
of the existing monolithic structure.

A "backend" is essentially a list of function entry points that
provide methods for the kernel's page-level sharing and inter-VM
communication using the hypervisor's native mechanism (hypercalls).

All backend APIs are listed in "struct hyper_dmabuf_backend_ops"
as shown below.

struct hyper_dmabuf_backend_ops {
        /* retrieving id of current virtual machine */
        int (*get_vm_id)(void);

        /* get pages shared via hypervisor-specific method */
        int (*share_pages)(struct page **, int, int, void **);

        /* make shared pages unshared via hypervisor specific method */
        int (*unshare_pages)(void **, int);

        /* map remotely shared pages on importer's side via
         * hypervisor-specific method
         */
        struct page ** (*map_shared_pages)(int, int, int, void **);

        /* unmap and free shared pages on importer's side via
         * hypervisor-specific method
         */
        int (*unmap_shared_pages)(void **, int);

        /* initialize communication environment */
        int (*init_comm_env)(void);

        void (*destroy_comm)(void);

        /* upstream ch setup (receiving and responding) */
        int (*init_rx_ch)(int);

        /* downstream ch setup (transmitting and parsing responses) */
        int (*init_tx_ch)(int);

        int (*send_req)(int, struct hyper_dmabuf_req *, int);
};

Within this new structure, only the backend APIs need to be re-designed
or replaced with new ones when porting this sharing model to a different
hypervisor environment, which is a lot simpler than completely redesigning
the whole driver for a new hypervisor.

Signed-off-by: Dongwon Kim <dongwon.kim@xxxxxxxxx>
---
 drivers/xen/hyper_dmabuf/Makefile                  |  11 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h       |   1 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c        |  33 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h        | 112 ++----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c         |   6 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c        | 426 ++-------------------
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h        |  14 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c      | 134 +++----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h      |  87 +++++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c        |  52 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h        |  23 +-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c    |   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h     |  26 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 303 +++++++++------
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  51 +--
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |  67 ++--
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h  |  32 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c    |  22 ++
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h    |  20 +
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c    | 356 +++++++++++++++++
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h    |  19 +
 21 files changed, 949 insertions(+), 850 deletions(-)
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
 create mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h

diff --git a/drivers/xen/hyper_dmabuf/Makefile 
b/drivers/xen/hyper_dmabuf/Makefile
index c9b8b7f..d90cfc3 100644
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ b/drivers/xen/hyper_dmabuf/Makefile
@@ -1,5 +1,7 @@
 TARGET_MODULE:=hyper_dmabuf
 
+PLATFORM:=XEN
+
 # If we running by kernel building system
 ifneq ($(KERNELRELEASE),)
        $(TARGET_MODULE)-objs := hyper_dmabuf_drv.o \
@@ -9,8 +11,13 @@ ifneq ($(KERNELRELEASE),)
                                 hyper_dmabuf_msg.o \
                                 hyper_dmabuf_id.o \
                                 hyper_dmabuf_remote_sync.o \
-                                xen/hyper_dmabuf_xen_comm.o \
-                                xen/hyper_dmabuf_xen_comm_list.o
+
+ifeq ($(CONFIG_XEN), y)
+       $(TARGET_MODULE)-objs += xen/hyper_dmabuf_xen_comm.o \
+                                xen/hyper_dmabuf_xen_comm_list.o \
+                                xen/hyper_dmabuf_xen_shm.o \
+                                xen/hyper_dmabuf_xen_drv.o
+endif
 
 obj-$(CONFIG_HYPER_DMABUF) := $(TARGET_MODULE).o
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
index 3d9b2d6..d012b05 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
@@ -1,2 +1 @@
 #define CURRENT_TARGET XEN
-#define INTER_DOMAIN_DMABUF_SYNCHRONIZATION
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 66d6cb9..ddcc955 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -1,15 +1,18 @@
-#include <linux/init.h>       /* module_init, module_exit */
-#include <linux/module.h> /* version info, MODULE_LICENSE, MODULE_AUTHOR, 
printk() */
+#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/workqueue.h>
-#include <xen/grant_table.h>
-#include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_conf.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_id.h"
-#include "xen/hyper_dmabuf_xen_comm_list.h"
-#include "xen/hyper_dmabuf_xen_comm.h"
 
-MODULE_LICENSE("Dual BSD/GPL");
+#ifdef CONFIG_XEN
+#include "xen/hyper_dmabuf_xen_drv.h"
+extern struct hyper_dmabuf_backend_ops xen_backend_ops;
+#endif
+
+MODULE_LICENSE("GPL");
 MODULE_AUTHOR("IOTG-PED, INTEL");
 
 int register_device(void);
@@ -29,24 +32,24 @@ static int hyper_dmabuf_drv_init(void)
                return -EINVAL;
        }
 
+#ifdef CONFIG_XEN
+       hyper_dmabuf_private.backend_ops = &xen_backend_ops;
+#endif
+
        printk( KERN_NOTICE "initializing database for imported/exported 
dmabufs\n");
 
        /* device structure initialization */
        /* currently only does work-queue initialization */
        hyper_dmabuf_private.work_queue = 
create_workqueue("hyper_dmabuf_wqueue");
-       hyper_dmabuf_private.domid = hyper_dmabuf_get_domid();
+       hyper_dmabuf_private.domid = 
hyper_dmabuf_private.backend_ops->get_vm_id();
 
        ret = hyper_dmabuf_table_init();
        if (ret < 0) {
                return -EINVAL;
        }
 
-       ret = hyper_dmabuf_ring_table_init();
-       if (ret < 0) {
-               return -EINVAL;
-       }
+       ret = hyper_dmabuf_private.backend_ops->init_comm_env();
 
-       ret = hyper_dmabuf_setup_data_dir();
        if (ret < 0) {
                return -EINVAL;
        }
@@ -61,8 +64,7 @@ static void hyper_dmabuf_drv_exit(void)
        /* hash tables for export/import entries and ring_infos */
        hyper_dmabuf_table_destroy();
 
-       hyper_dmabuf_cleanup_ringbufs();
-       hyper_dmabuf_ring_table_destroy();
+       hyper_dmabuf_private.backend_ops->destroy_comm();
 
        /* destroy workqueue */
        if (hyper_dmabuf_private.work_queue)
@@ -72,7 +74,6 @@ static void hyper_dmabuf_drv_exit(void)
        if (hyper_dmabuf_private.id_queue)
                destroy_reusable_list();
 
-       hyper_dmabuf_destroy_data_dir();
        printk( KERN_NOTICE "dma_buf-src_sink model: Exiting" );
        unregister_device();
 }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 37b0cc1..03d77d7 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -6,94 +6,48 @@ struct list_reusable_id {
        struct list_head list;
 };
 
-struct hyper_dmabuf_private {
-        struct device *device;
-       int domid;
-       struct workqueue_struct *work_queue;
-       struct list_reusable_id *id_queue;
-};
+struct hyper_dmabuf_backend_ops {
+       /* retreiving id of current virtual machine */
+       int (*get_vm_id)(void);
 
-typedef int (*hyper_dmabuf_ioctl_t)(void *data);
+       /* get pages shared via hypervisor-specific method */
+       int (*share_pages)(struct page **, int, int, void **);
 
-struct hyper_dmabuf_ioctl_desc {
-       unsigned int cmd;
-       int flags;
-       hyper_dmabuf_ioctl_t func;
-       const char *name;
-};
+       /* make shared pages unshared via hypervisor specific method */
+       int (*unshare_pages)(void **, int);
 
-#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags)   \
-       [_IOC_NR(ioctl)] = {                            \
-                       .cmd = ioctl,                   \
-                       .func = _func,                  \
-                       .flags = _flags,                \
-                       .name = #ioctl                  \
-       }
+       /* map remotely shared pages on importer's side via
+        * hypervisor-specific method
+        */
+       struct page ** (*map_shared_pages)(int, int, int, void **);
 
-#define IOCTL_HYPER_DMABUF_EXPORTER_RING_SETUP \
-_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_hyper_dmabuf_exporter_ring_setup))
-struct ioctl_hyper_dmabuf_exporter_ring_setup {
-       /* IN parameters */
-       /* Remote domain id */
-       uint32_t remote_domain;
-};
+       /* unmap and free shared pages on importer's side via
+        * hypervisor-specific method
+        */
+       int (*unmap_shared_pages)(void **, int);
 
-#define IOCTL_HYPER_DMABUF_IMPORTER_RING_SETUP \
-_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_hyper_dmabuf_importer_ring_setup))
-struct ioctl_hyper_dmabuf_importer_ring_setup {
-       /* IN parameters */
-       /* Source domain id */
-       uint32_t source_domain;
-};
+       /* initialize communication environment */
+       int (*init_comm_env)(void);
 
-#define IOCTL_HYPER_DMABUF_EXPORT_REMOTE \
-_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_hyper_dmabuf_export_remote))
-struct ioctl_hyper_dmabuf_export_remote {
-       /* IN parameters */
-       /* DMA buf fd to be exported */
-       uint32_t dmabuf_fd;
-       /* Domain id to which buffer should be exported */
-       uint32_t remote_domain;
-       /* exported dma buf id */
-       uint32_t hyper_dmabuf_id;
-       uint32_t private[4];
-};
+       void (*destroy_comm)(void);
 
-#define IOCTL_HYPER_DMABUF_EXPORT_FD \
-_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_hyper_dmabuf_export_fd))
-struct ioctl_hyper_dmabuf_export_fd {
-       /* IN parameters */
-       /* hyper dmabuf id to be imported */
-       uint32_t hyper_dmabuf_id;
-       /* flags */
-       uint32_t flags;
-       /* OUT parameters */
-       /* exported dma buf fd */
-       uint32_t fd;
-};
+       /* upstream ch setup (receiving and responding) */
+       int (*init_rx_ch)(int);
+
+       /* downstream ch setup (transmitting and parsing responses) */
+       int (*init_tx_ch)(int);
 
-#define IOCTL_HYPER_DMABUF_UNEXPORT \
-_IOC(_IOC_NONE, 'G', 4, sizeof(struct ioctl_hyper_dmabuf_unexport))
-struct ioctl_hyper_dmabuf_unexport {
-       /* IN parameters */
-       /* hyper dmabuf id to be unexported */
-       uint32_t hyper_dmabuf_id;
-       /* OUT parameters */
-       /* Status of request */
-       uint32_t status;
+       int (*send_req)(int, struct hyper_dmabuf_req *, int);
 };
 
-#define IOCTL_HYPER_DMABUF_QUERY \
-_IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_hyper_dmabuf_query))
-struct ioctl_hyper_dmabuf_query {
-       /* in parameters */
-       /* hyper dmabuf id to be queried */
-       uint32_t hyper_dmabuf_id;
-       /* item to be queried */
-       uint32_t item;
-       /* OUT parameters */
-       /* Value of queried item */
-       uint32_t info;
+struct hyper_dmabuf_private {
+        struct device *device;
+       int domid;
+       struct workqueue_struct *work_queue;
+       struct list_reusable_id *id_queue;
+
+       /* backend ops - hypervisor specific */
+       struct hyper_dmabuf_backend_ops *backend_ops;
 };
 
-#endif //__LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
+#endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
index 7bbb179..b58a111 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -1,5 +1,6 @@
 #include <linux/list.h>
 #include <linux/slab.h>
+#include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_id.h"
 
@@ -19,6 +20,7 @@ void store_reusable_id(int id)
 static int retrieve_reusable_id(void)
 {
        struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
+       int id;
 
        /* check there is reusable id */
        if (!list_empty(&reusable_head->list)) {
@@ -27,7 +29,9 @@ static int retrieve_reusable_id(void)
                                                 list);
 
                list_del(&reusable_head->list);
-               return reusable_head->id;
+               id = reusable_head->id;
+               kfree(reusable_head);
+               return id;
        }
 
        return -1;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index b109138..0f104b9 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -8,10 +8,12 @@
 #include "hyper_dmabuf_struct.h"
 #include "hyper_dmabuf_imp.h"
 #include "hyper_dmabuf_id.h"
-#include "xen/hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_list.h"
 
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
 #define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
 
 int dmabuf_refcount(struct dma_buf *dma_buf)
@@ -138,397 +140,10 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
 /* free sg_table */
 void hyper_dmabuf_free_sgt(struct sg_table* sgt)
 {
-       sg_free_table(sgt);
-       kfree(sgt);
-}
-
-/*
- * Creates 2 level page directory structure for referencing shared pages.
- * Top level page is a single page that contains up to 1024 refids that
- * point to 2nd level pages.
- * Each 2nd level page contains up to 1024 refids that point to shared
- * data pages.
- * There will always be one top level page and number of 2nd level pages
- * depends on number of shared data pages.
- *
- *      Top level page                2nd level pages            Data pages
- * +-------------------------+   ┌>+--------------------+ ┌--->+------------+
- * |2nd level page 0 refid   |---┘ |Data page 0 refid   |-┘    |Data page 0 |
- * |2nd level page 1 refid   |---┐ |Data page 1 refid   |-┐    +------------+
- * |           ...           |   | |     ....           | |
- * |2nd level page 1023 refid|-┐ | |Data page 1023 refid| └--->+------------+
- * +-------------------------+ | | +--------------------+      |Data page 1 |
- *                             | |                             +------------+
- *                             | └>+--------------------+
- *                             |   |Data page 1024 refid|
- *                             |   |Data page 1025 refid|
- *                             |   |       ...          |
- *                             |   |Data page 2047 refid|
- *                             |   +--------------------+
- *                             |
- *                             |        .....
- *                             └-->+-----------------------+
- *                                 |Data page 1047552 refid|
- *                                 |Data page 1047553 refid|
- *                                 |       ...             |
- *                                 |Data page 1048575 
refid|-->+------------------+
- *                                 +-----------------------+   |Data page 
1048575 |
- *                                                             
+------------------+
- *
- * Using such 2 level structure it is possible to reference up to 4GB of
- * shared data using single refid pointing to top level page.
- *
- * Returns refid of top level page.
- */
-grant_ref_t hyper_dmabuf_create_addressing_tables(grant_ref_t *data_refs, int 
nents, int rdomain,
-                                                 struct 
hyper_dmabuf_shared_pages_info *shared_pages_info)
-{
-       /*
-        * Calculate number of pages needed for 2nd level addresing:
-        */
-       int n_2nd_level_pages = (nents/REFS_PER_PAGE +
-                               ((nents % REFS_PER_PAGE) ? 1: 0));
-       int i;
-       unsigned long gref_page_start;
-       grant_ref_t *tmp_page;
-       grant_ref_t top_level_ref;
-       grant_ref_t * addr_refs;
-       addr_refs = kcalloc(sizeof(grant_ref_t), n_2nd_level_pages, GFP_KERNEL);
-
-       gref_page_start = __get_free_pages(GFP_KERNEL, n_2nd_level_pages);
-       tmp_page = (grant_ref_t *)gref_page_start;
-
-       /* Store 2nd level pages to be freed later */
-       shared_pages_info->addr_pages = tmp_page;
-
-       /*TODO: make sure that allocated memory is filled with 0*/
-
-       /* Share 2nd level addressing pages in readonly mode*/
-       for (i=0; i< n_2nd_level_pages; i++) {
-               addr_refs[i] = gnttab_grant_foreign_access(rdomain,
-                                                          
virt_to_mfn((unsigned long)tmp_page+i*PAGE_SIZE ),
-                                                          1);
-       }
-
-       /*
-        * fill second level pages with data refs
-        */
-       for (i = 0; i < nents; i++) {
-               tmp_page[i] = data_refs[i];
-       }
-
-
-       /* allocate top level page */
-       gref_page_start = __get_free_pages(GFP_KERNEL, 1);
-       tmp_page = (grant_ref_t *)gref_page_start;
-
-       /* Store top level page to be freed later */
-       shared_pages_info->top_level_page = tmp_page;
-
-       /*
-        * fill top level page with reference numbers of second level pages 
refs.
-        */
-       for (i=0; i< n_2nd_level_pages; i++) {
-               tmp_page[i] =  addr_refs[i];
-       }
-
-       /* Share top level addressing page in readonly mode*/
-       top_level_ref = gnttab_grant_foreign_access(rdomain,
-                                                   virt_to_mfn((unsigned 
long)tmp_page),
-                                                   1);
-
-       kfree(addr_refs);
-
-       return top_level_ref;
-}
-
-/*
- * Maps provided top level ref id and then return array of pages containing 
data refs.
- */
-struct page** hyper_dmabuf_get_data_refs(grant_ref_t top_level_ref, int domid, 
int nents,
-                                        struct hyper_dmabuf_shared_pages_info 
*shared_pages_info)
-{
-       struct page *top_level_page;
-       struct page **level2_pages;
-
-       grant_ref_t *top_level_refs;
-
-       struct gnttab_map_grant_ref top_level_map_ops;
-       struct gnttab_unmap_grant_ref top_level_unmap_ops;
-
-       struct gnttab_map_grant_ref *map_ops;
-       struct gnttab_unmap_grant_ref *unmap_ops;
-
-       unsigned long addr;
-       int n_level2_refs = 0;
-       int i;
-
-       n_level2_refs = (nents / REFS_PER_PAGE) + ((nents % REFS_PER_PAGE) ? 1 
: 0);
-
-       level2_pages = kcalloc(sizeof(struct page*), n_level2_refs, GFP_KERNEL);
-
-       map_ops = kcalloc(sizeof(map_ops[0]), REFS_PER_PAGE, GFP_KERNEL);
-       unmap_ops = kcalloc(sizeof(unmap_ops[0]), REFS_PER_PAGE, GFP_KERNEL);
-
-       /* Map top level addressing page */
-       if (gnttab_alloc_pages(1, &top_level_page)) {
-               printk("Cannot allocate pages\n");
-               return NULL;
-       }
-
-       addr = (unsigned long)pfn_to_kaddr(page_to_pfn(top_level_page));
-       gnttab_set_map_op(&top_level_map_ops, addr, GNTMAP_host_map | 
GNTMAP_readonly,
-                         top_level_ref, domid);
-
-       gnttab_set_unmap_op(&top_level_unmap_ops, addr, GNTMAP_host_map | 
GNTMAP_readonly, -1);
-
-       if (gnttab_map_refs(&top_level_map_ops, NULL, &top_level_page, 1)) {
-               printk("\nxen: dom0: HYPERVISOR map grant ref failed");
-               return NULL;
-       }
-
-       if (top_level_map_ops.status) {
-               printk("\nxen: dom0: HYPERVISOR map grant ref failed status = 
%d",
-                               top_level_map_ops.status);
-               return NULL;
-       } else {
-               top_level_unmap_ops.handle = top_level_map_ops.handle;
-       }
-
-       /* Parse contents of top level addressing page to find how many second 
level pages is there*/
-       top_level_refs = pfn_to_kaddr(page_to_pfn(top_level_page));
-
-       /* Map all second level pages */
-       if (gnttab_alloc_pages(n_level2_refs, level2_pages)) {
-               printk("Cannot allocate pages\n");
-               return NULL;
-       }
-
-       for (i = 0; i < n_level2_refs; i++) {
-               addr = (unsigned 
long)pfn_to_kaddr(page_to_pfn(level2_pages[i]));
-               gnttab_set_map_op(&map_ops[i], addr, GNTMAP_host_map | 
GNTMAP_readonly,
-                                 top_level_refs[i], domid);
-               gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map | 
GNTMAP_readonly, -1);
-       }
-
-       if (gnttab_map_refs(map_ops, NULL, level2_pages, n_level2_refs)) {
-               printk("\nxen: dom0: HYPERVISOR map grant ref failed");
-               return NULL;
-       }
-
-       /* Checks if pages were mapped correctly and at the same time is 
calculating total number of data refids*/
-       for (i = 0; i < n_level2_refs; i++) {
-               if (map_ops[i].status) {
-                       printk("\nxen: dom0: HYPERVISOR map grant ref failed 
status = %d",
-                              map_ops[i].status);
-                       return NULL;
-               } else {
-                       unmap_ops[i].handle = map_ops[i].handle;
-               }
-       }
-
-       /* Unmap top level page, as it won't be needed any longer */
-       if (gnttab_unmap_refs(&top_level_unmap_ops, NULL, &top_level_page, 1)) {
-               printk("\xen: cannot unmap top level page\n");
-               return NULL;
-       }
-
-       gnttab_free_pages(1, &top_level_page);
-       kfree(map_ops);
-       shared_pages_info->unmap_ops = unmap_ops;
-
-       return level2_pages;
-}
-
-
-/* This collects all reference numbers for 2nd level shared pages and create a 
table
- * with those in 1st level shared pages then return reference numbers for this 
top level
- * table. */
-grant_ref_t hyper_dmabuf_create_gref_table(struct page **pages, int rdomain, 
int nents,
-                                          struct 
hyper_dmabuf_shared_pages_info *shared_pages_info)
-{
-       int i = 0;
-       grant_ref_t *data_refs;
-       grant_ref_t top_level_ref;
-
-       /* allocate temp array for refs of shared data pages */
-       data_refs = kcalloc(nents, sizeof(grant_ref_t), GFP_KERNEL);
-
-       /* share data pages in rw mode*/
-       for (i=0; i<nents; i++) {
-               data_refs[i] = gnttab_grant_foreign_access(rdomain,
-                                                          
pfn_to_mfn(page_to_pfn(pages[i])),
-                                                          0);
-       }
-
-       /* create additional shared pages with 2 level addressing of data pages 
*/
-       top_level_ref = hyper_dmabuf_create_addressing_tables(data_refs, nents, 
rdomain,
-                                                             
shared_pages_info);
-
-       /* Store exported pages refid to be unshared later */
-       shared_pages_info->data_refs = data_refs;
-       shared_pages_info->top_level_ref = top_level_ref;
-
-       return top_level_ref;
-}
-
-int hyper_dmabuf_cleanup_gref_table(struct hyper_dmabuf_sgt_info *sgt_info) {
-       uint32_t i = 0;
-       struct hyper_dmabuf_shared_pages_info *shared_pages_info = 
&sgt_info->shared_pages_info;
-
-       grant_ref_t *ref = shared_pages_info->top_level_page;
-       int n_2nd_level_pages = (sgt_info->nents/REFS_PER_PAGE +
-                               ((sgt_info->nents % REFS_PER_PAGE) ? 1: 0));
-
-
-       if (shared_pages_info->data_refs == NULL ||
-           shared_pages_info->addr_pages ==  NULL ||
-           shared_pages_info->top_level_page == NULL ||
-           shared_pages_info->top_level_ref == -1) {
-               printk("gref table for hyper_dmabuf already cleaned up\n");
-               return 0;
-       }
-
-       /* End foreign access for 2nd level addressing pages */
-       while(ref[i] != 0 && i < n_2nd_level_pages) {
-               if (gnttab_query_foreign_access(ref[i])) {
-                       printk("refid not shared !!\n");
-               }
-               if (!gnttab_end_foreign_access_ref(ref[i], 1)) {
-                       printk("refid still in use!!!\n");
-               }
-               gnttab_free_grant_reference(ref[i]);
-               i++;
-       }
-       free_pages((unsigned long)shared_pages_info->addr_pages, i);
-
-
-       /* End foreign access for top level addressing page */
-       if (gnttab_query_foreign_access(shared_pages_info->top_level_ref)) {
-               printk("refid not shared !!\n");
-       }
-       gnttab_end_foreign_access_ref(shared_pages_info->top_level_ref, 1);
-       gnttab_free_grant_reference(shared_pages_info->top_level_ref);
-
-       free_pages((unsigned long)shared_pages_info->top_level_page, 1);
-
-       /* End foreign access for data pages, but do not free them */
-       for (i = 0; i < sgt_info->nents; i++) {
-               if 
(gnttab_query_foreign_access(shared_pages_info->data_refs[i])) {
-                       printk("refid not shared !!\n");
-               }
-               gnttab_end_foreign_access_ref(shared_pages_info->data_refs[i], 
0);
-               gnttab_free_grant_reference(shared_pages_info->data_refs[i]);
-       }
-
-       kfree(shared_pages_info->data_refs);
-
-       shared_pages_info->data_refs = NULL;
-       shared_pages_info->addr_pages = NULL;
-       shared_pages_info->top_level_page = NULL;
-       shared_pages_info->top_level_ref = -1;
-
-       return 0;
-}
-
-int hyper_dmabuf_cleanup_imported_pages(struct hyper_dmabuf_imported_sgt_info 
*sgt_info) {
-       struct hyper_dmabuf_shared_pages_info *shared_pages_info = 
&sgt_info->shared_pages_info;
-
-       if(shared_pages_info->unmap_ops == NULL ||
-          shared_pages_info->data_pages == NULL) {
-               printk("Imported pages already cleaned up or buffer was not 
imported yet\n");
-               return 0;
-       }
-
-       if (gnttab_unmap_refs(shared_pages_info->unmap_ops, NULL,
-                             shared_pages_info->data_pages, sgt_info->nents) ) 
{
-               printk("Cannot unmap data pages\n");
-               return -EINVAL;
-       }
-
-       gnttab_free_pages(sgt_info->nents, shared_pages_info->data_pages);
-       kfree(shared_pages_info->data_pages);
-       kfree(shared_pages_info->unmap_ops);
-       shared_pages_info->unmap_ops = NULL;
-       shared_pages_info->data_pages = NULL;
-
-       return 0;
-}
-
-/* map and construct sg_lists from reference numbers */
-struct sg_table* hyper_dmabuf_map_pages(grant_ref_t top_level_gref, int 
frst_ofst,
-                                       int last_len, int nents, int sdomain,
-                                       struct hyper_dmabuf_shared_pages_info 
*shared_pages_info)
-{
-       struct sg_table *st;
-       struct page **pages;
-       struct gnttab_map_grant_ref *ops;
-       struct gnttab_unmap_grant_ref *unmap_ops;
-       unsigned long addr;
-       grant_ref_t *refs;
-       int i;
-       int n_level2_refs = (nents / REFS_PER_PAGE) + ((nents % REFS_PER_PAGE) 
? 1 : 0);
-
-       /* Get data refids */
-       struct page** refid_pages = hyper_dmabuf_get_data_refs(top_level_gref, 
sdomain, nents,
-                                                              
shared_pages_info);
-
-       pages = kcalloc(sizeof(struct page*), nents, GFP_KERNEL);
-       if (pages == NULL) {
-               return NULL;
-       }
-
-       /* allocate new pages that are mapped to shared pages via grant-table */
-       if (gnttab_alloc_pages(nents, pages)) {
-               printk("Cannot allocate pages\n");
-               return NULL;
-       }
-
-       ops = kcalloc(nents, sizeof(struct gnttab_map_grant_ref),
-                     GFP_KERNEL);
-       unmap_ops = kcalloc(nents, sizeof(struct gnttab_unmap_grant_ref),
-                           GFP_KERNEL);
-
-       for (i=0; i<nents; i++) {
-               addr = (unsigned long)pfn_to_kaddr(page_to_pfn(pages[i]));
-               refs = pfn_to_kaddr(page_to_pfn(refid_pages[i / 
REFS_PER_PAGE]));
-               gnttab_set_map_op(&ops[i], addr, GNTMAP_host_map | 
GNTMAP_readonly,
-                               refs[i % REFS_PER_PAGE], sdomain);
-               gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map | 
GNTMAP_readonly, -1);
-       }
-
-       if (gnttab_map_refs(ops, NULL, pages, nents)) {
-               printk("\nxen: dom0: HYPERVISOR map grant ref failed\n");
-               return NULL;
-       }
-
-       for (i=0; i<nents; i++) {
-               if (ops[i].status) {
-                       printk("\nxen: dom0: HYPERVISOR map grant ref failed 
status = %d\n",
-                               ops[0].status);
-                       return NULL;
-               } else {
-                       unmap_ops[i].handle = ops[i].handle;
-               }
+       if (sgt) {
+               sg_free_table(sgt);
+               kfree(sgt);
        }
-
-       st = hyper_dmabuf_create_sgt(pages, frst_ofst, last_len, nents);
-
-       if (gnttab_unmap_refs(shared_pages_info->unmap_ops, NULL, refid_pages,
-                       n_level2_refs) ) {
-               printk("Cannot unmap 2nd level refs\n");
-               return NULL;
-       }
-
-       gnttab_free_pages(n_level2_refs, refid_pages);
-       kfree(refid_pages);
-
-       kfree(shared_pages_info->unmap_ops);
-       shared_pages_info->unmap_ops = unmap_ops;
-       shared_pages_info->data_pages = pages;
-       kfree(ops);
-
-       return st;
 }
 
 int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int 
force)
@@ -537,6 +152,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
        struct attachment_list *attachl;
        struct kmap_vaddr_list *va_kmapl;
        struct vmap_vaddr_list *va_vmapl;
+       struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
 
        if (!sgt_info) {
                printk("invalid hyper_dmabuf_id\n");
@@ -598,7 +214,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
        }
 
        /* Start cleanup of buffer in reverse order to exporting */
-       hyper_dmabuf_cleanup_gref_table(sgt_info);
+       ops->unshare_pages(&sgt_info->refs_info, sgt_info->nents);
 
        /* unmap dma-buf */
        dma_buf_unmap_attachment(sgt_info->active_attached->attach,
@@ -620,21 +236,22 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
        return 0;
 }
 
-inline int hyper_dmabuf_sync_request_and_wait(int id, int ops)
+inline int hyper_dmabuf_sync_request_and_wait(int id, int dmabuf_ops)
 {
-       struct hyper_dmabuf_ring_rq *req;
+       struct hyper_dmabuf_req *req;
+       struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
        int operands[2];
        int ret;
 
        operands[0] = id;
-       operands[1] = ops;
+       operands[1] = dmabuf_ops;
 
        req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
        hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, 
&operands[0]);
 
        /* send request and wait for a response */
-       ret = hyper_dmabuf_send_request(HYPER_DMABUF_DOM_ID(id), req, true);
+       ret = ops->send_req(HYPER_DMABUF_DOM_ID(id), req, true);
 
        kfree(req);
 
@@ -753,6 +370,7 @@ static void hyper_dmabuf_ops_unmap(struct 
dma_buf_attachment *attachment,
 static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
 {
        struct hyper_dmabuf_imported_sgt_info *sgt_info;
+       struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
        int ret;
        int final_release;
 
@@ -761,16 +379,22 @@ static void hyper_dmabuf_ops_release(struct dma_buf 
*dma_buf)
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dma_buf->priv;
 
-       final_release = sgt_info && !sgt_info->valid &&
-                      !dmabuf_refcount(sgt_info->dma_buf);
-
        if (!dmabuf_refcount(sgt_info->dma_buf)) {
                sgt_info->dma_buf = NULL;
        }
 
-       if (final_release) {
-               hyper_dmabuf_cleanup_imported_pages(sgt_info);
+       sgt_info->num_importers--;
+
+       if (sgt_info->num_importers == 0) {
+               ops->unmap_shared_pages(&sgt_info->refs_info, sgt_info->nents);
                hyper_dmabuf_free_sgt(sgt_info->sgt);
+               sgt_info->sgt = NULL;
+       }
+
+       final_release = sgt_info && !sgt_info->valid &&
+                       !sgt_info->num_importers;
+
+       if (final_release) {
                ret = 
hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
                                                        
HYPER_DMABUF_OPS_RELEASE_FINAL);
        } else {
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h
index 1b0801f..a4a6d63 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h
@@ -11,20 +11,6 @@ struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct 
sg_table *sgt);
 struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
                                 int frst_ofst, int last_len, int nents);
 
-grant_ref_t hyper_dmabuf_create_gref_table(struct page **pages, int rdomain, 
int nents,
-                                          struct 
hyper_dmabuf_shared_pages_info *shared_pages_info);
-
-int hyper_dmabuf_cleanup_gref_table(struct hyper_dmabuf_sgt_info *sgt_info);
-
-int hyper_dmabuf_cleanup_imported_pages(struct hyper_dmabuf_imported_sgt_info 
*sgt_info);
-
-/* map first level tables that contains reference numbers for actual shared 
pages */
-grant_ref_t *hyper_dmabuf_map_gref_table(grant_ref_t *gref_table, int 
n_pages_table);
-
-/* map and construct sg_lists from reference numbers */
-struct sg_table* hyper_dmabuf_map_pages(grant_ref_t gref, int frst_ofst, int 
last_len, int nents, int sdomain,
-                                       struct hyper_dmabuf_shared_pages_info 
*shared_pages_info);
-
 int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int 
force);
 
 void hyper_dmabuf_free_sgt(struct sg_table *sgt);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 5c6d9c8..70107bb 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -8,47 +8,37 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include "hyper_dmabuf_struct.h"
-#include "hyper_dmabuf_imp.h"
+#include "hyper_dmabuf_ioctl.h"
 #include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_imp.h"
 #include "hyper_dmabuf_query.h"
-#include "xen/hyper_dmabuf_xen_comm.h"
-#include "xen/hyper_dmabuf_xen_comm_list.h"
-#include "hyper_dmabuf_msg.h"
 
 extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
-static int hyper_dmabuf_exporter_ring_setup(void *data)
+static int hyper_dmabuf_tx_ch_setup(void *data)
 {
-       struct ioctl_hyper_dmabuf_exporter_ring_setup *ring_attr;
-       struct hyper_dmabuf_ring_info_export *ring_info;
+       struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr;
+       struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
        int ret = 0;
 
        if (!data) {
                printk("user data is NULL\n");
                return -1;
        }
-       ring_attr = (struct ioctl_hyper_dmabuf_exporter_ring_setup *)data;
-
-       /* check if the ring ch already exists */
-       ring_info = hyper_dmabuf_find_exporter_ring(ring_attr->remote_domain);
-
-       if (ring_info) {
-               printk("(exporter's) ring ch to domid = %d already exist\ngref 
= %d, port = %d\n",
-                       ring_info->rdomain, ring_info->gref_ring, 
ring_info->port);
-               return 0;
-       }
+       tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data;
 
-       ret = hyper_dmabuf_exporter_ringbuf_init(ring_attr->remote_domain);
+       ret = ops->init_tx_ch(tx_ch_attr->remote_domain);
 
        return ret;
 }
 
-static int hyper_dmabuf_importer_ring_setup(void *data)
+static int hyper_dmabuf_rx_ch_setup(void *data)
 {
-       struct ioctl_hyper_dmabuf_importer_ring_setup *setup_imp_ring_attr;
-       struct hyper_dmabuf_ring_info_import *ring_info;
+       struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr;
+       struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
        int ret = 0;
 
        if (!data) {
@@ -56,17 +46,9 @@ static int hyper_dmabuf_importer_ring_setup(void *data)
                return -1;
        }
 
-       setup_imp_ring_attr = (struct ioctl_hyper_dmabuf_importer_ring_setup 
*)data;
-
-       /* check if the ring ch already exist */
-       ring_info = 
hyper_dmabuf_find_importer_ring(setup_imp_ring_attr->source_domain);
+       rx_ch_attr = (struct ioctl_hyper_dmabuf_rx_ch_setup *)data;
 
-       if (ring_info) {
-               printk("(importer's) ring ch to domid = %d already exist\n", 
ring_info->sdomain);
-               return 0;
-       }
-
-       ret = 
hyper_dmabuf_importer_ringbuf_init(setup_imp_ring_attr->source_domain);
+       ret = ops->init_rx_ch(rx_ch_attr->source_domain);
 
        return ret;
 }
@@ -74,13 +56,14 @@ static int hyper_dmabuf_importer_ring_setup(void *data)
 static int hyper_dmabuf_export_remote(void *data)
 {
        struct ioctl_hyper_dmabuf_export_remote *export_remote_attr;
+       struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
        struct dma_buf *dma_buf;
        struct dma_buf_attachment *attachment;
        struct sg_table *sgt;
        struct hyper_dmabuf_pages_info *page_info;
        struct hyper_dmabuf_sgt_info *sgt_info;
-       struct hyper_dmabuf_ring_rq *req;
-       int operands[9];
+       struct hyper_dmabuf_req *req;
+       int operands[MAX_NUMBER_OF_OPERANDS];
        int ret = 0;
 
        if (!data) {
@@ -125,6 +108,7 @@ static int hyper_dmabuf_export_remote(void *data)
        sgt_info = kmalloc(sizeof(*sgt_info), GFP_KERNEL);
 
        sgt_info->hyper_dmabuf_id = hyper_dmabuf_get_id();
+
        /* TODO: We might need to consider using port number on event channel? 
*/
        sgt_info->hyper_dmabuf_rdomain = export_remote_attr->remote_domain;
        sgt_info->dma_buf = dma_buf;
@@ -163,15 +147,14 @@ static int hyper_dmabuf_export_remote(void *data)
 
        export_remote_attr->hyper_dmabuf_id = sgt_info->hyper_dmabuf_id;
 
-       /* now create table of grefs for shared pages and */
-
        /* now create request for importer via ring */
        operands[0] = page_info->hyper_dmabuf_id;
        operands[1] = page_info->nents;
        operands[2] = page_info->frst_ofst;
        operands[3] = page_info->last_len;
-       operands[4] = hyper_dmabuf_create_gref_table(page_info->pages, 
export_remote_attr->remote_domain,
-                                                    page_info->nents, 
&sgt_info->shared_pages_info);
+       operands[4] = ops->share_pages (page_info->pages, 
export_remote_attr->remote_domain,
+                                       page_info->nents, &sgt_info->refs_info);
+
        /* driver/application specific private info, max 32 bytes */
        operands[5] = export_remote_attr->private[0];
        operands[6] = export_remote_attr->private[1];
@@ -182,7 +165,8 @@ static int hyper_dmabuf_export_remote(void *data)
 
        /* composing a message to the importer */
        hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT, &operands[0]);
-       if(hyper_dmabuf_send_request(export_remote_attr->remote_domain, req, 
false))
+
+       if(ops->send_req(export_remote_attr->remote_domain, req, false))
                goto fail_send_request;
 
        /* free msg */
@@ -215,8 +199,10 @@ static int hyper_dmabuf_export_remote(void *data)
 static int hyper_dmabuf_export_fd_ioctl(void *data)
 {
        struct ioctl_hyper_dmabuf_export_fd *export_fd_attr;
-       struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
-       struct hyper_dmabuf_ring_rq *req;
+       struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+       struct hyper_dmabuf_imported_sgt_info *sgt_info;
+       struct hyper_dmabuf_req *req;
+       struct page **data_pages;
        int operand;
        int ret = 0;
 
@@ -228,43 +214,48 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
        export_fd_attr = (struct ioctl_hyper_dmabuf_export_fd *)data;
 
        /* look for dmabuf for the id */
-       imported_sgt_info = 
hyper_dmabuf_find_imported(export_fd_attr->hyper_dmabuf_id);
-       if (imported_sgt_info == NULL) /* can't find sgt from the table */
+       sgt_info = hyper_dmabuf_find_imported(export_fd_attr->hyper_dmabuf_id);
+       if (sgt_info == NULL) /* can't find sgt from the table */
                return -1;
 
        printk("%s Found buffer gref %d  off %d last len %d nents %d domain 
%d\n", __func__,
-               imported_sgt_info->gref, imported_sgt_info->frst_ofst,
-               imported_sgt_info->last_len, imported_sgt_info->nents,
-               HYPER_DMABUF_DOM_ID(imported_sgt_info->hyper_dmabuf_id));
-
-       if (!imported_sgt_info->sgt) {
-               imported_sgt_info->sgt = 
hyper_dmabuf_map_pages(imported_sgt_info->gref,
-                                                       
imported_sgt_info->frst_ofst,
-                                                       
imported_sgt_info->last_len,
-                                                       
imported_sgt_info->nents,
-                                                       
HYPER_DMABUF_DOM_ID(imported_sgt_info->hyper_dmabuf_id),
-                                                       
&imported_sgt_info->shared_pages_info);
-
-               /* send notifiticatio for first export_fd to exporter */
-               operand = imported_sgt_info->hyper_dmabuf_id;
-               req = kcalloc(1, sizeof(*req), GFP_KERNEL);
-               hyper_dmabuf_create_request(req, HYPER_DMABUF_FIRST_EXPORT, 
&operand);
-
-               ret = hyper_dmabuf_send_request(HYPER_DMABUF_DOM_ID(operand), 
req, false);
-
-               if (!imported_sgt_info->sgt || ret) {
-                       kfree(req);
-                       printk("Failed to create sgt or notify exporter\n");
-                       return -EINVAL;
-               }
+               sgt_info->ref_handle, sgt_info->frst_ofst,
+               sgt_info->last_len, sgt_info->nents,
+               HYPER_DMABUF_DOM_ID(sgt_info->hyper_dmabuf_id));
+
+       if (!sgt_info->sgt) {
+               data_pages = ops->map_shared_pages(sgt_info->ref_handle,
+                                                  
HYPER_DMABUF_DOM_ID(sgt_info->hyper_dmabuf_id),
+                                                  sgt_info->nents,
+                                                  &sgt_info->refs_info);
+
+               sgt_info->sgt = hyper_dmabuf_create_sgt(data_pages, 
sgt_info->frst_ofst,
+                                                       sgt_info->last_len, 
sgt_info->nents);
+
+       }
+
+       /* send notification for export_fd to exporter */
+       operand = sgt_info->hyper_dmabuf_id;
+
+       req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+       hyper_dmabuf_create_request(req, HYPER_DMABUF_FIRST_EXPORT, &operand);
+
+       ret = ops->send_req(HYPER_DMABUF_DOM_ID(operand), req, false);
+
+       if (!sgt_info->sgt || ret) {
                kfree(req);
+               printk("Failed to create sgt or notify exporter\n");
+               return -EINVAL;
        }
+       kfree(req);
 
-       export_fd_attr->fd = hyper_dmabuf_export_fd(imported_sgt_info, 
export_fd_attr->flags);
+       export_fd_attr->fd = hyper_dmabuf_export_fd(sgt_info, 
export_fd_attr->flags);
 
        if (export_fd_attr->fd < 0) {
                /* fail to get fd */
                ret = export_fd_attr->fd;
+       } else {
+               sgt_info->num_importers++;
        }
 
        return ret;
@@ -276,8 +267,9 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
 static int hyper_dmabuf_unexport(void *data)
 {
        struct ioctl_hyper_dmabuf_unexport *unexport_attr;
+       struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
        struct hyper_dmabuf_sgt_info *sgt_info;
-       struct hyper_dmabuf_ring_rq *req;
+       struct hyper_dmabuf_req *req;
        int ret;
 
        if (!data) {
@@ -301,7 +293,7 @@ static int hyper_dmabuf_unexport(void *data)
        hyper_dmabuf_create_request(req, HYPER_DMABUF_NOTIFY_UNEXPORT, 
&unexport_attr->hyper_dmabuf_id);
 
        /* Now send unexport request to remote domain, marking that buffer 
should not be used anymore */
-       ret = hyper_dmabuf_send_request(sgt_info->hyper_dmabuf_rdomain, req, 
true);
+       ret = ops->send_req(sgt_info->hyper_dmabuf_rdomain, req, true);
        if (ret < 0) {
                kfree(req);
                return -EFAULT;
@@ -405,8 +397,8 @@ static int hyper_dmabuf_query(void *data)
 }
 
 static const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = {
-       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORTER_RING_SETUP, 
hyper_dmabuf_exporter_ring_setup, 0),
-       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_IMPORTER_RING_SETUP, 
hyper_dmabuf_importer_ring_setup, 0),
+       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP, 
hyper_dmabuf_tx_ch_setup, 0),
+       HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP, 
hyper_dmabuf_rx_ch_setup, 0),
        HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE, 
hyper_dmabuf_export_remote, 0),
        HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD, 
hyper_dmabuf_export_fd_ioctl, 0),
        HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT, 
hyper_dmabuf_unexport, 0),
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
new file mode 100644
index 0000000..de216d3
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
@@ -0,0 +1,87 @@
+#ifndef __LINUX_PUBLIC_HYPER_DMABUF_IOCTL_H__
+#define __LINUX_PUBLIC_HYPER_DMABUF_IOCTL_H__
+
+typedef int (*hyper_dmabuf_ioctl_t)(void *data);
+
+struct hyper_dmabuf_ioctl_desc {
+       unsigned int cmd;
+       int flags;
+       hyper_dmabuf_ioctl_t func;
+       const char *name;
+};
+
+#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags)   \
+       [_IOC_NR(ioctl)] = {                            \
+                       .cmd = ioctl,                   \
+                       .func = _func,                  \
+                       .flags = _flags,                \
+                       .name = #ioctl                  \
+       }
+
+#define IOCTL_HYPER_DMABUF_TX_CH_SETUP \
+_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_hyper_dmabuf_tx_ch_setup))
+struct ioctl_hyper_dmabuf_tx_ch_setup {
+       /* IN parameters */
+       /* Remote domain id */
+       int remote_domain;
+};
+
+#define IOCTL_HYPER_DMABUF_RX_CH_SETUP \
+_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_hyper_dmabuf_rx_ch_setup))
+struct ioctl_hyper_dmabuf_rx_ch_setup {
+       /* IN parameters */
+       /* Source domain id */
+       int source_domain;
+};
+
+#define IOCTL_HYPER_DMABUF_EXPORT_REMOTE \
+_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_hyper_dmabuf_export_remote))
+struct ioctl_hyper_dmabuf_export_remote {
+       /* IN parameters */
+       /* DMA buf fd to be exported */
+       int dmabuf_fd;
+       /* Domain id to which buffer should be exported */
+       int remote_domain;
+       /* OUT: exported hyper dmabuf id, filled in by the driver */
+       int hyper_dmabuf_id;
+       int private[4];
+};
+
+#define IOCTL_HYPER_DMABUF_EXPORT_FD \
+_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_hyper_dmabuf_export_fd))
+struct ioctl_hyper_dmabuf_export_fd {
+       /* IN parameters */
+       /* hyper dmabuf id to be imported */
+       int hyper_dmabuf_id;
+       /* flags */
+       int flags;
+       /* OUT parameters */
+       /* exported dma buf fd */
+       int fd;
+};
+
+#define IOCTL_HYPER_DMABUF_UNEXPORT \
+_IOC(_IOC_NONE, 'G', 4, sizeof(struct ioctl_hyper_dmabuf_unexport))
+struct ioctl_hyper_dmabuf_unexport {
+       /* IN parameters */
+       /* hyper dmabuf id to be unexported */
+       int hyper_dmabuf_id;
+       /* OUT parameters */
+       /* Status of request */
+       int status;
+};
+
+#define IOCTL_HYPER_DMABUF_QUERY \
+_IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_hyper_dmabuf_query))
+struct ioctl_hyper_dmabuf_query {
+       /* in parameters */
+       /* hyper dmabuf id to be queried */
+       int hyper_dmabuf_id;
+       /* item to be queried */
+       int item;
+       /* OUT parameters */
+       /* Value of queried item */
+       int info;
+};
+
+#endif //__LINUX_PUBLIC_HYPER_DMABUF_IOCTL_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index a2d687f..4647115 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -5,11 +5,10 @@
 #include <linux/dma-buf.h>
 #include <xen/grant_table.h>
 #include <linux/workqueue.h>
+#include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_imp.h"
 #include "hyper_dmabuf_remote_sync.h"
-#include "xen/hyper_dmabuf_xen_comm.h"
-#include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_list.h"
 
 #define FORCED_UNEXPORTING 0
@@ -18,18 +17,17 @@ extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
 struct cmd_process {
        struct work_struct work;
-       struct hyper_dmabuf_ring_rq *rq;
+       struct hyper_dmabuf_req *rq;
        int domid;
 };
 
-void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq *request,
-                                       enum hyper_dmabuf_command command, int 
*operands)
+void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
+                                enum hyper_dmabuf_command command, int 
*operands)
 {
        int i;
 
-       request->request_id = hyper_dmabuf_next_req_id_export();
-       request->status = HYPER_DMABUF_REQ_NOT_RESPONDED;
-       request->command = command;
+       req->status = HYPER_DMABUF_REQ_NOT_RESPONDED;
+       req->command = command;
 
        switch(command) {
        /* as exporter, commands to importer */
@@ -44,7 +42,7 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq 
*request,
                 * operands5~8 : Driver-specific private data (e.g. graphic 
buffer's meta info)
                 */
                for (i=0; i < 8; i++)
-                       request->operands[i] = operands[i];
+                       req->operands[i] = operands[i];
                break;
 
        case HYPER_DMABUF_NOTIFY_UNEXPORT:
@@ -52,7 +50,7 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq 
*request,
                /* command : DMABUF_DESTROY,
                 * operands0 : hyper_dmabuf_id
                 */
-               request->operands[0] = operands[0];
+               req->operands[0] = operands[0];
                break;
 
        case HYPER_DMABUF_FIRST_EXPORT:
@@ -60,7 +58,7 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq 
*request,
                /* command : HYPER_DMABUF_FIRST_EXPORT,
                 * operands0 : hyper_dmabuf_id
                 */
-               request->operands[0] = operands[0];
+               req->operands[0] = operands[0];
                break;
 
        case HYPER_DMABUF_OPS_TO_REMOTE:
@@ -77,7 +75,7 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq 
*request,
                 * operands1 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
                 */
                for (i=0; i<2; i++)
-                       request->operands[i] = operands[i];
+                       req->operands[i] = operands[i];
                break;
 
        default:
@@ -88,10 +86,10 @@ void hyper_dmabuf_create_request(struct 
hyper_dmabuf_ring_rq *request,
 
 void cmd_process_work(struct work_struct *work)
 {
-       struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
        struct hyper_dmabuf_sgt_info *sgt_info;
+       struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
        struct cmd_process *proc = container_of(work, struct cmd_process, work);
-       struct hyper_dmabuf_ring_rq *req;
+       struct hyper_dmabuf_req *req;
        int domid;
        int i;
 
@@ -114,7 +112,7 @@ void cmd_process_work(struct work_struct *work)
                imported_sgt_info->frst_ofst = req->operands[2];
                imported_sgt_info->last_len = req->operands[3];
                imported_sgt_info->nents = req->operands[1];
-               imported_sgt_info->gref = req->operands[4];
+               imported_sgt_info->ref_handle = req->operands[4];
 
                printk("DMABUF was exported\n");
                printk("\thyper_dmabuf_id %d\n", req->operands[0]);
@@ -139,10 +137,7 @@ void cmd_process_work(struct work_struct *work)
                        break;
                }
 
-               if (sgt_info->importer_exported)
-                       printk("warning: exported flag is not supposed to be 1 
already\n");
-
-               sgt_info->importer_exported = 1;
+               sgt_info->importer_exported++;
                break;
 
        case HYPER_DMABUF_OPS_TO_REMOTE:
@@ -160,11 +155,11 @@ void cmd_process_work(struct work_struct *work)
        kfree(proc);
 }
 
-int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req)
+int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
 {
        struct cmd_process *proc;
-       struct hyper_dmabuf_ring_rq *temp_req;
-       struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
+       struct hyper_dmabuf_req *temp_req;
+       struct hyper_dmabuf_imported_sgt_info *sgt_info;
        int ret;
 
        if (!req) {
@@ -189,22 +184,21 @@ int hyper_dmabuf_msg_parse(int domid, struct 
hyper_dmabuf_ring_rq *req)
                 * operands0 : hyper_dmabuf_id
                 */
 
-               imported_sgt_info =
-                       hyper_dmabuf_find_imported(req->operands[0]);
+               sgt_info = hyper_dmabuf_find_imported(req->operands[0]);
 
-               if (imported_sgt_info) {
+               if (sgt_info) {
                        /* if anything is still using dma_buf */
-                       if (imported_sgt_info->dma_buf &&
-                           dmabuf_refcount(imported_sgt_info->dma_buf) > 0) {
+                       if (sgt_info->dma_buf &&
+                           dmabuf_refcount(sgt_info->dma_buf) > 0) {
                                /*
                                 * Buffer is still in  use, just mark that it 
should
                                 * not be allowed to export its fd anymore.
                                 */
-                               imported_sgt_info->valid = 0;
+                               sgt_info->valid = 0;
                        } else {
                                /* No one is using buffer, remove it from 
imported list */
                                hyper_dmabuf_remove_imported(req->operands[0]);
-                               kfree(imported_sgt_info);
+                               kfree(sgt_info);
                        }
                } else {
                        req->status = HYPER_DMABUF_REQ_ERROR;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
index 1e9d827..ac4caeb 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
@@ -1,6 +1,22 @@
 #ifndef __HYPER_DMABUF_MSG_H__
 #define __HYPER_DMABUF_MSG_H__
 
+#define MAX_NUMBER_OF_OPERANDS 9
+
+struct hyper_dmabuf_req {
+       unsigned int request_id;
+       unsigned int status;
+       unsigned int command;
+       unsigned int operands[MAX_NUMBER_OF_OPERANDS];
+};
+
+struct hyper_dmabuf_resp {
+       unsigned int response_id;
+       unsigned int status;
+       unsigned int command;
+       unsigned int operands[MAX_NUMBER_OF_OPERANDS];
+};
+
 enum hyper_dmabuf_command {
        HYPER_DMABUF_EXPORT = 0x10,
        HYPER_DMABUF_FIRST_EXPORT,
@@ -35,10 +51,11 @@ enum hyper_dmabuf_req_feedback {
 };
 
 /* create a request packet with given command and operands */
-void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq *request,
-                                        enum hyper_dmabuf_command command, int 
*operands);
+void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
+                                enum hyper_dmabuf_command command,
+                                int *operands);
 
 /* parse incoming request packet (or response) and take appropriate actions 
for those */
-int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req);
+int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req);
 
 #endif // __HYPER_DMABUF_MSG_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
index c5950e0..0f4735c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -5,9 +5,9 @@
 #include <linux/dma-buf.h>
 #include "hyper_dmabuf_struct.h"
 #include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_id.h"
-#include "xen/hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_imp.h"
 
@@ -133,6 +133,8 @@ int hyper_dmabuf_remote_sync(int id, int ops)
 
        case HYPER_DMABUF_OPS_RELEASE:
                /* place holder */
+                sgt_info->importer_exported--;
+
                break;
 
        case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
index b52f958..f053dd10 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
@@ -1,14 +1,6 @@
 #ifndef __HYPER_DMABUF_STRUCT_H__
 #define __HYPER_DMABUF_STRUCT_H__
 
-#include <xen/interface/grant_table.h>
-
-/* each grant_ref_t is 4 bytes, so total 4096 grant_ref_t can be
- * in this block meaning we can share 4KB*4096 = 16MB of buffer
- * (needs to be increased for large buffer use-cases such as 4K
- * frame buffer) */
-#define MAX_ALLOWED_NUM_PAGES_FOR_GREF_NUM_ARRAYS 4
-
 /* stack of mapped sgts */
 struct sgt_list {
        struct sg_table *sgt;
@@ -33,15 +25,6 @@ struct vmap_vaddr_list {
        struct list_head list;
 };
 
-struct hyper_dmabuf_shared_pages_info {
-       grant_ref_t *data_refs; /* table with shared buffer pages refid */
-       grant_ref_t *addr_pages; /* pages of 2nd level addressing */
-       grant_ref_t *top_level_page; /* page of top level addressing, it 
contains refids of 2nd level pages */
-       grant_ref_t top_level_ref; /* top level refid */
-       struct gnttab_unmap_grant_ref* unmap_ops; /* unmap ops for mapped pages 
*/
-       struct page **data_pages; /* data pages to be unmapped */
-};
-
 /* Exporter builds pages_info before sharing pages */
 struct hyper_dmabuf_pages_info {
         int hyper_dmabuf_id; /* unique id to reference dmabuf in source domain 
*/
@@ -69,8 +52,8 @@ struct hyper_dmabuf_sgt_info {
        struct kmap_vaddr_list *va_kmapped;
        struct vmap_vaddr_list *va_vmapped;
        bool valid;
-       bool importer_exported; /* exported locally on importer's side */
-       struct hyper_dmabuf_shared_pages_info shared_pages_info;
+       int importer_exported; /* exported locally on importer's side */
+       void *refs_info; /* hypervisor-specific info for the references */
        int private[4]; /* device specific info (e.g. image's meta info?) */
 };
 
@@ -79,14 +62,15 @@ struct hyper_dmabuf_sgt_info {
  * its own memory map once userspace asks for reference for the buffer */
 struct hyper_dmabuf_imported_sgt_info {
        int hyper_dmabuf_id; /* unique id to reference dmabuf 
(HYPER_DMABUF_ID_IMPORTER(source domain id, exporter's hyper_dmabuf_id */
+       int ref_handle; /* reference number of top level addressing page of 
shared pages */
        int frst_ofst;  /* start offset in shared page #1 */
        int last_len;   /* length of data in the last shared page */
        int nents;      /* number of pages to be shared */
-       grant_ref_t gref; /* reference number of top level addressing page of 
shared pages */
        struct dma_buf *dma_buf;
        struct sg_table *sgt; /* sgt pointer after importing buffer */
-       struct hyper_dmabuf_shared_pages_info shared_pages_info;
+       void *refs_info;
        bool valid;
+       int num_importers;
        int private[4]; /* device specific info (e.g. image's meta info?) */
 };
 
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index f9e0df3..bd37ec2 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -10,16 +10,15 @@
 #include <asm/xen/page.h>
 #include "hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_xen_comm_list.h"
-#include "../hyper_dmabuf_imp.h"
-#include "../hyper_dmabuf_list.h"
-#include "../hyper_dmabuf_msg.h"
 
 static int export_req_id = 0;
 
-struct hyper_dmabuf_ring_rq req_pending = {0};
+struct hyper_dmabuf_req req_pending = {0};
 
-/* Creates entry in xen store that will keep details of all exporter rings 
created by this domain */
-int32_t hyper_dmabuf_setup_data_dir()
+/* Creates entry in xen store that will keep details of all
+ * exporter rings created by this domain
+ */
+static int xen_comm_setup_data_dir(void)
 {
        char buf[255];
 
@@ -27,13 +26,13 @@ int32_t hyper_dmabuf_setup_data_dir()
        return xenbus_mkdir(XBT_NIL, buf, "");
 }
 
-
 /* Removes entry from xenstore with exporter ring details.
- * Other domains that has connected to any of exporter rings created by this 
domain,
- * will be notified about removal of this entry and will treat that as signal 
to
- * cleanup importer rings created for this domain
+ * Other domains that have connected to any of the exporter rings
+ * created by this domain will be notified about the removal of
+ * this entry and will treat that as a signal to clean up the
+ * importer rings created for this domain
  */
-int32_t hyper_dmabuf_destroy_data_dir()
+static int xen_comm_destroy_data_dir(void)
 {
        char buf[255];
 
@@ -41,18 +40,19 @@ int32_t hyper_dmabuf_destroy_data_dir()
        return xenbus_rm(XBT_NIL, buf, "");
 }
 
-/*
- * Adds xenstore entries with details of exporter ring created for given 
remote domain.
- * It requires special daemon running in dom0 to make sure that given remote 
domain will
- * have right permissions to access that data.
+/* Adds xenstore entries with details of the exporter ring created
+ * for the given remote domain. It requires a special daemon running
+ * in dom0 to make sure that the given remote domain will have the
+ * right permissions to access that data.
  */
-static int32_t hyper_dmabuf_expose_ring_details(uint32_t domid, uint32_t 
rdomid, uint32_t grefid, uint32_t port)
+static int xen_comm_expose_ring_details(int domid, int rdomid,
+                                       int gref, int port)
 {
        char buf[255];
        int ret;
 
        sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d", domid, rdomid);
-       ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", grefid);
+       ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", gref);
 
        if (ret) {
                printk("Failed to write xenbus entry %s: %d\n", buf, ret);
@@ -72,7 +72,7 @@ static int32_t hyper_dmabuf_expose_ring_details(uint32_t 
domid, uint32_t rdomid,
 /*
  * Queries details of ring exposed by remote domain.
  */
-static int32_t hyper_dmabuf_get_ring_details(uint32_t domid, uint32_t rdomid, 
uint32_t *grefid, uint32_t *port)
+static int xen_comm_get_ring_details(int domid, int rdomid, int *grefid, int 
*port)
 {
        char buf[255];
        int ret;
@@ -95,10 +95,10 @@ static int32_t hyper_dmabuf_get_ring_details(uint32_t 
domid, uint32_t rdomid, ui
        return (ret <= 0 ? 1 : 0);
 }
 
-int32_t hyper_dmabuf_get_domid(void)
+int hyper_dmabuf_get_domid(void)
 {
        struct xenbus_transaction xbt;
-       int32_t domid;
+       int domid;
 
         xenbus_transaction_start(&xbt);
 
@@ -110,29 +110,35 @@ int32_t hyper_dmabuf_get_domid(void)
        return domid;
 }
 
-int hyper_dmabuf_next_req_id_export(void)
+static int xen_comm_next_req_id(void)
 {
         export_req_id++;
         return export_req_id;
 }
 
 /* For now cache latast rings as global variables TODO: keep them in list*/
-static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, void *info);
-static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *info);
-
-/*
- * Callback function that will be called on any change of xenbus path being 
watched.
- * Used for detecting creation/destruction of remote domain exporter ring.
- * When remote domain's exporter ring will be detected, importer ring on this 
domain will be created.
- * When remote domain's exporter ring destruction will be detected it will 
celanup this domain importer ring.
- * Destruction can be caused by unloading module by remote domain or it's 
crash/force shutdown.
+static irqreturn_t front_ring_isr(int irq, void *info);
+static irqreturn_t back_ring_isr(int irq, void *info);
+
+/* Callback function that will be called on any change of xenbus path
+ * being watched. Used for detecting creation/destruction of remote
+ * domain exporter ring.
+ *
+ * When remote domain's exporter ring will be detected, importer ring
+ * on this domain will be created.
+ *
+ * When remote domain's exporter ring destruction is detected, it
+ * will clean up this domain's importer ring.
+ *
+ * Destruction can be caused by unloading module by remote domain or
+ * it's crash/force shutdown.
  */
-static void remote_domain_exporter_watch_cb(struct xenbus_watch *watch,
-                                  const char *path, const char *token)
+static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch,
+                                        const char *path, const char *token)
 {
        int rdom,ret;
        uint32_t grefid, port;
-       struct hyper_dmabuf_ring_info_import *ring_info;
+       struct xen_comm_rx_ring_info *ring_info;
 
        /* Check which domain has changed its exporter rings */
        ret = sscanf(watch->node, "/local/domain/%d/", &rdom);
@@ -141,39 +147,49 @@ static void remote_domain_exporter_watch_cb(struct 
xenbus_watch *watch,
        }
 
        /* Check if we have importer ring for given remote domain alrady 
created */
-       ring_info = hyper_dmabuf_find_importer_ring(rdom);
-
-       /*
-        * Try to query remote domain exporter ring details - if that will fail 
and we have
-        * importer ring that means remote domains has cleanup its exporter 
ring, so our
-        * importer ring is no longer useful.
-        * If querying details will succeed and we don't have importer ring, it 
means that
-        * remote domain has setup it for us and we should connect to it.
+       ring_info = xen_comm_find_rx_ring(rdom);
+
+       /* Try to query remote domain exporter ring details - if that will
+        * fail and we have an importer ring, that means the remote domain has
+        * cleaned up its exporter ring, so our importer ring is no longer useful.
+        *
+        * If querying details will succeed and we don't have importer ring,
+        * it means that remote domain has setup it for us and we should connect
+        * to it.
         */
-       ret = hyper_dmabuf_get_ring_details(hyper_dmabuf_get_domid(), rdom, 
&grefid, &port);
+       ret = xen_comm_get_ring_details(hyper_dmabuf_get_domid(), rdom,
+                                       &grefid, &port);
 
        if (ring_info && ret != 0) {
                printk("Remote exporter closed, cleaninup importer\n");
-               hyper_dmabuf_importer_ringbuf_cleanup(rdom);
+               hyper_dmabuf_xen_cleanup_rx_rbuf(rdom);
        } else if (!ring_info && ret == 0) {
                printk("Registering importer\n");
-               hyper_dmabuf_importer_ringbuf_init(rdom);
+               hyper_dmabuf_xen_init_rx_rbuf(rdom);
        }
 }
 
 /* exporter needs to generated info for page sharing */
-int hyper_dmabuf_exporter_ringbuf_init(int rdomain)
+int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 {
-       struct hyper_dmabuf_ring_info_export *ring_info;
-       struct hyper_dmabuf_sring *sring;
+       struct xen_comm_tx_ring_info *ring_info;
+       struct xen_comm_sring *sring;
        struct evtchn_alloc_unbound alloc_unbound;
        struct evtchn_close close;
 
        void *shared_ring;
        int ret;
 
-       ring_info = (struct hyper_dmabuf_ring_info_export*)
-                               kmalloc(sizeof(*ring_info), GFP_KERNEL);
+       /* check if there's any existing tx channel in the table */
+       ring_info = xen_comm_find_tx_ring(domid);
+
+       if (ring_info) {
+               printk("tx ring ch to domid = %d already exist\ngref = %d, port 
= %d\n",
+               ring_info->rdomain, ring_info->gref_ring, ring_info->port);
+               return 0;
+       }
+
+       ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
 
        /* from exporter to importer */
        shared_ring = (void *)__get_free_pages(GFP_KERNEL, 1);
@@ -181,20 +197,22 @@ int hyper_dmabuf_exporter_ringbuf_init(int rdomain)
                return -EINVAL;
        }
 
-       sring = (struct hyper_dmabuf_sring *) shared_ring;
+       sring = (struct xen_comm_sring *) shared_ring;
 
        SHARED_RING_INIT(sring);
 
        FRONT_RING_INIT(&(ring_info->ring_front), sring, PAGE_SIZE);
 
-       ring_info->gref_ring = gnttab_grant_foreign_access(rdomain,
-                                                       
virt_to_mfn(shared_ring), 0);
+       ring_info->gref_ring = gnttab_grant_foreign_access(domid,
+                                                          
virt_to_mfn(shared_ring),
+                                                          0);
        if (ring_info->gref_ring < 0) {
-               return -EINVAL; /* fail to get gref */
+               /* fail to get gref */
+               return -EINVAL;
        }
 
        alloc_unbound.dom = DOMID_SELF;
-       alloc_unbound.remote_dom = rdomain;
+       alloc_unbound.remote_dom = domid;
        ret = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                        &alloc_unbound);
        if (ret != 0) {
@@ -204,7 +222,7 @@ int hyper_dmabuf_exporter_ringbuf_init(int rdomain)
 
        /* setting up interrupt */
        ret = bind_evtchn_to_irqhandler(alloc_unbound.port,
-                                       hyper_dmabuf_front_ring_isr, 0,
+                                       front_ring_isr, 0,
                                        NULL, (void*) ring_info);
 
        if (ret < 0) {
@@ -216,7 +234,7 @@ int hyper_dmabuf_exporter_ringbuf_init(int rdomain)
                return -EINVAL;
        }
 
-       ring_info->rdomain = rdomain;
+       ring_info->rdomain = domid;
        ring_info->irq = ret;
        ring_info->port = alloc_unbound.port;
 
@@ -226,109 +244,128 @@ int hyper_dmabuf_exporter_ringbuf_init(int rdomain)
                ring_info->port,
                ring_info->irq);
 
-       ret = hyper_dmabuf_register_exporter_ring(ring_info);
+       ret = xen_comm_add_tx_ring(ring_info);
 
-       ret = hyper_dmabuf_expose_ring_details(hyper_dmabuf_get_domid(), 
rdomain,
-                                               ring_info->gref_ring, 
ring_info->port);
+       ret = xen_comm_expose_ring_details(hyper_dmabuf_get_domid(), domid,
+                                          ring_info->gref_ring, 
ring_info->port);
 
        /*
         * Register watch for remote domain exporter ring.
-        * When remote domain will setup its exporter ring, we will 
automatically connect our importer ring to it.
+        * When remote domain will setup its exporter ring,
+        * we will automatically connect our importer ring to it.
         */
-       ring_info->watch.callback = remote_domain_exporter_watch_cb;
+       ring_info->watch.callback = remote_dom_exporter_watch_cb;
        ring_info->watch.node = (const char*) kmalloc(sizeof(char) * 255, 
GFP_KERNEL);
-       sprintf((char*)ring_info->watch.node, 
"/local/domain/%d/data/hyper_dmabuf/%d/port", rdomain, 
hyper_dmabuf_get_domid());
+       sprintf((char*)ring_info->watch.node,
+               "/local/domain/%d/data/hyper_dmabuf/%d/port",
+               domid, hyper_dmabuf_get_domid());
+
        register_xenbus_watch(&ring_info->watch);
 
        return ret;
 }
 
 /* cleans up exporter ring created for given remote domain */
-void hyper_dmabuf_exporter_ringbuf_cleanup(int rdomain)
+void hyper_dmabuf_xen_cleanup_tx_rbuf(int domid)
 {
-       struct hyper_dmabuf_ring_info_export *ring_info;
+       struct xen_comm_tx_ring_info *ring_info;
 
        /* check if we at all have exporter ring for given rdomain */
-       ring_info = hyper_dmabuf_find_exporter_ring(rdomain);
+       ring_info = xen_comm_find_tx_ring(domid);
 
        if (!ring_info) {
                return;
        }
 
-       hyper_dmabuf_remove_exporter_ring(rdomain);
+       xen_comm_remove_tx_ring(domid);
 
        unregister_xenbus_watch(&ring_info->watch);
        kfree(ring_info->watch.node);
 
-       /* No need to close communication channel, will be done by this 
function */
-       unbind_from_irqhandler(ring_info->irq,  (void*) ring_info);
+       /* No need to close communication channel, will be done by
+        * this function
+        */
+       unbind_from_irqhandler(ring_info->irq, (void*) ring_info);
 
-       /* No need to free sring page, will be freed by this function when 
other side will end its access */
+       /* No need to free sring page, will be freed by this function
+        * when other side will end its access
+        */
        gnttab_end_foreign_access(ring_info->gref_ring, 0,
                                  (unsigned long) ring_info->ring_front.sring);
 
        kfree(ring_info);
 }
 
-/* importer needs to know about shared page and port numbers for ring buffer 
and event channel */
-int hyper_dmabuf_importer_ringbuf_init(int sdomain)
+/* importer needs to know about shared page and port numbers for
+ * ring buffer and event channel
+ */
+int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 {
-       struct hyper_dmabuf_ring_info_import *ring_info;
-       struct hyper_dmabuf_sring *sring;
+       struct xen_comm_rx_ring_info *ring_info;
+       struct xen_comm_sring *sring;
 
        struct page *shared_ring;
 
-       struct gnttab_map_grant_ref *ops;
+       struct gnttab_map_grant_ref *map_ops;
+
        int ret;
-       int importer_gref, importer_port;
+       int rx_gref, rx_port;
 
-       ret = hyper_dmabuf_get_ring_details(hyper_dmabuf_get_domid(), sdomain,
-                                           &importer_gref, &importer_port);
+       /* check if there's existing rx ring channel */
+       ring_info = xen_comm_find_rx_ring(domid);
+
+       if (ring_info) {
+               printk("rx ring ch from domid = %d already exist\n", 
ring_info->sdomain);
+               return 0;
+       }
+
+       ret = xen_comm_get_ring_details(hyper_dmabuf_get_domid(), domid,
+                                       &rx_gref, &rx_port);
 
        if (ret) {
-               printk("Domain %d has not created exporter ring for current 
domain\n", sdomain);
+               printk("Domain %d has not created exporter ring for current 
domain\n", domid);
                return ret;
        }
 
-       ring_info = (struct hyper_dmabuf_ring_info_import *)
-                       kmalloc(sizeof(*ring_info), GFP_KERNEL);
+       ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
 
-       ring_info->sdomain = sdomain;
-       ring_info->evtchn = importer_port;
+       ring_info->sdomain = domid;
+       ring_info->evtchn = rx_port;
 
-       ops = (struct gnttab_map_grant_ref*)kmalloc(sizeof(*ops), GFP_KERNEL);
+       map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL);
 
        if (gnttab_alloc_pages(1, &shared_ring)) {
                return -EINVAL;
        }
 
-       gnttab_set_map_op(&ops[0], (unsigned 
long)pfn_to_kaddr(page_to_pfn(shared_ring)),
-                       GNTMAP_host_map, importer_gref, sdomain);
+       gnttab_set_map_op(&map_ops[0], (unsigned 
long)pfn_to_kaddr(page_to_pfn(shared_ring)),
+                         GNTMAP_host_map, rx_gref, domid);
+
        gnttab_set_unmap_op(&ring_info->unmap_op, (unsigned 
long)pfn_to_kaddr(page_to_pfn(shared_ring)),
-                       GNTMAP_host_map, -1);
+                           GNTMAP_host_map, -1);
 
-       ret = gnttab_map_refs(ops, NULL, &shared_ring, 1);
+       ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1);
        if (ret < 0) {
                printk("Cannot map ring\n");
                return -EINVAL;
        }
 
-       if (ops[0].status) {
+       if (map_ops[0].status) {
                printk("Ring mapping failed\n");
                return -EINVAL;
        } else {
-               ring_info->unmap_op.handle = ops[0].handle;
+               ring_info->unmap_op.handle = map_ops[0].handle;
        }
 
-       kfree(ops);
+       kfree(map_ops);
 
-       sring = (struct hyper_dmabuf_sring*) 
pfn_to_kaddr(page_to_pfn(shared_ring));
+       sring = (struct xen_comm_sring *)pfn_to_kaddr(page_to_pfn(shared_ring));
 
        BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE);
 
-       ret = bind_interdomain_evtchn_to_irqhandler(sdomain, importer_port,
-                                               hyper_dmabuf_back_ring_isr, 0,
-                                               NULL, (void*)ring_info);
+       ret = bind_interdomain_evtchn_to_irqhandler(domid, rx_port,
+                                                   back_ring_isr, 0,
+                                                   NULL, (void*)ring_info);
        if (ret < 0) {
                return -EINVAL;
        }
@@ -336,35 +373,35 @@ int hyper_dmabuf_importer_ringbuf_init(int sdomain)
        ring_info->irq = ret;
 
        printk("%s: bound to eventchannel port: %d  irq: %d\n", __func__,
-               importer_port,
+               rx_port,
                ring_info->irq);
 
-       ret = hyper_dmabuf_register_importer_ring(ring_info);
+       ret = xen_comm_add_rx_ring(ring_info);
 
        /* Setup communcation channel in opposite direction */
-       if (!hyper_dmabuf_find_exporter_ring(sdomain)) {
-               ret = hyper_dmabuf_exporter_ringbuf_init(sdomain);
+       if (!xen_comm_find_tx_ring(domid)) {
+               ret = hyper_dmabuf_xen_init_tx_rbuf(domid);
        }
 
        return ret;
 }
 
 /* clenas up importer ring create for given source domain */
-void hyper_dmabuf_importer_ringbuf_cleanup(int sdomain)
+void hyper_dmabuf_xen_cleanup_rx_rbuf(int domid)
 {
-       struct hyper_dmabuf_ring_info_import *ring_info;
+       struct xen_comm_rx_ring_info *ring_info;
        struct page *shared_ring;
 
        /* check if we have importer ring created for given sdomain */
-       ring_info = hyper_dmabuf_find_importer_ring(sdomain);
+       ring_info = xen_comm_find_rx_ring(domid);
 
        if (!ring_info)
                return;
 
-       hyper_dmabuf_remove_importer_ring(sdomain);
+       xen_comm_remove_rx_ring(domid);
 
        /* no need to close event channel, will be done by that function */
-       unbind_from_irqhandler(ring_info->irq,  (void*) ring_info);
+       unbind_from_irqhandler(ring_info->irq, (void*)ring_info);
 
        /* unmapping shared ring page */
        shared_ring = virt_to_page(ring_info->ring_back.sring);
@@ -374,23 +411,39 @@ void hyper_dmabuf_importer_ringbuf_cleanup(int sdomain)
        kfree(ring_info);
 }
 
-/* cleans up all exporter/importer rings */
-void hyper_dmabuf_cleanup_ringbufs(void)
+int hyper_dmabuf_xen_init_comm_env(void)
 {
-       
hyper_dmabuf_foreach_exporter_ring(hyper_dmabuf_exporter_ringbuf_cleanup);
-       
hyper_dmabuf_foreach_importer_ring(hyper_dmabuf_importer_ringbuf_cleanup);
+       int ret;
+
+       xen_comm_ring_table_init();
+       ret = xen_comm_setup_data_dir();
+
+       return ret;
 }
 
-int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req, 
int wait)
+/* cleans up all tx/rx rings */
+static void hyper_dmabuf_xen_cleanup_all_rbufs(void)
 {
-       struct hyper_dmabuf_front_ring *ring;
-       struct hyper_dmabuf_ring_rq *new_req;
-       struct hyper_dmabuf_ring_info_export *ring_info;
+       xen_comm_foreach_tx_ring(hyper_dmabuf_xen_cleanup_tx_rbuf);
+       xen_comm_foreach_rx_ring(hyper_dmabuf_xen_cleanup_rx_rbuf);
+}
+
+void hyper_dmabuf_xen_destroy_comm(void)
+{
+       hyper_dmabuf_xen_cleanup_all_rbufs();
+       xen_comm_destroy_data_dir();
+}
+
+int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int 
wait)
+{
+       struct xen_comm_front_ring *ring;
+       struct hyper_dmabuf_req *new_req;
+       struct xen_comm_tx_ring_info *ring_info;
        int notify;
        int timeout = 1000;
 
        /* find a ring info for the channel */
-       ring_info = hyper_dmabuf_find_exporter_ring(domain);
+       ring_info = xen_comm_find_tx_ring(domid);
        if (!ring_info) {
                printk("Can't find ring info for the channel\n");
                return -EINVAL;
@@ -407,6 +460,8 @@ int hyper_dmabuf_send_request(int domain, struct 
hyper_dmabuf_ring_rq *req, int
                return -EIO;
        }
 
+       req->request_id = xen_comm_next_req_id();
+
        /* update req_pending with current request */
        memcpy(&req_pending, req, sizeof(req_pending));
 
@@ -438,19 +493,19 @@ int hyper_dmabuf_send_request(int domain, struct 
hyper_dmabuf_ring_rq *req, int
 }
 
 /* ISR for handling request */
-static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *info)
+static irqreturn_t back_ring_isr(int irq, void *info)
 {
        RING_IDX rc, rp;
-       struct hyper_dmabuf_ring_rq req;
-       struct hyper_dmabuf_ring_rp resp;
+       struct hyper_dmabuf_req req;
+       struct hyper_dmabuf_resp resp;
 
        int notify, more_to_do;
        int ret;
 
-       struct hyper_dmabuf_ring_info_import *ring_info;
-       struct hyper_dmabuf_back_ring *ring;
+       struct xen_comm_rx_ring_info *ring_info;
+       struct xen_comm_back_ring *ring;
 
-       ring_info = (struct hyper_dmabuf_ring_info_import *)info;
+       ring_info = (struct xen_comm_rx_ring_info *)info;
        ring = &ring_info->ring_back;
 
        do {
@@ -490,17 +545,17 @@ static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, 
void *info)
 }
 
 /* ISR for handling responses */
-static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, void *info)
+static irqreturn_t front_ring_isr(int irq, void *info)
 {
        /* front ring only care about response from back */
-       struct hyper_dmabuf_ring_rp *resp;
+       struct hyper_dmabuf_resp *resp;
        RING_IDX i, rp;
        int more_to_do, ret;
 
-       struct hyper_dmabuf_ring_info_export *ring_info;
-       struct hyper_dmabuf_front_ring *ring;
+       struct xen_comm_tx_ring_info *ring_info;
+       struct xen_comm_front_ring *ring;
 
-       ring_info = (struct hyper_dmabuf_ring_info_export *)info;
+       ring_info = (struct xen_comm_tx_ring_info *)info;
        ring = &ring_info->ring_front;
 
        do {
@@ -518,7 +573,7 @@ static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, 
void *info)
                        if (resp->status == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
                                /* parsing response */
                                ret = hyper_dmabuf_msg_parse(ring_info->rdomain,
-                                                       (struct 
hyper_dmabuf_ring_rq *)resp);
+                                                       (struct 
hyper_dmabuf_req *)resp);
 
                                if (ret < 0) {
                                        printk("getting error while parsing 
response\n");
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
index 4ab031a..ba41e9d 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
@@ -3,27 +3,14 @@
 
 #include "xen/interface/io/ring.h"
 #include "xen/xenbus.h"
+#include "../hyper_dmabuf_msg.h"
 
 #define MAX_NUMBER_OF_OPERANDS 9
 
-struct hyper_dmabuf_ring_rq {
-        unsigned int request_id;
-        unsigned int status;
-        unsigned int command;
-        unsigned int operands[MAX_NUMBER_OF_OPERANDS];
-};
-
-struct hyper_dmabuf_ring_rp {
-        unsigned int response_id;
-        unsigned int status;
-        unsigned int command;
-        unsigned int operands[MAX_NUMBER_OF_OPERANDS];
-};
+DEFINE_RING_TYPES(xen_comm, struct hyper_dmabuf_req, struct hyper_dmabuf_resp);
 
-DEFINE_RING_TYPES(hyper_dmabuf, struct hyper_dmabuf_ring_rq, struct 
hyper_dmabuf_ring_rp);
-
-struct hyper_dmabuf_ring_info_export {
-        struct hyper_dmabuf_front_ring ring_front;
+struct xen_comm_tx_ring_info {
+        struct xen_comm_front_ring ring_front;
        int rdomain;
         int gref_ring;
         int irq;
@@ -31,39 +18,35 @@ struct hyper_dmabuf_ring_info_export {
        struct xenbus_watch watch;
 };
 
-struct hyper_dmabuf_ring_info_import {
+struct xen_comm_rx_ring_info {
         int sdomain;
         int irq;
         int evtchn;
-        struct hyper_dmabuf_back_ring ring_back;
+        struct xen_comm_back_ring ring_back;
        struct gnttab_unmap_grant_ref unmap_op;
 };
 
-int32_t hyper_dmabuf_get_domid(void);
-int32_t hyper_dmabuf_setup_data_dir(void);
-int32_t hyper_dmabuf_destroy_data_dir(void);
+int hyper_dmabuf_get_domid(void);
 
-int hyper_dmabuf_next_req_id_export(void);
+int hyper_dmabuf_xen_init_comm_env(void);
 
 /* exporter needs to generated info for page sharing */
-int hyper_dmabuf_exporter_ringbuf_init(int rdomain);
+int hyper_dmabuf_xen_init_tx_rbuf(int domid);
 
-/* importer needs to know about shared page and port numbers for ring buffer 
and event channel */
-int hyper_dmabuf_importer_ringbuf_init(int sdomain);
+/* importer needs to know about shared page and port numbers
+ * for ring buffer and event channel
+ */
+int hyper_dmabuf_xen_init_rx_rbuf(int domid);
 
 /* cleans up exporter ring created for given domain */
-void hyper_dmabuf_exporter_ringbuf_cleanup(int rdomain);
+void hyper_dmabuf_xen_cleanup_tx_rbuf(int domid);
 
 /* cleans up importer ring created for given domain */
-void hyper_dmabuf_importer_ringbuf_cleanup(int sdomain);
+void hyper_dmabuf_xen_cleanup_rx_rbuf(int domid);
 
-/* cleans up all exporter/importer rings */
-void hyper_dmabuf_cleanup_ringbufs(void);
+void hyper_dmabuf_xen_destroy_comm(void);
 
 /* send request to the remote domain */
-int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req, 
int wait);
-
-/* called by interrupt (WORKQUEUE) */
-int hyper_dmabuf_send_response(struct hyper_dmabuf_ring_rp* response, int 
domain);
+int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int 
wait);
 
 #endif // __HYPER_DMABUF_XEN_COMM_H__
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
index a068276..2a1f45b 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
@@ -9,80 +9,73 @@
 #include "hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_xen_comm_list.h"
 
-DECLARE_HASHTABLE(hyper_dmabuf_hash_importer_ring, MAX_ENTRY_IMPORT_RING);
-DECLARE_HASHTABLE(hyper_dmabuf_hash_exporter_ring, MAX_ENTRY_EXPORT_RING);
+DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING);
+DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING);
 
-int hyper_dmabuf_ring_table_init()
+void xen_comm_ring_table_init()
 {
-       hash_init(hyper_dmabuf_hash_importer_ring);
-       hash_init(hyper_dmabuf_hash_exporter_ring);
-       return 0;
-}
-
-int hyper_dmabuf_ring_table_destroy()
-{
-       /* TODO: cleanup tables*/
-       return 0;
+       hash_init(xen_comm_rx_ring_hash);
+       hash_init(xen_comm_tx_ring_hash);
 }
 
-int hyper_dmabuf_register_exporter_ring(struct hyper_dmabuf_ring_info_export 
*ring_info)
+int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info)
 {
-       struct hyper_dmabuf_exporter_ring_info *info_entry;
+       struct xen_comm_tx_ring_info_entry *info_entry;
 
        info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
        info_entry->info = ring_info;
 
-       hash_add(hyper_dmabuf_hash_exporter_ring, &info_entry->node,
+       hash_add(xen_comm_tx_ring_hash, &info_entry->node,
                info_entry->info->rdomain);
 
        return 0;
 }
 
-int hyper_dmabuf_register_importer_ring(struct hyper_dmabuf_ring_info_import 
*ring_info)
+int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info)
 {
-       struct hyper_dmabuf_importer_ring_info *info_entry;
+       struct xen_comm_rx_ring_info_entry *info_entry;
 
        info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
        info_entry->info = ring_info;
 
-       hash_add(hyper_dmabuf_hash_importer_ring, &info_entry->node,
+       hash_add(xen_comm_rx_ring_hash, &info_entry->node,
                info_entry->info->sdomain);
 
        return 0;
 }
 
-struct hyper_dmabuf_ring_info_export *hyper_dmabuf_find_exporter_ring(int 
domid)
+struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid)
 {
-       struct hyper_dmabuf_exporter_ring_info *info_entry;
+       struct xen_comm_tx_ring_info_entry *info_entry;
        int bkt;
 
-       hash_for_each(hyper_dmabuf_hash_exporter_ring, bkt, info_entry, node)
+       hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node)
                if(info_entry->info->rdomain == domid)
                        return info_entry->info;
 
        return NULL;
 }
 
-struct hyper_dmabuf_ring_info_import *hyper_dmabuf_find_importer_ring(int 
domid)
+struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid)
 {
-       struct hyper_dmabuf_importer_ring_info *info_entry;
+       struct xen_comm_rx_ring_info_entry *info_entry;
        int bkt;
 
-       hash_for_each(hyper_dmabuf_hash_importer_ring, bkt, info_entry, node)
+       hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node)
                if(info_entry->info->sdomain == domid)
                        return info_entry->info;
 
        return NULL;
 }
 
-int hyper_dmabuf_remove_exporter_ring(int domid)
+int xen_comm_remove_tx_ring(int domid)
 {
-       struct hyper_dmabuf_exporter_ring_info *info_entry;
+       struct xen_comm_tx_ring_info_entry *info_entry;
        int bkt;
 
-       hash_for_each(hyper_dmabuf_hash_exporter_ring, bkt, info_entry, node)
+       hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node)
                if(info_entry->info->rdomain == domid) {
                        hash_del(&info_entry->node);
                        kfree(info_entry);
@@ -92,12 +85,12 @@ int hyper_dmabuf_remove_exporter_ring(int domid)
        return -1;
 }
 
-int hyper_dmabuf_remove_importer_ring(int domid)
+int xen_comm_remove_rx_ring(int domid)
 {
-       struct hyper_dmabuf_importer_ring_info *info_entry;
+       struct xen_comm_rx_ring_info_entry *info_entry;
        int bkt;
 
-       hash_for_each(hyper_dmabuf_hash_importer_ring, bkt, info_entry, node)
+       hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node)
                if(info_entry->info->sdomain == domid) {
                        hash_del(&info_entry->node);
                        kfree(info_entry);
@@ -107,24 +100,26 @@ int hyper_dmabuf_remove_importer_ring(int domid)
        return -1;
 }
 
-void hyper_dmabuf_foreach_exporter_ring(void (*func)(int rdom))
+void xen_comm_foreach_tx_ring(void (*func)(int domid))
 {
-       struct hyper_dmabuf_exporter_ring_info *info_entry;
+       struct xen_comm_tx_ring_info_entry *info_entry;
        struct hlist_node *tmp;
        int bkt;
 
-       hash_for_each_safe(hyper_dmabuf_hash_exporter_ring, bkt, tmp, 
info_entry, node) {
+       hash_for_each_safe(xen_comm_tx_ring_hash, bkt, tmp,
+                          info_entry, node) {
                func(info_entry->info->rdomain);
        }
 }
 
-void hyper_dmabuf_foreach_importer_ring(void (*func)(int sdom))
+void xen_comm_foreach_rx_ring(void (*func)(int domid))
 {
-       struct hyper_dmabuf_importer_ring_info *info_entry;
+       struct xen_comm_rx_ring_info_entry *info_entry;
        struct hlist_node *tmp;
        int bkt;
 
-       hash_for_each_safe(hyper_dmabuf_hash_importer_ring, bkt, tmp, 
info_entry, node) {
+       hash_for_each_safe(xen_comm_rx_ring_hash, bkt, tmp,
+                          info_entry, node) {
                func(info_entry->info->sdomain);
        }
 }
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
index fd1958c..18b3afd 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
@@ -2,40 +2,38 @@
 #define __HYPER_DMABUF_XEN_COMM_LIST_H__
 
 /* number of bits to be used for exported dmabufs hash table */
-#define MAX_ENTRY_EXPORT_RING 7
+#define MAX_ENTRY_TX_RING 7
 /* number of bits to be used for imported dmabufs hash table */
-#define MAX_ENTRY_IMPORT_RING 7
+#define MAX_ENTRY_RX_RING 7
 
-struct hyper_dmabuf_exporter_ring_info {
-        struct hyper_dmabuf_ring_info_export *info;
+struct xen_comm_tx_ring_info_entry {
+        struct xen_comm_tx_ring_info *info;
         struct hlist_node node;
 };
 
-struct hyper_dmabuf_importer_ring_info {
-        struct hyper_dmabuf_ring_info_import *info;
+struct xen_comm_rx_ring_info_entry {
+        struct xen_comm_rx_ring_info *info;
         struct hlist_node node;
 };
 
-int hyper_dmabuf_ring_table_init(void);
+void xen_comm_ring_table_init(void);
 
-int hyper_dmabuf_ring_table_destroy(void);
+int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info);
 
-int hyper_dmabuf_register_exporter_ring(struct hyper_dmabuf_ring_info_export 
*ring_info);
+int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info);
 
-int hyper_dmabuf_register_importer_ring(struct hyper_dmabuf_ring_info_import 
*ring_info);
+int xen_comm_remove_tx_ring(int domid);
 
-struct hyper_dmabuf_ring_info_export *hyper_dmabuf_find_exporter_ring(int 
domid);
+int xen_comm_remove_rx_ring(int domid);
 
-struct hyper_dmabuf_ring_info_import *hyper_dmabuf_find_importer_ring(int 
domid);
+struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid);
 
-int hyper_dmabuf_remove_exporter_ring(int domid);
-
-int hyper_dmabuf_remove_importer_ring(int domid);
+struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid);
 
 /* iterates over all exporter rings and calls provided function for each of 
them */
-void hyper_dmabuf_foreach_exporter_ring(void (*func)(int rdom));
+void xen_comm_foreach_tx_ring(void (*func)(int domid));
 
 /* iterates over all importer rings and calls provided function for each of 
them */
-void hyper_dmabuf_foreach_importer_ring(void (*func)(int sdom));
+void xen_comm_foreach_rx_ring(void (*func)(int domid));
 
 #endif // __HYPER_DMABUF_XEN_COMM_LIST_H__
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
new file mode 100644
index 0000000..e7b871a
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
@@ -0,0 +1,22 @@
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <xen/grant_table.h>
+#include "../hyper_dmabuf_msg.h"
+#include "../hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_xen_drv.h"
+#include "hyper_dmabuf_xen_comm.h"
+#include "hyper_dmabuf_xen_shm.h"
+
+struct hyper_dmabuf_backend_ops xen_backend_ops = {
+       .get_vm_id = hyper_dmabuf_get_domid,
+       .share_pages = hyper_dmabuf_xen_share_pages,
+       .unshare_pages = hyper_dmabuf_xen_unshare_pages,
+       .map_shared_pages = (void *)hyper_dmabuf_xen_map_shared_pages,
+       .unmap_shared_pages = hyper_dmabuf_xen_unmap_shared_pages,
+       .init_comm_env = hyper_dmabuf_xen_init_comm_env,
+       .destroy_comm = hyper_dmabuf_xen_destroy_comm,
+       .init_rx_ch = hyper_dmabuf_xen_init_rx_rbuf,
+       .init_tx_ch = hyper_dmabuf_xen_init_tx_rbuf,
+       .send_req = hyper_dmabuf_xen_send_req,
+};
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
new file mode 100644
index 0000000..e351c08
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
@@ -0,0 +1,20 @@
+#ifndef __HYPER_DMABUF_XEN_DRV_H__
+#define __HYPER_DMABUF_XEN_DRV_H__
+#include <xen/interface/grant_table.h>
+
+extern struct hyper_dmabuf_backend_ops xen_backend_ops;
+
+/* Main purpose of this structure is to keep
+ * all references created or acquired for sharing
+ * pages with another domain for freeing those later
+ * when unsharing.
+ */
+struct xen_shared_pages_info {
+        grant_ref_t lvl3_gref; /* top level refid */
+        grant_ref_t *lvl3_table; /* page of top level addressing, it contains 
refids of 2nd level pages */
+        grant_ref_t *lvl2_table; /* table of 2nd level pages, that contains 
refids to data pages */
+        struct gnttab_unmap_grant_ref* unmap_ops; /* unmap ops for mapped 
pages */
+        struct page **data_pages; /* data pages to be unmapped */
+};
+
+#endif // __HYPER_DMABUF_XEN_COMM_H__
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
new file mode 100644
index 0000000..c0045d4
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -0,0 +1,356 @@
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <xen/grant_table.h>
+#include <asm/xen/page.h>
+#include "hyper_dmabuf_xen_drv.h"
+
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+/*
+ * Creates 2 level page directory structure for referencing shared pages.
+ * Top level page is a single page that contains up to 1024 refids that
+ * point to 2nd level pages.
+ * Each 2nd level page contains up to 1024 refids that point to shared
+ * data pages.
+ * There will always be one top level page and number of 2nd level pages
+ * depends on number of shared data pages.
+ *
+ *      3rd level page                2nd level pages            Data pages
+ * +-------------------------+   ┌>+--------------------+ ┌--->+------------+
+ * |2nd level page 0 refid   |---┘ |Data page 0 refid   |-┘    |Data page 0 |
+ * |2nd level page 1 refid   |---┐ |Data page 1 refid   |-┐    +------------+
+ * |           ...           |   | |     ....           | |
+ * |2nd level page 1023 refid|-┐ | |Data page 1023 refid| └--->+------------+
+ * +-------------------------+ | | +--------------------+      |Data page 1 |
+ *                             | |                             +------------+
+ *                             | └>+--------------------+
+ *                             |   |Data page 1024 refid|
+ *                             |   |Data page 1025 refid|
+ *                             |   |       ...          |
+ *                             |   |Data page 2047 refid|
+ *                             |   +--------------------+
+ *                             |
+ *                             |        .....
+ *                             └-->+-----------------------+
+ *                                 |Data page 1047552 refid|
+ *                                 |Data page 1047553 refid|
+ *                                 |       ...             |
+ *                                 |Data page 1048575 
refid|-->+------------------+
+ *                                 +-----------------------+   |Data page 
1048575 |
+ *                                                             
+------------------+
+ *
+ * Using such 2 level structure it is possible to reference up to 4GB of
+ * shared data using single refid pointing to top level page.
+ *
+ * Returns refid of top level page.
+ */
+int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
+                                void **refs_info)
+{
+       grant_ref_t lvl3_gref;
+       grant_ref_t *lvl2_table;
+       grant_ref_t *lvl3_table;
+
+       /*
+        * Calculate number of pages needed for 2nd level addresing:
+        */
+       int n_lvl2_grefs = (nents/REFS_PER_PAGE +
+                          ((nents % REFS_PER_PAGE) ? 1: 0));
+
+       struct xen_shared_pages_info *sh_pages_info;
+       int i;
+
+       lvl3_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, 1);
+       lvl2_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs);
+
+       sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
+       *refs_info = (void *)sh_pages_info;
+
+       /* share data pages in rw mode*/
+       for (i=0; i<nents; i++) {
+               lvl2_table[i] = gnttab_grant_foreign_access(domid,
+                                                           
pfn_to_mfn(page_to_pfn(pages[i])),
+                                                           0);
+       }
+
+       /* Share 2nd level addressing pages in readonly mode*/
+       for (i=0; i< n_lvl2_grefs; i++) {
+               lvl3_table[i] = gnttab_grant_foreign_access(domid,
+                                                          
virt_to_mfn((unsigned long)lvl2_table+i*PAGE_SIZE ),
+                                                          1);
+       }
+
+       /* Share lvl3_table in readonly mode*/
+       lvl3_gref = gnttab_grant_foreign_access(domid,
+                                               virt_to_mfn((unsigned 
long)lvl3_table),
+                                               1);
+
+
+       /* Store lvl3_table page to be freed later */
+       sh_pages_info->lvl3_table = lvl3_table;
+
+       /* Store lvl2_table pages to be freed later */
+       sh_pages_info->lvl2_table = lvl2_table;
+
+       /* Store exported pages refid to be unshared later */
+       sh_pages_info->lvl3_gref = lvl3_gref;
+
+       return lvl3_gref;
+}
+
/* Ends foreign access for every grant created by
 * hyper_dmabuf_xen_share_pages() and frees the 2-level addressing tables.
 *
 * Teardown order mirrors the share side: data-page grants first, then
 * the 2nd level table grants, then the single top level grant, and only
 * afterwards the backing pages themselves.  Returns 0 always.
 */
int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
	struct xen_shared_pages_info *sh_pages_info;
	/* number of 2nd level pages = ceil(nents / REFS_PER_PAGE),
	 * must match the count computed on the share side */
	int n_lvl2_grefs = (nents/REFS_PER_PAGE + ((nents % REFS_PER_PAGE) ? 1: 0));
	int i;

	sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);

	/* Idempotence guard: a previous unshare resets these fields, so a
	 * repeated call is a logged no-op rather than a double free */
	if (sh_pages_info->lvl3_table == NULL ||
	    sh_pages_info->lvl2_table ==  NULL ||
	    sh_pages_info->lvl3_gref == -1) {
		printk("gref table for hyper_dmabuf already cleaned up\n");
		return 0;
	}

	/* End foreign access for data pages, but do not free them */
	for (i = 0; i < nents; i++) {
		/* NOTE(review): gnttab_query_foreign_access() returns non-zero
		 * while the grant is still in use by the remote domain, so the
		 * "refid not shared" message polarity looks inverted — confirm
		 * the intended semantics.
		 */
		if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i])) {
			printk("refid not shared !!\n");
		}
		gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0);
		gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]);
	}

	/* End foreign access for 2nd level addressing pages */
	for (i = 0; i < n_lvl2_grefs; i++) {
		if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i])) {
			printk("refid not shared !!\n");
		}
		if (!gnttab_end_foreign_access_ref(sh_pages_info->lvl3_table[i], 1)) {
			printk("refid still in use!!!\n");
		}
		gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]);
	}

	/* End foreign access for top level addressing page */
	if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref)) {
		printk("gref not shared !!\n");
	}

	gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1);
	gnttab_free_grant_reference(sh_pages_info->lvl3_gref);

	/* freeing all pages used for 2 level addressing */
	/* NOTE(review): free_pages() takes an allocation *order*; these values
	 * do mirror what the share side passed to __get_free_pages(), but both
	 * sides appear to treat the order as a page count — confirm and fix
	 * the pair together (get_order()) if so.
	 */
	free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs);
	free_pages((unsigned long)sh_pages_info->lvl3_table, 1);

	sh_pages_info->lvl3_gref = -1;
	sh_pages_info->lvl2_table = NULL;
	sh_pages_info->lvl3_table = NULL;
	kfree(sh_pages_info);
	sh_pages_info = NULL;

	return 0;
}
+
+/*
+ * Maps provided top level ref id and then return array of pages containing 
data refs.
+ */
+struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int 
nents, void **refs_info)
+{
+       struct page *lvl3_table_page;
+       struct page **lvl2_table_pages;
+       struct page **data_pages;
+       struct xen_shared_pages_info *sh_pages_info;
+
+       grant_ref_t *lvl3_table;
+       grant_ref_t *lvl2_table;
+
+       struct gnttab_map_grant_ref lvl3_map_ops;
+       struct gnttab_unmap_grant_ref lvl3_unmap_ops;
+
+       struct gnttab_map_grant_ref *lvl2_map_ops;
+       struct gnttab_unmap_grant_ref *lvl2_unmap_ops;
+
+       struct gnttab_map_grant_ref *data_map_ops;
+       struct gnttab_unmap_grant_ref *data_unmap_ops;
+
+       int nents_last = nents % REFS_PER_PAGE;
+       int n_lvl2_grefs = (nents / REFS_PER_PAGE) + ((nents_last > 0) ? 1 : 0);
+       int i, j, k;
+
+       sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
+       *refs_info = (void *) sh_pages_info;
+
+       lvl2_table_pages = kcalloc(sizeof(struct page*), n_lvl2_grefs, 
GFP_KERNEL);
+       data_pages = kcalloc(sizeof(struct page*), nents, GFP_KERNEL);
+
+       lvl2_map_ops = kcalloc(sizeof(*lvl2_map_ops), n_lvl2_grefs, GFP_KERNEL);
+       lvl2_unmap_ops = kcalloc(sizeof(*lvl2_unmap_ops), n_lvl2_grefs, 
GFP_KERNEL);
+
+       data_map_ops = kcalloc(sizeof(*data_map_ops), nents, GFP_KERNEL);
+       data_unmap_ops = kcalloc(sizeof(*data_unmap_ops), nents, GFP_KERNEL);
+
+       /* Map top level addressing page */
+       if (gnttab_alloc_pages(1, &lvl3_table_page)) {
+               printk("Cannot allocate pages\n");
+               return NULL;
+       }
+
+       lvl3_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl3_table_page));
+
+       gnttab_set_map_op(&lvl3_map_ops, (unsigned long)lvl3_table, 
GNTMAP_host_map | GNTMAP_readonly,
+                         (grant_ref_t)lvl3_gref, domid);
+
+       gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table, 
GNTMAP_host_map | GNTMAP_readonly, -1);
+
+       if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) {
+               printk("\nxen: dom0: HYPERVISOR map grant ref failed");
+               return NULL;
+       }
+
+       if (lvl3_map_ops.status) {
+               printk("\nxen: dom0: HYPERVISOR map grant ref failed status = 
%d",
+                       lvl3_map_ops.status);
+               return NULL;
+       } else {
+               lvl3_unmap_ops.handle = lvl3_map_ops.handle;
+       }
+
+       /* Map all second level pages */
+       if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) {
+               printk("Cannot allocate pages\n");
+               return NULL;
+       }
+
+       for (i = 0; i < n_lvl2_grefs; i++) {
+               lvl2_table = (grant_ref_t 
*)pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i]));
+               gnttab_set_map_op(&lvl2_map_ops[i], (unsigned long)lvl2_table, 
GNTMAP_host_map | GNTMAP_readonly,
+                                 lvl3_table[i], domid);
+               gnttab_set_unmap_op(&lvl2_unmap_ops[i], (unsigned 
long)lvl2_table, GNTMAP_host_map | GNTMAP_readonly, -1);
+       }
+
+       /* Unmap top level page, as it won't be needed any longer */
+       if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL, &lvl3_table_page, 1)) {
+               printk("\xen: cannot unmap top level page\n");
+               return NULL;
+       }
+
+       if (gnttab_map_refs(lvl2_map_ops, NULL, lvl2_table_pages, 
n_lvl2_grefs)) {
+               printk("\nxen: dom0: HYPERVISOR map grant ref failed");
+               return NULL;
+       }
+
+       /* Checks if pages were mapped correctly */
+       for (i = 0; i < n_lvl2_grefs; i++) {
+               if (lvl2_map_ops[i].status) {
+                       printk("\nxen: dom0: HYPERVISOR map grant ref failed 
status = %d",
+                              lvl2_map_ops[i].status);
+                       return NULL;
+               } else {
+                       lvl2_unmap_ops[i].handle = lvl2_map_ops[i].handle;
+               }
+       }
+
+       if (gnttab_alloc_pages(nents, data_pages)) {
+               printk("Cannot allocate pages\n");
+               return NULL;
+       }
+
+       k = 0;
+
+       for (i = 0; i < (nents_last ? n_lvl2_grefs - 1 : n_lvl2_grefs); i++) {
+               lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i]));
+               for (j = 0; j < REFS_PER_PAGE; j++) {
+                       gnttab_set_map_op(&data_map_ops[k],
+                                         (unsigned 
long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+                                         GNTMAP_host_map,
+                                         lvl2_table[j], domid);
+
+                       gnttab_set_unmap_op(&data_unmap_ops[k],
+                                           (unsigned 
long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+                                           GNTMAP_host_map, -1);
+                       k++;
+               }
+       }
+
+       /* for grefs in the last lvl2 table page */
+       lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[n_lvl2_grefs - 
1]));
+
+       for (j = 0; j < nents_last; j++) {
+               gnttab_set_map_op(&data_map_ops[k],
+                                 (unsigned 
long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+                                 GNTMAP_host_map,
+                                 lvl2_table[j], domid);
+
+               gnttab_set_unmap_op(&data_unmap_ops[k],
+                                   (unsigned 
long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+                                   GNTMAP_host_map, -1);
+               k++;
+       }
+
+       if (gnttab_map_refs(data_map_ops, NULL, data_pages, nents)) {
+               printk("\nxen: dom0: HYPERVISOR map grant ref failed\n");
+               return NULL;
+       }
+
+       /* unmapping lvl2 table pages */
+       if (gnttab_unmap_refs(lvl2_unmap_ops, NULL, lvl2_table_pages,
+                             n_lvl2_grefs)) {
+               printk("Cannot unmap 2nd level refs\n");
+               return NULL;
+       }
+
+       for (i = 0; i < nents; i++) {
+               if (data_map_ops[i].status) {
+                       printk("\nxen: dom0: HYPERVISOR map grant ref failed 
status = %d\n",
+                               data_map_ops[i].status);
+                       return NULL;
+               } else {
+                       data_unmap_ops[i].handle = data_map_ops[i].handle;
+               }
+       }
+
+       /* store these references for unmapping in the future */
+       sh_pages_info->unmap_ops = data_unmap_ops;
+       sh_pages_info->data_pages = data_pages;
+
+       gnttab_free_pages(1, &lvl3_table_page);
+       gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages);
+       kfree(lvl2_table_pages);
+       kfree(lvl2_map_ops);
+       kfree(lvl2_unmap_ops);
+       kfree(data_map_ops);
+
+       return data_pages;
+}
+
+int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents) {
+       struct xen_shared_pages_info *sh_pages_info;
+
+       sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);
+
+       if (sh_pages_info->unmap_ops == NULL ||
+           sh_pages_info->data_pages == NULL) {
+               printk("Imported pages already cleaned up or buffer was not 
imported yet\n");
+               return 0;
+       }
+
+       if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL,
+                             sh_pages_info->data_pages, nents) ) {
+               printk("Cannot unmap data pages\n");
+               return -EINVAL;
+       }
+
+       gnttab_free_pages(nents, sh_pages_info->data_pages);
+
+       kfree(sh_pages_info->data_pages);
+       kfree(sh_pages_info->unmap_ops);
+       sh_pages_info->unmap_ops = NULL;
+       sh_pages_info->data_pages = NULL;
+       kfree(sh_pages_info);
+       sh_pages_info = NULL;
+
+       return 0;
+}
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h
new file mode 100644
index 0000000..2287804
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h
@@ -0,0 +1,19 @@
#ifndef __HYPER_DMABUF_XEN_SHM_H__
#define __HYPER_DMABUF_XEN_SHM_H__

/* Grants foreign access to all data pages, collects the resulting refids
 * into 2nd-level tables referenced from a single top-level table, and
 * returns the refid of that top-level table.
 */
int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
				 void **refs_info);

/* Ends foreign access previously set up by hyper_dmabuf_xen_share_pages() */
int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents);

/* Maps the given top-level refid and returns the array of pages that
 * contain the shared data.
 */
struct page **hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid,
						int nents, void **refs_info);

/* Unmaps and frees pages mapped by hyper_dmabuf_xen_map_shared_pages() */
int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents);

#endif /* __HYPER_DMABUF_XEN_SHM_H__ */
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.