[Xen-devel] [RFC PATCH 54/60] hyper_dmabuf: 'backend_ops' reduced to 'bknd_ops' and 'ops' to 'bknd_ops'
To make type's name compact, *_backend_ops is changed to '*_bknd_ops'.
Also 'ops' is now changed to 'bknd_ops' to clarify it is a data structure
with entry points of 'backend' operations.

Signed-off-by: Dongwon Kim <dongwon.kim@xxxxxxxxx>
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c      | 14 +++++------
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h      |  4 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c    | 28 +++++++++++-----------
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c      | 10 ++++----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c |  4 ++--
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c  |  2 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h  |  2 +-
 7 files changed, 33 insertions(+), 31 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 387cc63..161fee7 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -276,13 +276,13 @@ static int __init hyper_dmabuf_drv_init(void)
 
         /* currently only supports XEN hypervisor */
 #ifdef CONFIG_HYPER_DMABUF_XEN
-        hy_drv_priv->backend_ops = &xen_backend_ops;
+        hy_drv_priv->bknd_ops = &xen_bknd_ops;
 #else
-        hy_drv_priv->backend_ops = NULL;
+        hy_drv_priv->bknd_ops = NULL;
         printk(KERN_ERR "hyper_dmabuf drv currently supports XEN only.\n");
 #endif
 
-        if (hy_drv_priv->backend_ops == NULL) {
+        if (hy_drv_priv->bknd_ops == NULL) {
                 printk(KERN_ERR "Hyper_dmabuf: no backend found\n");
                 return -1;
         }
@@ -301,7 +301,7 @@ static int __init hyper_dmabuf_drv_init(void)
         ret = hyper_dmabuf_table_init();
         if (ret < 0) {
                 dev_err(hy_drv_priv->dev,
-                        "failed to initialize table for exported/imported entries\n");
+                        "fail to init table for exported/imported entries\n");
                 mutex_unlock(&hy_drv_priv->lock);
                 kfree(hy_drv_priv);
                 return ret;
@@ -330,9 +330,9 @@ static int __init hyper_dmabuf_drv_init(void)
         hy_drv_priv->pending = 0;
 #endif
 
-        hy_drv_priv->domid = hy_drv_priv->backend_ops->get_vm_id();
+        hy_drv_priv->domid = hy_drv_priv->bknd_ops->get_vm_id();
 
-        ret = hy_drv_priv->backend_ops->init_comm_env();
+        ret = hy_drv_priv->bknd_ops->init_comm_env();
         if (ret < 0) {
                 dev_dbg(hy_drv_priv->dev,
                         "failed to initialize comm-env.\n");
@@ -360,7 +360,7 @@ static void hyper_dmabuf_drv_exit(void)
         /* hash tables for export/import entries and ring_infos */
         hyper_dmabuf_table_destroy();
 
-        hy_drv_priv->backend_ops->destroy_comm();
+        hy_drv_priv->bknd_ops->destroy_comm();
 
         /* destroy workqueue */
         if (hy_drv_priv->work_queue)
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 049c694..4a51f9e 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -48,7 +48,7 @@ struct hyper_dmabuf_private {
         struct list_reusable_id *id_queue;
 
         /* backend ops - hypervisor specific */
-        struct hyper_dmabuf_backend_ops *backend_ops;
+        struct hyper_dmabuf_bknd_ops *bknd_ops;
 
         /* device global lock */
         /* TODO: might need a lock per resource (e.g. EXPORT LIST) */
@@ -72,7 +72,7 @@ struct list_reusable_id {
         struct list_head list;
 };
 
-struct hyper_dmabuf_backend_ops {
+struct hyper_dmabuf_bknd_ops {
         /* retreiving id of current virtual machine */
         int (*get_vm_id)(void);
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index d11f609..d1970c8 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -44,7 +44,7 @@
 static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data)
 {
         struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr;
-        struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+        struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
         int ret = 0;
 
         if (!data) {
@@ -53,7 +53,7 @@ static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data)
         }
 
         tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data;
-        ret = ops->init_tx_ch(tx_ch_attr->remote_domain);
+        ret = bknd_ops->init_tx_ch(tx_ch_attr->remote_domain);
 
         return ret;
 }
@@ -61,7 +61,7 @@ static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data)
 static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data)
 {
         struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr;
-        struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+        struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
         int ret = 0;
 
         if (!data) {
@@ -71,7 +71,7 @@ static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data)
 
         rx_ch_attr = (struct ioctl_hyper_dmabuf_rx_ch_setup *)data;
 
-        ret = ops->init_rx_ch(rx_ch_attr->source_domain);
+        ret = bknd_ops->init_rx_ch(rx_ch_attr->source_domain);
 
         return ret;
 }
@@ -79,7 +79,7 @@ static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data)
 static int send_export_msg(struct exported_sgt_info *exported,
                            struct pages_info *pg_info)
 {
-        struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+        struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
         struct hyper_dmabuf_req *req;
         int op[MAX_NUMBER_OF_OPERANDS] = {0};
         int ret, i;
@@ -94,7 +94,7 @@ static int send_export_msg(struct exported_sgt_info *exported,
         op[4] = pg_info->nents;
         op[5] = pg_info->frst_ofst;
         op[6] = pg_info->last_len;
-        op[7] = ops->share_pages(pg_info->pgs, exported->rdomid,
+        op[7] = bknd_ops->share_pages(pg_info->pgs, exported->rdomid,
                                  pg_info->nents, &exported->refs_info);
         if (op[7] < 0) {
                 dev_err(hy_drv_priv->dev, "pages sharing failed\n");
@@ -115,7 +115,7 @@ static int send_export_msg(struct exported_sgt_info *exported,
         /* composing a message to the importer */
         hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]);
 
-        ret = ops->send_req(exported->rdomid, req, true);
+        ret = bknd_ops->send_req(exported->rdomid, req, true);
 
         kfree(req);
 
@@ -423,7 +423,7 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
 {
         struct ioctl_hyper_dmabuf_export_fd *export_fd_attr =
                         (struct ioctl_hyper_dmabuf_export_fd *)data;
-        struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+        struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
         struct imported_sgt_info *imported;
         struct hyper_dmabuf_req *req;
         struct page **data_pgs;
@@ -465,7 +465,7 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
 
         hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD, &op[0]);
 
-        ret = ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true);
+        ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true);
 
         if (ret < 0) {
                 /* in case of timeout other end eventually will receive request,
@@ -473,7 +473,7 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
                  */
                 hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED,
                                         &op[0]);
-                ops->send_req(op[0], req, false);
+                bknd_ops->send_req(op[0], req, false);
                 kfree(req);
                 dev_err(hy_drv_priv->dev,
                         "Failed to create sgt or notify exporter\n");
@@ -512,7 +512,7 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
                 imported->hid.id, imported->hid.rng_key[0],
                 imported->hid.rng_key[1], imported->hid.rng_key[2]);
 
-        data_pgs = ops->map_shared_pages(imported->ref_handle,
+        data_pgs = bknd_ops->map_shared_pages(imported->ref_handle,
                                          HYPER_DMABUF_DOM_ID(imported->hid),
                                          imported->nents,
                                          &imported->refs_info);
@@ -536,7 +536,7 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
                 hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED,
                                         &op[0]);
-                ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req,
+                bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req,
                               false);
                 kfree(req);
                 mutex_unlock(&hy_drv_priv->lock);
 
@@ -570,7 +570,7 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
 static void delayed_unexport(struct work_struct *work)
 {
         struct hyper_dmabuf_req *req;
-        struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+        struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
         struct exported_sgt_info *exported =
                 container_of(work, struct exported_sgt_info, unexport.work);
         int op[4];
@@ -602,7 +602,7 @@ static void delayed_unexport(struct work_struct *work)
         /* Now send unexport request to remote domain, marking
          * that buffer should not be used anymore
          */
-        ret = ops->send_req(exported->rdomid, req, true);
+        ret = bknd_ops->send_req(exported->rdomid, req, true);
         if (ret < 0) {
                 dev_err(hy_drv_priv->dev,
                         "unexport message for buffer {id:%d key:%d %d %d} failed\n",
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
index bf805b1..e85f619 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
@@ -52,7 +52,7 @@ static int dmabuf_refcount(struct dma_buf *dma_buf)
 static int sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
 {
         struct hyper_dmabuf_req *req;
-        struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+        struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
         int op[5];
         int i;
         int ret;
@@ -72,7 +72,8 @@ static int sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
         hyper_dmabuf_create_req(req, HYPER_DMABUF_OPS_TO_SOURCE, &op[0]);
 
         /* send request and wait for a response */
-        ret = ops->send_req(HYPER_DMABUF_DOM_ID(hid), req, WAIT_AFTER_SYNC_REQ);
+        ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(hid), req,
+                                 WAIT_AFTER_SYNC_REQ);
 
         if (ret < 0) {
                 dev_dbg(hy_drv_priv->dev,
@@ -186,7 +187,7 @@ static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
 static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
 {
         struct imported_sgt_info *imported;
-        struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+        struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
         int ret;
         int finish;
 
@@ -201,7 +202,8 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
         imported->importers--;
 
         if (imported->importers == 0) {
-                ops->unmap_shared_pages(&imported->refs_info, imported->nents);
+                bknd_ops->unmap_shared_pages(&imported->refs_info,
+                                             imported->nents);
 
                 if (imported->sgt) {
                         sg_free_table(imported->sgt);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
index 9ad7ab9..d15eb17 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
@@ -170,7 +170,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
         struct attachment_list *attachl;
         struct kmap_vaddr_list *va_kmapl;
         struct vmap_vaddr_list *va_vmapl;
-        struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+        struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
 
         if (!exported) {
                 dev_err(hy_drv_priv->dev, "invalid hyper_dmabuf_id\n");
@@ -231,7 +231,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
         }
 
         /* Start cleanup of buffer in reverse order to exporting */
-        ops->unshare_pages(&exported->refs_info, exported->nents);
+        bknd_ops->unshare_pages(&exported->refs_info, exported->nents);
 
         /* unmap dma-buf */
         dma_buf_unmap_attachment(exported->active_attached->attach,
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
index 23965b8..1d7249d 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
@@ -30,7 +30,7 @@
 #include "hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_xen_shm.h"
 
-struct hyper_dmabuf_backend_ops xen_backend_ops = {
+struct hyper_dmabuf_bknd_ops xen_bknd_ops = {
         .get_vm_id = xen_be_get_domid,
         .share_pages = xen_be_share_pages,
         .unshare_pages = xen_be_unshare_pages,
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
index e5bff09..a4902b7 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
@@ -26,7 +26,7 @@
 #define __HYPER_DMABUF_XEN_DRV_H__
 #include <xen/interface/grant_table.h>
 
-extern struct hyper_dmabuf_backend_ops xen_backend_ops;
+extern struct hyper_dmabuf_bknd_ops xen_bknd_ops;
 
 /* Main purpose of this structure is to keep
  * all references created or acquired for sharing
-- 
2.7.4
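The pattern behind the rename, for readers skimming the series: 'bknd_ops' is a per-hypervisor table of
function pointers that the core driver selects once at init time and then calls through, so the backend
(currently Xen only) stays swappable. The stand-alone C sketch below illustrates that shape under stated
assumptions; the trimmed-down member list, the stand-in xen_get_vm_id()/xen_init_comm() helpers and the
user-space main() are illustrative only, not the driver's actual interface.

/*
 * Simplified, user-space sketch of the backend-ops pattern this patch renames.
 * The real struct hyper_dmabuf_bknd_ops (hyper_dmabuf_drv.h) has many more
 * entry points (share_pages, map_shared_pages, send_req, ...); only two are
 * kept here to show the shape of the change.
 */
#include <stdio.h>

struct hyper_dmabuf_bknd_ops {          /* was: struct hyper_dmabuf_backend_ops */
        int (*get_vm_id)(void);         /* id of the current virtual machine */
        int (*init_comm_env)(void);     /* set up hypervisor-specific channels */
};

/* Hypothetical Xen-flavoured implementations, standing in for xen_be_*() */
static int xen_get_vm_id(void)  { return 1; }
static int xen_init_comm(void)  { return 0; }

static struct hyper_dmabuf_bknd_ops xen_bknd_ops = {   /* was: xen_backend_ops */
        .get_vm_id     = xen_get_vm_id,
        .init_comm_env = xen_init_comm,
};

struct hyper_dmabuf_private {
        struct hyper_dmabuf_bknd_ops *bknd_ops;        /* was: backend_ops */
        int domid;
};

int main(void)
{
        struct hyper_dmabuf_private priv = { .bknd_ops = &xen_bknd_ops };

        /* Callers now take a local 'bknd_ops' pointer instead of 'ops', which
         * makes it obvious the call goes through the backend table. */
        struct hyper_dmabuf_bknd_ops *bknd_ops = priv.bknd_ops;

        priv.domid = bknd_ops->get_vm_id();
        if (bknd_ops->init_comm_env() < 0)
                printf("failed to initialize comm-env\n");
        else
                printf("backend ready, domid %d\n", priv.domid);

        return 0;
}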