[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v4 10/34] vmap: Add vmalloc_cb and vfree_cb
For those users who want to supply their own vmap callback. To be called _after_ the pages have been allocated and the vmap API is ready to hand out virtual addresses. Instead of using the vmap ones it can call the callback which will be responsible for generating the virtual address. This allows users (such as xSplice) to provide their own mechanism to set the page flags. The users (such as patch titled "xsplice: Implement payload loading") can wrap the calls to __vmap to accomplish this. We also provide a mechanism for the caller to squirrel away the MFN array in case they want to modify the virtual addresses easily. We also provide the freeing code path - to use the vfree_cb to take care of tearing down the virtual addresses. Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> --- Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx> Cc: Jan Beulich <jbeulich@xxxxxxxx> Cc: Keir Fraser <keir@xxxxxxx> Cc: Tim Deegan <tim@xxxxxxx> --- --- xen/common/vmap.c | 35 ++++++++++++++++++++++++++++------- xen/include/xen/vmap.h | 14 ++++++++++++++ 2 files changed, 42 insertions(+), 7 deletions(-) diff --git a/xen/common/vmap.c b/xen/common/vmap.c index 1f1da1b..d1f18fb 100644 --- a/xen/common/vmap.c +++ b/xen/common/vmap.c @@ -216,7 +216,7 @@ void vunmap(const void *va) vm_free(va); } -void *vmalloc(size_t size) +void *vmalloc_cb(size_t size, vmap_cb_t vmap_cb, mfn_t **mfn_array) { mfn_t *mfn; size_t pages, i; @@ -238,11 +238,15 @@ void *vmalloc(size_t size) mfn[i] = _mfn(page_to_mfn(pg)); } - va = vmap(mfn, pages); + va = vmap_cb ? 
(vmap_cb)(mfn, pages) : vmap(mfn, pages); if ( va == NULL ) goto error; - xfree(mfn); + if ( mfn_array ) + *mfn_array = mfn; + else + xfree(mfn); + return va; error: @@ -252,6 +256,11 @@ void *vmalloc(size_t size) return NULL; } +void *vmalloc(size_t size) +{ + return vmalloc_cb(size, NULL, NULL); +} + void *vzalloc(size_t size) { void *p = vmalloc(size); @@ -266,7 +275,7 @@ void *vzalloc(size_t size) return p; } -void vfree(void *va) +void vfree_cb(void *va, unsigned int nr_pages, vfree_cb_t vfree_cb_fnc) { unsigned int i, pages; struct page_info *pg; @@ -275,8 +284,12 @@ void vfree(void *va) if ( !va ) return; - pages = vm_size(va); - ASSERT(pages); + if ( !vfree_cb_fnc ) + { + pages = vm_size(va); + ASSERT(pages); + } else + pages = nr_pages; for ( i = 0; i < pages; i++ ) { @@ -285,9 +298,17 @@ void vfree(void *va) ASSERT(page); page_list_add(page, &pg_list); } - vunmap(va); + if ( !vfree_cb_fnc ) + vunmap(va); + else + vfree_cb_fnc(va, nr_pages); while ( (pg = page_list_remove_head(&pg_list)) != NULL ) free_domheap_page(pg); } + +void vfree(void *va) +{ + vfree_cb(va, 0, NULL); +} #endif diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h index 5671ac8..054eb25 100644 --- a/xen/include/xen/vmap.h +++ b/xen/include/xen/vmap.h @@ -12,9 +12,23 @@ void *__vmap(const mfn_t *mfn, unsigned int granularity, void *vmap(const mfn_t *mfn, unsigned int nr); void vunmap(const void *); void *vmalloc(size_t size); + +/* + * Callback for vmalloc_cb to use when vmap-ing. + */ +typedef void *(*vmap_cb_t)(const mfn_t *mfn, unsigned int pages); +void *vmalloc_cb(size_t size, vmap_cb_t vmap_cb, mfn_t **); + void *vzalloc(size_t size); void vfree(void *va); +/* + * Callback for vfree to use an equivalent of vmap_cb_t + * when tearing down. 
+ */ +typedef void (*vfree_cb_t)(void *va, unsigned int pages); +void vfree_cb(void *va, unsigned int pages, vfree_cb_t vfree_cb_fnc); + void __iomem *ioremap(paddr_t, size_t); static inline void iounmap(void __iomem *va) -- 2.5.0 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |