[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v5 04/28] vmap: Add vmalloc_cb and vfree_cb
For those users who want to supply their own vmap callback. To be called _after_ the pages have been allocated and the vmap API is ready to hand out virtual addresses. Instead of using the vmap API it can call the callback, which will be responsible for generating the virtual address. This allows users (such as xSplice) to provide their own mechanism to set the page flags. The users (such as patch titled "xsplice: Implement payload loading") can wrap the calls to __vmap to accomplish this. We also provide a mechanism for the caller to squirrel away the MFN array in case they want to modify the virtual addresses easily. We also provide the freeing code path - to use the vfree_cb callback to take care of tearing down the virtual addresses. Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> --- Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx> Cc: Jan Beulich <jbeulich@xxxxxxxx> Cc: Keir Fraser <keir@xxxxxxx> Cc: Tim Deegan <tim@xxxxxxx> v4: New patch. v5: Update per Jan's comments. --- --- xen/common/vmap.c | 33 ++++++++++++++++++++++++++------- xen/include/xen/vmap.h | 14 ++++++++++++++ 2 files changed, 40 insertions(+), 7 deletions(-) diff --git a/xen/common/vmap.c b/xen/common/vmap.c index 134eda0..08e7859 100644 --- a/xen/common/vmap.c +++ b/xen/common/vmap.c @@ -216,7 +216,7 @@ void vunmap(const void *va) vm_free(va); } -void *vmalloc(size_t size) +void *vmalloc_cb(size_t size, vmap_cb_t *vmap_cb, mfn_t **mfn_array) { mfn_t *mfn; size_t pages, i; @@ -238,11 +238,15 @@ void *vmalloc(size_t size) mfn[i] = _mfn(page_to_mfn(pg)); } - va = vmap(mfn, pages); + va = vmap_cb ? 
vmap_cb(mfn, pages) : vmap(mfn, pages); if ( va == NULL ) goto error; - xfree(mfn); + if ( mfn_array ) + *mfn_array = mfn; + else + xfree(mfn); + return va; error: @@ -252,6 +256,11 @@ void *vmalloc(size_t size) return NULL; } +void *vmalloc(size_t size) +{ + return vmalloc_cb(size, NULL, NULL); +} + void *vzalloc(size_t size) { void *p = vmalloc(size); @@ -266,16 +275,15 @@ void *vzalloc(size_t size) return p; } -void vfree(void *va) +void vfree_cb(void *va, unsigned int pages, vfree_cb_t *vfree_cb_fnc) { - unsigned int i, pages; + unsigned int i; struct page_info *pg; PAGE_LIST_HEAD(pg_list); if ( !va ) return; - pages = vm_size(va); ASSERT(pages); for ( i = 0; i < pages; i++ ) @@ -285,9 +293,20 @@ void vfree(void *va) ASSERT(page); page_list_add(page, &pg_list); } - vunmap(va); + if ( !vfree_cb_fnc ) + vunmap(va); + else + vfree_cb_fnc(va, pages); while ( (pg = page_list_remove_head(&pg_list)) != NULL ) free_domheap_page(pg); } + +void vfree(void *va) +{ + if ( !va ) + return; + + vfree_cb(va, vm_size(va), NULL); +} #endif diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h index 5671ac8..02cadb3 100644 --- a/xen/include/xen/vmap.h +++ b/xen/include/xen/vmap.h @@ -12,9 +12,23 @@ void *__vmap(const mfn_t *mfn, unsigned int granularity, void *vmap(const mfn_t *mfn, unsigned int nr); void vunmap(const void *); void *vmalloc(size_t size); + +/* + * Callback for vmalloc_cb to use when vmap-ing. + */ +typedef void *(vmap_cb_t)(const mfn_t *mfn, unsigned int pages); +void *vmalloc_cb(size_t size, vmap_cb_t *vmap_cb, mfn_t **); + void *vzalloc(size_t size); void vfree(void *va); +/* + * Callback for vfree to use an equivalent of vmap_cb_t + * when tearing down. 
+ */ +typedef void (vfree_cb_t)(void *va, unsigned int pages); +void vfree_cb(void *va, unsigned int pages, vfree_cb_t *vfree_cb_fnc); + void __iomem *ioremap(paddr_t, size_t); static inline void iounmap(void __iomem *va) -- 2.5.0 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |