[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 05/22] golang/xenlight: re-name Bitmap marshaling functions



From: Nick Rosbrook <rosbrookn@xxxxxxxxxxxx>

Rename the toGo function to fromC, and modify its signature accordingly.
The reason for using 'fromC' rather than 'toGo' is that it is not a good
idea to define methods on the C types. Also, add an error return type to
Bitmap's toC function.

Finally, as code cleanup, re-organize the Bitmap type's comments as per
Go conventions.

Signed-off-by: Nick Rosbrook <rosbrookn@xxxxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
--
Changes in v2:
- Use consistent variable naming for slice created from
  libxl_bitmap.
---
 tools/golang/xenlight/xenlight.go | 94 ++++++++++++++++---------------
 1 file changed, 48 insertions(+), 46 deletions(-)

diff --git a/tools/golang/xenlight/xenlight.go 
b/tools/golang/xenlight/xenlight.go
index 3edff18471..1c5e3c0cc7 100644
--- a/tools/golang/xenlight/xenlight.go
+++ b/tools/golang/xenlight/xenlight.go
@@ -212,20 +212,48 @@ type KeyValueList struct{}
 func (kvl KeyValueList) fromC(ckvl *C.libxl_key_value_list) error      { 
return nil }
 func (kvl KeyValueList) toC() (ckvl C.libxl_key_value_list, err error) { 
return }
 
-// typedef struct {
-//     uint32_t size;          /* number of bytes in map */
-//     uint8_t *map;
-// } libxl_bitmap;
-
+// Bitmap represents a libxl_bitmap.
+//
 // Implement the Go bitmap type such that the underlying data can
 // easily be copied in and out.  NB that we still have to do copies
 // both directions, because cgo runtime restrictions forbid passing to
 // a C function a pointer to a Go-allocated structure which contains a
 // pointer.
 type Bitmap struct {
+       // typedef struct {
+       //     uint32_t size;          /* number of bytes in map */
+       //     uint8_t *map;
+       // } libxl_bitmap;
        bitmap []C.uint8_t
 }
 
+func (bm *Bitmap) fromC(cbm *C.libxl_bitmap) error {
+       // Alloc a Go slice for the bytes
+       size := int(cbm.size)
+       bm.bitmap = make([]C.uint8_t, size)
+
+       // Make a slice pointing to the C array
+       cs := (*[1 << 30]C.uint8_t)(unsafe.Pointer(cbm._map))[:size:size]
+
+       // And copy the C array into the Go array
+       copy(bm.bitmap, cs)
+
+       return nil
+}
+
+func (bm *Bitmap) toC() (C.libxl_bitmap, error) {
+       var cbm C.libxl_bitmap
+
+       size := len(bm.bitmap)
+       cbm.size = C.uint32_t(size)
+       cbm._map = (*C.uint8_t)(C.malloc(C.ulong(cbm.size) * C.sizeof_uint8_t))
+       cs := (*[1 << 31]C.uint8_t)(unsafe.Pointer(cbm._map))[:size:size]
+
+       copy(cs, bm.bitmap)
+
+       return cbm, nil
+}
+
 /*
  * Types: IDL
  *
@@ -426,7 +454,7 @@ func (cci C.libxl_cpupoolinfo) toGo() (gci CpupoolInfo) {
        gci.PoolName = C.GoString(cci.pool_name)
        gci.Scheduler = Scheduler(cci.sched)
        gci.DomainCount = int(cci.n_dom)
-       gci.Cpumap = cci.cpumap.toGo()
+       gci.Cpumap.fromC(&cci.cpumap)
 
        return
 }
@@ -500,7 +528,10 @@ func (Ctx *Context) CpupoolCreate(Name string, Scheduler 
Scheduler, Cpumap Bitma
        var uuid C.libxl_uuid
        C.libxl_uuid_generate(&uuid)
 
-       cbm := Cpumap.toC()
+       cbm, err := Cpumap.toC()
+       if err != nil {
+               return
+       }
        defer C.libxl_bitmap_dispose(&cbm)
 
        ret := C.libxl_cpupool_create(Ctx.ctx, name, 
C.libxl_scheduler(Scheduler),
@@ -555,7 +586,10 @@ func (Ctx *Context) CpupoolCpuaddCpumap(Poolid uint32, 
Cpumap Bitmap) (err error
                return
        }
 
-       cbm := Cpumap.toC()
+       cbm, err := Cpumap.toC()
+       if err != nil {
+               return
+       }
        defer C.libxl_bitmap_dispose(&cbm)
 
        ret := C.libxl_cpupool_cpuadd_cpumap(Ctx.ctx, C.uint32_t(Poolid), &cbm)
@@ -591,7 +625,10 @@ func (Ctx *Context) CpupoolCpuremoveCpumap(Poolid uint32, 
Cpumap Bitmap) (err er
                return
        }
 
-       cbm := Cpumap.toC()
+       cbm, err := Cpumap.toC()
+       if err != nil {
+               return
+       }
        defer C.libxl_bitmap_dispose(&cbm)
 
        ret := C.libxl_cpupool_cpuremove_cpumap(Ctx.ctx, C.uint32_t(Poolid), 
&cbm)
@@ -714,41 +751,6 @@ func (Ctx *Context) CpupoolMakeFree(Cpumap Bitmap) (err 
error) {
  * Bitmap operations
  */
 
-// Return a Go bitmap which is a copy of the referred C bitmap.
-func (cbm C.libxl_bitmap) toGo() (gbm Bitmap) {
-       // Alloc a Go slice for the bytes
-       size := int(cbm.size)
-       gbm.bitmap = make([]C.uint8_t, size)
-
-       // Make a slice pointing to the C array
-       mapslice := (*[1 << 30]C.uint8_t)(unsafe.Pointer(cbm._map))[:size:size]
-
-       // And copy the C array into the Go array
-       copy(gbm.bitmap, mapslice)
-
-       return
-}
-
-// Must be C.libxl_bitmap_dispose'd of afterwards
-func (gbm Bitmap) toC() (cbm C.libxl_bitmap) {
-       C.libxl_bitmap_init(&cbm)
-
-       size := len(gbm.bitmap)
-       cbm._map = (*C.uint8_t)(C.malloc(C.size_t(size)))
-       cbm.size = C.uint32_t(size)
-       if cbm._map == nil {
-               panic("C.calloc failed!")
-       }
-
-       // Make a slice pointing to the C array
-       mapslice := (*[1 << 30]C.uint8_t)(unsafe.Pointer(cbm._map))[:size:size]
-
-       // And copy the Go array into the C array
-       copy(mapslice, gbm.bitmap)
-
-       return
-}
-
 func (bm *Bitmap) Test(bit int) bool {
        ubit := uint(bit)
        if bit > bm.Max() || bm.bitmap == nil {
@@ -1137,8 +1139,8 @@ func (cvci C.libxl_vcpuinfo) toGo() (gvci Vcpuinfo) {
        gvci.Blocked = bool(cvci.blocked)
        gvci.Running = bool(cvci.running)
        gvci.VCpuTime = time.Duration(cvci.vcpu_time)
-       gvci.Cpumap = cvci.cpumap.toGo()
-       gvci.CpumapSoft = cvci.cpumap_soft.toGo()
+       gvci.Cpumap.fromC(&cvci.cpumap)
+       gvci.CpumapSoft.fromC(&cvci.cpumap_soft)
 
        return
 }
-- 
2.19.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.