[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 COLOPre 04/13] tools/libxc: export xc_bitops.h





On 06/10/2015 11:20 PM, Ian Campbell wrote:
On Mon, 2015-06-08 at 11:43 +0800, Yang Hongyang wrote:
When we are under COLO, we will send dirty page bitmap info from
secondary to primary at every checkpoint.

... and this is a _libxl_ operation? Is that the right layer here?

For the first question, yes — this is done in the suspend callback on
the restore side. We do this in libxl because we have currently only added a
back channel on the libxl side; there is no back channel in libxc.

Having considered this further: if we do this in the libxc part, the code will be
less complex — we can drop the 4th and 9th patches of this series and also
get rid of the get_dirty_pfn() callback. Instead, we will add a patch to
add a back channel in libxc.

For the second question, I'm not sure — what's Andrew's opinion? Which
is the right layer for this operation, libxl or libxc?


  So we need to get/test
the dirty page bitmap. We just expose xc_bitops.h for libxl use.

NOTE:
   Need to make clean and rerun configure to get it compiled.

Signed-off-by: Yang Hongyang <yanghy@xxxxxxxxxxxxxx>
---
  tools/libxc/include/xc_bitops.h | 76 +++++++++++++++++++++++++++++++++++++++++
  tools/libxc/xc_bitops.h         | 76 -----------------------------------------
  2 files changed, 76 insertions(+), 76 deletions(-)
  create mode 100644 tools/libxc/include/xc_bitops.h
  delete mode 100644 tools/libxc/xc_bitops.h

diff --git a/tools/libxc/include/xc_bitops.h b/tools/libxc/include/xc_bitops.h
new file mode 100644
index 0000000..cd749f4
--- /dev/null
+++ b/tools/libxc/include/xc_bitops.h
@@ -0,0 +1,76 @@
+#ifndef XC_BITOPS_H
+#define XC_BITOPS_H 1
+
+/* bitmap operations for single threaded access */
+
+#include <stdlib.h>
+#include <string.h>
+
+#define BITS_PER_LONG (sizeof(unsigned long) * 8)
+#define ORDER_LONG (sizeof(unsigned long) == 4 ? 5 : 6)
+
+#define BITMAP_ENTRY(_nr,_bmap) ((_bmap))[(_nr)/BITS_PER_LONG]
+#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
+
+/* calculate required space for number of longs needed to hold nr_bits */
+static inline int bitmap_size(int nr_bits)
+{
+    int nr_long, nr_bytes;
+    nr_long = (nr_bits + BITS_PER_LONG - 1) >> ORDER_LONG;
+    nr_bytes = nr_long * sizeof(unsigned long);
+    return nr_bytes;
+}
+
+static inline unsigned long *bitmap_alloc(int nr_bits)
+{
+    return calloc(1, bitmap_size(nr_bits));
+}
+
+static inline void bitmap_set(unsigned long *addr, int nr_bits)
+{
+    memset(addr, 0xff, bitmap_size(nr_bits));
+}
+
+static inline void bitmap_clear(unsigned long *addr, int nr_bits)
+{
+    memset(addr, 0, bitmap_size(nr_bits));
+}
+
+static inline int test_bit(int nr, unsigned long *addr)
+{
+    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
+}
+
+static inline void clear_bit(int nr, unsigned long *addr)
+{
+    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
+}
+
+static inline void set_bit(int nr, unsigned long *addr)
+{
+    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
+}
+
+static inline int test_and_clear_bit(int nr, unsigned long *addr)
+{
+    int oldbit = test_bit(nr, addr);
+    clear_bit(nr, addr);
+    return oldbit;
+}
+
+static inline int test_and_set_bit(int nr, unsigned long *addr)
+{
+    int oldbit = test_bit(nr, addr);
+    set_bit(nr, addr);
+    return oldbit;
+}
+
+static inline void bitmap_or(unsigned long *dst, const unsigned long *other,
+                             int nr_bits)
+{
+    int i, nr_longs = (bitmap_size(nr_bits) / sizeof(unsigned long));
+    for ( i = 0; i < nr_longs; ++i )
+        dst[i] |= other[i];
+}
+
+#endif  /* XC_BITOPS_H */
diff --git a/tools/libxc/xc_bitops.h b/tools/libxc/xc_bitops.h
deleted file mode 100644
index cd749f4..0000000
--- a/tools/libxc/xc_bitops.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef XC_BITOPS_H
-#define XC_BITOPS_H 1
-
-/* bitmap operations for single threaded access */
-
-#include <stdlib.h>
-#include <string.h>
-
-#define BITS_PER_LONG (sizeof(unsigned long) * 8)
-#define ORDER_LONG (sizeof(unsigned long) == 4 ? 5 : 6)
-
-#define BITMAP_ENTRY(_nr,_bmap) ((_bmap))[(_nr)/BITS_PER_LONG]
-#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
-
-/* calculate required space for number of longs needed to hold nr_bits */
-static inline int bitmap_size(int nr_bits)
-{
-    int nr_long, nr_bytes;
-    nr_long = (nr_bits + BITS_PER_LONG - 1) >> ORDER_LONG;
-    nr_bytes = nr_long * sizeof(unsigned long);
-    return nr_bytes;
-}
-
-static inline unsigned long *bitmap_alloc(int nr_bits)
-{
-    return calloc(1, bitmap_size(nr_bits));
-}
-
-static inline void bitmap_set(unsigned long *addr, int nr_bits)
-{
-    memset(addr, 0xff, bitmap_size(nr_bits));
-}
-
-static inline void bitmap_clear(unsigned long *addr, int nr_bits)
-{
-    memset(addr, 0, bitmap_size(nr_bits));
-}
-
-static inline int test_bit(int nr, unsigned long *addr)
-{
-    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
-}
-
-static inline void clear_bit(int nr, unsigned long *addr)
-{
-    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
-}
-
-static inline void set_bit(int nr, unsigned long *addr)
-{
-    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
-}
-
-static inline int test_and_clear_bit(int nr, unsigned long *addr)
-{
-    int oldbit = test_bit(nr, addr);
-    clear_bit(nr, addr);
-    return oldbit;
-}
-
-static inline int test_and_set_bit(int nr, unsigned long *addr)
-{
-    int oldbit = test_bit(nr, addr);
-    set_bit(nr, addr);
-    return oldbit;
-}
-
-static inline void bitmap_or(unsigned long *dst, const unsigned long *other,
-                             int nr_bits)
-{
-    int i, nr_longs = (bitmap_size(nr_bits) / sizeof(unsigned long));
-    for ( i = 0; i < nr_longs; ++i )
-        dst[i] |= other[i];
-}
-
-#endif  /* XC_BITOPS_H */


.


--
Thanks,
Yang.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.