
[PATCH 3/4] xen/virtual-region: Link the list at build time



Given 3 statically initialised objects, it's easy to link the list at build
time.  There's no need to do it at runtime during boot (and with IRQs off,
no less).
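
As an aside, here's a minimal standalone sketch (not part of the patch) of
the same build-time linking pattern.  The struct item / node_a / node_b
names are made up for illustration; the point is that C's tentative
definitions let each initialiser take the address of objects defined later
in the translation unit, so the cycle can be closed with no runtime code:

#include <stdio.h>

struct list_head {
    struct list_head *next, *prev;
};

struct item {
    struct list_head list;
    const char *name;
};

/*
 * Forward (tentative) declarations, so the initialisers below can take
 * the address of objects defined further down the file.
 */
static struct list_head items;
static struct item node_a, node_b;

static struct item node_a = {
    .list = { .next = &node_b.list, .prev = &items },
    .name = "a",
};

static struct item node_b = {
    .list = { .next = &items, .prev = &node_a.list },
    .name = "b",
};

/* The head closes the cycle: items -> node_a -> node_b -> items. */
static struct list_head items = {
    .next = &node_a.list,
    .prev = &node_b.list,
};

int main(void)
{
    struct list_head *cur;

    /* Walk the list; no runtime initialisation has taken place. */
    for ( cur = items.next; cur != &items; cur = cur->next )
        printf("%s\n", ((struct item *)cur)->name); /* list is 1st member */

    return 0;
}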

As a consequence, register_virtual_region() can now move inside ifdef
CONFIG_LIVEPATCH like unregister_virtual_region().

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
CC: Bertrand Marquis <bertrand.marquis@xxxxxxx>
CC: Michal Orzel <michal.orzel@xxxxxxx>
CC: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
CC: Shawn Anastasio <sanastasio@xxxxxxxxxxxxxxxxxxxxx>
CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CC: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
---
 xen/common/virtual_region.c | 45 ++++++++++++++++++++++++-------------
 1 file changed, 30 insertions(+), 15 deletions(-)

diff --git a/xen/common/virtual_region.c b/xen/common/virtual_region.c
index eb9645daa99d..ad39bb74e15c 100644
--- a/xen/common/virtual_region.c
+++ b/xen/common/virtual_region.c
@@ -15,8 +15,19 @@ extern const struct bug_frame
     __start_bug_frames_2[], __stop_bug_frames_2[],
     __start_bug_frames_3[], __stop_bug_frames_3[];
 
+/*
+ * For the built-in regions, the doubly linked list can be constructed at
+ * build time.  Forward-declare the elements.
+ */
+static struct list_head virtual_region_list;
+static struct virtual_region core, core_init;
+
 static struct virtual_region core = {
-    .list = LIST_HEAD_INIT(core.list),
+    .list = {
+        .next = &core_init.list,
+        .prev = &virtual_region_list,
+    },
+
     .text_start = _stext,
     .text_end = _etext,
     .rodata_start = _srodata,
@@ -32,7 +43,11 @@ static struct virtual_region core = {
 
 /* Becomes irrelevant when __init sections are cleared. */
 static struct virtual_region core_init __initdata = {
-    .list = LIST_HEAD_INIT(core_init.list),
+    .list = {
+        .next = &virtual_region_list,
+        .prev = &core.list,
+    },
+
     .text_start = _sinittext,
     .text_end = _einittext,
 
@@ -50,7 +65,10 @@ static struct virtual_region core_init __initdata = {
  *
  * All readers of virtual_region_list MUST use list_for_each_entry_rcu.
  */
-static LIST_HEAD(virtual_region_list);
+static struct list_head virtual_region_list = {
+    .next = &core.list,
+    .prev = &core_init.list,
+};
 static DEFINE_SPINLOCK(virtual_region_lock);
 static DEFINE_RCU_READ_LOCK(rcu_virtual_region_lock);
 
@@ -73,15 +91,6 @@ const struct virtual_region *find_text_region(unsigned long addr)
     return region;
 }
 
-void register_virtual_region(struct virtual_region *r)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&virtual_region_lock, flags);
-    list_add_tail_rcu(&r->list, &virtual_region_list);
-    spin_unlock_irqrestore(&virtual_region_lock, flags);
-}
-
 /*
  * Suggest inline so when !CONFIG_LIVEPATCH the function is not left
  * unreachable after init code is removed.
@@ -96,6 +105,15 @@ static void inline remove_virtual_region(struct virtual_region *r)
 }
 
 #ifdef CONFIG_LIVEPATCH
+void register_virtual_region(struct virtual_region *r)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&virtual_region_lock, flags);
+    list_add_tail_rcu(&r->list, &virtual_region_list);
+    spin_unlock_irqrestore(&virtual_region_lock, flags);
+}
+
 void unregister_virtual_region(struct virtual_region *r)
 {
     remove_virtual_region(r);
@@ -155,9 +173,6 @@ void __init setup_virtual_regions(const struct exception_table_entry *start,
 {
     core_init.ex = core.ex = start;
     core_init.ex_end = core.ex_end = end;
-
-    register_virtual_region(&core_init);
-    register_virtual_region(&core);
 }
 
 /*
-- 
2.30.2
