
[xen staging] xen/virtual-region: Link the list at build time



commit 038ba305fda74c42707cf1ccb6d1d171d8433477
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Mar 15 17:18:42 2024 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Apr 11 13:23:08 2024 +0100

    xen/virtual-region: Link the list at build time
    
    Given 3 statically initialised objects, it's easy to link the list at
    build time.  There's no need to do it at runtime during boot (and with
    IRQs off, even).
    
    As a consequence, register_virtual_region() can now move inside ifdef
    CONFIG_LIVEPATCH like unregister_virtual_region().
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Michal Orzel <michal.orzel@xxxxxxx>
---
 xen/common/virtual_region.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)
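
A note on the construction: it leans on C's tentative definitions.  The head
and both regions are declared first so their addresses can appear in one
another's initialisers, and the subsequent definitions with designated
initialisers close the circle.  Below is a minimal standalone sketch of the
same technique, with a simplified list_head and a hypothetical node/name pair
standing in for Xen's actual types (illustration only, not Xen source):

    #include <stdio.h>

    /* Simplified stand-in for Xen's struct list_head. */
    struct list_head {
        struct list_head *next, *prev;
    };

    struct node {
        struct list_head list;
        const char *name;
    };

    /*
     * Tentative definitions: make the addresses of all three objects
     * visible before any of them is initialised, as the patch does for
     * virtual_region_list, core and core_init.
     */
    static struct list_head head;
    static struct node core, core_init;

    /* Build-time circle: head -> core -> core_init -> head. */
    static struct node core = {
        .list = { .next = &core_init.list, .prev = &head },
        .name = "core",
    };

    static struct node core_init = {
        .list = { .next = &head, .prev = &core.list },
        .name = "core_init",
    };

    static struct list_head head = {
        .next = &core.list, .prev = &core_init.list,
    };

    int main(void)
    {
        struct list_head *p;

        /* Walk the list: prints "core" then "core_init", with no runtime
         * registration having taken place. */
        for ( p = head.next; p != &head; p = p->next )
            printf("%s\n", ((struct node *)p)->name);

        return 0;
    }

The forward declarations are what make the mutual references legal; the same
ordering is why the patch declares virtual_region_list, core and core_init
before the LIST_ENTRY_*() initialisers that take their addresses.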

diff --git a/xen/common/virtual_region.c b/xen/common/virtual_region.c
index 7d8bdeb612..fe4f53d86b 100644
--- a/xen/common/virtual_region.c
+++ b/xen/common/virtual_region.c
@@ -15,8 +15,19 @@ extern const struct bug_frame
     __start_bug_frames_2[], __stop_bug_frames_2[],
     __start_bug_frames_3[], __stop_bug_frames_3[];
 
+/*
+ * For the built-in regions, the double linked list can be constructed at
+ * build time.  Forward-declare the elements and their initialisers.
+ */
+static struct list_head virtual_region_list;
+static struct virtual_region core, core_init;
+
+#define LIST_ENTRY_HEAD() { .next = &core.list,           .prev = &core_init.list }
+#define LIST_ENTRY_CORE() { .next = &core_init.list,      .prev = &virtual_region_list }
+#define LIST_ENTRY_INIT() { .next = &virtual_region_list, .prev = &core.list }
+
 static struct virtual_region core = {
-    .list = LIST_HEAD_INIT(core.list),
+    .list = LIST_ENTRY_CORE(),
     .text_start = _stext,
     .text_end = _etext,
     .rodata_start = _srodata,
@@ -32,7 +43,7 @@ static struct virtual_region core = {
 
 /* Becomes irrelevant when __init sections are cleared. */
 static struct virtual_region core_init __initdata = {
-    .list = LIST_HEAD_INIT(core_init.list),
+    .list = LIST_ENTRY_INIT(),
     .text_start = _sinittext,
     .text_end = _einittext,
 
@@ -50,7 +61,7 @@ static struct virtual_region core_init __initdata = {
  *
  * All readers of virtual_region_list MUST use list_for_each_entry_rcu.
  */
-static LIST_HEAD(virtual_region_list);
+static struct list_head virtual_region_list = LIST_ENTRY_HEAD();
 static DEFINE_SPINLOCK(virtual_region_lock);
 static DEFINE_RCU_READ_LOCK(rcu_virtual_region_lock);
 
@@ -73,15 +84,6 @@ const struct virtual_region *find_text_region(unsigned long addr)
     return region;
 }
 
-void register_virtual_region(struct virtual_region *r)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&virtual_region_lock, flags);
-    list_add_tail_rcu(&r->list, &virtual_region_list);
-    spin_unlock_irqrestore(&virtual_region_lock, flags);
-}
-
 /*
  * Suggest inline so when !CONFIG_LIVEPATCH the function is not left
  * unreachable after init code is removed.
@@ -96,6 +98,15 @@ static void inline remove_virtual_region(struct virtual_region *r)
 }
 
 #ifdef CONFIG_LIVEPATCH
+void register_virtual_region(struct virtual_region *r)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&virtual_region_lock, flags);
+    list_add_tail_rcu(&r->list, &virtual_region_list);
+    spin_unlock_irqrestore(&virtual_region_lock, flags);
+}
+
 void unregister_virtual_region(struct virtual_region *r)
 {
     remove_virtual_region(r);
@@ -155,9 +166,6 @@ void __init setup_virtual_regions(const struct exception_table_entry *start,
 {
     core_init.ex = core.ex = start;
     core_init.ex_end = core.ex_end = end;
-
-    register_virtual_region(&core_init);
-    register_virtual_region(&core);
 }
 
 /*
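
With registration gone from setup_virtual_regions(), the only list mutation
left in non-livepatch builds is the teardown side: core_init is unlinked once
the __init sections are discarded (per the "Becomes irrelevant" comment in
the hunk above).  A self-contained sketch of that unlink on the same
simplified list type; list_del_sketch is an invented name, and the real
remove_virtual_region() additionally takes virtual_region_lock and uses the
RCU-aware list primitives:

    #include <assert.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    /* Build-time circle head -> node -> head, mirroring the patch on a
     * smaller scale. */
    static struct list_head head, node;
    static struct list_head head = { .next = &node, .prev = &node };
    static struct list_head node = { .next = &head, .prev = &head };

    /* Open-coded doubly-linked unlink, as list_del() would do. */
    static void list_del_sketch(struct list_head *e)
    {
        e->next->prev = e->prev;
        e->prev->next = e->next;
    }

    int main(void)
    {
        list_del_sketch(&node);

        /* The list is empty again; walking it visits nothing. */
        assert(head.next == &head && head.prev == &head);

        return 0;
    }

Registration itself is now livepatch-only, keeping the spin_lock_irqsave() /
list_add_tail_rcu() pairing so writers serialise against each other while
find_text_region()'s RCU readers stay lock-free.
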
--
generated by git-patchbot for /home/xen/git/xen.git#staging