[xen stable-4.17] xen/livepatch: register livepatch regions when loaded
commit b11917de0cd261a878beaf50c18a689bde0b2f50
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Tue Mar 5 11:59:26 2024 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Mar 5 11:59:26 2024 +0100

    xen/livepatch: register livepatch regions when loaded

    Currently livepatch regions are registered as virtual regions only after
    the livepatch has been applied.

    This can lead to issues when using the pre-apply or post-revert hooks, as
    at that point the livepatch is not in the virtual regions list.  If a
    livepatch pre-apply hook contains a WARN() it would trigger an hypervisor
    crash, as the code to handle the bug frame won't be able to find the
    instruction pointer that triggered the #UD in any of the registered
    virtual regions, and hence crash.

    Fix this by adding the livepatch payloads as virtual regions as soon as
    loaded, and only remove them once the payload is unloaded.

    This requires some changes to the virtual regions code, as the removal of
    the virtual regions is no longer done in stop machine context, and hence
    an RCU barrier is added in order to make sure there are no users of the
    virtual region after it's been removed from the list.

    Fixes: 8313c864fa95 ('livepatch: Implement pre-|post- apply|revert hooks')
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
    master commit: a57b4074ab39bee78b6c116277f0a9963bd8e687
    master date: 2024-02-28 16:57:25 +0000
---
 xen/common/livepatch.c      |  4 ++--
 xen/common/virtual_region.c | 44 +++++++++++++++++---------------------------
 2 files changed, 19 insertions(+), 29 deletions(-)

diff --git a/xen/common/livepatch.c b/xen/common/livepatch.c
index c2ae84d18b..537e9f33e4 100644
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -1015,6 +1015,7 @@ static int build_symbol_table(struct payload *payload,
 static void free_payload(struct payload *data)
 {
     ASSERT(spin_is_locked(&payload_lock));
+    unregister_virtual_region(&data->region);
     list_del(&data->list);
     payload_cnt--;
     payload_version++;
@@ -1114,6 +1115,7 @@ static int livepatch_upload(struct xen_sysctl_livepatch_upload *upload)
         INIT_LIST_HEAD(&data->list);
         INIT_LIST_HEAD(&data->applied_list);
 
+        register_virtual_region(&data->region);
         list_add_tail(&data->list, &payload_list);
         payload_cnt++;
         payload_version++;
@@ -1330,7 +1332,6 @@ static inline void apply_payload_tail(struct payload *data)
      * The applied_list is iterated by the trap code.
      */
     list_add_tail_rcu(&data->applied_list, &applied_list);
-    register_virtual_region(&data->region);
 
     data->state = LIVEPATCH_STATE_APPLIED;
 }
@@ -1376,7 +1377,6 @@ static inline void revert_payload_tail(struct payload *data)
      * The applied_list is iterated by the trap code.
      */
     list_del_rcu(&data->applied_list);
-    unregister_virtual_region(&data->region);
 
     data->reverted = true;
     data->state = LIVEPATCH_STATE_CHECKED;
diff --git a/xen/common/virtual_region.c b/xen/common/virtual_region.c
index 5f89703f51..9f12c30efe 100644
--- a/xen/common/virtual_region.c
+++ b/xen/common/virtual_region.c
@@ -23,14 +23,8 @@ static struct virtual_region core_init __initdata = {
 };
 
 /*
- * RCU locking. Additions are done either at startup (when there is only
- * one CPU) or when all CPUs are running without IRQs.
- *
- * Deletions are bit tricky. We do it when Live Patch (all CPUs running
- * without IRQs) or during bootup (when clearing the init).
- *
- * Hence we use list_del_rcu (which sports an memory fence) and a spinlock
- * on deletion.
+ * RCU locking. Modifications to the list must be done in exclusive mode, and
+ * hence need to hold the spinlock.
  *
  * All readers of virtual_region_list MUST use list_for_each_entry_rcu.
  */
@@ -58,41 +52,36 @@ const struct virtual_region *find_text_region(unsigned long addr)
 
 void register_virtual_region(struct virtual_region *r)
 {
-    ASSERT(!local_irq_is_enabled());
+    unsigned long flags;
 
+    spin_lock_irqsave(&virtual_region_lock, flags);
     list_add_tail_rcu(&r->list, &virtual_region_list);
+    spin_unlock_irqrestore(&virtual_region_lock, flags);
 }
 
-static void remove_virtual_region(struct virtual_region *r)
+/*
+ * Suggest inline so when !CONFIG_LIVEPATCH the function is not left
+ * unreachable after init code is removed.
+ */
+static void inline remove_virtual_region(struct virtual_region *r)
 {
     unsigned long flags;
 
     spin_lock_irqsave(&virtual_region_lock, flags);
     list_del_rcu(&r->list);
     spin_unlock_irqrestore(&virtual_region_lock, flags);
-    /*
-     * We do not need to invoke call_rcu.
-     *
-     * This is due to the fact that on the deletion we have made sure
-     * to use spinlocks (to guard against somebody else calling
-     * unregister_virtual_region) and list_deletion spiced with
-     * memory barrier.
-     *
-     * That protects us from corrupting the list as the readers all
-     * use list_for_each_entry_rcu which is safe against concurrent
-     * deletions.
-     */
 }
 
+#ifdef CONFIG_LIVEPATCH
 void unregister_virtual_region(struct virtual_region *r)
 {
-    /* Expected to be called from Live Patch - which has IRQs disabled. */
-    ASSERT(!local_irq_is_enabled());
-
     remove_virtual_region(r);
+
+    /* Assert that no CPU might be using the removed region. */
+    rcu_barrier();
 }
 
-#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_X86)
+#ifdef CONFIG_X86
 void relax_virtual_region_perms(void)
 {
     const struct virtual_region *region;
@@ -116,7 +105,8 @@ void tighten_virtual_region_perms(void)
                                  PAGE_HYPERVISOR_RX);
     rcu_read_unlock(&rcu_virtual_region_lock);
 }
-#endif
+#endif /* CONFIG_X86 */
+#endif /* CONFIG_LIVEPATCH */
 
 void __init unregister_init_virtual_region(void)
 {
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.17
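[Editor's note: for context, here is a minimal sketch of the address-in-region lookup the commit message refers to. It is plain C with hypothetical names (struct text_region, lookup_text_region), not Xen's actual virtual_region API, and it omits the RCU protection the real list walk uses; it only illustrates why a WARN() firing from a payload whose text range was never registered leaves the bug-frame handler with no matching region, which is the crash the fix addresses.]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a registered text range (core or livepatch). */
struct text_region {
    uintptr_t start, end;               /* [start, end) of executable text */
    const char *name;
    const struct text_region *next;
};

/*
 * Return the region covering @addr, or NULL if none is registered.
 * In Xen the equivalent walk is done under RCU (list_for_each_entry_rcu)
 * so it stays safe against concurrent unregistration.
 */
static const struct text_region *
lookup_text_region(const struct text_region *head, uintptr_t addr)
{
    for ( ; head; head = head->next )
        if ( addr >= head->start && addr < head->end )
            return head;

    return NULL;
}

int main(void)
{
    /* Core hypervisor text plus one livepatch payload, both registered. */
    struct text_region payload = { 0x200000, 0x201000, "livepatch", NULL };
    struct text_region core    = { 0x100000, 0x180000, "core", &payload };

    /* A WARN() in a pre-apply hook faults at an address inside the payload. */
    uintptr_t fault_ip = 0x200040;

    const struct text_region *r = lookup_text_region(&core, fault_ip);

    /*
     * Before the fix the payload region is only registered at apply time,
     * so this lookup would return NULL on the pre-apply path and the real
     * code would crash instead of printing a warning.
     */
    printf("%s\n", r ? r->name : "not found -> hypervisor crash");

    return 0;
}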