[Xen-devel] [PATCH v3 11/17] livepatch/x86/arm[32, 64]: Use common vmap code for applying.
Patch titled "livepatch/arm[32,64]: Modify livepatch_funcs" added
the infrastructure on ARM [32,64] to use vmap as way to
map read-only regions. On x86 we use a global register.
But there is nothing wrong with using on x86 the same method
as on ARM[32,64] - which is exactly what this patch does.
As result the common code for setting up vmap is now
done in livepatch_quiesce and there is no arch specific
arch_livepatch_quiesce anymore.
The same treatment is applied to arch_livepatch_revive albeit
we still need arch specific code for ARM (to clear the i-cache).
Interestingly the arch_livepatch_revert looks almost the same
on x86 and ARM. See 'livepatch/x86/arm[32,64]: Unify arch_livepatch_revert'
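
For reference, the mechanism both architectures now share is: take the
machine frames backing a read-only virtual address, map them at a
second (writable) virtual address with __vmap, write through that
alias, and tear the alias down with vunmap. Below is a minimal sketch
of that idea, not part of this patch - the helper and its arguments are
hypothetical, and it assumes the target bytes are backed by physically
contiguous frames (as the hypervisor image is):

    #include <xen/lib.h>
    #include <xen/mm.h>
    #include <xen/vmap.h>

    static int patch_ro_bytes(void *dest, const void *src, size_t len)
    {
        unsigned long va = (unsigned long)dest;
        unsigned int offs = va & (PAGE_SIZE - 1);
        mfn_t mfn = _mfn(virt_to_mfn(va & PAGE_MASK));
        void *alias;

        /* Map the backing frame(s) at a fresh, writable virtual address. */
        alias = __vmap(&mfn, PFN_UP(offs + len), 1, 1, PAGE_HYPERVISOR,
                       VMAP_DEFAULT);
        if ( !alias )
            return -ENOMEM;

        /* Write through the alias; the original mapping stays read-only. */
        memcpy(alias + offs, src, len);
        vunmap(alias);

        return 0;
    }

The code below does the same thing, except that it sets up the two
mappings (of the hypervisor .text and of .livepatch.funcs) once in
livepatch_quiesce, keeps them for the whole patching window, and only
unmaps them in livepatch_revive.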
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
xen/arch/arm/livepatch.c | 64 --------------------------------
xen/arch/x86/livepatch.c | 32 +++++++++-------
xen/common/livepatch.c | 81 +++++++++++++++++++++++++++++++++++++++--
xen/include/asm-arm/livepatch.h | 13 -------
xen/include/xen/livepatch.h | 13 +++++++
5 files changed, 108 insertions(+), 95 deletions(-)
diff --git a/xen/arch/arm/livepatch.c b/xen/arch/arm/livepatch.c
index 2f9ae8e61e..2debb5368c 100644
--- a/xen/arch/arm/livepatch.c
+++ b/xen/arch/arm/livepatch.c
@@ -17,57 +17,6 @@
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-struct livepatch_vmap_stash livepatch_vmap;
-
-int arch_livepatch_quiesce(struct livepatch_func *funcs, unsigned int nfuncs)
-{
- mfn_t text_mfn, rodata_mfn;
- void *vmap_addr;
- unsigned int text_order;
- unsigned long va = (unsigned long)(funcs);
- unsigned int offs = va & (PAGE_SIZE - 1);
- unsigned int size = PFN_UP(offs + nfuncs * sizeof(*funcs));
-
- if ( livepatch_vmap.text || livepatch_vmap.funcs )
- return -EINVAL;
-
- text_mfn = virt_to_mfn(_start);
- text_order = get_order_from_bytes(_end - _start);
-
- /*
- * The text section is read-only. So re-map Xen to be able to patch
- * the code.
- */
- vmap_addr = __vmap(&text_mfn, 1U << text_order, 1, 1, PAGE_HYPERVISOR,
- VMAP_DEFAULT);
-
- if ( !vmap_addr )
- {
- printk(XENLOG_ERR LIVEPATCH "Failed to setup vmap of hypervisor! (order=%u)\n",
- text_order);
- return -ENOMEM;
- }
-
- livepatch_vmap.text = vmap_addr;
- livepatch_vmap.offset = offs;
-
- rodata_mfn = virt_to_mfn(va & PAGE_MASK);
- vmap_addr = __vmap(&rodata_mfn, size, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
- if ( !vmap_addr )
- {
- printk(XENLOG_ERR LIVEPATCH "Failed to setup vmap of livepatch_funcs! (mfn=%"PRI_mfn", size=%u)\n",
- mfn_x(rodata_mfn), size);
- vunmap(livepatch_vmap.text);
- livepatch_vmap.text = NULL;
- return -ENOMEM;
- }
-
- livepatch_vmap.funcs = vmap_addr;
- livepatch_vmap.va = funcs;
-
- return 0;
-}
-
void arch_livepatch_revive(void)
{
/*
@@ -75,19 +24,6 @@ void arch_livepatch_revive(void)
* arch_livepatch_[apply|revert].
*/
invalidate_icache();
-
- if ( livepatch_vmap.text )
- vunmap(livepatch_vmap.text);
-
- livepatch_vmap.text = NULL;
-
- if ( livepatch_vmap.funcs )
- vunmap(livepatch_vmap.funcs);
-
- livepatch_vmap.funcs = NULL;
-
- livepatch_vmap.va = NULL;
- livepatch_vmap.offset = 0;
}
int arch_livepatch_verify_func(const struct livepatch_func *func)
diff --git a/xen/arch/x86/livepatch.c b/xen/arch/x86/livepatch.c
index 8522fcbd36..5273f5a176 100644
--- a/xen/arch/x86/livepatch.c
+++ b/xen/arch/x86/livepatch.c
@@ -14,18 +14,9 @@
#include <asm/nmi.h>
#include <asm/livepatch.h>
-int arch_livepatch_quiesce(struct livepatch_func *func, unsigned int nfuncs)
-{
- /* Disable WP to allow changes to read-only pages. */
- write_cr0(read_cr0() & ~X86_CR0_WP);
-
- return 0;
-}
-
void arch_livepatch_revive(void)
{
- /* Reinstate WP. */
- write_cr0(read_cr0() | X86_CR0_WP);
+ /* Nothing to do. */
}
int arch_livepatch_verify_func(const struct livepatch_func *func)
@@ -54,14 +45,21 @@ void noinline arch_livepatch_apply(struct livepatch_func *func)
{
uint8_t *old_ptr;
uint8_t insn[sizeof(func->opaque)];
- unsigned int len;
+ unsigned int i, len;
+ struct livepatch_func *f;
- old_ptr = func->old_addr;
+ /* Recompute using the vmap. */
+ old_ptr = func->old_addr - (void *)_start + livepatch_vmap.text;
len = livepatch_insn_len(func);
if ( !len )
return;
- memcpy(func->opaque, old_ptr, len);
+ /* Index in the vmap region. */
+ i = livepatch_vmap.va - func;
+ f = (struct livepatch_func *)(livepatch_vmap.funcs + livepatch_vmap.offset) + i;
+
+ memcpy(f->opaque, old_ptr, len);
+
if ( func->new_addr )
{
int32_t val;
@@ -85,7 +83,13 @@ void noinline arch_livepatch_apply(struct livepatch_func *func)
*/
void noinline arch_livepatch_revert(const struct livepatch_func *func)
{
- memcpy(func->old_addr, func->opaque, livepatch_insn_len(func));
+ uint32_t *new_ptr;
+ unsigned int len;
+
+ new_ptr = func->old_addr - (void *)_start + livepatch_vmap.text;
+
+ len = livepatch_insn_len(func);
+ memcpy(new_ptr, func->opaque, len);
}
/*
diff --git a/xen/common/livepatch.c b/xen/common/livepatch.c
index e707802279..eb7d4098fd 100644
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -104,6 +104,12 @@ static struct livepatch_work livepatch_work;
*/
static DEFINE_PER_CPU(bool_t, work_to_do);
+/*
+ * The va of the hypervisor .text region and the livepatch_funcs.
+ * We need this as the normal va are write protected.
+ */
+struct livepatch_vmap_stash livepatch_vmap;
+
static int get_name(const xen_livepatch_name_t *name, char *n)
{
if ( !name->size || name->size > XEN_LIVEPATCH_NAME_SIZE )
@@ -1055,6 +1061,73 @@ static int livepatch_list(xen_sysctl_livepatch_list_t *list)
return rc ? : idx;
}
+static int livepatch_quiesce(struct livepatch_func *funcs, unsigned int nfuncs)
+{
+ mfn_t text_mfn, rodata_mfn;
+ void *vmap_addr;
+ unsigned int text_order;
+ unsigned long va = (unsigned long)(funcs);
+ unsigned int offs = va & (PAGE_SIZE - 1);
+ unsigned int size = PFN_UP(offs + nfuncs * sizeof(*funcs));
+
+ if ( livepatch_vmap.text || livepatch_vmap.funcs )
+ return -EINVAL;
+
+ text_mfn = _mfn(virt_to_mfn(_start));
+ text_order = get_order_from_bytes(_end - _start);
+
+ /*
+ * The text section is read-only. So re-map Xen to be able to patch
+ * the code.
+ */
+ vmap_addr = __vmap(&text_mfn, 1U << text_order, 1, 1, PAGE_HYPERVISOR,
+ VMAP_DEFAULT);
+
+ if ( !vmap_addr )
+ {
+ printk(XENLOG_ERR LIVEPATCH "Failed to setup vmap of hypervisor! (order=%u)\n",
+ text_order);
+ return -ENOMEM;
+ }
+
+ livepatch_vmap.text = vmap_addr;
+ livepatch_vmap.offset = offs;
+
+ rodata_mfn = _mfn(virt_to_mfn(va & PAGE_MASK));
+ vmap_addr = __vmap(&rodata_mfn, size, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
+ if ( !vmap_addr )
+ {
+ printk(XENLOG_ERR LIVEPATCH "Failed to setup vmap of livepatch_funcs! (mfn=%"PRI_mfn", size=%u)\n",
+ mfn_x(rodata_mfn), size);
+ vunmap(livepatch_vmap.text);
+ livepatch_vmap.text = NULL;
+ return -ENOMEM;
+ }
+
+ livepatch_vmap.funcs = vmap_addr;
+ livepatch_vmap.va = funcs;
+
+ return 0;
+}
+
+static void livepatch_revive(void)
+{
+ arch_livepatch_revive();
+
+ if ( livepatch_vmap.text )
+ vunmap(livepatch_vmap.text);
+
+ livepatch_vmap.text = NULL;
+
+ if ( livepatch_vmap.funcs )
+ vunmap(livepatch_vmap.funcs);
+
+ livepatch_vmap.funcs = NULL;
+
+ livepatch_vmap.va = NULL;
+ livepatch_vmap.offset = 0;
+}
+
/*
* The following functions get the CPUs into an appropriate state and
* apply (or revert) each of the payload's functions. This is needed
@@ -1069,7 +1142,7 @@ static int apply_payload(struct payload *data)
printk(XENLOG_INFO LIVEPATCH "%s: Applying %u functions\n",
data->name, data->nfuncs);
- rc = arch_livepatch_quiesce(data->funcs, data->nfuncs);
+ rc = livepatch_quiesce(data->funcs, data->nfuncs);
if ( rc )
{
printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
@@ -1091,7 +1164,7 @@ static int apply_payload(struct payload *data)
for ( i = 0; i < data->nfuncs; i++ )
arch_livepatch_apply(&data->funcs[i]);
- arch_livepatch_revive();
+ livepatch_revive();
/*
* We need RCU variant (which has barriers) in case we crash here.
@@ -1110,7 +1183,7 @@ static int revert_payload(struct payload *data)
printk(XENLOG_INFO LIVEPATCH "%s: Reverting\n", data->name);
- rc = arch_livepatch_quiesce(data->funcs, data->nfuncs);
+ rc = livepatch_quiesce(data->funcs, data->nfuncs);
if ( rc )
{
printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
@@ -1132,7 +1205,7 @@ static int revert_payload(struct payload *data)
ASSERT(!local_irq_is_enabled());
- arch_livepatch_revive();
+ livepatch_revive();
/*
* We need RCU variant (which has barriers) in case we crash here.
diff --git a/xen/include/asm-arm/livepatch.h b/xen/include/asm-arm/livepatch.h
index e030aedced..1d746161a9 100644
--- a/xen/include/asm-arm/livepatch.h
+++ b/xen/include/asm-arm/livepatch.h
@@ -11,19 +11,6 @@
/* On ARM32,64 instructions are always 4 bytes long. */
#define ARCH_PATCH_INSN_SIZE 4
-/*
- * The va of the hypervisor .text region and the livepatch_funcs.
- * We need this as the normal va are write protected.
- */
-struct livepatch_vmap_stash {
- void *text; /* vmap of hypervisor code. */
- void *funcs; /* vmap of the .livepatch.funcs. */
- unsigned int offset; /* Offset in 'funcs'. */
- struct livepatch_func *va; /* The original va. */
-};
-
-extern struct livepatch_vmap_stash livepatch_vmap;
-
/* These ranges are only for unconditional branches. */
#ifdef CONFIG_ARM_32
/* ARM32: A4.3 IN ARM DDI 0406C.c - we are using only ARM instructions in Xen.*/
diff --git a/xen/include/xen/livepatch.h b/xen/include/xen/livepatch.h
index a97afb92f9..1659ffcdf0 100644
--- a/xen/include/xen/livepatch.h
+++ b/xen/include/xen/livepatch.h
@@ -100,6 +100,19 @@ static inline int livepatch_verify_distance(const struct livepatch_func *func)
return 0;
}
+
+/*
+ * The va of the hypervisor .text region and the livepatch_funcs.
+ * We need this as the normal va are write protected.
+ */
+struct livepatch_vmap_stash {
+ void *text; /* vmap of hypervisor code. */
+ void *funcs; /* vmap of the .livepatch.funcs. */
+ unsigned int offset; /* Offset in 'funcs'. */
+ struct livepatch_func *va; /* The original va. */
+};
+
+extern struct livepatch_vmap_stash livepatch_vmap;
/*
* These functions are called around the critical region patching live code,
* for an architecture to take make appropratie global state adjustments.
--
2.13.3
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel