[Xen-devel] [PATCH RFC 23/44] x86/smp: Allocate percpu resources for map_domain_page() to use
The mapcache infrastructure needs some linear address space with which to make
temporary mappings.

_alter_percpu_mappings() is updated to support allocating an L1t, and
cpu_smpboot_alloc_common() is updated to allocate an L1t for mapcache purposes
and to map that L1t into the linear address space so it can be modified easily.
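
As an illustrative sketch (not part of this patch), a later map_domain_page()
implementation could use these resources roughly as follows.  The helper name
and the choice of slot are hypothetical; the constants and flags are the ones
used in this series:

/* Sketch only: create a temporary per-cpu mapping of one frame. */
static void *mapcache_map_one(struct page_info *pg, unsigned int slot)
{
    /* The mapcache L1t is itself mapped at PERCPU_MAPCACHE_L1ES ... */
    l1_pgentry_t *l1es = (l1_pgentry_t *)PERCPU_MAPCACHE_L1ES;
    unsigned long linear = PERCPU_MAPCACHE_START + (unsigned long)slot * PAGE_SIZE;

    ASSERT(linear < PERCPU_MAPCACHE_END);

    /* ... so creating a mapping is one l1e write plus a local TLB flush. */
    l1es[l1_table_offset(linear)] = l1e_from_page(pg, __PAGE_HYPERVISOR_RW);
    flush_tlb_one_local(linear);

    return (void *)linear;
}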
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/smpboot.c | 27 ++++++++++++++++++++++++++-
xen/include/asm-x86/config.h | 4 ++++
2 files changed, 30 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 7f02dd8..6a5f18a 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -645,6 +645,7 @@ void cpu_exit_clear(unsigned int cpu)
*/
enum percpu_alter_action {
PERCPU_MAP, /* Map existing frame: page and flags are input parameters. */
+ PERCPU_ALLOC_L1T, /* Allocate an L1 table. Optionally returned via *page. */
};
static int _alter_percpu_mappings(
unsigned int cpu, unsigned long linear,
@@ -694,7 +695,10 @@ static int _alter_percpu_mappings(
l2t[l2_table_offset(linear)] = l2e_from_page(pg, __PAGE_HYPERVISOR);
}
else
- l1t = map_l1t_from_l2e(l2t[l2_table_offset(linear)]);
+ {
+ pg = l2e_get_page(l2t[l2_table_offset(linear)]);
+ l1t = __map_domain_page(pg);
+ }
switch ( action )
{
@@ -703,6 +707,11 @@ static int _alter_percpu_mappings(
l1t[l1_table_offset(linear)] = l1e_from_page(*page, flags);
break;
+ case PERCPU_ALLOC_L1T:
+ if ( page )
+ *page = pg;
+ break;
+
default:
ASSERT_UNREACHABLE();
rc = -EINVAL;
@@ -727,6 +736,12 @@ static int percpu_map_frame(unsigned int cpu, unsigned long linear,
return _alter_percpu_mappings(cpu, linear, PERCPU_MAP, &page, flags);
}
+static int percpu_alloc_l1t(unsigned int cpu, unsigned long linear,
+ struct page_info **page)
+{
+ return _alter_percpu_mappings(cpu, linear, PERCPU_ALLOC_L1T, page, 0);
+}
+
/* Allocate data common between the BSP and APs. */
static int cpu_smpboot_alloc_common(unsigned int cpu)
{
@@ -770,6 +785,16 @@ static int cpu_smpboot_alloc_common(unsigned int cpu)
if ( rc )
goto out;
+ /* Allocate space for the mapcache L1e's... */
+ rc = percpu_alloc_l1t(cpu, PERCPU_MAPCACHE_START, &pg);
+ if ( rc )
+ goto out;
+
+ /* ... and map the L1t so it can be used. */
+ rc = percpu_map_frame(cpu, PERCPU_MAPCACHE_L1ES, pg, PAGE_HYPERVISOR_RW);
+ if ( rc )
+ goto out;
+
rc = 0; /* Success */
out:
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index cddfc4e..a95f8c8 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -296,6 +296,10 @@ extern unsigned long xen_phys_start;
/* Mappings in the percpu area: */
#define PERCPU_IDT_MAPPING (PERCPU_LINEAR_START + KB(4))
+#define PERCPU_MAPCACHE_L1ES (PERCPU_LINEAR_START + MB(2) + KB(12))
+#define PERCPU_MAPCACHE_START (PERCPU_LINEAR_START + MB(4))
+#define PERCPU_MAPCACHE_END (PERCPU_MAPCACHE_START + MB(2))
+
/* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
#define GDT_LDT_VCPU_SHIFT 5
#define GDT_LDT_VCPU_VA_SHIFT (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT)
--
2.1.4
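
For reference, the new config.h constants make the mapcache window exactly one
L1 table's worth of mappings.  The checks below are illustrative only (not part
of the patch), assume PERCPU_LINEAR_START is at least 2MB-aligned, and use a
hypothetical function name:

static void __init mapcache_layout_checks(void)
{
    /* The 2MB window provides MB(2) / PAGE_SIZE == 512 slots ... */
    BUILD_BUG_ON(PERCPU_MAPCACHE_END - PERCPU_MAPCACHE_START != MB(2));
    BUILD_BUG_ON((PERCPU_MAPCACHE_END - PERCPU_MAPCACHE_START) >> PAGE_SHIFT !=
                 L1_PAGETABLE_ENTRIES);

    /* ... and, being 2MB-aligned, is covered by a single L1t, all of whose
     * entries are reachable via the page mapped at PERCPU_MAPCACHE_L1ES. */
    BUILD_BUG_ON(PERCPU_MAPCACHE_START & (MB(2) - 1));
}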
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel