
[Minios-devel] [UNIKRAFT PATCHv6 17/37] plat/kvm: Add exception table for Arm64


  • To: <minios-devel@xxxxxxxxxxxxxxxxxxxx>, <simon.kuenzer@xxxxxxxxx>
  • From: Wei Chen <wei.chen@xxxxxxx>
  • Date: Fri, 14 Sep 2018 07:56:50 +0000
  • Cc: Kaly.Xin@xxxxxxx, nd@xxxxxxx, wei.chen@xxxxxxx
  • Delivery-date: Fri, 14 Sep 2018 07:58:06 +0000
  • List-id: Mini-os development list <minios-devel.lists.xenproject.org>

From: Wei Chen <Wei.Chen@xxxxxxx>

On Arm64, we need a SYNC exception handler to handle synchronous
exceptions such as NULL pointer accesses, and an IRQ exception handler
to handle IRQs such as the timer IRQ. Both of these exception types
are handled in EL1; all other exceptions are treated as invalid.

However, since we haven't enabled the GIC yet, the IRQ exception is
also treated as invalid at this stage.

Signed-off-by: Wei Chen <Wei.Chen@xxxxxxx>
---
 include/uk/arch/arm/arm64/lcpu.h |  14 +++
 plat/kvm/Makefile.uk             |   1 +
 plat/kvm/arm/entry64.S           |   4 +
 plat/kvm/arm/exceptions.S        | 189 +++++++++++++++++++++++++++++++
 4 files changed, 208 insertions(+)
 create mode 100644 plat/kvm/arm/exceptions.S
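
For reference, the frame that ENTER_TRAP builds in exceptions.S is
laid out as follows (byte offsets from sp, derived from the stp/str
lines below):

    [  0..239]  x0 ~ x29            (stp pairs at 16 * 0 .. 16 * 14)
    [240..255]  x30 (lr), elr_el1   (16 * 15)
    [256..271]  spsr_el1, esr_el1   (16 * 16)
    [272..279]  saved sp / sp_el0   (16 * 17)

    Total: 280 bytes = __TRAP_STACK_SIZE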

diff --git a/include/uk/arch/arm/arm64/lcpu.h b/include/uk/arch/arm/arm64/lcpu.h
index 1acdd2b..3e1fd9b 100644
--- a/include/uk/arch/arm/arm64/lcpu.h
+++ b/include/uk/arch/arm/arm64/lcpu.h
@@ -30,6 +30,18 @@
 #error Do not include this header directly
 #endif
 
+#ifdef __ASSEMBLY__
+/*
+ * Stack size to save general purpose registers and essential system
+ * registers: 8 * (30 + lr + elr_el1 + spsr_el1 + esr_el1) = 272.
+ * For exceptions taken from EL0, we also have to save sp_el0, so
+ * __TRAP_STACK_SIZE has to be 272 + 8 = 280.
+ *
+ * TODO: Calculate this size automatically later.
+ */
+#define __TRAP_STACK_SIZE 280
+#else
+/* Any change to this structure must be reflected in __TRAP_STACK_SIZE above */
 struct __regs {
        /* Generic Purpose registers, from x0 ~ x29 */
        unsigned long x[30];
@@ -87,3 +99,5 @@ struct __regs {
 #ifndef wmb
 #define wmb()   dsb(st) /* Full system memory barrier store */
 #endif
+
+#endif /* __ASSEMBLY__ */
diff --git a/plat/kvm/Makefile.uk b/plat/kvm/Makefile.uk
index c3b2f93..a298f8c 100644
--- a/plat/kvm/Makefile.uk
+++ b/plat/kvm/Makefile.uk
@@ -58,6 +58,7 @@ LIBKVMPLAT_SRCS-$(CONFIG_ARCH_ARM_64) += $(UK_PLAT_COMMON_BASE)/arm/cache64.S|co
 LIBKVMPLAT_SRCS-$(CONFIG_ARCH_ARM_64) += $(UK_PLAT_COMMON_BASE)/arm/time.c|common
 LIBKVMPLAT_SRCS-$(CONFIG_ARCH_ARM_64) += $(UK_PLAT_COMMON_BASE)/arm/traps.c|common
 LIBKVMPLAT_SRCS-$(CONFIG_ARCH_ARM_64) += $(LIBKVMPLAT_BASE)/arm/entry64.S
+LIBKVMPLAT_SRCS-$(CONFIG_ARCH_ARM_64) += $(LIBKVMPLAT_BASE)/arm/exceptions.S
 LIBKVMPLAT_SRCS-$(CONFIG_ARCH_ARM_64) += $(LIBKVMPLAT_BASE)/arm/setup.c
 LIBKVMPLAT_SRCS-$(CONFIG_ARCH_ARM_64) += $(LIBKVMPLAT_BASE)/arm/lcpu.c
 LIBKVMPLAT_SRCS-$(CONFIG_ARCH_ARM_64) += $(LIBKVMPLAT_BASE)/arm/intctrl.c
diff --git a/plat/kvm/arm/entry64.S b/plat/kvm/arm/entry64.S
index e723caf..2aecbec 100644
--- a/plat/kvm/arm/entry64.S
+++ b/plat/kvm/arm/entry64.S
@@ -63,6 +63,10 @@ ENTRY(_libkvmplat_entry)
 
        mov sp, x27
 
+       /* Set up the exception vector table address before enabling the MMU */
+       ldr x29, =vector_table
+       msr VBAR_EL1, x29
+
        /* Load dtb address to x0 as a parameter */
        ldr x0, =_dtb
        b _libkvmplat_start
diff --git a/plat/kvm/arm/exceptions.S b/plat/kvm/arm/exceptions.S
new file mode 100644
index 0000000..af039d8
--- /dev/null
+++ b/plat/kvm/arm/exceptions.S
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: ISC */
+/*-
+ *
+ * Copyright (c) 2014 Andrew Turner, All rights reserved.
+ * Copyright (c) 2018 Arm Ltd., All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <uk/arch/lcpu.h>
+#include <uk/asm.h>
+
+.macro ENTER_TRAP, el
+       sub  sp, sp, #__TRAP_STACK_SIZE
+
+       /* Save general purpose registers */
+       stp x0, x1, [sp, #16 * 0]
+       stp x2, x3, [sp, #16 * 1]
+       stp x4, x5, [sp, #16 * 2]
+       stp x6, x7, [sp, #16 * 3]
+       stp x8, x9, [sp, #16 * 4]
+       stp x10, x11, [sp, #16 * 5]
+       stp x12, x13, [sp, #16 * 6]
+       stp x14, x15, [sp, #16 * 7]
+       stp x16, x17, [sp, #16 * 8]
+       stp x18, x19, [sp, #16 * 9]
+       stp x20, x21, [sp, #16 * 10]
+       stp x22, x23, [sp, #16 * 11]
+       stp x24, x25, [sp, #16 * 12]
+       stp x26, x27, [sp, #16 * 13]
+       stp x28, x29, [sp, #16 * 14]
+
+       /* Save LR and exception PC */
+       mrs x21, elr_el1
+       stp x30, x21, [sp, #16 * 15]
+
+       /* Save pstate and exception status register */
+       mrs x22, spsr_el1
+       mrs x23, esr_el1
+       stp x22, x23, [sp, #16 * 16]
+
+       /* Save the stack pointer of the interrupted context */
+.if \el == 0
+       mrs x18, sp_el0
+.else
+       add x18, sp, #__TRAP_STACK_SIZE
+.endif
+       str x18, [sp, #16 * 17]
+.endm
+
+.macro LEAVE_TRAP, el
+       /* Mask IRQs so the register restore cannot be interrupted */
+       msr daifset, #2
+
+       /* Restore the saved stack pointer (sp_el0 for EL0 exceptions) */
+       ldr x18, [sp, #16 * 17]
+.if \el == 0
+       msr sp_el0, x18
+.endif
+
+       /* Restore pstate and exception status register */
+       ldp x22, x23, [sp, #16 * 16]
+       msr spsr_el1, x22
+       msr esr_el1, x23
+
+       /* Restore LR and exception PC */
+       ldp x30, x21, [sp, #16 * 15]
+       msr elr_el1, x21
+
+       /* Restore general purpose registers */
+       ldp x28, x29, [sp, #16 * 14]
+       ldp x26, x27, [sp, #16 * 13]
+       ldp x24, x25, [sp, #16 * 12]
+       ldp x22, x23, [sp, #16 * 11]
+       ldp x20, x21, [sp, #16 * 10]
+       ldp x18, x19, [sp, #16 * 9]
+       ldp x16, x17, [sp, #16 * 8]
+       ldp x14, x15, [sp, #16 * 7]
+       ldp x12, x13, [sp, #16 * 6]
+       ldp x10, x11, [sp, #16 * 5]
+       ldp x8, x9, [sp, #16 * 4]
+       ldp x6, x7, [sp, #16 * 3]
+       ldp x4, x5, [sp, #16 * 2]
+       ldp x2, x3, [sp, #16 * 1]
+       ldp x0, x1, [sp, #16 * 0]
+
+       /* Unwind the trap frame allocated by ENTER_TRAP */
+       add sp, sp, #__TRAP_STACK_SIZE
+       eret
+.endm
+
+/*
+ * Most AArch64 SoCs use a 64-byte cache line. Aligning the
+ * exception handlers to 64 bytes benefits the cache hit
+ * rate of the handlers.
+ */
+.align 6
+el1_sync:
+       ENTER_TRAP 1
+       mov x0, sp
+       mrs x1, far_el1
+       bl trap_el1_sync
+       LEAVE_TRAP 1
+
+/* Bad Abort numbers */
+#define BAD_SYNC  0
+#define BAD_IRQ   1
+#define BAD_FIQ   2
+#define BAD_ERROR 3
+
+#define el_invalid(name, reason, el)   \
+.align 6;                              \
+name##_invalid:                                \
+       ENTER_TRAP  el;                 \
+       mov x0, sp;                     \
+       mov x1, el;                     \
+       mov x2, #(reason);              \
+       mrs x3, far_el1;                \
+       b   invalid_trap_handler;       \
ENDPROC(name##_invalid);
+
+el_invalid(el1_sync, BAD_SYNC, 1);
+el_invalid(el0_sync, BAD_SYNC, 0);
+el_invalid(el1_irq, BAD_IRQ, 1);
+el_invalid(el0_irq, BAD_IRQ, 0);
+el_invalid(el1_fiq, BAD_FIQ, 1);
+el_invalid(el0_fiq, BAD_FIQ, 0);
+el_invalid(el1_error, BAD_ERROR, 1);
+el_invalid(el0_error, BAD_ERROR, 0);
+
+/*
+ * Macro for an exception vector entry.
+ */
+.macro vector_entry label
+.align  7
+       b \label
+.endm
+
+/*
+ * Exception vectors.
+ *
+ * The AArch64 unikernel runs in EL1 using the SP_EL1 stack. The vectors
+ * have no fixed address, only a 2^11 alignment (16 entries x 128 B = 2 KiB).
+ */
+.align  11
+ENTRY(vector_table)
+       /* Current Exception level with SP_EL0 */
+       vector_entry el1_sync_invalid   /* Synchronous EL1t       */
+       vector_entry el1_irq_invalid    /* IRQ EL1t               */
+       vector_entry el1_fiq_invalid    /* FIQ EL1t               */
+       vector_entry el1_error_invalid  /* Error EL1t             */
+
+       /* Current Exception level with SP_EL1 */
+       vector_entry el1_sync           /* Synchronous EL1h       */
+       vector_entry el1_irq_invalid    /* IRQ EL1h               */
+       vector_entry el1_fiq_invalid    /* FIQ EL1h               */
+       vector_entry el1_error_invalid  /* Error EL1h             */
+
+       /* Lower Exception level using AArch64 */
+       vector_entry el0_sync_invalid   /* Synchronous 64-bit EL0 */
+       vector_entry el0_irq_invalid    /* IRQ 64-bit EL0         */
+       vector_entry el0_fiq_invalid    /* FIQ 64-bit EL0         */
+       vector_entry el0_error_invalid  /* Error 64-bit EL0       */
+
+       /* Lower Exception level using AArch32 */
+       vector_entry el0_sync_invalid   /* Synchronous 32-bit EL0 */
+       vector_entry el0_irq_invalid    /* IRQ 32-bit EL0         */
+       vector_entry el0_fiq_invalid    /* FIQ 32-bit EL0         */
+       vector_entry el0_error_invalid  /* Error 32-bit EL0       */
+END(vector_table)
-- 
2.17.1
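
As a possible follow-up to the TODO in lcpu.h above, the hand-counted
trap stack size could be guarded by a compile-time check on the C
side. A minimal sketch, assuming struct __regs is meant to mirror the
trap frame exactly; since this patch defines __TRAP_STACK_SIZE only
under __ASSEMBLY__, the literal 280 is repeated here (moving the
define out of the #ifdef would let the assert reference it directly):

    /* Hypothetical build-time guard, not part of this patch */
    #include <uk/arch/lcpu.h>

    _Static_assert(sizeof(struct __regs) == 280,
                   "struct __regs must match __TRAP_STACK_SIZE");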

