[xen staging] xen: move include/asm-* to arch/*/include/asm
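In short, the per-architecture headers move from xen/include/asm-<arch>/ to xen/arch/<arch>/include/asm/, so the build no longer has to create the include/asm symlink. A minimal sketch of the resulting include resolution, based on the -I flag this patch adds in arch.mk (shown below for Arm; illustrative only, not part of the patch):

    # Old layout: xen/include/asm was a build-time symlink to
    #             xen/include/asm-$(TARGET_ARCH).
    # New layout: headers live in a real per-arch directory,
    #             e.g. xen/arch/arm/include/asm/.
    # arch.mk now hands that directory to the compiler:
    CFLAGS += -I$(BASEDIR)/arch/$(TARGET_ARCH)/include
    # so '#include <asm/processor.h>' in C sources resolves to
    # arch/$(TARGET_ARCH)/include/asm/processor.h with no symlink involved.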
commit 725381a5eab35227ef0099a43e05034def42bb77
Author:     Anthony PERARD <anthony.perard@xxxxxxxxxx>
AuthorDate: Wed Dec 15 10:14:13 2021 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Dec 15 10:14:13 2021 +0100

    xen: move include/asm-* to arch/*/include/asm

    This avoids the need to create the symbolic link "include/asm".
    Wherever a comment refers to an "asm" header, this patch avoids
    spelling out the arch when not needed, to limit code churn.

    One unrelated change is to sort the entries in MAINTAINERS for
    "INTEL(R) VT FOR X86 (VT-X)".

    Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
    Acked-by: Paul Durrant <paul@xxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Julien Grall <jgrall@xxxxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
.gitignore | 5 +- MAINTAINERS | 37 +- tools/include/Makefile | 2 +- tools/misc/xen-access.c | 4 +- tools/tests/vhpet/Makefile | 2 +- xen/Makefile | 13 +- xen/arch/arm/README.LinuxPrimitives | 10 +- xen/arch/arm/arch.mk | 1 + xen/arch/arm/arm32/head.S | 2 +- xen/arch/arm/arm64/head.S | 2 +- xen/arch/arm/include/asm/acpi.h | 82 ++ xen/arch/arm/include/asm/alternative.h | 221 +++++ xen/arch/arm/include/asm/altp2m.h | 39 + xen/arch/arm/include/asm/arm32/atomic.h | 175 ++++ xen/arch/arm/include/asm/arm32/bitops.h | 42 + xen/arch/arm/include/asm/arm32/bug.h | 15 + xen/arch/arm/include/asm/arm32/cmpxchg.h | 229 +++++ xen/arch/arm/include/asm/arm32/flushtlb.h | 63 ++ xen/arch/arm/include/asm/arm32/insn.h | 71 ++ xen/arch/arm/include/asm/arm32/io.h | 96 ++ xen/arch/arm/include/asm/arm32/macros.h | 8 + xen/arch/arm/include/asm/arm32/mm.h | 23 + xen/arch/arm/include/asm/arm32/page.h | 118 +++ xen/arch/arm/include/asm/arm32/processor.h | 69 ++ xen/arch/arm/include/asm/arm32/sysregs.h | 78 ++ xen/arch/arm/include/asm/arm32/system.h | 77 ++ xen/arch/arm/include/asm/arm32/traps.h | 13 + xen/arch/arm/include/asm/arm32/vfp.h | 41 + xen/arch/arm/include/asm/arm64/atomic.h | 148 +++ xen/arch/arm/include/asm/arm64/bitops.h | 98 ++ xen/arch/arm/include/asm/arm64/brk.h | 39 + xen/arch/arm/include/asm/arm64/bug.h | 11 + xen/arch/arm/include/asm/arm64/cmpxchg.h | 183 ++++ xen/arch/arm/include/asm/arm64/cpufeature.h | 104 ++ xen/arch/arm/include/asm/arm64/efibind.h | 216 +++++ xen/arch/arm/include/asm/arm64/flushtlb.h | 77 ++ xen/arch/arm/include/asm/arm64/hsr.h | 159 +++ xen/arch/arm/include/asm/arm64/insn.h | 110 +++ xen/arch/arm/include/asm/arm64/io.h | 148 +++ xen/arch/arm/include/asm/arm64/macros.h | 36 + xen/arch/arm/include/asm/arm64/mm.h | 23 + xen/arch/arm/include/asm/arm64/page.h | 103 ++ xen/arch/arm/include/asm/arm64/processor.h | 99 ++ xen/arch/arm/include/asm/arm64/sysregs.h | 423 ++++++++ xen/arch/arm/include/asm/arm64/system.h | 91 ++ xen/arch/arm/include/asm/arm64/traps.h | 18 + xen/arch/arm/include/asm/arm64/vfp.h | 23 + xen/arch/arm/include/asm/asm_defns.h | 44 + xen/arch/arm/include/asm/atomic.h | 236 +++++ xen/arch/arm/include/asm/bitops.h | 192 ++++ xen/arch/arm/include/asm/bug.h | 106 ++ xen/arch/arm/include/asm/byteorder.h | 16 + xen/arch/arm/include/asm/cache.h | 19 + xen/arch/arm/include/asm/cadence-uart.h | 55 ++ xen/arch/arm/include/asm/config.h | 207 ++++ xen/arch/arm/include/asm/cpregs.h | 375 +++++++ xen/arch/arm/include/asm/cpuerrata.h | 85 ++ xen/arch/arm/include/asm/cpufeature.h | 428 ++++++++ xen/arch/arm/include/asm/current.h | 73 ++ xen/arch/arm/include/asm/debugger.h | 15 + xen/arch/arm/include/asm/delay.h | 14 + xen/arch/arm/include/asm/desc.h | 12 + xen/arch/arm/include/asm/device.h | 124 +++ 
xen/arch/arm/include/asm/div64.h | 250 +++++ xen/arch/arm/include/asm/domain.h | 279 ++++++ xen/arch/arm/include/asm/domain_build.h | 31 + xen/arch/arm/include/asm/early_printk.h | 23 + xen/arch/arm/include/asm/efibind.h | 2 + xen/arch/arm/include/asm/elf.h | 33 + xen/arch/arm/include/asm/event.h | 63 ++ xen/arch/arm/include/asm/exynos4210-uart.h | 112 +++ xen/arch/arm/include/asm/flushtlb.h | 77 ++ xen/arch/arm/include/asm/gic.h | 459 +++++++++ xen/arch/arm/include/asm/gic_v3_defs.h | 220 +++++ xen/arch/arm/include/asm/gic_v3_its.h | 283 ++++++ xen/arch/arm/include/asm/grant_table.h | 108 +++ xen/arch/arm/include/asm/guest_access.h | 42 + xen/arch/arm/include/asm/guest_atomics.h | 148 +++ xen/arch/arm/include/asm/guest_walk.h | 19 + xen/arch/arm/include/asm/hardirq.h | 27 + xen/arch/arm/include/asm/hsr.h | 217 +++++ xen/arch/arm/include/asm/hypercall.h | 20 + xen/arch/arm/include/asm/init.h | 20 + xen/arch/arm/include/asm/insn.h | 29 + xen/arch/arm/include/asm/io.h | 20 + xen/arch/arm/include/asm/iocap.h | 16 + xen/arch/arm/include/asm/iommu.h | 45 + xen/arch/arm/include/asm/iommu_fwspec.h | 68 ++ xen/arch/arm/include/asm/ioreq.h | 70 ++ xen/arch/arm/include/asm/irq.h | 109 +++ xen/arch/arm/include/asm/kernel.h | 89 ++ xen/arch/arm/include/asm/livepatch.h | 37 + xen/arch/arm/include/asm/lpae.h | 257 +++++ xen/arch/arm/include/asm/macros.h | 32 + xen/arch/arm/include/asm/mem_access.h | 53 + xen/arch/arm/include/asm/mm.h | 373 +++++++ xen/arch/arm/include/asm/mmio.h | 86 ++ xen/arch/arm/include/asm/monitor.h | 68 ++ xen/arch/arm/include/asm/new_vgic.h | 198 ++++ xen/arch/arm/include/asm/nospec.h | 25 + xen/arch/arm/include/asm/numa.h | 36 + xen/arch/arm/include/asm/p2m.h | 439 +++++++++ xen/arch/arm/include/asm/page-bits.h | 12 + xen/arch/arm/include/asm/page.h | 293 ++++++ xen/arch/arm/include/asm/paging.h | 16 + xen/arch/arm/include/asm/pci.h | 133 +++ xen/arch/arm/include/asm/percpu.h | 33 + xen/arch/arm/include/asm/perfc.h | 21 + xen/arch/arm/include/asm/perfc_defn.h | 89 ++ xen/arch/arm/include/asm/pl011-uart.h | 87 ++ xen/arch/arm/include/asm/platform.h | 82 ++ xen/arch/arm/include/asm/platforms/exynos5.h | 20 + xen/arch/arm/include/asm/platforms/midway.h | 21 + xen/arch/arm/include/asm/platforms/omap5.h | 32 + xen/arch/arm/include/asm/platforms/vexpress.h | 37 + .../arm/include/asm/platforms/xilinx-zynqmp-eemi.h | 128 +++ xen/arch/arm/include/asm/processor.h | 598 ++++++++++++ xen/arch/arm/include/asm/procinfo.h | 43 + xen/arch/arm/include/asm/psci.h | 91 ++ xen/arch/arm/include/asm/random.h | 9 + xen/arch/arm/include/asm/regs.h | 73 ++ xen/arch/arm/include/asm/scif-uart.h | 127 +++ xen/arch/arm/include/asm/setup.h | 135 +++ xen/arch/arm/include/asm/short-desc.h | 130 +++ xen/arch/arm/include/asm/smccc.h | 356 +++++++ xen/arch/arm/include/asm/smp.h | 46 + xen/arch/arm/include/asm/softirq.h | 16 + xen/arch/arm/include/asm/spinlock.h | 15 + xen/arch/arm/include/asm/string.h | 53 + xen/arch/arm/include/asm/sysregs.h | 22 + xen/arch/arm/include/asm/system.h | 73 ++ xen/arch/arm/include/asm/tee/optee_msg.h | 310 ++++++ xen/arch/arm/include/asm/tee/optee_rpc_cmd.h | 318 ++++++ xen/arch/arm/include/asm/tee/optee_smc.h | 567 +++++++++++ xen/arch/arm/include/asm/tee/tee.h | 112 +++ xen/arch/arm/include/asm/time.h | 118 +++ xen/arch/arm/include/asm/trace.h | 12 + xen/arch/arm/include/asm/traps.h | 121 +++ xen/arch/arm/include/asm/types.h | 80 ++ xen/arch/arm/include/asm/vfp.h | 25 + xen/arch/arm/include/asm/vgic-emul.h | 33 + xen/arch/arm/include/asm/vgic.h | 383 ++++++++ 
xen/arch/arm/include/asm/vm_event.h | 67 ++ xen/arch/arm/include/asm/vpl011.h | 89 ++ xen/arch/arm/include/asm/vpsci.h | 42 + xen/arch/arm/include/asm/vreg.h | 196 ++++ xen/arch/arm/include/asm/vtimer.h | 41 + xen/arch/arm/include/asm/xenoprof.h | 12 + xen/arch/arm/smpboot.c | 2 +- xen/arch/arm/vpsci.c | 2 +- xen/arch/riscv/arch.mk | 1 + xen/arch/riscv/include/asm/config.h | 47 + xen/arch/x86/Makefile | 6 +- xen/arch/x86/arch.mk | 5 +- xen/arch/x86/include/asm/acpi.h | 162 ++++ xen/arch/x86/include/asm/alternative-asm.h | 125 +++ xen/arch/x86/include/asm/alternative.h | 387 ++++++++ xen/arch/x86/include/asm/altp2m.h | 57 ++ xen/arch/x86/include/asm/amd.h | 154 +++ xen/arch/x86/include/asm/apic.h | 202 ++++ xen/arch/x86/include/asm/apicdef.h | 134 +++ xen/arch/x86/include/asm/asm-defns.h | 78 ++ xen/arch/x86/include/asm/asm_defns.h | 354 +++++++ xen/arch/x86/include/asm/atomic.h | 239 +++++ xen/arch/x86/include/asm/bitops.h | 483 +++++++++ xen/arch/x86/include/asm/bug.h | 125 +++ xen/arch/x86/include/asm/byteorder.h | 36 + xen/arch/x86/include/asm/bzimage.h | 11 + xen/arch/x86/include/asm/cache.h | 14 + xen/arch/x86/include/asm/compat.h | 20 + xen/arch/x86/include/asm/config.h | 329 +++++++ xen/arch/x86/include/asm/cpufeature.h | 214 ++++ xen/arch/x86/include/asm/cpufeatures.h | 51 + xen/arch/x86/include/asm/cpufeatureset.h | 40 + xen/arch/x86/include/asm/cpuid.h | 80 ++ xen/arch/x86/include/asm/cpuidle.h | 31 + xen/arch/x86/include/asm/current.h | 210 ++++ xen/arch/x86/include/asm/debugger.h | 101 ++ xen/arch/x86/include/asm/debugreg.h | 83 ++ xen/arch/x86/include/asm/delay.h | 13 + xen/arch/x86/include/asm/desc.h | 252 +++++ xen/arch/x86/include/asm/device.h | 25 + xen/arch/x86/include/asm/div64.h | 14 + xen/arch/x86/include/asm/dom0_build.h | 42 + xen/arch/x86/include/asm/domain.h | 769 +++++++++++++++ xen/arch/x86/include/asm/e820.h | 42 + xen/arch/x86/include/asm/edd.h | 164 ++++ xen/arch/x86/include/asm/efibind.h | 2 + xen/arch/x86/include/asm/elf.h | 20 + xen/arch/x86/include/asm/event.h | 56 ++ xen/arch/x86/include/asm/fixmap.h | 117 +++ xen/arch/x86/include/asm/flushtlb.h | 203 ++++ xen/arch/x86/include/asm/genapic.h | 70 ++ xen/arch/x86/include/asm/grant_table.h | 80 ++ xen/arch/x86/include/asm/guest.h | 39 + xen/arch/x86/include/asm/guest/hyperv-hcall.h | 97 ++ xen/arch/x86/include/asm/guest/hyperv-tlfs.h | 934 ++++++++++++++++++ xen/arch/x86/include/asm/guest/hyperv.h | 86 ++ xen/arch/x86/include/asm/guest/hypervisor.h | 85 ++ xen/arch/x86/include/asm/guest/pvh-boot.h | 58 ++ xen/arch/x86/include/asm/guest/xen-hcall.h | 212 ++++ xen/arch/x86/include/asm/guest/xen.h | 61 ++ xen/arch/x86/include/asm/guest_access.h | 59 ++ xen/arch/x86/include/asm/guest_atomics.h | 33 + xen/arch/x86/include/asm/guest_pt.h | 468 +++++++++ xen/arch/x86/include/asm/hap.h | 60 ++ xen/arch/x86/include/asm/hardirq.h | 37 + xen/arch/x86/include/asm/hpet.h | 101 ++ xen/arch/x86/include/asm/hvm/asid.h | 52 + xen/arch/x86/include/asm/hvm/cacheattr.h | 23 + xen/arch/x86/include/asm/hvm/domain.h | 173 ++++ xen/arch/x86/include/asm/hvm/emulate.h | 156 +++ xen/arch/x86/include/asm/hvm/grant_table.h | 61 ++ xen/arch/x86/include/asm/hvm/guest_access.h | 8 + xen/arch/x86/include/asm/hvm/hvm.h | 886 +++++++++++++++++ xen/arch/x86/include/asm/hvm/io.h | 181 ++++ xen/arch/x86/include/asm/hvm/ioreq.h | 37 + xen/arch/x86/include/asm/hvm/irq.h | 227 +++++ xen/arch/x86/include/asm/hvm/monitor.h | 65 ++ xen/arch/x86/include/asm/hvm/nestedhvm.h | 100 ++ xen/arch/x86/include/asm/hvm/save.h | 144 +++ 
xen/arch/x86/include/asm/hvm/support.h | 170 ++++ xen/arch/x86/include/asm/hvm/svm/asid.h | 49 + xen/arch/x86/include/asm/hvm/svm/emulate.h | 66 ++ xen/arch/x86/include/asm/hvm/svm/intr.h | 25 + xen/arch/x86/include/asm/hvm/svm/nestedsvm.h | 145 +++ xen/arch/x86/include/asm/hvm/svm/svm.h | 110 +++ xen/arch/x86/include/asm/hvm/svm/svmdebug.h | 30 + xen/arch/x86/include/asm/hvm/svm/vmcb.h | 664 +++++++++++++ xen/arch/x86/include/asm/hvm/trace.h | 114 +++ xen/arch/x86/include/asm/hvm/vcpu.h | 210 ++++ xen/arch/x86/include/asm/hvm/vioapic.h | 72 ++ xen/arch/x86/include/asm/hvm/viridian.h | 112 +++ xen/arch/x86/include/asm/hvm/vlapic.h | 157 +++ xen/arch/x86/include/asm/hvm/vm_event.h | 34 + xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 688 +++++++++++++ xen/arch/x86/include/asm/hvm/vmx/vmx.h | 692 +++++++++++++ xen/arch/x86/include/asm/hvm/vmx/vvmx.h | 214 ++++ xen/arch/x86/include/asm/hvm/vpic.h | 40 + xen/arch/x86/include/asm/hvm/vpt.h | 205 ++++ xen/arch/x86/include/asm/hypercall.h | 198 ++++ xen/arch/x86/include/asm/i387.h | 40 + xen/arch/x86/include/asm/init.h | 4 + xen/arch/x86/include/asm/invpcid.h | 67 ++ xen/arch/x86/include/asm/io.h | 56 ++ xen/arch/x86/include/asm/io_apic.h | 212 ++++ xen/arch/x86/include/asm/iocap.h | 21 + xen/arch/x86/include/asm/iommu.h | 155 +++ xen/arch/x86/include/asm/ioreq.h | 39 + xen/arch/x86/include/asm/irq.h | 221 +++++ xen/arch/x86/include/asm/ldt.h | 35 + xen/arch/x86/include/asm/livepatch.h | 25 + xen/arch/x86/include/asm/mach-default/bios_ebda.h | 15 + xen/arch/x86/include/asm/mach-default/io_ports.h | 30 + .../x86/include/asm/mach-default/irq_vectors.h | 46 + .../x86/include/asm/mach-default/mach_mpspec.h | 10 + xen/arch/x86/include/asm/mach-generic/mach_apic.h | 80 ++ xen/arch/x86/include/asm/machine_kexec.h | 16 + xen/arch/x86/include/asm/mc146818rtc.h | 116 +++ xen/arch/x86/include/asm/mce.h | 49 + xen/arch/x86/include/asm/mem_access.h | 68 ++ xen/arch/x86/include/asm/mem_paging.h | 42 + xen/arch/x86/include/asm/mem_sharing.h | 153 +++ xen/arch/x86/include/asm/microcode.h | 27 + xen/arch/x86/include/asm/mm.h | 655 +++++++++++++ xen/arch/x86/include/asm/monitor.h | 126 +++ xen/arch/x86/include/asm/mpspec.h | 73 ++ xen/arch/x86/include/asm/mpspec_def.h | 188 ++++ xen/arch/x86/include/asm/msi.h | 256 +++++ xen/arch/x86/include/asm/msr-index.h | 671 +++++++++++++ xen/arch/x86/include/asm/msr.h | 381 ++++++++ xen/arch/x86/include/asm/mtrr.h | 103 ++ xen/arch/x86/include/asm/multicall.h | 12 + xen/arch/x86/include/asm/mwait.h | 19 + xen/arch/x86/include/asm/nmi.h | 46 + xen/arch/x86/include/asm/nops.h | 70 ++ xen/arch/x86/include/asm/nospec.h | 39 + xen/arch/x86/include/asm/numa.h | 84 ++ xen/arch/x86/include/asm/p2m.h | 1022 ++++++++++++++++++++ xen/arch/x86/include/asm/page-bits.h | 26 + xen/arch/x86/include/asm/page.h | 409 ++++++++ xen/arch/x86/include/asm/paging.h | 433 +++++++++ xen/arch/x86/include/asm/pci.h | 41 + xen/arch/x86/include/asm/percpu.h | 22 + xen/arch/x86/include/asm/perfc.h | 12 + xen/arch/x86/include/asm/perfc_defn.h | 120 +++ xen/arch/x86/include/asm/processor.h | 650 +++++++++++++ xen/arch/x86/include/asm/psr.h | 99 ++ xen/arch/x86/include/asm/pv/domain.h | 120 +++ xen/arch/x86/include/asm/pv/grant_table.h | 60 ++ xen/arch/x86/include/asm/pv/mm.h | 60 ++ xen/arch/x86/include/asm/pv/shim.h | 119 +++ xen/arch/x86/include/asm/pv/trace.h | 48 + xen/arch/x86/include/asm/pv/traps.h | 71 ++ xen/arch/x86/include/asm/random.h | 16 + xen/arch/x86/include/asm/regs.h | 33 + xen/arch/x86/include/asm/setup.h | 75 ++ 
xen/arch/x86/include/asm/shadow.h | 273 ++++++ xen/arch/x86/include/asm/shared.h | 79 ++ xen/arch/x86/include/asm/smp.h | 90 ++ xen/arch/x86/include/asm/softirq.h | 14 + xen/arch/x86/include/asm/spec_ctrl.h | 151 +++ xen/arch/x86/include/asm/spec_ctrl_asm.h | 342 +++++++ xen/arch/x86/include/asm/spinlock.h | 27 + xen/arch/x86/include/asm/string.h | 12 + xen/arch/x86/include/asm/system.h | 295 ++++++ xen/arch/x86/include/asm/tboot.h | 160 +++ xen/arch/x86/include/asm/time.h | 76 ++ xen/arch/x86/include/asm/trace.h | 4 + xen/arch/x86/include/asm/traps.h | 34 + xen/arch/x86/include/asm/types.h | 50 + xen/arch/x86/include/asm/uaccess.h | 429 ++++++++ xen/arch/x86/include/asm/unaligned.h | 6 + xen/arch/x86/include/asm/vm_event.h | 59 ++ xen/arch/x86/include/asm/vpmu.h | 140 +++ xen/arch/x86/include/asm/x86-defns.h | 156 +++ xen/arch/x86/include/asm/x86-vendors.h | 39 + xen/arch/x86/include/asm/x86_64/efibind.h | 280 ++++++ xen/arch/x86/include/asm/x86_64/elf.h | 85 ++ xen/arch/x86/include/asm/x86_64/page.h | 166 ++++ xen/arch/x86/include/asm/x86_64/regs.h | 28 + xen/arch/x86/include/asm/x86_64/system.h | 62 ++ xen/arch/x86/include/asm/x86_64/uaccess.h | 70 ++ xen/arch/x86/include/asm/x86_emulate.h | 21 + xen/arch/x86/include/asm/xenoprof.h | 107 ++ xen/arch/x86/include/asm/xstate.h | 141 +++ xen/common/efi/runtime.c | 2 +- xen/common/page_alloc.c | 2 +- xen/include/asm-arm/acpi.h | 82 -- xen/include/asm-arm/alternative.h | 221 ----- xen/include/asm-arm/altp2m.h | 39 - xen/include/asm-arm/arm32/atomic.h | 175 ---- xen/include/asm-arm/arm32/bitops.h | 42 - xen/include/asm-arm/arm32/bug.h | 15 - xen/include/asm-arm/arm32/cmpxchg.h | 229 ----- xen/include/asm-arm/arm32/flushtlb.h | 63 -- xen/include/asm-arm/arm32/insn.h | 71 -- xen/include/asm-arm/arm32/io.h | 96 -- xen/include/asm-arm/arm32/macros.h | 8 - xen/include/asm-arm/arm32/mm.h | 23 - xen/include/asm-arm/arm32/page.h | 118 --- xen/include/asm-arm/arm32/processor.h | 69 -- xen/include/asm-arm/arm32/sysregs.h | 78 -- xen/include/asm-arm/arm32/system.h | 77 -- xen/include/asm-arm/arm32/traps.h | 13 - xen/include/asm-arm/arm32/vfp.h | 41 - xen/include/asm-arm/arm64/atomic.h | 148 --- xen/include/asm-arm/arm64/bitops.h | 98 -- xen/include/asm-arm/arm64/brk.h | 39 - xen/include/asm-arm/arm64/bug.h | 11 - xen/include/asm-arm/arm64/cmpxchg.h | 183 ---- xen/include/asm-arm/arm64/cpufeature.h | 104 -- xen/include/asm-arm/arm64/efibind.h | 216 ----- xen/include/asm-arm/arm64/flushtlb.h | 77 -- xen/include/asm-arm/arm64/hsr.h | 159 --- xen/include/asm-arm/arm64/insn.h | 110 --- xen/include/asm-arm/arm64/io.h | 148 --- xen/include/asm-arm/arm64/macros.h | 36 - xen/include/asm-arm/arm64/mm.h | 23 - xen/include/asm-arm/arm64/page.h | 103 -- xen/include/asm-arm/arm64/processor.h | 99 -- xen/include/asm-arm/arm64/sysregs.h | 423 -------- xen/include/asm-arm/arm64/system.h | 91 -- xen/include/asm-arm/arm64/traps.h | 18 - xen/include/asm-arm/arm64/vfp.h | 23 - xen/include/asm-arm/asm_defns.h | 44 - xen/include/asm-arm/atomic.h | 236 ----- xen/include/asm-arm/bitops.h | 192 ---- xen/include/asm-arm/bug.h | 106 -- xen/include/asm-arm/byteorder.h | 16 - xen/include/asm-arm/cache.h | 19 - xen/include/asm-arm/cadence-uart.h | 55 -- xen/include/asm-arm/config.h | 207 ---- xen/include/asm-arm/cpregs.h | 375 ------- xen/include/asm-arm/cpuerrata.h | 85 -- xen/include/asm-arm/cpufeature.h | 428 -------- xen/include/asm-arm/current.h | 73 -- xen/include/asm-arm/debugger.h | 15 - xen/include/asm-arm/delay.h | 14 - xen/include/asm-arm/desc.h | 12 - 
xen/include/asm-arm/device.h | 124 --- xen/include/asm-arm/div64.h | 250 ----- xen/include/asm-arm/domain.h | 279 ------ xen/include/asm-arm/domain_build.h | 31 - xen/include/asm-arm/early_printk.h | 23 - xen/include/asm-arm/efibind.h | 2 - xen/include/asm-arm/elf.h | 33 - xen/include/asm-arm/event.h | 63 -- xen/include/asm-arm/exynos4210-uart.h | 112 --- xen/include/asm-arm/flushtlb.h | 77 -- xen/include/asm-arm/gic.h | 459 --------- xen/include/asm-arm/gic_v3_defs.h | 220 ----- xen/include/asm-arm/gic_v3_its.h | 283 ------ xen/include/asm-arm/grant_table.h | 108 --- xen/include/asm-arm/guest_access.h | 42 - xen/include/asm-arm/guest_atomics.h | 148 --- xen/include/asm-arm/guest_walk.h | 19 - xen/include/asm-arm/hardirq.h | 27 - xen/include/asm-arm/hsr.h | 217 ----- xen/include/asm-arm/hypercall.h | 20 - xen/include/asm-arm/init.h | 20 - xen/include/asm-arm/insn.h | 29 - xen/include/asm-arm/io.h | 20 - xen/include/asm-arm/iocap.h | 16 - xen/include/asm-arm/iommu.h | 45 - xen/include/asm-arm/iommu_fwspec.h | 68 -- xen/include/asm-arm/ioreq.h | 70 -- xen/include/asm-arm/irq.h | 109 --- xen/include/asm-arm/kernel.h | 89 -- xen/include/asm-arm/livepatch.h | 37 - xen/include/asm-arm/lpae.h | 257 ----- xen/include/asm-arm/macros.h | 32 - xen/include/asm-arm/mem_access.h | 53 - xen/include/asm-arm/mm.h | 373 ------- xen/include/asm-arm/mmio.h | 86 -- xen/include/asm-arm/monitor.h | 68 -- xen/include/asm-arm/new_vgic.h | 198 ---- xen/include/asm-arm/nospec.h | 25 - xen/include/asm-arm/numa.h | 36 - xen/include/asm-arm/p2m.h | 439 --------- xen/include/asm-arm/page-bits.h | 12 - xen/include/asm-arm/page.h | 293 ------ xen/include/asm-arm/paging.h | 16 - xen/include/asm-arm/pci.h | 133 --- xen/include/asm-arm/percpu.h | 33 - xen/include/asm-arm/perfc.h | 21 - xen/include/asm-arm/perfc_defn.h | 89 -- xen/include/asm-arm/pl011-uart.h | 87 -- xen/include/asm-arm/platform.h | 82 -- xen/include/asm-arm/platforms/exynos5.h | 20 - xen/include/asm-arm/platforms/midway.h | 21 - xen/include/asm-arm/platforms/omap5.h | 32 - xen/include/asm-arm/platforms/vexpress.h | 37 - xen/include/asm-arm/platforms/xilinx-zynqmp-eemi.h | 128 --- xen/include/asm-arm/processor.h | 598 ------------ xen/include/asm-arm/procinfo.h | 43 - xen/include/asm-arm/psci.h | 91 -- xen/include/asm-arm/random.h | 9 - xen/include/asm-arm/regs.h | 73 -- xen/include/asm-arm/scif-uart.h | 127 --- xen/include/asm-arm/setup.h | 135 --- xen/include/asm-arm/short-desc.h | 130 --- xen/include/asm-arm/smccc.h | 356 ------- xen/include/asm-arm/smp.h | 46 - xen/include/asm-arm/softirq.h | 16 - xen/include/asm-arm/spinlock.h | 15 - xen/include/asm-arm/string.h | 53 - xen/include/asm-arm/sysregs.h | 22 - xen/include/asm-arm/system.h | 73 -- xen/include/asm-arm/tee/optee_msg.h | 310 ------ xen/include/asm-arm/tee/optee_rpc_cmd.h | 318 ------ xen/include/asm-arm/tee/optee_smc.h | 567 ----------- xen/include/asm-arm/tee/tee.h | 112 --- xen/include/asm-arm/time.h | 118 --- xen/include/asm-arm/trace.h | 12 - xen/include/asm-arm/traps.h | 121 --- xen/include/asm-arm/types.h | 80 -- xen/include/asm-arm/vfp.h | 25 - xen/include/asm-arm/vgic-emul.h | 33 - xen/include/asm-arm/vgic.h | 383 -------- xen/include/asm-arm/vm_event.h | 67 -- xen/include/asm-arm/vpl011.h | 89 -- xen/include/asm-arm/vpsci.h | 42 - xen/include/asm-arm/vreg.h | 196 ---- xen/include/asm-arm/vtimer.h | 41 - xen/include/asm-arm/xenoprof.h | 12 - xen/include/asm-riscv/config.h | 47 - xen/include/asm-x86/acpi.h | 162 ---- xen/include/asm-x86/alternative-asm.h | 125 --- 
xen/include/asm-x86/alternative.h | 387 -------- xen/include/asm-x86/altp2m.h | 57 -- xen/include/asm-x86/amd.h | 154 --- xen/include/asm-x86/apic.h | 202 ---- xen/include/asm-x86/apicdef.h | 134 --- xen/include/asm-x86/asm-defns.h | 78 -- xen/include/asm-x86/asm_defns.h | 354 ------- xen/include/asm-x86/atomic.h | 239 ----- xen/include/asm-x86/bitops.h | 483 --------- xen/include/asm-x86/bug.h | 125 --- xen/include/asm-x86/byteorder.h | 36 - xen/include/asm-x86/bzimage.h | 11 - xen/include/asm-x86/cache.h | 14 - xen/include/asm-x86/compat.h | 20 - xen/include/asm-x86/config.h | 329 ------- xen/include/asm-x86/cpufeature.h | 214 ---- xen/include/asm-x86/cpufeatures.h | 51 - xen/include/asm-x86/cpufeatureset.h | 40 - xen/include/asm-x86/cpuid.h | 80 -- xen/include/asm-x86/cpuidle.h | 31 - xen/include/asm-x86/current.h | 210 ---- xen/include/asm-x86/debugger.h | 101 -- xen/include/asm-x86/debugreg.h | 83 -- xen/include/asm-x86/delay.h | 13 - xen/include/asm-x86/desc.h | 252 ----- xen/include/asm-x86/device.h | 25 - xen/include/asm-x86/div64.h | 14 - xen/include/asm-x86/dom0_build.h | 42 - xen/include/asm-x86/domain.h | 769 --------------- xen/include/asm-x86/e820.h | 42 - xen/include/asm-x86/edd.h | 164 ---- xen/include/asm-x86/efibind.h | 2 - xen/include/asm-x86/elf.h | 20 - xen/include/asm-x86/event.h | 56 -- xen/include/asm-x86/fixmap.h | 117 --- xen/include/asm-x86/flushtlb.h | 203 ---- xen/include/asm-x86/genapic.h | 70 -- xen/include/asm-x86/grant_table.h | 80 -- xen/include/asm-x86/guest.h | 39 - xen/include/asm-x86/guest/hyperv-hcall.h | 97 -- xen/include/asm-x86/guest/hyperv-tlfs.h | 934 ------------------ xen/include/asm-x86/guest/hyperv.h | 86 -- xen/include/asm-x86/guest/hypervisor.h | 85 -- xen/include/asm-x86/guest/pvh-boot.h | 58 -- xen/include/asm-x86/guest/xen-hcall.h | 212 ---- xen/include/asm-x86/guest/xen.h | 61 -- xen/include/asm-x86/guest_access.h | 59 -- xen/include/asm-x86/guest_atomics.h | 33 - xen/include/asm-x86/guest_pt.h | 468 --------- xen/include/asm-x86/hap.h | 60 -- xen/include/asm-x86/hardirq.h | 37 - xen/include/asm-x86/hpet.h | 101 -- xen/include/asm-x86/hvm/asid.h | 52 - xen/include/asm-x86/hvm/cacheattr.h | 23 - xen/include/asm-x86/hvm/domain.h | 173 ---- xen/include/asm-x86/hvm/emulate.h | 156 --- xen/include/asm-x86/hvm/grant_table.h | 61 -- xen/include/asm-x86/hvm/guest_access.h | 8 - xen/include/asm-x86/hvm/hvm.h | 886 ----------------- xen/include/asm-x86/hvm/io.h | 181 ---- xen/include/asm-x86/hvm/ioreq.h | 37 - xen/include/asm-x86/hvm/irq.h | 227 ----- xen/include/asm-x86/hvm/monitor.h | 65 -- xen/include/asm-x86/hvm/nestedhvm.h | 100 -- xen/include/asm-x86/hvm/save.h | 144 --- xen/include/asm-x86/hvm/support.h | 170 ---- xen/include/asm-x86/hvm/svm/asid.h | 49 - xen/include/asm-x86/hvm/svm/emulate.h | 66 -- xen/include/asm-x86/hvm/svm/intr.h | 25 - xen/include/asm-x86/hvm/svm/nestedsvm.h | 145 --- xen/include/asm-x86/hvm/svm/svm.h | 110 --- xen/include/asm-x86/hvm/svm/svmdebug.h | 30 - xen/include/asm-x86/hvm/svm/vmcb.h | 664 ------------- xen/include/asm-x86/hvm/trace.h | 114 --- xen/include/asm-x86/hvm/vcpu.h | 210 ---- xen/include/asm-x86/hvm/vioapic.h | 72 -- xen/include/asm-x86/hvm/viridian.h | 112 --- xen/include/asm-x86/hvm/vlapic.h | 157 --- xen/include/asm-x86/hvm/vm_event.h | 34 - xen/include/asm-x86/hvm/vmx/vmcs.h | 688 ------------- xen/include/asm-x86/hvm/vmx/vmx.h | 692 ------------- xen/include/asm-x86/hvm/vmx/vvmx.h | 214 ---- xen/include/asm-x86/hvm/vpic.h | 40 - xen/include/asm-x86/hvm/vpt.h | 205 ---- 
xen/include/asm-x86/hypercall.h | 198 ---- xen/include/asm-x86/i387.h | 40 - xen/include/asm-x86/init.h | 4 - xen/include/asm-x86/invpcid.h | 67 -- xen/include/asm-x86/io.h | 56 -- xen/include/asm-x86/io_apic.h | 212 ---- xen/include/asm-x86/iocap.h | 21 - xen/include/asm-x86/iommu.h | 155 --- xen/include/asm-x86/ioreq.h | 39 - xen/include/asm-x86/irq.h | 221 ----- xen/include/asm-x86/ldt.h | 35 - xen/include/asm-x86/livepatch.h | 25 - xen/include/asm-x86/mach-default/bios_ebda.h | 15 - xen/include/asm-x86/mach-default/io_ports.h | 30 - xen/include/asm-x86/mach-default/irq_vectors.h | 46 - xen/include/asm-x86/mach-default/mach_mpspec.h | 10 - xen/include/asm-x86/mach-generic/mach_apic.h | 80 -- xen/include/asm-x86/machine_kexec.h | 16 - xen/include/asm-x86/mc146818rtc.h | 116 --- xen/include/asm-x86/mce.h | 49 - xen/include/asm-x86/mem_access.h | 68 -- xen/include/asm-x86/mem_paging.h | 42 - xen/include/asm-x86/mem_sharing.h | 153 --- xen/include/asm-x86/microcode.h | 27 - xen/include/asm-x86/mm.h | 655 ------------- xen/include/asm-x86/monitor.h | 126 --- xen/include/asm-x86/mpspec.h | 73 -- xen/include/asm-x86/mpspec_def.h | 188 ---- xen/include/asm-x86/msi.h | 256 ----- xen/include/asm-x86/msr-index.h | 671 ------------- xen/include/asm-x86/msr.h | 381 -------- xen/include/asm-x86/mtrr.h | 103 -- xen/include/asm-x86/multicall.h | 12 - xen/include/asm-x86/mwait.h | 19 - xen/include/asm-x86/nmi.h | 46 - xen/include/asm-x86/nops.h | 70 -- xen/include/asm-x86/nospec.h | 39 - xen/include/asm-x86/numa.h | 84 -- xen/include/asm-x86/p2m.h | 1022 -------------------- xen/include/asm-x86/page-bits.h | 26 - xen/include/asm-x86/page.h | 409 -------- xen/include/asm-x86/paging.h | 433 --------- xen/include/asm-x86/pci.h | 41 - xen/include/asm-x86/percpu.h | 22 - xen/include/asm-x86/perfc.h | 12 - xen/include/asm-x86/perfc_defn.h | 120 --- xen/include/asm-x86/processor.h | 650 ------------- xen/include/asm-x86/psr.h | 99 -- xen/include/asm-x86/pv/domain.h | 120 --- xen/include/asm-x86/pv/grant_table.h | 60 -- xen/include/asm-x86/pv/mm.h | 60 -- xen/include/asm-x86/pv/shim.h | 119 --- xen/include/asm-x86/pv/trace.h | 48 - xen/include/asm-x86/pv/traps.h | 71 -- xen/include/asm-x86/random.h | 16 - xen/include/asm-x86/regs.h | 33 - xen/include/asm-x86/setup.h | 75 -- xen/include/asm-x86/shadow.h | 273 ------ xen/include/asm-x86/shared.h | 79 -- xen/include/asm-x86/smp.h | 90 -- xen/include/asm-x86/softirq.h | 14 - xen/include/asm-x86/spec_ctrl.h | 151 --- xen/include/asm-x86/spec_ctrl_asm.h | 342 ------- xen/include/asm-x86/spinlock.h | 27 - xen/include/asm-x86/string.h | 12 - xen/include/asm-x86/system.h | 295 ------ xen/include/asm-x86/tboot.h | 160 --- xen/include/asm-x86/time.h | 76 -- xen/include/asm-x86/trace.h | 4 - xen/include/asm-x86/traps.h | 34 - xen/include/asm-x86/types.h | 50 - xen/include/asm-x86/uaccess.h | 429 -------- xen/include/asm-x86/unaligned.h | 6 - xen/include/asm-x86/vm_event.h | 59 -- xen/include/asm-x86/vpmu.h | 140 --- xen/include/asm-x86/x86-defns.h | 156 --- xen/include/asm-x86/x86-vendors.h | 39 - xen/include/asm-x86/x86_64/efibind.h | 280 ------ xen/include/asm-x86/x86_64/elf.h | 85 -- xen/include/asm-x86/x86_64/page.h | 166 ---- xen/include/asm-x86/x86_64/regs.h | 28 - xen/include/asm-x86/x86_64/system.h | 62 -- xen/include/asm-x86/x86_64/uaccess.h | 70 -- xen/include/asm-x86/x86_emulate.h | 21 - xen/include/asm-x86/xenoprof.h | 107 -- xen/include/asm-x86/xstate.h | 141 --- xen/include/xen/acpi.h | 5 +- xen/include/xen/bitmap.h | 2 +- 641 files changed, 40578 
insertions(+), 40579 deletions(-) diff --git a/.gitignore b/.gitignore index 111eb03b86..e13cbf84b2 100644 --- a/.gitignore +++ b/.gitignore @@ -314,6 +314,7 @@ xen/arch/x86/boot/*.lnk xen/arch/x86/efi.lds xen/arch/x86/efi/check.efi xen/arch/x86/efi/mkreloc +xen/arch/x86/include/asm/asm-macros.h xen/arch/*/xen.lds xen/arch/*/efi/boot.c xen/arch/*/efi/compat.c @@ -321,12 +322,10 @@ xen/arch/*/efi/ebmalloc.c xen/arch/*/efi/efi.h xen/arch/*/efi/pe.c xen/arch/*/efi/runtime.c +xen/arch/*/include/asm/asm-offsets.h xen/common/config_data.S xen/common/config.gz xen/include/headers*.chk -xen/include/asm -xen/include/asm-*/asm-offsets.h -xen/include/asm-x86/asm-macros.h xen/include/compat/* xen/include/config/ xen/include/generated/ diff --git a/MAINTAINERS b/MAINTAINERS index 22ea62d964..6e84a05760 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -245,7 +245,6 @@ F: xen/drivers/char/omap-uart.c F: xen/drivers/char/pl011.c F: xen/drivers/char/scif-uart.c F: xen/drivers/passthrough/arm/ -F: xen/include/asm-arm/ F: xen/include/public/arch-arm/ F: xen/include/public/arch-arm.h @@ -290,10 +289,10 @@ EFI M: Jan Beulich <jbeulich@xxxxxxxx> S: Supported F: xen/arch/x86/efi/ +F: xen/arch/x86/include/asm/efi*.h +F: xen/arch/x86/include/asm/x86_*/efi*.h F: xen/common/efi/ F: xen/include/efi/ -F: xen/include/asm-x86/efi*.h -F: xen/include/asm-x86/x86_*/efi*.h GDBSX DEBUGGER M: Elena Ufimtseva <elena.ufimtseva@xxxxxxxxxx> @@ -319,8 +318,8 @@ F: xen/include/xen/hypfs.h INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) R: Lukasz Hawrylko <lukasz.hawrylko@xxxxxxxxxxxxxxx> S: Odd Fixes +F: xen/arch/x86/include/asm/tboot.h F: xen/arch/x86/tboot.c -F: xen/include/asm-x86/tboot.h INTEL(R) VT FOR DIRECTED I/O (VT-D) M: Kevin Tian <kevin.tian@xxxxxxxxx> @@ -331,10 +330,10 @@ INTEL(R) VT FOR X86 (VT-X) M: Jun Nakajima <jun.nakajima@xxxxxxxxx> M: Kevin Tian <kevin.tian@xxxxxxxxx> S: Supported +F: xen/arch/x86/cpu/vpmu_intel.c F: xen/arch/x86/hvm/vmx/ +F: xen/arch/x86/include/asm/hvm/vmx/ F: xen/arch/x86/mm/p2m-ept.c -F: xen/include/asm-x86/hvm/vmx/ -F: xen/arch/x86/cpu/vpmu_intel.c IOMMU VENDOR INDEPENDENT CODE M: Jan Beulich <jbeulich@xxxxxxxx> @@ -401,10 +400,10 @@ M: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx> S: Supported F: docs/misc/livepatch.pandoc F: tools/misc/xen-livepatch.c +F: xen/arch/*/include/asm/livepatch.h F: xen/arch/*/livepatch* F: xen/arch/*/*/livepatch* F: xen/common/livepatch* -F: xen/include/asm-*/livepatch.h F: xen/include/xen/livepatch* F: xen/test/livepatch/* @@ -473,7 +472,6 @@ R: Connor Davis <connojdavis@xxxxxxxxx> S: Supported F: config/riscv64.mk F: xen/arch/riscv/ -F: xen/include/asm-riscv/ RTDS SCHEDULER M: Dario Faggioli <dfaggioli@xxxxxxxx> @@ -502,8 +500,8 @@ F: stubdom/ TEE MEDIATORS M: Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx> S: Supported +F: xen/arch/arm/include/asm/tee F: xen/arch/arm/tee/ -F: xen/include/asm-arm/tee TOOLSTACK M: Wei Liu <wl@xxxxxxx> @@ -531,6 +529,8 @@ F: tools/misc/xen-access.c F: xen/arch/*/monitor.c F: xen/arch/*/vm_event.c F: xen/arch/arm/mem_access.c +F: xen/arch/x86/include/asm/hvm/monitor.h +F: xen/arch/x86/include/asm/hvm/vm_event.h F: xen/arch/x86/mm/mem_access.c F: xen/arch/x86/hvm/monitor.c F: xen/arch/x86/hvm/vm_event.c @@ -540,8 +540,6 @@ F: xen/common/vm_event.c F: xen/include/*/mem_access.h F: xen/include/*/monitor.h F: xen/include/*/vm_event.h -F: xen/include/asm-x86/hvm/monitor.h -F: xen/include/asm-x86/hvm/vm_event.h VPCI M: Roger Pau Monné <roger.pau@xxxxxxxxxx> @@ -567,7 +565,6 @@ R: Wei Liu <wl@xxxxxxx> S: Supported L: 
xen-devel@xxxxxxxxxxxxxxxxxxxx F: xen/arch/x86/ -F: xen/include/asm-x86/ F: xen/include/public/arch-x86/ F: xen/include/xen/lib/x86 F: xen/lib/x86 @@ -587,10 +584,10 @@ F: xen/arch/x86/hvm/emulate.c F: xen/arch/x86/hvm/intercept.c F: xen/arch/x86/hvm/io.c F: xen/arch/x86/hvm/ioreq.c -F: xen/include/asm-x86/hvm/emulate.h -F: xen/include/asm-x86/hvm/io.h -F: xen/include/asm-x86/hvm/ioreq.h -F: xen/include/asm-x86/ioreq.h +F: xen/arch/x86/include/asm/hvm/emulate.h +F: xen/arch/x86/include/asm/hvm/io.h +F: xen/arch/x86/include/asm/hvm/ioreq.h +F: xen/arch/x86/include/asm/ioreq.h X86 MEMORY MANAGEMENT M: Jan Beulich <jbeulich@xxxxxxxx> @@ -622,10 +619,10 @@ M: Wei Liu <wl@xxxxxxx> S: Supported F: xen/arch/x86/guest/hyperv/ F: xen/arch/x86/hvm/viridian/ -F: xen/include/asm-x86/guest/hyperv.h -F: xen/include/asm-x86/guest/hyperv-hcall.h -F: xen/include/asm-x86/guest/hyperv-tlfs.h -F: xen/include/asm-x86/hvm/viridian.h +F: xen/arch/x86/include/asm/guest/hyperv.h +F: xen/arch/x86/include/asm/guest/hyperv-hcall.h +F: xen/arch/x86/include/asm/guest/hyperv-tlfs.h +F: xen/arch/x86/include/asm/hvm/viridian.h XENSTORE M: Wei Liu <wl@xxxxxxx> diff --git a/tools/include/Makefile b/tools/include/Makefile index 42605d46b9..d7b51006e0 100644 --- a/tools/include/Makefile +++ b/tools/include/Makefile @@ -30,7 +30,7 @@ xen-dir: ln -s $(XEN_ROOT)/xen/include/acpi/platform acpi/ ln -s $(XEN_ROOT)/xen/include/acpi/ac*.h acpi/ ifeq ($(CONFIG_X86),y) - ln -s $(XEN_ROOT)/xen/include/asm-x86 xen/asm + ln -s $(XEN_ROOT)/xen/arch/x86/include/asm xen/ mkdir -p xen/lib/x86 ln -s $(filter-out %autogen.h,$(wildcard $(XEN_ROOT)/xen/include/xen/lib/x86/*.h)) xen/lib/x86/ ln -s $(XEN_ROOT)/xen/include/xen/lib/x86/Makefile xen/lib/x86/ diff --git a/tools/misc/xen-access.c b/tools/misc/xen-access.c index 4bbef0bd2e..0731c20b83 100644 --- a/tools/misc/xen-access.c +++ b/tools/misc/xen-access.c @@ -56,11 +56,11 @@ #define ERROR(a, b...) fprintf(stderr, a "\n", ## b) #define PERROR(a, b...) fprintf(stderr, a ": %s\n", ## b, strerror(errno)) -/* From xen/include/asm-x86/processor.h */ +/* From xen/arch/x86/include/asm/processor.h */ #define X86_TRAP_DEBUG 1 #define X86_TRAP_INT3 3 -/* From xen/include/asm-x86/x86-defns.h */ +/* From xen/arch/x86/include/asm/x86-defns.h */ #define X86_CR4_PGE 0x00000080 /* enable global pages */ typedef struct vm_event { diff --git a/tools/tests/vhpet/Makefile b/tools/tests/vhpet/Makefile index cb88dd01c5..2d56ffdfd9 100644 --- a/tools/tests/vhpet/Makefile +++ b/tools/tests/vhpet/Makefile @@ -32,7 +32,7 @@ distclean: clean .PHONY: install install: -hpet.h: $(XEN_ROOT)/xen/include/asm-x86/hpet.h +hpet.h: $(XEN_ROOT)/xen/arch/x86/include/asm/hpet.h cp $< $@ hpet.c: $(XEN_ROOT)/xen/arch/x86/hvm/hpet.c diff --git a/xen/Makefile b/xen/Makefile index 1fd48af7ae..b2a63edca1 100644 --- a/xen/Makefile +++ b/xen/Makefile @@ -166,7 +166,7 @@ ifeq ($(TARGET_ARCH),x86) t1 = $(call as-insn,$(CC),".L0: .L1: .skip (.L1 - .L0)",,-no-integrated-as) # Check whether clang asm()-s support .include. 
-t2 = $(call as-insn,$(CC) -I$(BASEDIR)/include,".include \"asm-x86/asm-defns.h\"",,-no-integrated-as) +t2 = $(call as-insn,$(CC) -I$(BASEDIR)/arch/x86/include,".include \"asm/asm-defns.h\"",,-no-integrated-as) # Check whether clang keeps .macro-s between asm()-s: # https://bugs.llvm.org/show_bug.cgi?id=36110 @@ -382,7 +382,7 @@ _clean: delete-unfresh-files -o -name ".*.o.tmp" -o -name "*~" -o -name "core" \ -o -name "*.gcno" -o -name ".*.cmd" -o -name "lib.a" \) -exec rm -f {} \; rm -f include/asm $(TARGET) $(TARGET).gz $(TARGET).efi $(TARGET).efi.map $(TARGET)-syms $(TARGET)-syms.map *~ core - rm -f asm-offsets.s include/asm-*/asm-offsets.h + rm -f asm-offsets.s arch/*/include/asm/asm-offsets.h rm -f .banner .allconfig.tmp .PHONY: _distclean @@ -396,7 +396,6 @@ $(TARGET).gz: $(TARGET) $(TARGET): delete-unfresh-files $(MAKE) -C tools $(MAKE) -f $(BASEDIR)/Rules.mk include/xen/compile.h - [ -e include/asm ] || ln -sf asm-$(TARGET_ARCH) include/asm [ -e arch/$(TARGET_ARCH)/efi ] && for f in $$(cd common/efi; echo *.[ch]); \ do test -r arch/$(TARGET_ARCH)/efi/$$f || \ ln -nsf ../../../common/efi/$$f arch/$(TARGET_ARCH)/efi/; \ @@ -404,7 +403,7 @@ $(TARGET): delete-unfresh-files true $(MAKE) -f $(BASEDIR)/Rules.mk -C include $(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) include - $(MAKE) -f $(BASEDIR)/Rules.mk include/asm-$(TARGET_ARCH)/asm-offsets.h + $(MAKE) -f $(BASEDIR)/Rules.mk arch/$(TARGET_ARCH)/include/asm/asm-offsets.h $(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) $@ # drivers/char/console.o contains static banner/compile info. Blow it away. @@ -450,7 +449,7 @@ asm-offsets.s: arch/$(TARGET_ARCH)/$(TARGET_SUBARCH)/asm-offsets.c $(CC) $(call cpp_flags,$(c_flags)) -S -g0 -o $@.new -MQ $@ $< $(call move-if-changed,$@.new,$@) -include/asm-$(TARGET_ARCH)/asm-offsets.h: asm-offsets.s +arch/$(TARGET_ARCH)/include/asm/asm-offsets.h: asm-offsets.s @(set -e; \ echo "/*"; \ echo " * DO NOT MODIFY."; \ @@ -468,8 +467,8 @@ include/asm-$(TARGET_ARCH)/asm-offsets.h: asm-offsets.s SUBDIRS = xsm arch/$(TARGET_ARCH) common drivers lib test define all_sources - ( find include/asm-$(TARGET_ARCH) -name '*.h' -print; \ - find include -name 'asm-*' -prune -o -name '*.h' -print; \ + ( find arch/$(TARGET_ARCH)/include -name '*.h' -print; \ + find include -name '*.h' -print; \ find $(SUBDIRS) -name '*.[chS]' -print ) endef diff --git a/xen/arch/arm/README.LinuxPrimitives b/xen/arch/arm/README.LinuxPrimitives index 664a9f89ed..1d53e6a898 100644 --- a/xen/arch/arm/README.LinuxPrimitives +++ b/xen/arch/arm/README.LinuxPrimitives @@ -8,19 +8,19 @@ arm64: bitops: last sync @ v3.16-rc6 (last commit: 8715466b6027) -linux/arch/arm64/include/asm/bitops.h xen/include/asm-arm/arm64/bitops.h +linux/arch/arm64/include/asm/bitops.h xen/arch/arm/include/asm/arm64/bitops.h --------------------------------------------------------------------- cmpxchg: last sync @ v3.16-rc6 (last commit: e1dfda9ced9b) -linux/arch/arm64/include/asm/cmpxchg.h xen/include/asm-arm/arm64/cmpxchg.h +linux/arch/arm64/include/asm/cmpxchg.h xen/arch/arm/include/asm/arm64/cmpxchg.h --------------------------------------------------------------------- atomics: last sync @ v3.16-rc6 (last commit: 8715466b6027) -linux/arch/arm64/include/asm/atomic.h xen/include/asm-arm/arm64/atomic.h +linux/arch/arm64/include/asm/atomic.h xen/arch/arm/include/asm/arm64/atomic.h The following functions were taken from Linux: atomic_add(), atomic_add_return(), atomic_sub(), atomic_sub_return(), @@ -76,13 +76,13 @@ diff -u ../linux/arch/arm/lib/findbit.S 
xen/arch/arm/arm32/lib/findbit.S cmpxchg: last sync @ v3.16-rc6 (last commit: c32ffce0f66e) -linux/arch/arm/include/asm/cmpxchg.h xen/include/asm-arm/arm32/cmpxchg.h +linux/arch/arm/include/asm/cmpxchg.h xen/arch/arm/include/asm/arm32/cmpxchg.h --------------------------------------------------------------------- atomics: last sync @ v3.16-rc6 (last commit: 030d0178bdbd) -linux/arch/arm/include/asm/atomic.h xen/include/asm-arm/arm32/atomic.h +linux/arch/arm/include/asm/atomic.h xen/arch/arm/include/asm/arm32/atomic.h The following functions were taken from Linux: atomic_add(), atomic_add_return(), atomic_sub(), atomic_sub_return(), diff --git a/xen/arch/arm/arch.mk b/xen/arch/arm/arch.mk index 6a29820594..c3ac443b37 100644 --- a/xen/arch/arm/arch.mk +++ b/xen/arch/arm/arch.mk @@ -2,6 +2,7 @@ # arm-specific definitions CFLAGS += -I$(BASEDIR)/include +CFLAGS += -I$(BASEDIR)/arch/$(TARGET_ARCH)/include $(call cc-options-add,CFLAGS,CC,$(EMBEDDED_EXTRA_CFLAGS)) $(call cc-option-add,CFLAGS,CC,-Wnested-externs) diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S index 7178865f48..b5912d381b 100644 --- a/xen/arch/arm/arm32/head.S +++ b/xen/arch/arm/arm32/head.S @@ -599,7 +599,7 @@ remove_identity_mapping: strd r2, r3, [r0, r1] identity_mapping_removed: - /* See asm-arm/arm32/flushtlb.h for the explanation of the sequence. */ + /* See asm/arm32/flushtlb.h for the explanation of the sequence. */ dsb nshst mcr CP32(r0, TLBIALLH) dsb nsh diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S index aa1f88c764..51b00ab0be 100644 --- a/xen/arch/arm/arm64/head.S +++ b/xen/arch/arm/arm64/head.S @@ -737,7 +737,7 @@ remove_identity_mapping: str xzr, [x0, x1, lsl #3] identity_mapping_removed: - /* See asm-arm/arm64/flushtlb.h for the explanation of the sequence. */ + /* See asm/arm64/flushtlb.h for the explanation of the sequence. */ dsb nshst tlbi alle2 dsb nsh diff --git a/xen/arch/arm/include/asm/acpi.h b/xen/arch/arm/include/asm/acpi.h new file mode 100644 index 0000000000..e53973e054 --- /dev/null +++ b/xen/arch/arm/include/asm/acpi.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2015, Shannon Zhao <shannon.zhao@xxxxxxxxxx> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see <http://www.gnu.org/licenses/>. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef _ASM_ARM_ACPI_H +#define _ASM_ARM_ACPI_H + +#include <asm/setup.h> + +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long +#define ACPI_MAP_MEM_ATTR PAGE_HYPERVISOR + +/* Tables marked as reserved in efi table */ +typedef enum { + TBL_FADT, + TBL_MADT, + TBL_STAO, + TBL_XSDT, + TBL_RSDP, + TBL_EFIT, + TBL_MMAP, + TBL_MMAX, +} EFI_MEM_RES; + +bool acpi_psci_present(void); +bool acpi_psci_hvc_present(void); +void acpi_smp_init_cpus(void); + +/* + * This function returns the offset of a given ACPI/EFI table in the allocated + * memory region. Currently, the tables should be created in the same order as + * their associated 'index' in the enum EFI_MEM_RES. This means the function + * won't return the correct offset until all the tables before a given 'index' + * are created. + */ +paddr_t acpi_get_table_offset(struct membank tbl_add[], EFI_MEM_RES index); + +/* Macros for consistency checks of the GICC subtable of MADT */ +#define ACPI_MADT_GICC_LENGTH \ + (acpi_gbl_FADT.header.revision < 6 ? 76 : 80) + +#define BAD_MADT_GICC_ENTRY(entry, end) \ + (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ + (entry)->header.length != ACPI_MADT_GICC_LENGTH) + +#ifdef CONFIG_ACPI +extern bool acpi_disabled; +/* Basic configuration for ACPI */ +static inline void disable_acpi(void) +{ + acpi_disabled = true; +} + +static inline void enable_acpi(void) +{ + acpi_disabled = false; +} +#else +#define acpi_disabled (true) +#define disable_acpi() +#define enable_acpi() +#endif + +#endif /*_ASM_ARM_ACPI_H*/ diff --git a/xen/arch/arm/include/asm/alternative.h b/xen/arch/arm/include/asm/alternative.h new file mode 100644 index 0000000000..1eb4b60fbb --- /dev/null +++ b/xen/arch/arm/include/asm/alternative.h @@ -0,0 +1,221 @@ +#ifndef __ASM_ALTERNATIVE_H +#define __ASM_ALTERNATIVE_H + +#include <asm/cpufeature.h> +#include <asm/insn.h> + +#define ARM_CB_PATCH ARM_NCAPS + +#ifndef __ASSEMBLY__ + +#include <xen/types.h> +#include <xen/stringify.h> + +struct alt_instr { + s32 orig_offset; /* offset to original instruction */ + s32 alt_offset; /* offset to replacement instruction */ + u16 cpufeature; /* cpufeature bit set for replacement */ + u8 orig_len; /* size of original instruction(s) */ + u8 alt_len; /* size of new instruction(s), <= orig_len */ +}; + +/* Xen: helpers used by common code. */ +#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f) +#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) +#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) + +typedef void (*alternative_cb_t)(const struct alt_instr *alt, + const uint32_t *origptr, uint32_t *updptr, + int nr_inst); + +void apply_alternatives_all(void); +int apply_alternatives(const struct alt_instr *start, const struct alt_instr *end); + +#define ALTINSTR_ENTRY(feature, cb) \ + " .word 661b - .\n" /* label */ \ + " .if " __stringify(cb) " == 0\n" \ + " .word 663f - .\n" /* new instruction */ \ + " .else\n" \ + " .word " __stringify(cb) "- .\n" /* callback */ \ + " .endif\n" \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +/* + * alternative assembly primitive: + * + * If any of these .org directive fail, it means that insn1 and insn2 + * don't have the same length. 
This used to be written as + * + * .if ((664b-663b) != (662b-661b)) + * .error "Alternatives instruction length mismatch" + * .endif + * + * but most assemblers die if insn1 or insn2 have a .inst. This should + * be fixed in a binutils release posterior to 2.25.51.0.2 (anything + * containing commit 4e4d08cf7399b606 or c1baaddf8861). + * + * Alternatives with callbacks do not generate replacement instructions. + */ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature,cb) \ + ".popsection\n" \ + " .if " __stringify(cb) " == 0\n" \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + "663:\n\t" \ + newinstr "\n" \ + "664:\n\t" \ + ".popsection\n\t" \ + ".org . - (664b-663b) + (662b-661b)\n\t" \ + ".org . - (662b-661b) + (664b-663b)\n" \ + ".else\n\t" \ + "663:\n\t" \ + "664:\n\t" \ + ".endif\n" \ + ".endif\n" + +#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) + +#define ALTERNATIVE_CB(oldinstr, cb) \ + __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM_CB_PATCH, 1, cb) +#else + +#include <asm/asm_defns.h> +#include <asm/macros.h> + +.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len + .word \orig_offset - . + .word \alt_offset - . + .hword \feature + .byte \orig_len + .byte \alt_len +.endm + +.macro alternative_insn insn1, insn2, cap, enable = 1 + .if \enable +661: \insn1 +662: .pushsection .altinstructions, "a" + altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f + .popsection + .pushsection .altinstr_replacement, "ax" +663: \insn2 +664: .popsection + .org . - (664b-663b) + (662b-661b) + .org . - (662b-661b) + (664b-663b) + .endif +.endm + +/* + * Alternative sequences + * + * The code for the case where the capability is not present will be + * assembled and linked as normal. There are no restrictions on this + * code. + * + * The code for the case where the capability is present will be + * assembled into a special section to be used for dynamic patching. + * Code for that case must: + * + * 1. Be exactly the same length (in bytes) as the default code + * sequence. + * + * 2. Not contain a branch target that is used outside of the + * alternative sequence it is defined in (branches into an + * alternative sequence are not fixed up). + */ + +/* + * Begin an alternative code sequence. + */ +.macro alternative_if_not cap + .set .Lasm_alt_mode, 0 + .pushsection .altinstructions, "a" + altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f + .popsection +661: +.endm + +.macro alternative_if cap + .set .Lasm_alt_mode, 1 + .pushsection .altinstructions, "a" + altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f + .popsection + .pushsection .altinstr_replacement, "ax" + .align 2 /* So GAS knows label 661 is suitably aligned */ +661: +.endm + +/* + * Provide the other half of the alternative code sequence. + */ +.macro alternative_else +662: + .if .Lasm_alt_mode==0 + .pushsection .altinstr_replacement, "ax" + .else + .popsection + .endif +663: +.endm + +.macro alternative_cb cb + .set .Lasm_alt_mode, 0 + .pushsection .altinstructions, "a" + altinstruction_entry 661f, \cb, ARM_CB_PATCH, 662f-661f, 0 + .popsection +661: +.endm + +/* + * Complete an alternative code sequence. + */ +.macro alternative_endif +664: + .if .Lasm_alt_mode==0 + .popsection + .endif + .org . 
- (664b-663b) + (662b-661b) + .org . - (662b-661b) + (664b-663b) +.endm + +/* + * Provides a trivial alternative or default sequence consisting solely + * of NOPs. The number of NOPs is chosen automatically to match the + * previous case. + */ +.macro alternative_else_nop_endif +alternative_else + nops (662b-661b) / ARCH_PATCH_INSN_SIZE +alternative_endif +.endm + +/* + * Callback-based alternative epilogue + */ +.macro alternative_cb_end +662: +.endm + +#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ + alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) + +#endif /* __ASSEMBLY__ */ + +/* + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature)); + * + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); + * N.B. If CONFIG_FOO is specified, but not selected, the whole block + * will be omitted, including oldinstr. + */ +#define ALTERNATIVE(oldinstr, newinstr, ...) \ + _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1) + +#endif /* __ASM_ALTERNATIVE_H */ diff --git a/xen/arch/arm/include/asm/altp2m.h b/xen/arch/arm/include/asm/altp2m.h new file mode 100644 index 0000000000..df50cb2f09 --- /dev/null +++ b/xen/arch/arm/include/asm/altp2m.h @@ -0,0 +1,39 @@ +/* + * Alternate p2m + * + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_ARM_ALTP2M_H +#define __ASM_ARM_ALTP2M_H + +#include <xen/sched.h> + +/* Alternate p2m on/off per domain */ +static inline bool altp2m_active(const struct domain *d) +{ + /* Not implemented on ARM. */ + return false; +} + +/* Alternate p2m VCPU */ +static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v) +{ + /* Not implemented on ARM, should not be reached. */ + BUG(); + return 0; +} + +#endif /* __ASM_ARM_ALTP2M_H */ diff --git a/xen/arch/arm/include/asm/arm32/atomic.h b/xen/arch/arm/include/asm/arm32/atomic.h new file mode 100644 index 0000000000..2832a72792 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/atomic.h @@ -0,0 +1,175 @@ +/* + * arch/arm/include/asm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ARCH_ARM_ARM32_ATOMIC__ +#define __ARCH_ARM_ARM32_ATOMIC__ + +/* + * ARMv6 UP and SMP safe atomic ops. We use load exclusive and + * store exclusive to ensure that these are atomic. We may loop + * to ensure that the update happens. 
+ */ +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + prefetchw(&v->counter); + __asm__ __volatile__("@ atomic_add\n" +"1: ldrex %0, [%3]\n" +" add %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + smp_mb(); + prefetchw(&v->counter); + + __asm__ __volatile__("@ atomic_add_return\n" +"1: ldrex %0, [%3]\n" +" add %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + prefetchw(&v->counter); + __asm__ __volatile__("@ atomic_sub\n" +"1: ldrex %0, [%3]\n" +" sub %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + smp_mb(); + prefetchw(&v->counter); + + __asm__ __volatile__("@ atomic_sub_return\n" +"1: ldrex %0, [%3]\n" +" sub %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline void atomic_and(int m, atomic_t *v) +{ + unsigned long tmp; + int result; + + prefetchw(&v->counter); + __asm__ __volatile__("@ atomic_and\n" +"1: ldrex %0, [%3]\n" +" and %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (m) + : "cc"); +} + +static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +{ + int oldval; + unsigned long res; + + smp_mb(); + prefetchw(&ptr->counter); + + do { + __asm__ __volatile__("@ atomic_cmpxchg\n" + "ldrex %1, [%3]\n" + "mov %0, #0\n" + "teq %1, %4\n" + "strexeq %0, %5, [%3]\n" + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) + : "r" (&ptr->counter), "Ir" (old), "r" (new) + : "cc"); + } while (res); + + smp_mb(); + + return oldval; +} + +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int oldval, newval; + unsigned long tmp; + + smp_mb(); + prefetchw(&v->counter); + + __asm__ __volatile__ ("@ atomic_add_unless\n" +"1: ldrex %0, [%4]\n" +" teq %0, %5\n" +" beq 2f\n" +" add %1, %0, %6\n" +" strex %2, %1, [%4]\n" +" teq %2, #0\n" +" bne 1b\n" +"2:" + : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (u), "r" (a) + : "cc"); + + if (oldval != u) + smp_mb(); + + return oldval; +} + +#endif /* __ARCH_ARM_ARM32_ATOMIC__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/bitops.h b/xen/arch/arm/include/asm/arm32/bitops.h new file mode 100644 index 0000000000..57938a5874 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/bitops.h @@ -0,0 +1,42 @@ +#ifndef _ARM_ARM32_BITOPS_H +#define _ARM_ARM32_BITOPS_H + +#define flsl fls + +/* + * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. 
+ */ +extern int _find_first_zero_bit_le(const void * p, unsigned size); +extern int _find_next_zero_bit_le(const void * p, int size, int offset); +extern int _find_first_bit_le(const unsigned long *p, unsigned size); +extern int _find_next_bit_le(const unsigned long *p, int size, int offset); + +/* + * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. + */ +extern int _find_first_zero_bit_be(const void * p, unsigned size); +extern int _find_next_zero_bit_be(const void * p, int size, int offset); +extern int _find_first_bit_be(const unsigned long *p, unsigned size); +extern int _find_next_bit_be(const unsigned long *p, int size, int offset); + +#ifndef __ARMEB__ +/* + * These are the little endian, atomic definitions. + */ +#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) +#define find_first_bit(p,sz) _find_first_bit_le(p,sz) +#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) + +#else +/* + * These are the big endian, atomic definitions. + */ +#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) +#define find_first_bit(p,sz) _find_first_bit_be(p,sz) +#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) + +#endif + +#endif /* _ARM_ARM32_BITOPS_H */ diff --git a/xen/arch/arm/include/asm/arm32/bug.h b/xen/arch/arm/include/asm/arm32/bug.h new file mode 100644 index 0000000000..25cce151dc --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/bug.h @@ -0,0 +1,15 @@ +#ifndef __ARM_ARM32_BUG_H__ +#define __ARM_ARM32_BUG_H__ + +#include <xen/stringify.h> + +/* ARMv7 provides a list of undefined opcode (see A8.8.247 DDI 0406C.b) + * Use one them encoding A1 to go in exception mode + */ +#define BUG_OPCODE 0xe7f000f0 + +#define BUG_INSTR ".word " __stringify(BUG_OPCODE) + +#define BUG_FN_REG r0 + +#endif /* __ARM_ARM32_BUG_H__ */ diff --git a/xen/arch/arm/include/asm/arm32/cmpxchg.h b/xen/arch/arm/include/asm/arm32/cmpxchg.h new file mode 100644 index 0000000000..b0bd1d8b68 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/cmpxchg.h @@ -0,0 +1,229 @@ +#ifndef __ASM_ARM32_CMPXCHG_H +#define __ASM_ARM32_CMPXCHG_H + +#include <xen/prefetch.h> + +extern void __bad_xchg(volatile void *, int); + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long ret; + unsigned int tmp; + + smp_mb(); + prefetchw((const void *)ptr); + + switch (size) { + case 1: + asm volatile("@ __xchg1\n" + "1: ldrexb %0, [%3]\n" + " strexb %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + "1: ldrex %0, [%3]\n" + " strex %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + smp_mb(); + + return ret; +} + +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. 
+ */ + +extern unsigned long __bad_cmpxchg(volatile void *ptr, int size); + +#define __CMPXCHG_CASE(sz, name) \ +static inline bool __cmpxchg_case_##name(volatile void *ptr, \ + unsigned long *old, \ + unsigned long new, \ + bool timeout, \ + unsigned int max_try) \ +{ \ + unsigned long oldval; \ + unsigned long res; \ + \ + do { \ + asm volatile("@ __cmpxchg_case_" #name "\n" \ + " ldrex" #sz " %1, [%2]\n" \ + " mov %0, #0\n" \ + " teq %1, %3\n" \ + " strex" #sz "eq %0, %4, [%2]\n" \ + : "=&r" (res), "=&r" (oldval) \ + : "r" (ptr), "Ir" (*old), "r" (new) \ + : "memory", "cc"); \ + \ + if (!res) \ + break; \ + } while (!timeout || ((--max_try) > 0)); \ + \ + *old = oldval; \ + \ + return !res; \ +} + +__CMPXCHG_CASE(b, 1) +__CMPXCHG_CASE(h, 2) +__CMPXCHG_CASE( , 4) + +static inline bool __cmpxchg_case_8(volatile uint64_t *ptr, + uint64_t *old, + uint64_t new, + bool timeout, + unsigned int max_try) +{ + uint64_t oldval; + uint64_t res; + + do { + asm volatile( + " ldrexd %1, %H1, [%3]\n" + " teq %1, %4\n" + " teqeq %H1, %H4\n" + " movne %0, #0\n" + " movne %H0, #0\n" + " bne 2f\n" + " strexd %0, %5, %H5, [%3]\n" + "2:" + : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr) + : "r" (ptr), "r" (*old), "r" (new) + : "memory", "cc"); + if (!res) + break; + } while (!timeout || ((--max_try) > 0)); + + *old = oldval; + + return !res; +} + +static always_inline bool __int_cmpxchg(volatile void *ptr, unsigned long *old, + unsigned long new, int size, + bool timeout, unsigned int max_try) +{ + prefetchw((const void *)ptr); + + switch (size) { + case 1: + return __cmpxchg_case_1(ptr, old, new, timeout, max_try); + case 2: + return __cmpxchg_case_2(ptr, old, new, timeout, max_try); + case 4: + return __cmpxchg_case_4(ptr, old, new, timeout, max_try); + default: + return __bad_cmpxchg(ptr, size); + } + + ASSERT_UNREACHABLE(); +} + +static always_inline unsigned long __cmpxchg(volatile void *ptr, + unsigned long old, + unsigned long new, + int size) +{ + smp_mb(); + if (!__int_cmpxchg(ptr, &old, new, size, false, 0)) + ASSERT_UNREACHABLE(); + smp_mb(); + + return old; +} + +/* + * The helper may fail to update the memory if the action takes too long. + * + * @old: On call the value pointed contains the expected old value. It will be + * updated to the actual old value. + * @max_try: Maximum number of iterations + * + * The helper will return true when the update has succeeded (i.e no + * timeout) and false if the update has failed. + */ +static always_inline bool __cmpxchg_timeout(volatile void *ptr, + unsigned long *old, + unsigned long new, + int size, + unsigned int max_try) +{ + bool ret; + + smp_mb(); + ret = __int_cmpxchg(ptr, old, new, size, true, max_try); + smp_mb(); + + return ret; +} + +/* + * The helper may fail to update the memory if the action takes too long. + * + * @old: On call the value pointed contains the expected old value. It will be + * updated to the actual old value. + * @max_try: Maximum number of iterations + * + * The helper will return true when the update has succeeded (i.e no + * timeout) and false if the update has failed. 
+ */ +static always_inline bool __cmpxchg64_timeout(volatile uint64_t *ptr, + uint64_t *old, + uint64_t new, + unsigned int max_try) +{ + bool ret; + + smp_mb(); + ret = __cmpxchg_case_8(ptr, old, new, true, max_try); + smp_mb(); + + return ret; +} + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +static inline uint64_t cmpxchg64(volatile uint64_t *ptr, + uint64_t old, + uint64_t new) +{ + smp_mb(); + if (!__cmpxchg_case_8(ptr, &old, new, false, 0)) + ASSERT_UNREACHABLE(); + smp_mb(); + + return old; +} + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/flushtlb.h b/xen/arch/arm/include/asm/arm32/flushtlb.h new file mode 100644 index 0000000000..9085e65011 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/flushtlb.h @@ -0,0 +1,63 @@ +#ifndef __ASM_ARM_ARM32_FLUSHTLB_H__ +#define __ASM_ARM_ARM32_FLUSHTLB_H__ + +/* + * Every invalidation operation use the following patterns: + * + * DSB ISHST // Ensure prior page-tables updates have completed + * TLBI... // Invalidate the TLB + * DSB ISH // Ensure the TLB invalidation has completed + * ISB // See explanation below + * + * For Xen page-tables the ISB will discard any instructions fetched + * from the old mappings. + * + * For the Stage-2 page-tables the ISB ensures the completion of the DSB + * (and therefore the TLB invalidation) before continuing. So we know + * the TLBs cannot contain an entry for a mapping we may have removed. + */ +#define TLB_HELPER(name, tlbop) \ +static inline void name(void) \ +{ \ + dsb(ishst); \ + WRITE_CP32(0, tlbop); \ + dsb(ish); \ + isb(); \ +} + +/* Flush local TLBs, current VMID only */ +TLB_HELPER(flush_guest_tlb_local, TLBIALL); + +/* Flush inner shareable TLBs, current VMID only */ +TLB_HELPER(flush_guest_tlb, TLBIALLIS); + +/* Flush local TLBs, all VMIDs, non-hypervisor mode */ +TLB_HELPER(flush_all_guests_tlb_local, TLBIALLNSNH); + +/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */ +TLB_HELPER(flush_all_guests_tlb, TLBIALLNSNHIS); + +/* Flush all hypervisor mappings from the TLB of the local processor. */ +TLB_HELPER(flush_xen_tlb_local, TLBIALLH); + +/* Flush TLB of local processor for address va. */ +static inline void __flush_xen_tlb_one_local(vaddr_t va) +{ + asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory"); +} + +/* Flush TLB of all processors in the inner-shareable domain for address va. */ +static inline void __flush_xen_tlb_one(vaddr_t va) +{ + asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory"); +} + +#endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/insn.h b/xen/arch/arm/include/asm/arm32/insn.h new file mode 100644 index 0000000000..c800cbfff5 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/insn.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2017 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ARCH_ARM_ARM32_INSN +#define __ARCH_ARM_ARM32_INSN + +#include <xen/types.h> + +int32_t aarch32_get_branch_offset(uint32_t insn); +uint32_t aarch32_set_branch_offset(uint32_t insn, int32_t offset); + +/* Wrapper for common code */ +static inline bool insn_is_branch_imm(uint32_t insn) +{ + /* + * Xen is using ARM execution state only on ARM32 platform. So, the + * Thumb branch instructions (CBZ, CBNZ, TBB and TBH) will not be used + * in Xen. The left ARM32 branch instructions are BX, BLX, BL and B. + * BX is using register as parameter, we don't need to rewrite it. So, + * we only need to check BLX, BL and B encodings in this function. + * + * From ARM DDI 0406C.c Section A8.8.18 and A8.8.25, we can see these + * three branch instructions' encodings: + * - b cccc1010xxxxxxxxxxxxxxxxxxxxxxxx + * - bl cccc1011xxxxxxxxxxxxxxxxxxxxxxxx + * - blx 1111101Hxxxxxxxxxxxxxxxxxxxxxxxx + * + * The H bit of blx can be 0 or 1, it depends on the Instruction Sets of + * target instruction. Regardless, if we mask the conditional bits and + * bit 24 (H bit of blx), we can see all above branch instructions have + * the same value 0x0A000000. + * + * And from ARM DDI 0406C.c Section A5.7 Table A5-23, we can see that the + * blx is the only one unconditional instruction has the same value as + * conditional branch instructions. So, mask the conditional bits will not + * make other unconditional instruction to hit this check. + */ + return ( (insn & 0x0E000000) == 0x0A000000 ); +} + +static inline int32_t insn_get_branch_offset(uint32_t insn) +{ + return aarch32_get_branch_offset(insn); +} + +static inline uint32_t insn_set_branch_offset(uint32_t insn, int32_t offset) +{ + return aarch32_set_branch_offset(insn, offset); +} + +#endif /* !__ARCH_ARM_ARM32_INSN */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/io.h b/xen/arch/arm/include/asm/arm32/io.h new file mode 100644 index 0000000000..73a879e9fb --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/io.h @@ -0,0 +1,96 @@ +/* + * Based on linux arch/arm/include/asm/io.h + * + * Copyright (C) 1996-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Modifications: + * 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both + * constant addresses and variable addresses. + * 04-Dec-1997 RMK Moved a lot of this stuff to the new architecture + * specific IO header files. + * 27-Mar-1999 PJB Second parameter of memcpy_toio is const.. + * 04-Apr-1999 PJB Added check_signature. 
+ * 12-Dec-1999 RMK More cleanups + * 18-Jun-2000 RMK Removed virt_to_* and friends definitions + * 05-Oct-2004 BJD Moved memory string functions to use void __iomem + */ +#ifndef _ARM_ARM32_IO_H +#define _ARM_ARM32_IO_H + +#include <asm/system.h> +#include <asm/byteorder.h> + +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("strb %1, %0" + : "+Qo" (*(volatile u8 __force *)addr) + : "r" (val)); +} + +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("strh %1, %0" + : "+Q" (*(volatile u16 __force *)addr) + : "r" (val)); +} + +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("str %1, %0" + : "+Qo" (*(volatile u32 __force *)addr) + : "r" (val)); +} + +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + asm volatile("ldrb %1, %0" + : "+Qo" (*(volatile u8 __force *)addr), + "=r" (val)); + return val; +} + +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + asm volatile("ldrh %1, %0" + : "+Q" (*(volatile u16 __force *)addr), + "=r" (val)); + return val; +} + +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + asm volatile("ldr %1, %0" + : "+Qo" (*(volatile u32 __force *)addr), + "=r" (val)); + return val; +} + +#define __iormb() rmb() +#define __iowmb() wmb() + +#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; }) +#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \ + __raw_readw(c)); __r; }) +#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ + __raw_readl(c)); __r; }) + +#define writeb_relaxed(v,c) __raw_writeb(v,c) +#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c) +#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) + +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) + +#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) +#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) +#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) + +#endif /* _ARM_ARM32_IO_H */ diff --git a/xen/arch/arm/include/asm/arm32/macros.h b/xen/arch/arm/include/asm/arm32/macros.h new file mode 100644 index 0000000000..a4e20aa520 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/macros.h @@ -0,0 +1,8 @@ +#ifndef __ASM_ARM_ARM32_MACROS_H +#define __ASM_ARM_ARM32_MACROS_H + + .macro ret + mov pc, lr + .endm + +#endif /* __ASM_ARM_ARM32_MACROS_H */ diff --git a/xen/arch/arm/include/asm/arm32/mm.h b/xen/arch/arm/include/asm/arm32/mm.h new file mode 100644 index 0000000000..68612499bf --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/mm.h @@ -0,0 +1,23 @@ +#ifndef __ARM_ARM32_MM_H__ +#define __ARM_ARM32_MM_H__ + +/* + * Only a limited amount of RAM, called xenheap, is always mapped on ARM32. + * For convenience always return false. 
+ */ +static inline bool arch_mfn_in_directmap(unsigned long mfn) +{ + return false; +} + +#endif /* __ARM_ARM32_MM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/page.h b/xen/arch/arm/include/asm/arm32/page.h new file mode 100644 index 0000000000..715a9e4fef --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/page.h @@ -0,0 +1,118 @@ +#ifndef __ARM_ARM32_PAGE_H__ +#define __ARM_ARM32_PAGE_H__ + +#ifndef __ASSEMBLY__ + +/* Write a pagetable entry. + * + * If the table entry is changing a text mapping, it is responsibility + * of the caller to issue an ISB after write_pte. + */ +static inline void write_pte(lpae_t *p, lpae_t pte) +{ + asm volatile ( + /* Ensure any writes have completed with the old mappings. */ + "dsb;" + /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */ + "strd %0, %H0, [%1];" + "dsb;" + : : "r" (pte.bits), "r" (p) : "memory"); +} + +/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */ +#define __invalidate_dcache_one(R) STORE_CP32(R, DCIMVAC) + +/* Inline ASM to flush dcache on register R (may be an inline asm operand) */ +#define __clean_dcache_one(R) STORE_CP32(R, DCCMVAC) + +/* Inline ASM to clean and invalidate dcache on register R (may be an + * inline asm operand) */ +#define __clean_and_invalidate_dcache_one(R) STORE_CP32(R, DCCIMVAC) + +/* + * Invalidate all instruction caches in Inner Shareable domain to PoU. + * We also need to flush the branch predictor for ARMv7 as it may be + * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b). + */ +static inline void invalidate_icache(void) +{ + asm volatile ( + CMD_CP32(ICIALLUIS) /* Flush I-cache. */ + CMD_CP32(BPIALLIS) /* Flush branch predictor. */ + : : : "memory"); + + dsb(ish); /* Ensure completion of the flush I-cache */ + isb(); /* Synchronize fetched instruction stream. */ +} + +/* + * Invalidate all instruction caches on the local processor to PoU. + * We also need to flush the branch predictor for ARMv7 as it may be + * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b). + */ +static inline void invalidate_icache_local(void) +{ + asm volatile ( + CMD_CP32(ICIALLU) /* Flush I-cache. */ + CMD_CP32(BPIALL) /* Flush branch predictor. */ + : : : "memory"); + + dsb(nsh); /* Ensure completion of the flush I-cache */ + isb(); /* Synchronize fetched instruction stream. */ +} + +/* Ask the MMU to translate a VA for us */ +static inline uint64_t __va_to_par(vaddr_t va) +{ + uint64_t par, tmp; + tmp = READ_CP64(PAR); + WRITE_CP32(va, ATS1HR); + isb(); /* Ensure result is available. */ + par = READ_CP64(PAR); + WRITE_CP64(tmp, PAR); + return par; +} + +/* Ask the MMU to translate a Guest VA for us */ +static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags) +{ + uint64_t par, tmp; + tmp = READ_CP64(PAR); + if ( (flags & GV2M_WRITE) == GV2M_WRITE ) + WRITE_CP32(va, ATS12NSOPW); + else + WRITE_CP32(va, ATS12NSOPR); + isb(); /* Ensure result is available. */ + par = READ_CP64(PAR); + WRITE_CP64(tmp, PAR); + return par; +} +static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags) +{ + uint64_t par, tmp; + tmp = READ_CP64(PAR); + if ( (flags & GV2M_WRITE) == GV2M_WRITE ) + WRITE_CP32(va, ATS1CPW); + else + WRITE_CP32(va, ATS1CPR); + isb(); /* Ensure result is available. 
*/ + par = READ_CP64(PAR); + WRITE_CP64(tmp, PAR); + return par; +} + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ARM_ARM32_PAGE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/processor.h b/xen/arch/arm/include/asm/arm32/processor.h new file mode 100644 index 0000000000..4e679f3273 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/processor.h @@ -0,0 +1,69 @@ +#ifndef __ASM_ARM_ARM32_PROCESSOR_H +#define __ASM_ARM_ARM32_PROCESSOR_H + +#define ACTLR_CAXX_SMP (1<<6) + +#ifndef __ASSEMBLY__ +/* On stack VCPU state */ +struct cpu_user_regs +{ + uint32_t r0; + uint32_t r1; + uint32_t r2; + uint32_t r3; + uint32_t r4; + uint32_t r5; + uint32_t r6; + uint32_t r7; + uint32_t r8; + uint32_t r9; + uint32_t r10; + union { + uint32_t r11; + uint32_t fp; + }; + uint32_t r12; + + uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */ + + /* r14 - LR: is the same physical register as LR_usr */ + union { + uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */ + + uint32_t lr_usr; + }; + + union { /* Return IP, pc32 is used to allow code to be common with 64-bit */ + uint32_t pc, pc32; + }; + uint32_t cpsr; /* Return mode */ + uint32_t hsr; /* Exception Syndrome */ + + /* Outer guest frame only from here on... */ + + uint32_t sp_usr; /* LR_usr is the same register as LR, see above */ + + uint32_t sp_irq, lr_irq; + uint32_t sp_svc, lr_svc; + uint32_t sp_abt, lr_abt; + uint32_t sp_und, lr_und; + + uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq; + uint32_t sp_fiq, lr_fiq; + + uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq; + + uint32_t pad1; /* Doubleword-align the user half of the frame */ +}; + +#endif + +#endif /* __ASM_ARM_ARM32_PROCESSOR_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/sysregs.h b/xen/arch/arm/include/asm/arm32/sysregs.h new file mode 100644 index 0000000000..6841d5de43 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/sysregs.h @@ -0,0 +1,78 @@ +#ifndef __ASM_ARM_ARM32_SYSREGS_H +#define __ASM_ARM_ARM32_SYSREGS_H + +#include <xen/stringify.h> +#include <asm/cpregs.h> + +/* Layout as used in assembly, with src/dest registers mixed in */ +#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2 +#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm +#define CP32(r, name...) __CP32(r, name) +#define CP64(r, name...) __CP64(r, name) + +/* Stringified for inline assembly */ +#define LOAD_CP32(r, name...) "mrc " __stringify(CP32(%r, name)) ";" +#define STORE_CP32(r, name...) "mcr " __stringify(CP32(%r, name)) ";" +#define LOAD_CP64(r, name...) "mrrc " __stringify(CP64(%r, %H##r, name)) ";" +#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";" + +/* Issue a CP operation which takes no argument, + * uses r0 as a placeholder register. */ +#define CMD_CP32(name...) "mcr " __stringify(CP32(r0, name)) ";" + +#ifndef __ASSEMBLY__ + +/* C wrappers */ +#define READ_CP32(name...) ({ \ + register uint32_t _r; \ + asm volatile(LOAD_CP32(0, name) : "=r" (_r)); \ + _r; }) + +#define WRITE_CP32(v, name...) do { \ + register uint32_t _r = (v); \ + asm volatile(STORE_CP32(0, name) : : "r" (_r)); \ +} while (0) + +#define READ_CP64(name...) 
({ \ + register uint64_t _r; \ + asm volatile(LOAD_CP64(0, name) : "=r" (_r)); \ + _r; }) + +#define WRITE_CP64(v, name...) do { \ + register uint64_t _r = (v); \ + asm volatile(STORE_CP64(0, name) : : "r" (_r)); \ +} while (0) + +/* + * C wrappers for accessing system registers. + * + * Registers come in 3 types: + * - those which are always 32-bit regardless of AArch32 vs AArch64 + * (use {READ,WRITE}_SYSREG32). + * - those which are always 64-bit regardless of AArch32 vs AArch64 + * (use {READ,WRITE}_SYSREG64). + * - those which vary between AArch32 and AArch64 (use {READ,WRITE}_SYSREG). + */ +#define READ_SYSREG32(R...) READ_CP32(R) +#define WRITE_SYSREG32(V, R...) WRITE_CP32(V, R) + +#define READ_SYSREG64(R...) READ_CP64(R) +#define WRITE_SYSREG64(V, R...) WRITE_CP64(V, R) + +#define READ_SYSREG(R...) READ_SYSREG32(R) +#define WRITE_SYSREG(V, R...) WRITE_SYSREG32(V, R) + +/* MVFR2 is not defined on ARMv7 */ +#define MVFR2_MAYBE_UNDEFINED + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ARM_ARM32_SYSREGS_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/system.h b/xen/arch/arm/include/asm/arm32/system.h new file mode 100644 index 0000000000..ab57abfbc5 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/system.h @@ -0,0 +1,77 @@ +/* Portions taken from Linux arch arm */ +#ifndef __ASM_ARM32_SYSTEM_H +#define __ASM_ARM32_SYSTEM_H + +#include <asm/arm32/cmpxchg.h> + +#define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" ) +#define local_irq_enable() asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" ) + +#define local_save_flags(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile ( "mrs %0, cpsr @ local_save_flags\n" \ + : "=r" (x) :: "memory", "cc" ); \ +}) +#define local_irq_save(x) \ +({ \ + local_save_flags(x); \ + local_irq_disable(); \ +}) +#define local_irq_restore(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile ( \ + "msr cpsr_c, %0 @ local_irq_restore\n" \ + : \ + : "r" (x) \ + : "memory", "cc"); \ +}) + +static inline int local_irq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_IRQ_MASK); +} + +#define local_fiq_enable() __asm__("cpsie f @ __stf\n" : : : "memory", "cc") +#define local_fiq_disable() __asm__("cpsid f @ __clf\n" : : : "memory", "cc") + +#define local_abort_enable() __asm__("cpsie a @ __sta\n" : : : "memory", "cc") +#define local_abort_disable() __asm__("cpsid a @ __sta\n" : : : "memory", "cc") + +static inline int local_fiq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_FIQ_MASK); +} + +#define CSDB ".inst 0xe320f014" + +static inline unsigned long array_index_mask_nospec(unsigned long idx, + unsigned long sz) +{ + unsigned long mask; + + asm volatile( "cmp %1, %2\n" + "sbc %0, %1, %1\n" + CSDB + : "=r" (mask) + : "r" (idx), "Ir" (sz) + : "cc" ); + + return mask; +} +#define array_index_mask_nospec array_index_mask_nospec + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/traps.h b/xen/arch/arm/include/asm/arm32/traps.h new file mode 100644 index 0000000000..e3c4a8b473 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/traps.h @@ -0,0 +1,13 @@ +#ifndef __ASM_ARM32_TRAPS__ +#define __ASM_ARM32_TRAPS__ + +#endif /* __ASM_ARM32_TRAPS__ */ +/* + * Local 
variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ + diff --git a/xen/arch/arm/include/asm/arm32/vfp.h b/xen/arch/arm/include/asm/arm32/vfp.h new file mode 100644 index 0000000000..bade3bc66e --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/vfp.h @@ -0,0 +1,41 @@ +#ifndef _ARM_ARM32_VFP_H +#define _ARM_ARM32_VFP_H + +#define FPEXC_EX (1u << 31) +#define FPEXC_EN (1u << 30) +#define FPEXC_FP2V (1u << 28) + +#define MVFR0_A_SIMD_MASK (0xf << 0) + + +#define FPSID_IMPLEMENTER_BIT (24) +#define FPSID_IMPLEMENTER_MASK (0xff << FPSID_IMPLEMENTER_BIT) +#define FPSID_ARCH_BIT (16) +#define FPSID_ARCH_MASK (0xf << FPSID_ARCH_BIT) +#define FPSID_PART_BIT (8) +#define FPSID_PART_MASK (0xff << FPSID_PART_BIT) +#define FPSID_VARIANT_BIT (4) +#define FPSID_VARIANT_MASK (0xf << FPSID_VARIANT_BIT) +#define FPSID_REV_BIT (0) +#define FPSID_REV_MASK (0xf << FPSID_REV_BIT) + +struct vfp_state +{ + uint64_t fpregs1[16]; /* {d0-d15} */ + uint64_t fpregs2[16]; /* {d16-d31} */ + uint32_t fpexc; + uint32_t fpscr; + /* VFP implementation specific state */ + uint32_t fpinst; + uint32_t fpinst2; +}; + +#endif /* _ARM_ARM32_VFP_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/atomic.h b/xen/arch/arm/include/asm/arm64/atomic.h new file mode 100644 index 0000000000..2d42567866 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/atomic.h @@ -0,0 +1,148 @@ +/* + * Based on arch/arm64/include/asm/atomic.h + * which in turn is + * Based on arch/arm/include/asm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ARCH_ARM_ARM64_ATOMIC +#define __ARCH_ARM_ARM64_ATOMIC + +/* + * AArch64 UP and SMP safe atomic ops. We use load exclusive and + * store exclusive to ensure that these are atomic. We may loop + * to ensure that the update happens. 
+ */ +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_add\n" +"1: ldxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i)); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_add_return\n" +"1: ldxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "memory"); + + smp_mb(); + return result; +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_sub\n" +"1: ldxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i)); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_sub_return\n" +"1: ldxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "memory"); + + smp_mb(); + return result; +} + +static inline void atomic_and(int m, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_and\n" +"1: ldxr %w0, %2\n" +" and %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (m)); +} + +static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +{ + unsigned long tmp; + int oldval; + + smp_mb(); + + asm volatile("// atomic_cmpxchg\n" +"1: ldxr %w1, %2\n" +" cmp %w1, %w3\n" +" b.ne 2f\n" +" stxr %w0, %w4, %2\n" +" cbnz %w0, 1b\n" +"2:" + : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) + : "Ir" (old), "r" (new) + : "cc"); + + smp_mb(); + return oldval; +} + +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) + c = old; + return c; +} + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/bitops.h b/xen/arch/arm/include/asm/arm64/bitops.h new file mode 100644 index 0000000000..d85a49bca4 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/bitops.h @@ -0,0 +1,98 @@ +#ifndef _ARM_ARM64_BITOPS_H +#define _ARM_ARM64_BITOPS_H + +/* Based on linux/include/asm-generic/bitops/builtin-__ffs.h */ +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static /*__*/always_inline unsigned long __ffs(unsigned long word) +{ + return __builtin_ctzl(word); +} + +/* Based on linux/include/asm-generic/bitops/ffz.h */ +/* + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. 
+ */ +#define ffz(x) __ffs(~(x)) + +static inline int flsl(unsigned long x) +{ + uint64_t ret; + + if (__builtin_constant_p(x)) + return generic_flsl(x); + + asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); + + return BITS_PER_LONG - ret; +} + +/* Based on linux/include/asm-generic/bitops/find.h */ + +#ifndef find_next_bit +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); +#endif + +#ifndef find_next_zero_bit +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); +#endif + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit. + */ +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first cleared bit. + */ +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) + +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + + +#endif /* _ARM_ARM64_BITOPS_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/brk.h b/xen/arch/arm/include/asm/arm64/brk.h new file mode 100644 index 0000000000..04442c4b9f --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/brk.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_ARM_ARM64_BRK +#define __ASM_ARM_ARM64_BRK + +/* + * #imm16 values used for BRK instruction generation + * 0x001: xen-mode BUG() and WARN() traps + * 0x002: for triggering a fault on purpose (reserved) + */ +#define BRK_BUG_FRAME_IMM 1 +#define BRK_FAULT_IMM 2 + +/* + * BRK instruction encoding + * The #imm16 value should be placed at bits[20:5] within BRK ins + */ +#define AARCH64_BREAK_MON 0xd4200000 + +/* + * BRK instruction for provoking a fault on purpose + */ +#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (BRK_FAULT_IMM << 5)) + +#endif /* !__ASM_ARM_ARM64_BRK */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/bug.h b/xen/arch/arm/include/asm/arm64/bug.h new file mode 100644 index 0000000000..5e11c0dfd5 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/bug.h @@ -0,0 +1,11 @@ +#ifndef __ARM_ARM64_BUG_H__ +#define __ARM_ARM64_BUG_H__ + +#include <xen/stringify.h> +#include <asm/arm64/brk.h> + +#define BUG_INSTR "brk " __stringify(BRK_BUG_FRAME_IMM) + +#define BUG_FN_REG x0 + +#endif /* __ARM_ARM64_BUG_H__ */ diff --git a/xen/arch/arm/include/asm/arm64/cmpxchg.h b/xen/arch/arm/include/asm/arm64/cmpxchg.h new file mode 100644 index 0000000000..10e4edc022 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/cmpxchg.h @@ -0,0 +1,183 @@ +#ifndef __ASM_ARM64_CMPXCHG_H +#define __ASM_ARM64_CMPXCHG_H + +extern void __bad_xchg(volatile void *, int); + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long ret, tmp; + + switch (size) { + case 1: + asm volatile("// __xchg1\n" + "1: ldxrb %w0, %2\n" + " stlxrb %w1, %w3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) + : "r" (x) + : "memory"); + break; + case 2: + asm volatile("// __xchg2\n" + "1: ldxrh %w0, %2\n" + " stlxrh %w1, %w3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) + : "r" (x) + : "memory"); + break; + case 4: + asm volatile("// __xchg4\n" + "1: ldxr %w0, %2\n" + " stlxr %w1, %w3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) + : "r" (x) + : "memory"); + break; + case 8: + asm volatile("// __xchg8\n" + "1: ldxr %0, %2\n" + " stlxr %w1, %3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) + : "r" (x) + : "memory"); + break; + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + + smp_mb(); + return ret; +} + +#define xchg(ptr,x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ + __ret; \ +}) + +extern unsigned long __bad_cmpxchg(volatile void *ptr, int size); + +#define __CMPXCHG_CASE(w, sz, name) \ +static inline bool __cmpxchg_case_##name(volatile void *ptr, \ + unsigned long *old, \ + unsigned long new, \ + bool timeout, \ + unsigned int max_try) \ +{ \ + unsigned long oldval; \ + unsigned long res; \ + \ + do { \ + asm volatile("// __cmpxchg_case_" #name "\n" \ + " ldxr" #sz " %" #w "1, %2\n" \ + " mov %w0, #0\n" \ + " cmp %" #w "1, %" #w "3\n" \ + " b.ne 1f\n" \ + " stxr" #sz " %w0, %" #w "4, %2\n" \ + "1:\n" \ + : "=&r" (res), "=&r" (oldval), \ + "+Q" (*(unsigned long *)ptr) \ + : "Ir" (*old), "r" (new) \ + : "cc"); \ + \ + if (!res) \ + break; \ + } while (!timeout || ((--max_try) > 0)); \ + \
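[Editorial note: the headers above are moved verbatim by the commit; the short sketches below are illustrations added for readers of this archive page and are not part of the patch.]

The arm32 and arm64 cmpxchg.h headers both export the same xchg()/cmpxchg() macro interface. A minimal, host-buildable sketch of the calling pattern follows; the GCC __atomic builtins stand in for the LDREX/STREX (arm32) or LDXR/STXR (arm64) loops in the real headers, and the tiny spinlock is an assumption invented for the example.

/*
 * Illustrative only -- not part of the patch.  The stand-in macros mimic the
 * interface of xchg()/cmpxchg() from asm/arm32/cmpxchg.h; inside Xen they
 * expand to the exclusive-load/store loops shown in the diff above.
 */
#include <stdint.h>
#include <stdio.h>

#define cmpxchg(ptr, o, n)                                              \
({                                                                      \
    __typeof__(*(ptr)) __old = (o);                                     \
    __atomic_compare_exchange_n((ptr), &__old, (n), 0,                  \
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);    \
    __old;                      /* value found in *ptr before the op */ \
})

#define xchg(ptr, x) __atomic_exchange_n((ptr), (x), __ATOMIC_SEQ_CST)

static uint32_t lock;           /* 0 = free, 1 = held (example only) */

static void spin_lock(void)
{
    /* Retry until the value we displaced was "free". */
    while ( cmpxchg(&lock, 0, 1) != 0 )
        ;
}

static void spin_unlock(void)
{
    xchg(&lock, 0);
}

int main(void)
{
    spin_lock();
    printf("lock word after acquire: %u\n", lock);
    spin_unlock();
    printf("lock word after release: %u\n", lock);
    return 0;
}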
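arm32/insn.h argues that a single mask test covers the B, BL and BLX immediate-branch encodings. That claim can be spot-checked with a few hand-assembled A32 words; the check function is copied from the header, the test values are encodings per ARM DDI 0406C chosen for this sketch.

/* Illustrative only -- not part of the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool insn_is_branch_imm(uint32_t insn)
{
    /* Same mask test as asm/arm32/insn.h. */
    return (insn & 0x0E000000) == 0x0A000000;
}

int main(void)
{
    printf("b   #0 : %d\n", insn_is_branch_imm(0xEA000000)); /* b  <imm24>, AL -> 1 */
    printf("bl  #0 : %d\n", insn_is_branch_imm(0xEB000000)); /* bl <imm24>, AL -> 1 */
    printf("blx #0 : %d\n", insn_is_branch_imm(0xFA000000)); /* blx <imm24>    -> 1 */
    printf("nop    : %d\n", insn_is_branch_imm(0xE320F000)); /* nop            -> 0 */
    return 0;
}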
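arm32/io.h separates the relaxed accessors from the barrier-inserting readb/w/l() and writeb/w/l() family. The sketch below shows the intended usage split; the portable volatile-access stand-ins, the fake UART register layout and the TX-ready bit are all assumptions so the example runs on any host, whereas real callers use the asm implementations above against mapped MMIO.

/* Illustrative only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

/* Portable stand-ins: a plain volatile access plus a compiler barrier. */
static inline uint32_t readl_relaxed(const volatile void *addr)
{
    return *(const volatile uint32_t *)addr;
}
static inline void writel_relaxed(uint32_t val, volatile void *addr)
{
    *(volatile uint32_t *)addr = val;
}
#define barrier()    __asm__ __volatile__("" ::: "memory")
#define readl(a)     ({ uint32_t __v = readl_relaxed(a); barrier(); __v; })
#define writel(v, a) ({ barrier(); writel_relaxed((v), (a)); })

/* Hypothetical UART register block (offsets and bits are assumptions). */
#define UART_DATA    0x00
#define UART_STATUS  0x04
#define UART_TX_RDY  (1u << 0)

static uint32_t fake_uart[2] = { 0, UART_TX_RDY };   /* memory-backed "device" */

static void uart_putc(volatile void *base, char c)
{
    /* Poll the status register with an ordered read, then write data. */
    while ( !(readl((volatile char *)base + UART_STATUS) & UART_TX_RDY) )
        ;
    writel((uint32_t)c, (volatile char *)base + UART_DATA);
}

int main(void)
{
    uart_putc(fake_uart, 'X');
    printf("data register now holds 0x%02x\n", (unsigned int)fake_uart[0]);
    return 0;
}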
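Both atomic.h flavours provide __atomic_add_unless(), whose typical use is "take a reference unless the count has already dropped to zero"; the arm64 header implements it as exactly the cmpxchg loop reproduced below. The host-buildable atomic_read()/atomic_cmpxchg() stand-ins are assumptions for this sketch.

/* Illustrative only -- not part of the patch. */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

static int atomic_read(const atomic_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static int atomic_cmpxchg(atomic_t *v, int old, int new)
{
    __atomic_compare_exchange_n(&v->counter, &old, new, 0,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return old;                 /* value observed before the attempt */
}

/* Same contract as the header: add @a to the counter unless it equals @u. */
static int __atomic_add_unless(atomic_t *v, int a, int u)
{
    int c = atomic_read(v), old;

    while ( c != u && (old = atomic_cmpxchg(v, c, c + a)) != c )
        c = old;
    return c;                   /* old value; caller compares it with @u */
}

int main(void)
{
    atomic_t refcnt = { .counter = 1 };

    if ( __atomic_add_unless(&refcnt, 1, 0) != 0 )
        printf("got a reference, count is now %d\n", atomic_read(&refcnt));
    return 0;
}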
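Finally, arm64/bitops.h defines ffz(x) as __ffs(~x); a two-line check of that identity, using the same compiler builtin the header relies on (editorial, not from the patch):

/* Illustrative only -- not part of the patch. */
#include <stdio.h>

static unsigned long __ffs(unsigned long word) { return __builtin_ctzl(word); }
#define ffz(x) __ffs(~(x))

int main(void)
{
    unsigned long mask = 0x0000f0ffUL;                 /* bits 0-7 and 12-15 set */
    printf("first set bit : %lu\n", __ffs(mask));      /* prints 0 */
    printf("first zero bit: %lu\n", ffz(mask));        /* prints 8 */
    return 0;
}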