[PATCH 5/9] xen/ppc: Define minimal stub headers required for full build

Additionally, change inclusion of asm/ headers to the corresponding xen/
ones throughout arch/ppc, now that they work.

Signed-off-by: Shawn Anastasio <sanastasio@xxxxxxxxxxxxxxxxxxxxx>
---
 xen/arch/ppc/Kconfig                     |   1 +
 xen/arch/ppc/include/asm/altp2m.h        |  39 ++++
 xen/arch/ppc/include/asm/bug.h           |   7 +
 xen/arch/ppc/include/asm/cache.h         |   2 +
 xen/arch/ppc/include/asm/config.h        |   9 +
 xen/arch/ppc/include/asm/cpufeature.h    |   9 +
 xen/arch/ppc/include/asm/current.h       |  42 ++++
 xen/arch/ppc/include/asm/delay.h         |  11 +
 xen/arch/ppc/include/asm/desc.h          |   0
 xen/arch/ppc/include/asm/device.h        |  53 +++++
 xen/arch/ppc/include/asm/div64.h         |  14 ++
 xen/arch/ppc/include/asm/domain.h        |  46 ++++
 xen/arch/ppc/include/asm/event.h         |  35 ++++
 xen/arch/ppc/include/asm/flushtlb.h      |  23 ++
 xen/arch/ppc/include/asm/grant_table.h   |   0
 xen/arch/ppc/include/asm/guest_access.h  |  54 +++++
 xen/arch/ppc/include/asm/guest_atomics.h |  14 ++
 xen/arch/ppc/include/asm/hardirq.h       |  18 ++
 xen/arch/ppc/include/asm/hypercall.h     |   0
 xen/arch/ppc/include/asm/io.h            |  15 ++
 xen/arch/ppc/include/asm/iocap.h         |   7 +
 xen/arch/ppc/include/asm/iommu.h         |   7 +
 xen/arch/ppc/include/asm/irq.h           |  32 +++
 xen/arch/ppc/include/asm/mem_access.h    |   0
 xen/arch/ppc/include/asm/mm.h            | 255 ++++++++++++++++++++++-
 xen/arch/ppc/include/asm/monitor.h       |  48 +++++
 xen/arch/ppc/include/asm/nospec.h        |  18 ++
 xen/arch/ppc/include/asm/numa.h          |  26 +++
 xen/arch/ppc/include/asm/p2m.h           | 105 ++++++++++
 xen/arch/ppc/include/asm/page.h          |  19 ++
 xen/arch/ppc/include/asm/paging.h        |   7 +
 xen/arch/ppc/include/asm/pci.h           |   7 +
 xen/arch/ppc/include/asm/percpu.h        |  26 +++
 xen/arch/ppc/include/asm/procarea.h      |  38 ++++
 xen/arch/ppc/include/asm/processor.h     |   6 +
 xen/arch/ppc/include/asm/random.h        |   9 +
 xen/arch/ppc/include/asm/regs.h          |   2 +
 xen/arch/ppc/include/asm/setup.h         |   6 +
 xen/arch/ppc/include/asm/smp.h           |  18 ++
 xen/arch/ppc/include/asm/softirq.h       |   8 +
 xen/arch/ppc/include/asm/spinlock.h      |  15 ++
 xen/arch/ppc/include/asm/system.h        | 243 ++++++++++++++++++++-
 xen/arch/ppc/include/asm/time.h          |  20 ++
 xen/arch/ppc/include/asm/vm_event.h      |  49 +++++
 xen/arch/ppc/include/asm/xenoprof.h      |   0
 xen/arch/ppc/mm-radix.c                  |   2 +-
 xen/arch/ppc/opal.c                      |   2 -
 xen/arch/ppc/tlb-radix.c                 |   2 +-
 xen/include/public/hvm/save.h            |   2 +
 xen/include/public/pmu.h                 |   2 +
 xen/include/public/xen.h                 |   2 +
 51 files changed, 1368 insertions(+), 7 deletions(-)
 create mode 100644 xen/arch/ppc/include/asm/altp2m.h
 create mode 100644 xen/arch/ppc/include/asm/cpufeature.h
 create mode 100644 xen/arch/ppc/include/asm/current.h
 create mode 100644 xen/arch/ppc/include/asm/delay.h
 create mode 100644 xen/arch/ppc/include/asm/desc.h
 create mode 100644 xen/arch/ppc/include/asm/device.h
 create mode 100644 xen/arch/ppc/include/asm/div64.h
 create mode 100644 xen/arch/ppc/include/asm/domain.h
 create mode 100644 xen/arch/ppc/include/asm/event.h
 create mode 100644 xen/arch/ppc/include/asm/flushtlb.h
 create mode 100644 xen/arch/ppc/include/asm/grant_table.h
 create mode 100644 xen/arch/ppc/include/asm/guest_access.h
 create mode 100644 xen/arch/ppc/include/asm/guest_atomics.h
 create mode 100644 xen/arch/ppc/include/asm/hardirq.h
 create mode 100644 xen/arch/ppc/include/asm/hypercall.h
 create mode 100644 xen/arch/ppc/include/asm/io.h
 create mode 100644 xen/arch/ppc/include/asm/iocap.h
 create mode 100644 xen/arch/ppc/include/asm/iommu.h
 create mode 100644 xen/arch/ppc/include/asm/irq.h
 create mode 100644 xen/arch/ppc/include/asm/mem_access.h
 create mode 100644 xen/arch/ppc/include/asm/monitor.h
 create mode 100644 xen/arch/ppc/include/asm/nospec.h
 create mode 100644 xen/arch/ppc/include/asm/numa.h
 create mode 100644 xen/arch/ppc/include/asm/p2m.h
 create mode 100644 xen/arch/ppc/include/asm/paging.h
 create mode 100644 xen/arch/ppc/include/asm/pci.h
 create mode 100644 xen/arch/ppc/include/asm/percpu.h
 create mode 100644 xen/arch/ppc/include/asm/procarea.h
 create mode 100644 xen/arch/ppc/include/asm/random.h
 create mode 100644 xen/arch/ppc/include/asm/setup.h
 create mode 100644 xen/arch/ppc/include/asm/smp.h
 create mode 100644 xen/arch/ppc/include/asm/softirq.h
 create mode 100644 xen/arch/ppc/include/asm/spinlock.h
 create mode 100644 xen/arch/ppc/include/asm/time.h
 create mode 100644 xen/arch/ppc/include/asm/vm_event.h
 create mode 100644 xen/arch/ppc/include/asm/xenoprof.h

diff --git a/xen/arch/ppc/Kconfig b/xen/arch/ppc/Kconfig
index ab116ffb2a..a6eae597af 100644
--- a/xen/arch/ppc/Kconfig
+++ b/xen/arch/ppc/Kconfig
@@ -1,6 +1,7 @@
 config PPC
 	def_bool y
 	select HAS_DEVICE_TREE
+	select HAS_PDX
 
 config PPC64
 	def_bool y
diff --git a/xen/arch/ppc/include/asm/altp2m.h b/xen/arch/ppc/include/asm/altp2m.h
new file mode 100644
index 0000000000..73a1e3a114
--- /dev/null
+++ b/xen/arch/ppc/include/asm/altp2m.h
@@ -0,0 +1,39 @@
+/*
+ * Alternate p2m
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_PPC_ALTP2M_H__
+#define __ASM_PPC_ALTP2M_H__
+
+#include <xen/sched.h>
+
+/* Alternate p2m on/off per domain */
+static inline bool altp2m_active(const struct domain *d)
+{
+    /* Not implemented on PPC. */
+    return false;
+}
+
+/* Alternate p2m VCPU */
+static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
+{
+    /* Not implemented on PPC, should not be reached. */
+    BUG();
+    return 0;
+}
+
+#endif /* __ASM_PPC_ALTP2M_H__ */
diff --git a/xen/arch/ppc/include/asm/bug.h b/xen/arch/ppc/include/asm/bug.h
index e5e874b31c..d02e8e2898 100644
--- a/xen/arch/ppc/include/asm/bug.h
+++ b/xen/arch/ppc/include/asm/bug.h
@@ -4,6 +4,7 @@
 #define _ASM_PPC_BUG_H
 
 #include <xen/stringify.h>
+#include <asm/processor.h>
 
 /*
  * Power ISA guarantees that an instruction consisting of all zeroes is
@@ -15,4 +16,10 @@
 
 #define BUG_FN_REG r0
 
+#define BUG() do { \
+    die();         \
+} while (0)
+
+#define BUG_FRAME(type, line, ptr, second_frame, msg) do { } while (0)
+
 #endif /* _ASM_PPC_BUG_H */
diff --git a/xen/arch/ppc/include/asm/cache.h b/xen/arch/ppc/include/asm/cache.h
index 8a0a6b7b17..0d7323d789 100644
--- a/xen/arch/ppc/include/asm/cache.h
+++ b/xen/arch/ppc/include/asm/cache.h
@@ -3,4 +3,6 @@
 #ifndef _ASM_PPC_CACHE_H
 #define _ASM_PPC_CACHE_H
 
+#define __read_mostly __section(".data.read_mostly")
+
 #endif /* _ASM_PPC_CACHE_H */
diff --git a/xen/arch/ppc/include/asm/config.h b/xen/arch/ppc/include/asm/config.h
index b76f940431..965b79aa64 100644
--- a/xen/arch/ppc/include/asm/config.h
+++ b/xen/arch/ppc/include/asm/config.h
@@ -41,6 +41,15 @@
 
 #define XEN_VIRT_START _AT(vaddr_t, 0xc000000000000000)
 
+#define VMAP_VIRT_START (XEN_VIRT_START + GB(1))
+#define VMAP_VIRT_SIZE  GB(1)
+
+#define FRAMETABLE_VIRT_START  (XEN_VIRT_START + GB(32))
+#define FRAMETABLE_SIZE        GB(32)
+#define FRAMETABLE_NR          (FRAMETABLE_SIZE / sizeof(*frame_table))
+
+#define HYPERVISOR_VIRT_START  XEN_VIRT_START
+
 #define SMP_CACHE_BYTES (1 << 6)
 
 #define STACK_ORDER 0
diff --git a/xen/arch/ppc/include/asm/cpufeature.h b/xen/arch/ppc/include/asm/cpufeature.h
new file mode 100644
index 0000000000..3552b9231d
--- /dev/null
+++ b/xen/arch/ppc/include/asm/cpufeature.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_PPC_CPUFEATURE_H__
+#define __ASM_PPC_CPUFEATURE_H__
+
+static inline int cpu_nr_siblings(unsigned int cpu)
+{
+    return 1;
+}
+
+#endif /* __ASM_PPC_CPUFEATURE_H__ */
diff --git a/xen/arch/ppc/include/asm/current.h b/xen/arch/ppc/include/asm/current.h
new file mode 100644
index 0000000000..87a854d6b0
--- /dev/null
+++ b/xen/arch/ppc/include/asm/current.h
@@ -0,0 +1,42 @@
+#ifndef __ASM_PPC_CURRENT_H__
+#define __ASM_PPC_CURRENT_H__
+
+#include <xen/percpu.h>
+
+#ifndef __ASSEMBLY__
+
+struct vcpu;
+
+/* Which VCPU is "current" on this PCPU. */
+DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
+
+#define current            (this_cpu(curr_vcpu))
+#define set_current(vcpu)  do { current = (vcpu); } while (0)
+#define get_cpu_current(cpu)  (per_cpu(curr_vcpu, cpu))
+
+/* Per-VCPU state that lives at the top of the stack */
+struct cpu_info {
+    struct cpu_user_regs guest_cpu_user_regs;
+    unsigned long elr;
+    uint32_t flags;
+};
+
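+/*
+ * Stacks are STACK_SIZE-aligned, so masking the low bits of the stack
+ * pointer (r1) yields the stack base; cpu_info then sits at the very
+ * top of that stack.
+ */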
+static inline struct cpu_info *get_cpu_info(void)
+{
+#ifdef __clang__
+    unsigned long sp;
+
+    asm ("mr %0, 1" : "=r" (sp));
+#else
+    register unsigned long sp asm ("r1");
+#endif
+
+    return (struct cpu_info *)((sp & ~(STACK_SIZE - 1)) +
+                               STACK_SIZE - sizeof(struct cpu_info));
+}
+
+#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_PPC_CURRENT_H__ */
diff --git a/xen/arch/ppc/include/asm/delay.h b/xen/arch/ppc/include/asm/delay.h
new file mode 100644
index 0000000000..36be1775f8
--- /dev/null
+++ b/xen/arch/ppc/include/asm/delay.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_PPC_DELAY_H__
+#define __ASM_PPC_DELAY_H__
+
+#include <xen/lib.h>
+
+static inline void udelay(unsigned long usecs)
+{
+    BUG();
+}
+
+#endif /* __ASM_PPC_DELAY_H__ */
diff --git a/xen/arch/ppc/include/asm/desc.h b/xen/arch/ppc/include/asm/desc.h
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/xen/arch/ppc/include/asm/device.h b/xen/arch/ppc/include/asm/device.h
new file mode 100644
index 0000000000..cb8454f605
--- /dev/null
+++ b/xen/arch/ppc/include/asm/device.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_PPC_DEVICE_H__
+#define __ASM_PPC_DEVICE_H__
+
+enum device_type
+{
+    DEV_DT,
+    DEV_PCI,
+};
+
+struct device {
+    enum device_type type;
+#ifdef CONFIG_HAS_DEVICE_TREE
+    struct dt_device_node *of_node; /* Used by drivers imported from Linux */
+#endif
+};
+
+enum device_class
+{
+    DEVICE_SERIAL,
+    DEVICE_IOMMU,
+    DEVICE_GIC,
+    DEVICE_PCI_HOSTBRIDGE,
+    /* Use for error */
+    DEVICE_UNKNOWN,
+};
+
+struct device_desc {
+    /* Device name */
+    const char *name;
+    /* Device class */
+    enum device_class class;
+    /* List of devices supported by this driver */
+    const struct dt_device_match *dt_match;
+    /*
+     * Device initialization.
+     *
+     * -EAGAIN is used to indicate that device probing is deferred.
+     */
+    int (*init)(struct dt_device_node *dev, const void *data);
+};
+
+typedef struct device device_t;
+
+#define DT_DEVICE_START(_name, _namestr, _class)            \
+static const struct device_desc __dev_desc_##_name __used   \
+__section(".dev.info") = {                                  \
+    .name = _namestr,                                       \
+    .class = _class,                                        \
+
+#define DT_DEVICE_END                                       \
+};
+
+#endif /* __ASM_PPC_DEVICE_H__ */
diff --git a/xen/arch/ppc/include/asm/div64.h b/xen/arch/ppc/include/asm/div64.h
new file mode 100644
index 0000000000..6959c3fb26
--- /dev/null
+++ b/xen/arch/ppc/include/asm/div64.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_PPC_DIV64_H__
+#define __ASM_PPC_DIV64_H__
+
+#include <xen/types.h>
+
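+/*
+ * Divide the 64-bit dividend n in place by a 32-bit divisor and evaluate
+ * to the 32-bit remainder.
+ */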
+#define do_div(n, base) ({                      \
+    uint32_t __base = (base);                   \
+    uint32_t __rem;                             \
+    __rem = ((uint64_t)(n)) % __base;           \
+    (n) = ((uint64_t)(n)) / __base;             \
+    __rem;                                      \
+})
+
+#endif /* __ASM_PPC_DIV64_H__ */
diff --git a/xen/arch/ppc/include/asm/domain.h b/xen/arch/ppc/include/asm/domain.h
new file mode 100644
index 0000000000..4ade3d484e
--- /dev/null
+++ b/xen/arch/ppc/include/asm/domain.h
@@ -0,0 +1,46 @@
+#ifndef __ASM_PPC_DOMAIN_H__
+#define __ASM_PPC_DOMAIN_H__
+
+#include <xen/xmalloc.h>
+#include <public/hvm/params.h>
+
+struct hvm_domain
+{
+    uint64_t params[HVM_NR_PARAMS];
+};
+
+#define is_domain_direct_mapped(d) ((void)(d), 0)
+
+/* TODO: Implement */
+#define guest_mode(r) ({ (void) (r); BUG(); 0; })
+
+struct arch_vcpu_io {
+};
+
+struct arch_vcpu {
+};
+
+struct arch_domain {
+    struct hvm_domain hvm;
+};
+
+#include <xen/sched.h>
+
+static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
+{
+    return xmalloc(struct vcpu_guest_context);
+}
+
+static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
+{
+    xfree(vgc);
+}
+
+struct guest_memory_policy {};
+static inline void update_guest_memory_policy(struct vcpu *v,
+                                              struct guest_memory_policy *gmp)
+{}
+
+static inline void arch_vcpu_block(struct vcpu *v) {}
+
+#endif /* __ASM_PPC_DOMAIN_H__ */
diff --git a/xen/arch/ppc/include/asm/event.h b/xen/arch/ppc/include/asm/event.h
new file mode 100644
index 0000000000..3141127f31
--- /dev/null
+++ b/xen/arch/ppc/include/asm/event.h
@@ -0,0 +1,35 @@
+#ifndef __ASM_PPC_EVENT_H__
+#define __ASM_PPC_EVENT_H__
+
+#include <xen/lib.h>
+
+/* TODO: implement */
+static inline void vcpu_kick(struct vcpu *v) { BUG(); }
+static inline void vcpu_mark_events_pending(struct vcpu *v) { BUG(); }
+static inline void vcpu_update_evtchn_irq(struct vcpu *v) { BUG(); }
+static inline void vcpu_block_unless_event_pending(struct vcpu *v) { BUG(); }
+
+static inline int vcpu_event_delivery_is_enabled(struct vcpu *v)
+{
+    BUG();
+    return 0;
+}
+
+/* No arch specific virq definition now. Default to global. */
+static inline bool arch_virq_is_global(unsigned int virq)
+{
+    return true;
+}
+
+static inline int local_events_need_delivery(void)
+{
+    BUG();
+    return 0;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+    BUG();
+}
+
+#endif /* __ASM_PPC_EVENT_H__ */
diff --git a/xen/arch/ppc/include/asm/flushtlb.h b/xen/arch/ppc/include/asm/flushtlb.h
new file mode 100644
index 0000000000..1af3bd2301
--- /dev/null
+++ b/xen/arch/ppc/include/asm/flushtlb.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_PPC_FLUSHTLB_H__
+#define __ASM_PPC_FLUSHTLB_H__
+
+#include <xen/cpumask.h>
+
+/*
+ * Filter the given set of CPUs, removing those that definitely flushed their
+ * TLB since @page_timestamp.
+ */
+/* XXX lazy implementation just doesn't clear anything.... */
+static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp) {}
+
+#define tlbflush_current_time() (0)
+
+static inline void page_set_tlbflush_timestamp(struct page_info *page)
+{
+    page->tlbflush_timestamp = tlbflush_current_time();
+}
+
+/* Flush specified CPUs' TLBs */
+void arch_flush_tlb_mask(const cpumask_t *mask);
+
+#endif /* __ASM_PPC_FLUSHTLB_H__ */
diff --git a/xen/arch/ppc/include/asm/grant_table.h b/xen/arch/ppc/include/asm/grant_table.h
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/xen/arch/ppc/include/asm/guest_access.h b/xen/arch/ppc/include/asm/guest_access.h
new file mode 100644
index 0000000000..1919e0566f
--- /dev/null
+++ b/xen/arch/ppc/include/asm/guest_access.h
@@ -0,0 +1,54 @@
+#ifndef __ASM_PPC_GUEST_ACCESS_H__
+#define __ASM_PPC_GUEST_ACCESS_H__
+
+#include <xen/mm.h>
+
+/* TODO */
+
+static inline unsigned long raw_copy_to_guest(void *to, const void *from, unsigned int len)
+{
+    BUG();
+}
+static inline unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from,
+                                                           unsigned int len)
+{
+    BUG();
+}
+static inline unsigned long raw_copy_from_guest(void *to, const void *from, unsigned int len)
+{
+    BUG();
+}
+static inline unsigned long raw_clear_guest(void *to, unsigned int len)
+{
+    BUG();
+}
+
+/* Copy data to guest physical address, then clean the region. */
+static inline unsigned long copy_to_guest_phys_flush_dcache(struct domain *d,
+                                                            paddr_t gpa,
+                                                            void *buf,
+                                                            unsigned int len)
+{
+    BUG();
+}
+
+static inline int access_guest_memory_by_gpa(struct domain *d, paddr_t gpa, void *buf,
+                                             uint32_t size, bool is_write)
+{
+    BUG();
+}
+
+
+#define __raw_copy_to_guest raw_copy_to_guest
+#define __raw_copy_from_guest raw_copy_from_guest
+#define __raw_clear_guest raw_clear_guest
+
+/*
+ * Pre-validate a guest handle.
+ * Allows use of faster __copy_* functions.
+ */
+/* All PPC guests are paging mode external and hence safe */
+#define guest_handle_okay(hnd, nr) (1)
+#define guest_handle_subrange_okay(hnd, first, last) (1)
+
+#endif /* __ASM_PPC_GUEST_ACCESS_H__ */
diff --git a/xen/arch/ppc/include/asm/guest_atomics.h b/xen/arch/ppc/include/asm/guest_atomics.h
new file mode 100644
index 0000000000..bf4f802a15
--- /dev/null
+++ b/xen/arch/ppc/include/asm/guest_atomics.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_PPC_GUEST_ATOMICS_H__
+#define __ASM_PPC_GUEST_ATOMICS_H__
+
+#include <xen/lib.h>
+
+/* TODO: implement */
+#define guest_test_bit(d, nr, p) ({ (void) (d); (void) (nr); (void) (p); BUG(); false; })
+#define guest_clear_bit(d, nr, p) ({ (void) (d); (void) (nr); (void) (p); BUG(); false; })
+#define guest_set_bit(d, nr, p) ({ (void) (d); (void) (nr); (void) (p); BUG(); false; })
+#define guest_test_and_set_bit(d, nr, p) ({ (void) (d); (void) (nr); (void) (p); BUG(); false; })
+#define guest_test_and_clear_bit(d, nr, p) ({ (void) (d); (void) (nr); (void) (p); BUG(); false; })
+#define guest_test_and_change_bit(d, nr, p) ({ (void) (d); (void) (nr); (void) (p); BUG(); false; })
+
+#endif /* __ASM_PPC_GUEST_ATOMICS_H__ */
diff --git a/xen/arch/ppc/include/asm/hardirq.h b/xen/arch/ppc/include/asm/hardirq.h
new file mode 100644
index 0000000000..51ef290961
--- /dev/null
+++ b/xen/arch/ppc/include/asm/hardirq.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_PPC_HARDIRQ_H__
+#define __ASM_PPC_HARDIRQ_H__
+
+#include <xen/cache.h>
+
+typedef struct {
+    unsigned long __softirq_pending;
+    unsigned int __local_irq_count;
+} __cacheline_aligned irq_cpustat_t;
+
+#include <xen/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+
+#define irq_enter() (local_irq_count(smp_processor_id())++)
+#define irq_exit() (local_irq_count(smp_processor_id())--)
+
+#endif /* __ASM_PPC_HARDIRQ_H__ */
diff --git a/xen/arch/ppc/include/asm/hypercall.h b/xen/arch/ppc/include/asm/hypercall.h
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/xen/arch/ppc/include/asm/io.h b/xen/arch/ppc/include/asm/io.h
new file mode 100644
index 0000000000..f8f3bd6ff7
--- /dev/null
+++ b/xen/arch/ppc/include/asm/io.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_PPC_IO_H__
+#define __ASM_PPC_IO_H__
+
+#include <xen/lib.h>
+
+/* TODO */
+#define readb(c) ({ (void)(c); BUG(); 0; })
+#define readw(c) ({ (void)(c); BUG(); 0; })
+#define readl(c) ({ (void)(c); BUG(); 0; })
+
+#define writeb(v,c) ({ (void)(v); (void)(c); BUG(); })
+#define writew(v,c) ({ (void)(v); (void)(c); BUG(); })
+#define writel(v,c) ({ (void)(v); (void)(c); BUG(); })
+
+#endif /* __ASM_PPC_IO_H__ */
diff --git a/xen/arch/ppc/include/asm/iocap.h b/xen/arch/ppc/include/asm/iocap.h
new file mode 100644
index 0000000000..16ae0cf1c8
--- /dev/null
+++ b/xen/arch/ppc/include/asm/iocap.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_PPC_IOCAP_H__
+#define __ASM_PPC_IOCAP_H__
+
+#define cache_flush_permitted(d)                \
+    (!rangeset_is_empty((d)->iomem_caps))
+
+#endif /* __ASM_PPC_IOCAP_H__ */
diff --git a/xen/arch/ppc/include/asm/iommu.h b/xen/arch/ppc/include/asm/iommu.h
new file mode 100644
index 0000000000..fb1a381518
--- /dev/null
+++ b/xen/arch/ppc/include/asm/iommu.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_PPC_IOMMU_H__
+#define __ASM_PPC_IOMMU_H__
+
+struct arch_iommu {
+};
+
+#endif /* __ASM_PPC_IOMMU_H__ */
diff --git a/xen/arch/ppc/include/asm/irq.h b/xen/arch/ppc/include/asm/irq.h
new file mode 100644
index 0000000000..99d30dd2bf
--- /dev/null
+++ b/xen/arch/ppc/include/asm/irq.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_PPC_IRQ_H__
+#define __ASM_PPC_IRQ_H__
+
+#include <xen/lib.h>
+#include <xen/device_tree.h>
+#include <public/device_tree_defs.h>
+
+/* TODO */
+#define nr_irqs 0U
+#define nr_static_irqs 0
+#define arch_hwdom_irqs(domid) 0U
+
+#define domain_pirq_to_irq(d, pirq) (pirq)
+
+struct arch_pirq {
+};
+
+struct arch_irq_desc {
+    unsigned int type;
+};
+
+static inline void arch_move_irqs(struct vcpu *v)
+{
+    BUG();
+}
+
+static inline int platform_get_irq(const struct dt_device_node *device, int index)
+{
+    BUG();
+}
+
+#endif /* __ASM_PPC_IRQ_H__ */
diff --git a/xen/arch/ppc/include/asm/mem_access.h b/xen/arch/ppc/include/asm/mem_access.h
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/xen/arch/ppc/include/asm/mm.h b/xen/arch/ppc/include/asm/mm.h
index 36e44a4356..86e5cbf609 100644
--- a/xen/arch/ppc/include/asm/mm.h
+++ b/xen/arch/ppc/include/asm/mm.h
@@ -1,19 +1,270 @@
 #ifndef _ASM_PPC_MM_H
 #define _ASM_PPC_MM_H
 
+#include <public/xen.h>
+#include <xen/pdx.h>
+#include <xen/types.h>
 #include <asm/config.h>
 #include <asm/page-bits.h>
 
+void setup_initial_pagetables(void);
+
+extern unsigned long total_pages;
+
 #define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
 #define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
+#define paddr_to_pdx(pa)    mfn_to_pdx(maddr_to_mfn(pa))
+#define gfn_to_gaddr(gfn)   pfn_to_paddr(gfn_x(gfn))
+#define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
+#define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
+#define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
+#define vmap_to_mfn(va)     maddr_to_mfn(virt_to_maddr((vaddr_t)va))
+#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
 #define virt_to_maddr(va) ((paddr_t)((vaddr_t)(va) & PADDR_MASK))
-#define maddr_to_virt(pa) ((void *)((paddr_t)(pa) | XEN_VIRT_START)
+#define maddr_to_virt(pa) ((void *)((paddr_t)(pa) | XEN_VIRT_START))
 
 /* Convert between Xen-heap virtual addresses and machine addresses. */
 #define __pa(x)             (virt_to_maddr(x))
 #define __va(x)             (maddr_to_virt(x))
 
-void setup_initial_pagetables(void);
+/* Convert between Xen-heap virtual addresses and machine frame numbers. */
+#define __virt_to_mfn(va)  (virt_to_maddr(va) >> PAGE_SHIFT)
+#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
+
+/* Convert between Xen-heap virtual addresses and page-info structures. */
+static inline struct page_info *virt_to_page(const void *v)
+{
+    BUG();
+    return NULL;
+}
+
+/*
+ * We define non-underscored wrappers for the above conversion functions.
+ * These are overridden in various source files while the underscored
+ * versions remain intact.
+ */
+#define virt_to_mfn(va)   __virt_to_mfn(va)
+#define mfn_to_virt(mfn)  __mfn_to_virt(mfn)
+
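+/*
+ * PG_shift()/PG_mask() build the PGT_ and PGC_ flag constants below:
+ * PG_shift(n) is the bit position n bits down from the top of the word,
+ * and PG_mask(x, n) places the value x at that position.
+ */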
+#define PG_shift(idx)   (BITS_PER_LONG - (idx))
+#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
+
+#define PGT_none          PG_mask(0, 1)  /* no special uses of this page */
+#define PGT_writable_page PG_mask(1, 1)  /* has writable mappings?       */
+#define PGT_type_mask     PG_mask(1, 1)  /* Bits 31 or 63.               */
+
+ /* 2-bit count of uses of this frame as its current type. */
+#define PGT_count_mask    PG_mask(3, 3)
+
+/* Cleared when the owning guest 'frees' this page. */
+#define _PGC_allocated    PG_shift(1)
+#define PGC_allocated     PG_mask(1, 1)
+/* Page is Xen heap? */
+#define _PGC_xen_heap     PG_shift(2)
+#define PGC_xen_heap      PG_mask(1, 2)
+/* Page is static memory */
+#define PGC_static        0
+/* Page is broken? */
+#define _PGC_broken       PG_shift(7)
+#define PGC_broken        PG_mask(1, 7)
+ /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
+#define PGC_state         PG_mask(3, 9)
+#define PGC_state_inuse   PG_mask(0, 9)
+#define PGC_state_offlining PG_mask(1, 9)
+#define PGC_state_offlined PG_mask(2, 9)
+#define PGC_state_free    PG_mask(3, 9)
+#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+/* Page is not reference counted */
+#define _PGC_extra        PG_shift(10)
+#define PGC_extra         PG_mask(1, 10)
+
+/* Count of references to this frame. */
+#define PGC_count_width   PG_shift(10)
+#define PGC_count_mask    ((1UL<<PGC_count_width)-1)
+
+/*
+ * Page needs to be scrubbed. Since this bit can only be set on a page that is
+ * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
+ */
+#define _PGC_need_scrub   _PGC_allocated
+#define PGC_need_scrub    PGC_allocated
+
+#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
+#define is_xen_heap_mfn(mfn) \
+    (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
+
+#define is_xen_fixed_mfn(mfn)                                   \
+    ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) &&           \
+     (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1)))
+
+#define page_get_owner(_p)    (_p)->v.inuse.domain
+#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d))
+
+/* TODO: implement */
+#define mfn_valid(mfn) ({ (void) (mfn); 0; })
+#define max_page ((unsigned long)0)
+
+#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn)))
+
+#define domain_set_alloc_bitsize(d) ((void)0)
+#define domain_clamp_alloc_bitsize(d, b) (b)
+
+#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
+
+struct page_info
+{
+    /* Each frame can be threaded onto a doubly-linked list. */
+    struct page_list_entry list;
+
+    /* Reference count and various PGC_xxx flags and fields. */
+    unsigned long count_info;
+
+    /* Context-dependent fields follow... */
+    union {
+        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
+        struct {
+            /* Type reference count and various PGT_xxx flags and fields. */
+            unsigned long type_info;
+        } inuse;
+        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
+        union {
+            struct {
+                /*
+                 * Index of the first *possibly* unscrubbed page in the buddy.
+                 * One more bit than maximum possible order to accommodate
+                 * INVALID_DIRTY_IDX.
+                 */
+#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1)
+                unsigned long first_dirty:MAX_ORDER + 1;
+
+                /* Do TLBs need flushing for safety before next page use? */
+                bool need_tlbflush:1;
+
+#define BUDDY_NOT_SCRUBBING    0
+#define BUDDY_SCRUBBING        1
+#define BUDDY_SCRUB_ABORT      2
+                unsigned long scrub_state:2;
+            };
+
+            unsigned long val;
+        } free;
+
+    } u;
+
+    union {
+        /* Page is in use, but not as a shadow. */
+        struct {
+            /* Owner of this page (zero if page is anonymous). */
+            struct domain *domain;
+        } inuse;
+
+        /* Page is on a free list. */
+        struct {
+            /* Order-size of the free chunk this page is the head of. */
+            unsigned int order;
+        } free;
+
+    } v;
+
+    union {
+        /*
+         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
+         * Only valid for: a) free pages, and b) pages with zero type count
+         */
+        u32 tlbflush_timestamp;
+    };
+    u64 pad;
+};
+
+
+#define FRAMETABLE_VIRT_START  (XEN_VIRT_START + GB(32))
+#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
+
+/* PDX of the first page in the frame table. */
+extern unsigned long frametable_base_pdx;
+
+/* Convert between machine frame numbers and page-info structures. */
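+/*
+ * The frame table is indexed by page index (PDX) rather than by raw MFN;
+ * frametable_base_pdx accounts for the first covered page.
+ */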
+#define mfn_to_page(mfn)                                            \
+    (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx))
+#define page_to_mfn(pg)                                             \
+    pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
+
+static inline void *page_to_virt(const struct page_info *pg)
+{
+    return mfn_to_virt(mfn_x(page_to_mfn(pg)));
+}
+
+/*
+ * Common code requires get_page_type and put_page_type.
+ * We don't care about typecounts so we just do the minimum to make it
+ * happy.
+ */
+static inline int get_page_type(struct page_info *page, unsigned long type)
+{
+    return 1;
+}
+
+static inline void put_page_type(struct page_info *page)
+{
+    return;
+}
+
+/* TODO */
+static inline bool get_page_nr(struct page_info *page, const struct domain *domain,
+                               unsigned long nr)
+{
+    BUG();
+}
+static inline void put_page_nr(struct page_info *page, unsigned long nr)
+{
+    BUG();
+}
+
+static inline void put_page_and_type(struct page_info *page)
+{
+    put_page_type(page);
+    put_page(page);
+}
+
+/*
+ * PPC does not have an M2P, but common code expects a handful of
+ * M2P-related defines and functions. Provide dummy versions of these.
+ */
+#define INVALID_M2P_ENTRY (~0UL)
+#define SHARED_M2P_ENTRY  (~0UL - 1UL)
+#define SHARED_M2P(_e)    ((_e) == SHARED_M2P_ENTRY)
+
+/* Xen always owns P2M on PPC */
+#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); } while (0)
+#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn)))
+
+#define PDX_GROUP_SHIFT (16 + 5)
+
+static inline unsigned long domain_get_maximum_gpfn(struct domain *d)
+{
+    BUG();
+    return 0;
+}
+
+static inline long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    BUG();
+    return 0;
+}
+
+static inline unsigned int arch_get_dma_bitsize(void)
+{
+    return 32; /* TODO */
+}
+
+/*
+ * On PPC, all the RAM is currently direct mapped in Xen.
+ * Hence return always true.
+ */
+static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
+{
+    return true;
+}
 
 #endif /* _ASM_PPC_MM_H */
diff --git a/xen/arch/ppc/include/asm/monitor.h b/xen/arch/ppc/include/asm/monitor.h
new file mode 100644
index 0000000000..f810a367c7
--- /dev/null
+++ b/xen/arch/ppc/include/asm/monitor.h
@@ -0,0 +1,48 @@
+/* Derived from xen/arch/arm/include/asm/monitor.h */
+#ifndef __ASM_PPC_MONITOR_H__
+#define __ASM_PPC_MONITOR_H__
+
+#include <public/domctl.h>
+
+#include <xen/sched.h>
+#include <public/domctl.h>
+
+static inline
+void arch_monitor_allow_userspace(struct domain *d, bool allow_userspace)
+{
+}
+
+static inline
+int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
+{
+    /* No arch-specific monitor ops on PPC. */
+    return -EOPNOTSUPP;
+}
+
+int arch_monitor_domctl_event(struct domain *d,
+                              struct xen_domctl_monitor_op *mop);
+
+static inline
+int arch_monitor_init_domain(struct domain *d)
+{
+    /* No arch-specific domain initialization on PPC. */
+    return 0;
+}
+
+static inline
+void arch_monitor_cleanup_domain(struct domain *d)
+{
+    /* No arch-specific domain cleanup on PPC. */
+}
+
+static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
+{
+    uint32_t capabilities = 0;
+
+    capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST |
+                    1U << XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL);
+
+    return capabilities;
+}
+
+#endif /* __ASM_PPC_MONITOR_H__ */
diff --git a/xen/arch/ppc/include/asm/nospec.h b/xen/arch/ppc/include/asm/nospec.h
new file mode 100644
index 0000000000..4d8ec923e9
--- /dev/null
+++ b/xen/arch/ppc/include/asm/nospec.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * From arch/arm/include/asm/nospec.h.
+ * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+#ifndef __ASM_PPC_NOSPEC_H__
+#define __ASM_PPC_NOSPEC_H__
+
+static inline bool evaluate_nospec(bool condition)
+{
+    return condition;
+}
+
+static inline void block_speculation(void)
+{
+}
+
+#endif /* __ASM_PPC_NOSPEC_H__ */
diff --git a/xen/arch/ppc/include/asm/numa.h b/xen/arch/ppc/include/asm/numa.h
new file mode 100644
index 0000000000..d857bba2ba
--- /dev/null
+++ b/xen/arch/ppc/include/asm/numa.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_PPC_NUMA_H__
+#define __ASM_PPC_NUMA_H__
+
+#include <xen/types.h>
+#include <xen/mm.h>
+
+typedef uint8_t nodeid_t;
+
+/* Fake one node for now. See also node_online_map. */
+#define cpu_to_node(cpu) 0
+#define node_to_cpumask(node) (cpu_online_map)
+
+/*
+ * TODO: make first_valid_mfn static when NUMA is supported on Arm; this
+ * is required because the dummy helpers are using it.
+ */
+extern mfn_t first_valid_mfn;
+
+/* XXX: implement NUMA support */
+#define node_spanned_pages(nid) (max_page - mfn_x(first_valid_mfn))
+#define node_start_pfn(nid) (mfn_x(first_valid_mfn))
+#define __node_distance(a, b) (20)
+
+#define arch_want_default_dmazone() (false)
+
+#endif /* __ASM_PPC_NUMA_H__ */
diff --git a/xen/arch/ppc/include/asm/p2m.h b/xen/arch/ppc/include/asm/p2m.h
new file mode 100644
index 0000000000..851e9f011a
--- /dev/null
+++ b/xen/arch/ppc/include/asm/p2m.h
@@ -0,0 +1,105 @@
+#ifndef __ASM_PPC_P2M_H__
+#define __ASM_PPC_P2M_H__
+
+#include <asm/page-bits.h>
+
+#define paddr_bits PADDR_BITS
+
+/*
+ * List of possible types for each page in the p2m entry.
+ * The number of available bits per page in the pte for this purpose is 4,
+ * so only 16 types are possible. If we run out of values in the future,
+ * it's possible to use higher values for pseudo-types that are not stored
+ * in the p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */
+    p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */
+    p2m_mmio_direct_c,  /* Read/write mapping of genuine MMIO area cacheable */
+    p2m_map_foreign_rw, /* Read/write RAM pages from foreign domain */
+    p2m_map_foreign_ro, /* Read-only RAM pages from foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
+} p2m_type_t;
+
+#include <xen/p2m-common.h>
+
+static inline int get_page_and_type(struct page_info *page,
+                                    struct domain *domain,
+                                    unsigned long type)
+{
+    BUG();
+    return 1;
+}
+
+/* Look up a GFN and take a reference count on the backing page. */
+typedef unsigned int p2m_query_t;
+#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
+#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
+
+static inline struct page_info *get_page_from_gfn(
+    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+{
+    BUG();
+    return NULL;
+}
+
+static inline void memory_type_changed(struct domain *d)
+{
+    BUG();
+}
+
+
+static inline int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
+                                                        unsigned int order)
+{
+    BUG();
+    return 1;
+}
+
+static inline int guest_physmap_add_entry(struct domain *d,
+                                          gfn_t gfn,
+                                          mfn_t mfn,
+                                          unsigned long page_order,
+                                          p2m_type_t t)
+{
+    BUG();
+    return 1;
+}
+
+/* Untyped version for RAM only, for compatibility */
+static inline int __must_check
+guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+                       unsigned int page_order)
+{
+    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
+}
+
+static inline mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
+{
+    BUG();
+    return _mfn(0);
+}
+
+static inline bool arch_acquire_resource_check(struct domain *d)
+{
+    /*
+     * The reference counting of foreign entries in set_foreign_p2m_entry()
+     * is supported on PPC.
+     */
+    return true;
+}
+
+static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
+{
+    /* Not supported on PPC. */
+}
+
+#endif /* __ASM_PPC_P2M_H__ */
diff --git a/xen/arch/ppc/include/asm/page.h b/xen/arch/ppc/include/asm/page.h
index e58b0a7354..713630c393 100644
--- a/xen/arch/ppc/include/asm/page.h
+++ b/xen/arch/ppc/include/asm/page.h
@@ -39,6 +39,9 @@
 #define PTE_XEN_RO   (PTE_XEN_BASE | PTE_EAA_READ)
 #define PTE_XEN_RX   (PTE_XEN_BASE | PTE_EAA_READ | PTE_EAA_EXECUTE)
 
+/* TODO */
+#define PAGE_HYPERVISOR 0
+
 /*
  * Radix Tree layout for 64KB pages:
  *
@@ -175,4 +178,20 @@ struct prtb_entry {
 
 void tlbie_all(void);
 
+static inline void invalidate_icache(void)
+{
+    BUG();
+}
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(dp, sp) memcpy(dp, sp, PAGE_SIZE)
+
+/* TODO: Flush the dcache for an entire page. */
+static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache)
+{
+    BUG();
+}
+
+#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
+
 #endif /* _ASM_PPC_PAGE_H */
diff --git a/xen/arch/ppc/include/asm/paging.h b/xen/arch/ppc/include/asm/paging.h
new file mode 100644
index 0000000000..eccacece29
--- /dev/null
+++ b/xen/arch/ppc/include/asm/paging.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_PPC_PAGING_H__
+#define __ASM_PPC_PAGING_H__
+
+#define paging_mode_translate(d) (1)
+#define paging_mode_external(d) (1)
+
+#endif /* __ASM_PPC_PAGING_H__ */
diff --git a/xen/arch/ppc/include/asm/pci.h b/xen/arch/ppc/include/asm/pci.h
new file mode 100644
index 0000000000..e76c8e5475
--- /dev/null
+++ b/xen/arch/ppc/include/asm/pci.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_PPC_PCI_H__
+#define __ASM_PPC_PCI_H__
+
+struct arch_pci_dev {
+};
+
+#endif /* __ASM_PPC_PCI_H__ */
diff --git a/xen/arch/ppc/include/asm/percpu.h b/xen/arch/ppc/include/asm/percpu.h
new file mode 100644
index 0000000000..faae72ee41
--- /dev/null
+++ b/xen/arch/ppc/include/asm/percpu.h
@@ -0,0 +1,26 @@
+#ifndef __PPC_PERCPU_H__
+#define __PPC_PERCPU_H__
+
+#ifndef __ASSEMBLY__
+
+#include <xen/types.h>
+
+extern char __per_cpu_start[], __per_cpu_data_end[];
+extern unsigned long __per_cpu_offset[NR_CPUS];
+void percpu_init_areas(void);
+
+#define smp_processor_id() 0 /* TODO: Fix this */
+
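+/*
+ * With smp_processor_id() hard-wired to 0 above, this_cpu()/this_cpu_ptr()
+ * apply a zero offset, i.e. they resolve to CPU0's copy of the variable.
+ */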
+#define per_cpu(var, cpu)  \
+    (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
+#define this_cpu(var) \
+    (*RELOC_HIDE(&per_cpu__##var, smp_processor_id()))
+
+#define per_cpu_ptr(var, cpu)  \
+    (*RELOC_HIDE(var, __per_cpu_offset[cpu]))
+#define this_cpu_ptr(var) \
+    (*RELOC_HIDE(var, smp_processor_id()))
+
+#endif
+
+#endif /* __PPC_PERCPU_H__ */
diff --git a/xen/arch/ppc/include/asm/procarea.h b/xen/arch/ppc/include/asm/procarea.h
new file mode 100644
index 0000000000..55a358f1e4
--- /dev/null
+++ b/xen/arch/ppc/include/asm/procarea.h
@@ -0,0 +1,38 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
+ */
+
+#ifndef _ASM_PROCAREA_H_
+#define _ASM_PROCAREA_H_
+
+#include <xen/types.h>
+
+struct vcpu;
+struct gdb_state;
+
+struct processor_area
+{
+    unsigned int whoami;
+    unsigned int hard_id;
+    struct vcpu *cur_vcpu;
+    void *hyp_stack_base;
+    unsigned long saved_regs[2];
+};
+
+#endif
diff --git a/xen/arch/ppc/include/asm/processor.h b/xen/arch/ppc/include/asm/processor.h
index 417288738c..695f98af5b 100644
--- a/xen/arch/ppc/include/asm/processor.h
+++ b/xen/arch/ppc/include/asm/processor.h
@@ -110,6 +110,10 @@
 /* Macro to adjust thread priority for hardware multithreading */
 #define HMT_very_low()  asm volatile ( "or %r31, %r31, %r31" )
 
+/* TODO: This isn't correct */
+#define cpu_to_core(_cpu)   (0)
+#define cpu_to_socket(_cpu) (0)
+
 /*
  * User-accessible registers: most of these need to be saved/restored
  * for every nested Xen invocation.
@@ -175,6 +179,8 @@ static inline void noreturn die(void)
         HMT_very_low();
 }
 
+#define cpu_relax() asm volatile ( "or %r1, %r1, %r1; or %r2, %r2, %r2" )
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PPC_PROCESSOR_H */
diff --git a/xen/arch/ppc/include/asm/random.h b/xen/arch/ppc/include/asm/random.h
new file mode 100644
index 0000000000..2f9e9bbae4
--- /dev/null
+++ b/xen/arch/ppc/include/asm/random.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_PPC_RANDOM_H__
+#define __ASM_PPC_RANDOM_H__
+
+static inline unsigned int arch_get_random(void)
+{
+    return 0;
+}
+
+#endif /* __ASM_PPC_RANDOM_H__ */
diff --git a/xen/arch/ppc/include/asm/regs.h b/xen/arch/ppc/include/asm/regs.h
index d5f47a6ff1..68135ca584 100644
--- a/xen/arch/ppc/include/asm/regs.h
+++ b/xen/arch/ppc/include/asm/regs.h
@@ -23,6 +23,8 @@
 #ifndef _ASM_REG_DEFS_H_
 #define _ASM_REG_DEFS_H_
 
+#include <xen/types.h>
+
 /* Special Purpose Registers */
 #define SPRN_VRSAVE 256
 #define SPRN_DSISR  18
diff --git a/xen/arch/ppc/include/asm/setup.h b/xen/arch/ppc/include/asm/setup.h
new file mode 100644
index 0000000000..e4f64879b6
--- /dev/null
+++ b/xen/arch/ppc/include/asm/setup.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_PPC_SETUP_H__
+#define __ASM_PPC_SETUP_H__
+
+#define max_init_domid (0)
+
+#endif /* __ASM_PPC_SETUP_H__ */
diff --git a/xen/arch/ppc/include/asm/smp.h b/xen/arch/ppc/include/asm/smp.h
new file mode 100644
index 0000000000..eca43f0e6c
--- /dev/null
+++ b/xen/arch/ppc/include/asm/smp.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <xen/cpumask.h>
+#include <xen/percpu.h>
+
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
+
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
+
+/*
+ * Do we, for platform reasons, need to actually keep CPUs online when we
+ * would otherwise prefer them to be off?
+ */
+#define park_offline_cpus false
+
+#endif
diff --git a/xen/arch/ppc/include/asm/softirq.h b/xen/arch/ppc/include/asm/softirq.h
new file mode 100644
index 0000000000..a0b28a5e51
--- /dev/null
+++ b/xen/arch/ppc/include/asm/softirq.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_PPC_SOFTIRQ_H__
+#define __ASM_PPC_SOFTIRQ_H__
+
+#define NR_ARCH_SOFTIRQS 0
+
+#define arch_skip_send_event_check(cpu) 0
+
+#endif /* __ASM_PPC_SOFTIRQ_H__ */
diff --git a/xen/arch/ppc/include/asm/spinlock.h b/xen/arch/ppc/include/asm/spinlock.h
new file mode 100644
index 0000000000..4bdb4b1e98
--- /dev/null
+++ b/xen/arch/ppc/include/asm/spinlock.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#define arch_lock_acquire_barrier() smp_mb()
+#define arch_lock_release_barrier() smp_mb()
+
+#define arch_lock_relax() cpu_relax()
+#define arch_lock_signal()
+#define arch_lock_signal_wmb()  \
+({                              \
+    smp_wmb();                  \
+    arch_lock_signal();         \
+})
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/xen/arch/ppc/include/asm/system.h b/xen/arch/ppc/include/asm/system.h
index 94091df644..5adc321e88 100644
--- a/xen/arch/ppc/include/asm/system.h
+++ b/xen/arch/ppc/include/asm/system.h
@@ -1,6 +1,247 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ * Copyright (C) Raptor Engineering LLC
+ *
+ * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ *          Shawn Anastasio <sanastasio@xxxxxxxxxxxxxxxxxxxxx>
+ */
+
 #ifndef _ASM_SYSTEM_H_
 #define _ASM_SYSTEM_H_
 
-#define smp_wmb() __asm__ __volatile__ ( "lwsync" : : : "memory" )
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <asm/memory.h>
+#include <asm/time.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+
+#define xchg(ptr,x)                                                          \
+({                                                                           \
+    __typeof__(*(ptr)) _x_ = (x);                                            \
+    (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr)));  \
+})
+
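+/*
+ * Build an xchg helper from a load-reserve/store-conditional pair: the
+ * bne- retries the sequence until the store-conditional succeeds.
+ */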
+#define build_xchg(fn, type, ldinsn, stinsn)            \
+static inline unsigned long                             \
+fn(volatile type *m, unsigned long val)                 \
+{                                                       \
+    unsigned long dummy;                                \
+                                                        \
+    __asm__ __volatile__(                               \
+    PPC_ATOMIC_ENTRY_BARRIER                            \
+"1: " ldinsn " %0,0,%3\n"                               \
+    stinsn " %2,0,%3\n"                                 \
+"2: bne- 1b"                                            \
+    PPC_ATOMIC_EXIT_BARRIER                             \
+    : "=&r" (dummy), "=m" (*m)                          \
+    : "r" (val), "r" (m)                                \
+    : "cc", "memory");                                  \
+    return dummy;                                       \
+}
+
+build_xchg(__xchg_u8, uint8_t, "lbarx", "stbcx.")
+build_xchg(__xchg_u16, uint16_t, "lharx", "sthcx.")
+build_xchg(__xchg_u32, uint32_t, "lwarx", "stwcx.")
+build_xchg(__xchg_u64, uint64_t, "ldarx", "stdcx.")
+
+#undef build_xchg
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long
+__xchg(volatile void *ptr, unsigned long x, int size)
+{
+    switch (size) {
+    case 1:
+        return __xchg_u8(ptr, x);
+    case 2:
+        return __xchg_u16(ptr, x);
+    case 4:
+        return __xchg_u32(ptr, x);
+    case 8:
+        return __xchg_u64(ptr, x);
+    }
+    __xchg_called_with_bad_pointer();
+    return x;
+}
+
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+    unsigned int prev;
+
+    __asm__ __volatile__ (
+    PPC_ATOMIC_ENTRY_BARRIER
+"1: lwarx   %0,0,%2     # __cmpxchg_u32\n\
+    cmpw    0,%0,%3\n\
+    bne-    2f\n\
+    stwcx.  %4,0,%2\n\
+    bne-    1b"
+    PPC_ATOMIC_EXIT_BARRIER
+    "\n\
+2:"
+    : "=&r" (prev), "=m" (*p)
+    : "r" (p), "r" (old), "r" (new), "m" (*p)
+    : "cc", "memory");
+
+    return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+{
+    unsigned long prev;
+
+    __asm__ __volatile__ (
+    PPC_ATOMIC_ENTRY_BARRIER
+"1: ldarx   %0,0,%2     # __cmpxchg_u64\n\
+    cmpd    0,%0,%3\n\
+    bne-    2f\n\
+    stdcx.  %4,0,%2\n\
+    bne-    1b"
+    PPC_ATOMIC_EXIT_BARRIER
+    "\n\
+2:"
+    : "=&r" (prev), "=m" (*p)
+    : "r" (p), "r" (old), "r" (new), "m" (*p)
+    : "cc", "memory");
+
+    return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static always_inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+    switch (size) {
+    case 2:
+        BUG(); return 0; /* XXX implement __cmpxchg_u16 ? */
+    case 4:
+        return __cmpxchg_u32(ptr, old, new);
+    case 8:
+        return __cmpxchg_u64(ptr, old, new);
+    }
+    __cmpxchg_called_with_bad_pointer();
+    return old;
+}
+
+#define cmpxchg_user(ptr,o,n) cmpxchg(ptr,o,n)
+
+#define cmpxchg(ptr,o,n)                                                 \
+    ({                                                                   \
+        __typeof__(*(ptr)) _o_ = (o);                                    \
+        __typeof__(*(ptr)) _n_ = (n);                                    \
+        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,        \
+                                       (unsigned long)_n_, sizeof(*(ptr))); \
+    })
+
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory).  The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ * across this point (nop on PPC).
+ *
+ * We have to use the sync instructions for mb(), since lwsync doesn't
+ * order loads with respect to previous stores.  Lwsync is fine for
+ * rmb(), though.
+ * For wmb(), we use sync since wmb is used in drivers to order
+ * stores to system memory with respect to writes to the device.
+ * However, smp_wmb() can be a lighter-weight eieio barrier on
+ * SMP since it is only used to order updates to system memory.
+ */
+#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb()  __asm__ __volatile__ ("lwsync" : : : "memory")
+#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends()  do { } while(0)
+
+#define set_mb(var, value)  do { var = value; smp_mb(); } while (0)
+#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
+
+#define smp_mb__before_atomic()    smp_mb()
+#define smp_mb__after_atomic()     smp_mb()
+
+#ifdef CONFIG_SMP
+#define smp_mb()   mb()
+#define smp_rmb()  rmb()
+#define smp_wmb()  __asm__ __volatile__ ("lwsync" : : : "memory")
+#define smp_read_barrier_depends()  read_barrier_depends()
+#else
+#define smp_mb()   __asm__ __volatile__("": : :"memory")
+#define smp_rmb()  __asm__ __volatile__("": : :"memory")
+#define smp_wmb()  __asm__ __volatile__("": : :"memory")
+#define smp_read_barrier_depends()  do { } while(0)
+#endif /* CONFIG_SMP */
+
+#define local_save_flags(flags) ((flags) = mfmsr())
+#define local_irq_restore(flags) do { \
+        __asm__ __volatile__("": : :"memory"); \
+        mtmsrd((flags)); \
+} while(0)
+
+static inline void local_irq_disable(void)
+{
+    unsigned long msr;
+    msr = mfmsr();
+    mtmsrd(msr & ~MSR_EE);
+    __asm__ __volatile__("" : : : "memory");
+}
+
+static inline void local_irq_enable(void)
+{
+    unsigned long msr;
+    __asm__ __volatile__("" : : : "memory");
+    msr = mfmsr();
+    mtmsrd(msr | MSR_EE);
+}
+
+static inline void __do_save_and_cli(unsigned long *flags)
+{
+    unsigned long msr;
+    msr = mfmsr();
+    *flags = msr;
+    mtmsrd(msr & ~MSR_EE);
+    __asm__ __volatile__("" : : : "memory");
+}
+
+#define local_irq_save(flags) __do_save_and_cli(&flags)
+
+static inline int local_irq_is_enabled(void)
+{
+    return !!(mfmsr() & MSR_EE);
+}
+
+#define arch_fetch_and_add(x, v) __sync_fetch_and_add(x, v)
 
 #endif /* _ASM_SYSTEM_H */
diff --git a/xen/arch/ppc/include/asm/time.h b/xen/arch/ppc/include/asm/time.h
new file mode 100644
index 0000000000..7872d3c15b
--- /dev/null
+++ b/xen/arch/ppc/include/asm/time.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_PPC_TIME_H__
+#define __ASM_PPC_TIME_H__
+
+#include <xen/lib.h>
+#include <asm/processor.h>
+#include <asm/regs.h>
+
+struct vcpu;
+
+/* TODO: implement */
+static inline void force_update_vcpu_system_time(struct vcpu *v) { BUG(); }
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+    return mfspr(SPRN_TBRL);
+}
+
+#endif /* __ASM_PPC_TIME_H__ */
diff --git a/xen/arch/ppc/include/asm/vm_event.h b/xen/arch/ppc/include/asm/vm_event.h
new file mode 100644
index 0000000000..346653f32a
--- /dev/null
+++ b/xen/arch/ppc/include/asm/vm_event.h
@@ -0,0 +1,49 @@
+#ifndef __ASM_PPC_VM_EVENT_H__
+#define __ASM_PPC_VM_EVENT_H__
+
+#include <xen/sched.h>
+#include <xen/vm_event.h>
+#include <public/domctl.h>
+
+static inline int vm_event_init_domain(struct domain *d)
+{
+    /* Nothing to do. */
+    return 0;
+}
+
+static inline void vm_event_cleanup_domain(struct domain *d)
+{
+    memset(&d->monitor, 0, sizeof(d->monitor));
+}
+
+static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
+                                              vm_event_response_t *rsp)
+{
+    /* Not supported on PPC. */
+}
+
+static inline
+void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
+{
+    /* Not supported on PPC. */
+}
+
+static inline
+void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
+{
+    /* Not supported on PPC. */
+}
+
+static inline
+void vm_event_sync_event(struct vcpu *v, bool value)
+{
+    /* Not supported on PPC. */
+}
+
+static inline
+void vm_event_reset_vmtrace(struct vcpu *v)
+{
+    /* Not supported on PPC. */
+}
+
+#endif /* __ASM_PPC_VM_EVENT_H__ */
diff --git a/xen/arch/ppc/include/asm/xenoprof.h b/xen/arch/ppc/include/asm/xenoprof.h
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/xen/arch/ppc/mm-radix.c b/xen/arch/ppc/mm-radix.c
index 071e71b73e..399898a36d 100644
--- a/xen/arch/ppc/mm-radix.c
+++ b/xen/arch/ppc/mm-radix.c
@@ -1,13 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 #include <xen/init.h>
 #include <xen/kernel.h>
+#include <xen/mm.h>
 #include <xen/types.h>
 #include <xen/lib.h>
 
 #include <asm/bitops.h>
 #include <asm/byteorder.h>
 #include <asm/early_printk.h>
-#include <asm/mm.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/regs.h>
diff --git a/xen/arch/ppc/opal.c b/xen/arch/ppc/opal.c
index 396a9e403b..1183b7d5ef 100644
--- a/xen/arch/ppc/opal.c
+++ b/xen/arch/ppc/opal.c
@@ -1,10 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-#include "xen/compiler.h"
 #include <asm/boot.h>
 #include <asm/early_printk.h>
 #include <asm/opal-api.h>
 #include <asm/processor.h>
-#include <asm/mm.h>
 #include <xen/types.h>
 #include <xen/libfdt/libfdt.h>
 #include <xen/init.h>
diff --git a/xen/arch/ppc/tlb-radix.c b/xen/arch/ppc/tlb-radix.c
index 69934076a7..551e8280de 100644
--- a/xen/arch/ppc/tlb-radix.c
+++ b/xen/arch/ppc/tlb-radix.c
@@ -5,9 +5,9 @@
  *
  * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
  */
+#include <xen/bitops.h>
 #include <xen/stringify.h>
 
-#include <asm/bitops.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
 
diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h
index 464ebdb0da..2cf4238daa 100644
--- a/xen/include/public/hvm/save.h
+++ b/xen/include/public/hvm/save.h
@@ -89,6 +89,8 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
 #include "../arch-x86/hvm/save.h"
 #elif defined(__arm__) || defined(__aarch64__)
 #include "../arch-arm/hvm/save.h"
+#elif defined(__powerpc64__)
+#include "../arch-ppc.h"
 #else
 #error "unsupported architecture"
 #endif
diff --git a/xen/include/public/pmu.h b/xen/include/public/pmu.h
index eb87a81e7b..5a176b6ac3 100644
--- a/xen/include/public/pmu.h
+++ b/xen/include/public/pmu.h
@@ -11,6 +11,8 @@
 #include "arch-x86/pmu.h"
 #elif defined (__arm__) || defined (__aarch64__)
 #include "arch-arm.h"
+#elif defined (__powerpc64__)
+#include "arch-ppc.h"
 #else
 #error "Unsupported architecture"
 #endif
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index 920567e006..b812a0a324 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -16,6 +16,8 @@
 #include "arch-x86/xen.h"
 #elif defined(__arm__) || defined (__aarch64__)
 #include "arch-arm.h"
+#elif defined(__powerpc64__)
+#include "arch-ppc.h"
 #else
 #error "Unsupported architecture"
 #endif
-- 
2.30.2