[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 1/2] make ioremap_attr() common


  • To: Jan Beulich <jbeulich@xxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: "Orzel, Michal" <michal.orzel@xxxxxxx>
  • Date: Wed, 1 Apr 2026 18:05:30 +0200
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass (sender ip is 165.204.84.17) smtp.rcpttodomain=suse.com smtp.mailfrom=amd.com; dmarc=pass (p=quarantine sp=quarantine pct=100) action=none header.from=amd.com; dkim=none (message not signed); arc=none (0)
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=fSIazrg/cqQji1+wSWpobRCOkBtPno0U1QWTzFGCx+k=; b=KNfFUjecw5XZTlg9+72jVOs4PMCoNyLnpYGMyr3kPs549DudZ7bXmLxOuZwhz6Iy6wfHdf0b8z8rPJSLnJhbI6Owz5pbKBEjk01Cy5JNLQHu3tnx/6MXMXlalKvr9MrlByfIySdII45OtDXV1VZheIi2ZPNuRRV+CXYs3sID7LZvRSaYfFNUuZSJE/BjKoowHhBSn7/ud2oaGXoxSm1ZqOpZKlsOfF5nQrvInX1k4MTJuPr+kZskUbbihs4L9QOdVs3nu98pDtRgjy+1OlqkB3NOZK3QtkfgP3j1fCgzradfqSwhTMxfADetJ7wR6fZqdR6eYgCmNeNSP7ScK5QgzA==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=QFpxjoxdC5coyM1Wqys6pK6OgG/kE5gqEoOycLn5k6rY37WWBPY4r+klJLwMT9/AOZmbnacy4OifLE4OVGHKR8JjCHRav12hzj/I7+amDZM6v/0vS9XG1Dt6TuzgC6+2zt5m24Oq/kxImQL9OPUEpyGHRYw3QJvy/j9QMzNb91E1MebNYobn5r5QWyqwO0RkK3o++XHBPVHddmLJl3aRCi5LONgnIFKCjwp9fMwgfL9ENDk+nKHcsDRHXHU6MMkBCUbswOPn7rYMqP94hkeQDIf38kFFSQx8T+8PPlxgB63IhaiAKGB6qLLiq/9x49OdbjfoKNcMNo3jRlFDRtwdwA==
  • Authentication-results: eu.smtp.expurgate.cloud; dkim=pass header.s=selector1 header.d=amd.com header.i="@amd.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Alistair Francis <alistair.francis@xxxxxxx>, Connor Davis <connojdavis@xxxxxxxxx>, Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>, Bertrand Marquis <bertrand.marquis@xxxxxxx>, Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>, Timothy Pearson <tpearson@xxxxxxxxxxxxxxxxxxxxx>
  • Delivery-date: Wed, 01 Apr 2026 16:05:44 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>


On 19/02/2026 16:51, Jan Beulich wrote:
> This core backing function is uniform; what varies across architectures
> are the attributes passed and hence the wrappers around it. Yet of course
> extra checking or special handling may be needed per arch, so introduce a
> suitable hook. Permit such a hook to return both adjusted and un-adjusted
> (for the page offset) pointers.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> Declarations (or inline counterparts) aren't being moved around, to avoid
> the need to touch source files using the functions. Quite possibly they
> want to consistently go into xen/io.h and asm/io.h.
> 
> Of course ioremap.c could also go into lib/.
> 
> For RISC-V the wrappers likely should become inline functions?
> 
> PPC doesn't reference any of the functions just yet, so gets only a
> declaration.
> 
> For Arm, a TODO item is deliberately retained, yet seeing the use of
> ioremap_wc() in domain building (which by itself is questionable, see next
> patch) I wonder if that's even feasible as long as we don't have
> memremap() or alike.
> 
> --- a/xen/arch/arm/include/asm/io.h
> +++ b/xen/arch/arm/include/asm/io.h
> @@ -1,6 +1,8 @@
>  #ifndef _ASM_IO_H
>  #define _ASM_IO_H
>  
> +#include <xen/mm-types.h>
> +
>  #if defined(CONFIG_ARM_32)
>  # include <asm/arm32/io.h>
>  #elif defined(CONFIG_ARM_64)
> @@ -9,6 +11,16 @@
>  # error "unknown ARM variant"
>  #endif
>  
> +#ifdef CONFIG_MPU
> +void __iomem *mpu_ioremap_attr(paddr_t start, size_t len, pte_attr_t flags);
> +#define arch_ioremap_attr mpu_ioremap_attr
> +#else
> +/*
> + * ioremap_attr() should only be used to remap device address ranges.
> + * TODO: Add an arch hook to verify this assumption.
> + */
> +#endif
> +
>  #endif
>  /*
>   * Local variables:
> --- a/xen/arch/arm/include/asm/mm.h
> +++ b/xen/arch/arm/include/asm/mm.h
> @@ -5,6 +5,7 @@
>  #include <asm/page.h>
>  #include <public/xen.h>
>  #include <xen/pdx.h>
> +#include <xen/vmap.h>
>  
>  #if defined(CONFIG_ARM_32)
>  # include <asm/arm32/mm.h>
> @@ -200,13 +201,12 @@ extern int prepare_secondary_mm(int cpu)
>  extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
>  /* Helper function to setup memory management */
>  void setup_mm_helper(void);
> -/* map a physical range in virtual memory */
> -void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned int attributes);
>  
>  static inline void __iomem *ioremap_nocache(paddr_t start, size_t len)
>  {
>      return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
>  }
> +#define ioremap ioremap_nocache
>  
>  static inline void __iomem *ioremap_cache(paddr_t start, size_t len)
>  {
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -455,11 +455,6 @@ unsigned long get_upper_mfn_bound(void)
>      return max_page - 1;
>  }
>  
> -void *ioremap(paddr_t pa, size_t len)
> -{
> -    return ioremap_attr(pa, len, PAGE_HYPERVISOR_NOCACHE);
> -}
> -
>  /*
>   * Local variables:
>   * mode: C
> --- a/xen/arch/arm/mmu/pt.c
> +++ b/xen/arch/arm/mmu/pt.c
> @@ -206,23 +206,6 @@ void clear_fixmap(unsigned int map)
>      BUG_ON(res != 0);
>  }
>  
> -/*
> - * This function should only be used to remap device address ranges
> - * TODO: add a check to verify this assumption
> - */
> -void *ioremap_attr(paddr_t start, size_t len, unsigned int attributes)
> -{
> -    mfn_t mfn = _mfn(PFN_DOWN(start));
> -    unsigned int offs = start & (PAGE_SIZE - 1);
> -    unsigned int nr = PFN_UP(offs + len);
> -    void *ptr = __vmap(&mfn, nr, 1, 1, attributes, VMAP_DEFAULT);
> -
> -    if ( ptr == NULL )
> -        return NULL;
> -
> -    return ptr + offs;
> -}
> -
>  static int create_xen_table(lpae_t *entry)
>  {
>      mfn_t mfn;
> --- a/xen/arch/arm/mpu/mm.c
> +++ b/xen/arch/arm/mpu/mm.c
> @@ -9,6 +9,8 @@
>  #include <xen/sizes.h>
>  #include <xen/spinlock.h>
>  #include <xen/types.h>
> +
> +#include <asm/io.h>
>  #include <asm/mpu.h>
>  #include <asm/mpu/mm.h>
>  #include <asm/page.h>
> @@ -593,7 +595,7 @@ void free_init_memory(void)
>      spin_unlock(&xen_mpumap_lock);
>  }
>  
> -void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned int flags)
> +void __iomem *mpu_ioremap_attr(paddr_t start, size_t len, pte_attr_t flags)
>  {
>      paddr_t start_pg = round_pgdown(start);
>      paddr_t end_pg = round_pgup(start_pg + len);
> --- a/xen/arch/ppc/include/asm/io.h
> +++ b/xen/arch/ppc/include/asm/io.h
> @@ -13,4 +13,6 @@
>  #define writew(v,c)     ({ (void)(v); (void)(c); BUG_ON("unimplemented"); })
>  #define writel(v,c)     ({ (void)(v); (void)(c); BUG_ON("unimplemented"); })
>  
> +void __iomem *ioremap(paddr_t pa, size_t len);
> +
>  #endif /* __ASM_PPC_IO_H__ */
> --- a/xen/arch/riscv/include/asm/io.h
> +++ b/xen/arch/riscv/include/asm/io.h
> @@ -41,6 +41,7 @@
>  #include <xen/macros.h>
>  #include <xen/types.h>
>  
> +void __iomem *ioremap(paddr_t pa, size_t len);
>  void __iomem *ioremap_cache(paddr_t pa, size_t len);
>  void __iomem *ioremap_wc(paddr_t pa, size_t len);
>  
> --- a/xen/arch/riscv/mm.c
> +++ b/xen/arch/riscv/mm.c
> @@ -587,20 +587,6 @@ void *__init arch_vmap_virt_end(void)
>      return (void *)(VMAP_VIRT_START + VMAP_VIRT_SIZE);
>  }
>  
> -static void __iomem *ioremap_attr(paddr_t pa, size_t len,
> -                                  pte_attr_t attributes)
> -{
> -    mfn_t mfn = _mfn(PFN_DOWN(pa));
> -    unsigned int offs = pa & (PAGE_SIZE - 1);
> -    unsigned int nr = PFN_UP(offs + len);
> -    void *ptr = __vmap(&mfn, nr, 1, 1, attributes, VMAP_DEFAULT);
> -
> -    if ( ptr == NULL )
> -        return NULL;
> -
> -    return ptr + offs;
> -}
> -
>  void __iomem *ioremap_cache(paddr_t pa, size_t len)
>  {
>      return ioremap_attr(pa, len, PAGE_HYPERVISOR);
> --- a/xen/arch/x86/include/asm/io.h
> +++ b/xen/arch/x86/include/asm/io.h
> @@ -47,6 +47,9 @@ __OUT(b,"b",char)
>  __OUT(w,"w",short)
>  __OUT(l,,int)
>  
> +void __iomem *x86_ioremap_attr(paddr_t pa, size_t len, pte_attr_t attr);
> +#define arch_ioremap_attr x86_ioremap_attr
> +
>  /*
>   * Boolean indicator and function used to handle platform specific I/O port
>   * emulation.
> --- a/xen/arch/x86/include/asm/mm.h
> +++ b/xen/arch/x86/include/asm/mm.h
> @@ -611,7 +611,15 @@ void destroy_perdomain_mapping(struct do
>                                 unsigned int nr);
>  void free_perdomain_mappings(struct domain *d);
>  
> -void __iomem *ioremap_wc(paddr_t pa, size_t len);
> +static inline void __iomem *ioremap(paddr_t pa, size_t len)
> +{
> +    return ioremap_attr(pa, len, PAGE_HYPERVISOR_UCMINUS);
> +}
> +
> +static inline void __iomem *ioremap_wc(paddr_t pa, size_t len)
> +{
> +    return ioremap_attr(pa, len, PAGE_HYPERVISOR_WC);
> +}
>  
>  extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
>  
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -6166,41 +6166,15 @@ void *__init arch_vmap_virt_end(void)
>      return fix_to_virt(__end_of_fixed_addresses);
>  }
>  
> -void __iomem *ioremap(paddr_t pa, size_t len)
> +void __iomem *x86_ioremap_attr(paddr_t pa, size_t len, pte_attr_t attr)
>  {
> -    mfn_t mfn = _mfn(PFN_DOWN(pa));
> -    void *va;
> -
> -    WARN_ON(page_is_ram_type(mfn_x(mfn), RAM_TYPE_CONVENTIONAL));
> +    WARN_ON(page_is_ram_type(PFN_DOWN(pa), RAM_TYPE_CONVENTIONAL));
>  
>      /* The low first Mb is always mapped. */
> -    if ( !((pa + len - 1) >> 20) )
> -        va = __va(pa);
> -    else
> -    {
> -        unsigned int offs = pa & (PAGE_SIZE - 1);
> -        unsigned int nr = PFN_UP(offs + len);
> -
> -        va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_UCMINUS, VMAP_DEFAULT);
> -        if ( va )
> -            va += offs;
> -    }
> -
> -    return (void __force __iomem *)va;
> -}
> -
> -void __iomem *__init ioremap_wc(paddr_t pa, size_t len)
> -{
> -    mfn_t mfn = _mfn(PFN_DOWN(pa));
> -    unsigned int offs = pa & (PAGE_SIZE - 1);
> -    unsigned int nr = PFN_UP(offs + len);
> -    void *va;
> -
> -    WARN_ON(page_is_ram_type(mfn_x(mfn), RAM_TYPE_CONVENTIONAL));
> -
> -    va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_WC, VMAP_DEFAULT);
> +    if ( !((pa + len - 1) >> 20) && attr == PAGE_HYPERVISOR_UCMINUS )
> +        return (void __force __iomem *)__va(pa);
>  
> -    return (void __force __iomem *)(va ? va + offs : NULL);
> +    return NULL;
>  }
>  
>  int create_perdomain_mapping(struct domain *d, unsigned long va,
> --- a/xen/common/Makefile
> +++ b/xen/common/Makefile
> @@ -19,6 +19,7 @@ obj-$(CONFIG_GRANT_TABLE) += grant_table
>  obj-y += guestcopy.o
>  obj-y += gzip/
>  obj-$(CONFIG_HYPFS) += hypfs.o
> +obj-y += ioremap.o
>  obj-$(CONFIG_IOREQ_SERVER) += ioreq.o
>  obj-y += irq.o
>  obj-y += kernel.o
> --- /dev/null
> +++ b/xen/common/ioremap.c
> @@ -0,0 +1,42 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +
> +#include <xen/mm.h>
> +#include <xen/pfn.h>
> +#include <xen/vmap.h>
> +
> +#include <asm/io.h>
> +
> +void __iomem *ioremap_attr(paddr_t pa, size_t len, pte_attr_t attr)
> +{
> +    void __iomem *ptr = NULL;
> +    unsigned int offs = PAGE_OFFSET(pa);
> +
> +#ifdef arch_ioremap_attr
> +    ptr = arch_ioremap_attr(pa, len, attr);
I made an observation reviewing this patch.

TL;DR: Nothing wrong with this patch

For Arm MPU, callers now receive a pointer adjusted by the sub-page offset instead of a
page-aligned one. mpu_ioremap_attr() computes end_pg = round_pgup(start_pg + len)
instead of round_pgup(start + len). Before, this was masked because the returned
pointer was page-aligned, so callers effectively started from the page base. Now
that the sub-page offset is correctly applied to the returned pointer, a caller
using a non-page-aligned start with a len that crosses a page boundary would
access memory from a different region.

I need to send a fix for MPU.

Reviewed-by: Michal Orzel <michal.orzel@xxxxxxx>

~Michal




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.