[PATCH v3 1/3] xen/riscv: implement virt_to_maddr()



Implement the virt_to_maddr() function to convert virtual addresses
to machine addresses. The function checks that the given address lies
in a valid range, either the direct mapping region
(DIRECTMAP_VIRT_START) or Xen's linkage region (XEN_VIRT_START). If
the virtual address falls outside these regions, an assertion
triggers.
To implement this, the phys_offset variable is made accessible
outside of riscv/mm.c.

Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
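For illustration (a worked example with hypothetical numbers, not part of
the patch), the linkage-region case relies on the definition
phys_offset = load_start - XEN_VIRT_START:

    Suppose, hypothetically:
      XEN_VIRT_START = 0xffffffffc0000000   (Xen's linkage address)
      load_start     = 0x0000000080200000   (where Xen was actually loaded)

    Then, for an address va in the linkage region:
      phys_offset + va = (load_start - XEN_VIRT_START) + va
                       = load_start + (va - XEN_VIRT_START)

    i.e. va's offset within Xen's image added to the load address, which is
    why no explicit "- XEN_VIRT_START" is needed in the return statement.
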
Changes in v3:
 - Drop ASSERT(va >= (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE)) as it is enough
   to have 'if (...)' for the directmap region and ASSERT() for checking that
   the virtual address is from Xen's linkage virtual space.
 - Add a comment above virt_to_maddr().
 - Update the commit message.
 - Add a comment for the declaration and definition of
   phys_offset: /* = load_start - XEN_VIRT_START */.
---
Changes in v2:
  - Drop casts in virt_to_maddr() for the ASSERT which checks that VA is
    in the range of where Xen is located.
  - Add UL suffix for XEN_VIRT_START by using _AC(..., UL) and add inclusion
    of <xen/const.h>.
  - Add a comment above the return which explains why there is no need
    to do "- XEN_VIRT_START".
---
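A note on the ASSERT() in virt_to_maddr() (a sketch only, assuming the usual
RV64 values PAGE_SHIFT == 12 and PAGETABLE_ORDER == 9): the shift amount is
then 21, so the check compares the 2 MB-aligned block containing va with the
one containing XEN_VIRT_START, and the BUILD_BUG_ON(XEN_VIRT_SIZE != MB(2))
guarantees that this single comparison covers exactly Xen's linkage region:

    /* Under the assumptions above, the ASSERT() is equivalent to: */
    ASSERT((va >> 21) == (_AC(XEN_VIRT_START, UL) >> 21));
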
 xen/arch/riscv/include/asm/config.h |  4 ++++
 xen/arch/riscv/include/asm/mm.h     | 22 +++++++++++++++++++++-
 xen/arch/riscv/mm.c                 |  2 +-
 3 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/xen/arch/riscv/include/asm/config.h b/xen/arch/riscv/include/asm/config.h
index ef68281653..ad75871283 100644
--- a/xen/arch/riscv/include/asm/config.h
+++ b/xen/arch/riscv/include/asm/config.h
@@ -155,6 +155,10 @@
 
 #define IDENT_AREA_SIZE 64
 
+#ifndef __ASSEMBLY__
+extern unsigned long phys_offset; /* = load_start - XEN_VIRT_START */
+#endif
+
 #endif /* ASM__RISCV__CONFIG_H */
 /*
  * Local variables:
diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index 5c79f3def3..ebb142502e 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -5,6 +5,7 @@
 
 #include <public/xen.h>
 #include <xen/bug.h>
+#include <xen/const.h>
 #include <xen/mm-frame.h>
 #include <xen/pdx.h>
 #include <xen/types.h>
@@ -28,7 +29,26 @@ static inline void *maddr_to_virt(paddr_t ma)
     return NULL;
 }
 
-#define virt_to_maddr(va) ({ BUG_ON("unimplemented"); 0; })
+/*
+ * virt_to_maddr() is expected to work with virtual addresses from either
+ * the directmap region or Xen's linkage (XEN_VIRT_START) region.
+ * Therefore, it is sufficient to check only these regions and assert if `va`
+ * is not within the directmap or Xen's linkage region.
+ */
+static inline unsigned long virt_to_maddr(unsigned long va)
+{
+    if ((va >= DIRECTMAP_VIRT_START) &&
+        (va < (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE)))
+        return directmapoff_to_maddr(va - DIRECTMAP_VIRT_START);
+
+    BUILD_BUG_ON(XEN_VIRT_SIZE != MB(2));
+    ASSERT((va >> (PAGETABLE_ORDER + PAGE_SHIFT)) ==
+           (_AC(XEN_VIRT_START, UL) >> (PAGETABLE_ORDER + PAGE_SHIFT)));
+
+    /* phys_offset = load_start - XEN_VIRT_START */
+    return phys_offset + va;
+}
+#define virt_to_maddr(va) virt_to_maddr((unsigned long)(va))
 
 /* Convert between Xen-heap virtual addresses and machine frame numbers. */
 #define __virt_to_mfn(va)  mfn_x(maddr_to_mfn(virt_to_maddr(va)))
diff --git a/xen/arch/riscv/mm.c b/xen/arch/riscv/mm.c
index 4a628aef83..daa02b2c60 100644
--- a/xen/arch/riscv/mm.c
+++ b/xen/arch/riscv/mm.c
@@ -26,7 +26,7 @@ struct mmu_desc {
     pte_t *pgtbl_base;
 };
 
-static unsigned long __ro_after_init phys_offset;
+unsigned long __ro_after_init phys_offset; /* = load_start - XEN_VIRT_START */
 
 #define LOAD_TO_LINK(addr) ((unsigned long)(addr) - phys_offset)
 #define LINK_TO_LOAD(addr) ((unsigned long)(addr) + phys_offset)
-- 
2.46.2




 

