
[mini-os master] mini-os: mm: convert set_readonly() to use walk_pt()

commit 3932490a01cfb1bf69a1bf95c9b1f37a3fdce8b3
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Tue Aug 20 16:11:31 2024 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Aug 20 16:11:31 2024 +0200

    mini-os: mm: convert set_readonly() to use walk_pt()
    
    Instead of having another copy of a page table walk in set_readonly(),
    just use walk_pt().
    
    As it will be needed later anyway, split out the TLB flushing into a
    dedicated function.
    
    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxx>
---
 arch/x86/mm.c | 124 ++++++++++++++++++++++++++--------------------------------
 1 file changed, 56 insertions(+), 68 deletions(-)
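
For readers not familiar with walk_pt(): it walks the page tables covering a
virtual address range and invokes a caller-supplied callback for every entry
it visits, so callers such as set_readonly() only have to provide the
per-entry action.  Below is a minimal, hypothetical sketch of another
walk_pt() user, written against the interfaces visible in this patch
(walk_pt(), ptdata[], pgentry_t) and assuming that a non-zero callback return
ends the walk, as set_readonly_func() relies on.

/* Hypothetical example (not part of this commit): count the leaf mappings
 * in a virtual address range using the same walk_pt() callback pattern as
 * set_readonly_func().  walk_pt(), ptdata[] and pgentry_t are the mini-os
 * internals shown in the diff below; count_leaves() itself is invented for
 * illustration only.
 */
struct count_leaves_par {
    unsigned long end;     /* do not count mappings reaching past this */
    unsigned long leaves;  /* number of leaf entries seen so far */
};

static int count_leaves_func(unsigned long va, unsigned int lvl, bool is_leaf,
                             pgentry_t *pte, void *par)
{
    struct count_leaves_par *cl = par;

    /* Only leaf entries map memory; keep walking through the tables. */
    if ( !is_leaf )
        return 0;

    /* Entry extends past the requested range: stop the walk (non-zero). */
    if ( va + (1UL << ptdata[lvl].shift) > cl->end )
        return 1;

    cl->leaves++;

    return 0;
}

static unsigned long count_leaves(unsigned long start, unsigned long end)
{
    struct count_leaves_par cl = { .end = end };

    walk_pt(start, end, count_leaves_func, &cl);

    return cl.leaves;
}

In the commit itself the callback is set_readonly_func(): on CONFIG_PARAVIRT
it batches the PTE changes into mmu_updates[] and issues
HYPERVISOR_mmu_update() in chunks of L1_PAGETABLE_ENTRIES, with set_readonly()
flushing any remainder and finally calling the new tlb_flush() helper.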

diff --git a/arch/x86/mm.c b/arch/x86/mm.c
index 84a6d7f..572e762 100644
--- a/arch/x86/mm.c
+++ b/arch/x86/mm.c
@@ -402,92 +402,80 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
  * Mark portion of the address space read only.
  */
 extern struct shared_info shared_info;
-static void set_readonly(void *text, void *etext)
-{
-    unsigned long start_address =
-        ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
-    unsigned long end_address = (unsigned long) etext;
-    pgentry_t *tab = pt_base, page;
-    unsigned long mfn = pfn_to_mfn(virt_to_pfn(pt_base));
-    unsigned long offset;
-    unsigned long page_size = PAGE_SIZE;
+
+struct set_readonly_par {
+    unsigned long etext;
 #ifdef CONFIG_PARAVIRT
-    int count = 0;
-    int rc;
+    unsigned int count;
 #endif
+};
 
-    printk("setting %p-%p readonly\n", text, etext);
+static int set_readonly_func(unsigned long va, unsigned int lvl, bool is_leaf,
+                             pgentry_t *pte, void *par)
+{
+    struct set_readonly_par *ro = par;
 
-    while ( start_address + page_size <= end_address )
-    {
-        tab = pt_base;
-        mfn = pfn_to_mfn(virt_to_pfn(pt_base));
+    if ( !is_leaf )
+        return 0;
 
-#if defined(__x86_64__)
-        offset = l4_table_offset(start_address);
-        page = tab[offset];
-        mfn = pte_to_mfn(page);
-        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
-        offset = l3_table_offset(start_address);
-        page = tab[offset];
-        mfn = pte_to_mfn(page);
-        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-        offset = l2_table_offset(start_address);        
-        if ( !(tab[offset] & _PAGE_PSE) )
-        {
-            page = tab[offset];
-            mfn = pte_to_mfn(page);
-            tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+    if ( va + (1UL << ptdata[lvl].shift) > ro->etext )
+        return 1;
 
-            offset = l1_table_offset(start_address);
-        }
+    if ( va == (unsigned long)&shared_info )
+    {
+        printk("skipped %lx\n", va);
+        return 0;
+    }
 
-        if ( start_address != (unsigned long)&shared_info )
-        {
 #ifdef CONFIG_PARAVIRT
-            mmu_updates[count].ptr = 
-                ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
-            mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
-            count++;
+    mmu_updates[ro->count].ptr = virt_to_mach(pte);
+    mmu_updates[ro->count].val = *pte & ~_PAGE_RW;
+    ro->count++;
+
+    if ( ro->count == L1_PAGETABLE_ENTRIES )
+    {
+         if ( HYPERVISOR_mmu_update(mmu_updates, ro->count, NULL,
+                                    DOMID_SELF) < 0 )
+             BUG();
+         ro->count = 0;
+    }
 #else
-            tab[offset] &= ~_PAGE_RW;
+    *pte &= ~_PAGE_RW;
 #endif
-        }
-        else
-            printk("skipped %lx\n", start_address);
 
-        start_address += page_size;
+    return 0;
+}
 
 #ifdef CONFIG_PARAVIRT
-        if ( count == L1_PAGETABLE_ENTRIES || 
-             start_address + page_size > end_address )
-        {
-            rc = HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF);
-            if ( rc < 0 )
-            {
-                printk("ERROR: set_readonly(): PTE could not be updated\n");
-                do_exit();
-            }
-            count = 0;
-        }
+static void tlb_flush(void)
+{
+    mmuext_op_t op = { .cmd = MMUEXT_TLB_FLUSH_ALL };
+    int count;
+
+    HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
+}
 #else
-        if ( start_address == (1UL << L2_PAGETABLE_SHIFT) )
-            page_size = 1UL << L2_PAGETABLE_SHIFT;
+static void tlb_flush(void)
+{
+    write_cr3((unsigned long)pt_base);
+}
 #endif
-    }
+
+static void set_readonly(void *text, void *etext)
+{
+    struct set_readonly_par setro = { .etext = (unsigned long)etext };
+    unsigned long start_address = PAGE_ALIGN((unsigned long)text);
+
+    printk("setting %p-%p readonly\n", text, etext);
+    walk_pt(start_address, setro.etext, set_readonly_func, &setro);
 
 #ifdef CONFIG_PARAVIRT
-    {
-        mmuext_op_t op = {
-            .cmd = MMUEXT_TLB_FLUSH_ALL,
-        };
-        int count;
-        HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
-    }
-#else
-    write_cr3((unsigned long)pt_base);
+    if ( setro.count &&
+         HYPERVISOR_mmu_update(mmu_updates, setro.count, NULL, DOMID_SELF) < 0)
+        BUG();
 #endif
+
+    tlb_flush();
 }
 
 /*
--
generated by git-patchbot for /home/xen/git/mini-os.git#master