[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 01/33] s390: Use _pt_s390_gaddr for gmap address tracking



s390 uses page->index to keep track of page tables for the guest address
space. In an attempt to consolidate the usage of page fields in s390,
rename _pt_pad_2 to _pt_s390_gaddr and use it in place of page->index in gmap.

This will help with the splitting of struct ptdesc from struct page, as
well as allow s390 to use _pt_frag_refcount for fragmented page table
tracking.

Since page->_pt_s390_gaddr aliases with page->mapping, ensure it is set to
NULL before freeing the pages as well.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
---
 arch/s390/mm/gmap.c      | 50 +++++++++++++++++++++++++++-------------
 include/linux/mm_types.h |  2 +-
 2 files changed, 35 insertions(+), 17 deletions(-)

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 5a716bdcba05..a61ea1a491dc 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -70,7 +70,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
        page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
        if (!page)
                goto out_free;
-       page->index = 0;
+       page->_pt_s390_gaddr = 0;
        list_add(&page->lru, &gmap->crst_list);
        table = page_to_virt(page);
        crst_table_init(table, etype);
@@ -187,16 +187,20 @@ static void gmap_free(struct gmap *gmap)
        if (!(gmap_is_shadow(gmap) && gmap->removed))
                gmap_flush_tlb(gmap);
        /* Free all segment & region tables. */
-       list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
+       list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+               page->_pt_s390_gaddr = 0;
                __free_pages(page, CRST_ALLOC_ORDER);
+       }
        gmap_radix_tree_free(&gmap->guest_to_host);
        gmap_radix_tree_free(&gmap->host_to_guest);
 
        /* Free additional data for a shadow gmap */
        if (gmap_is_shadow(gmap)) {
                /* Free all page tables. */
-               list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
+               list_for_each_entry_safe(page, next, &gmap->pt_list, lru) {
+                       page->_pt_s390_gaddr = 0;
                        page_table_free_pgste(page);
+               }
                gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
                /* Release reference to the parent */
                gmap_put(gmap->parent);
@@ -318,12 +322,14 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned 
long *table,
                list_add(&page->lru, &gmap->crst_list);
                *table = __pa(new) | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
-               page->index = gaddr;
+               page->_pt_s390_gaddr = gaddr;
                page = NULL;
        }
        spin_unlock(&gmap->guest_table_lock);
-       if (page)
+       if (page) {
+               page->_pt_s390_gaddr = 0;
                __free_pages(page, CRST_ALLOC_ORDER);
+       }
        return 0;
 }
 
@@ -341,7 +347,7 @@ static unsigned long __gmap_segment_gaddr(unsigned long 
*entry)
        offset = (unsigned long) entry / sizeof(unsigned long);
        offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
        page = pmd_pgtable_page((pmd_t *) entry);
-       return page->index + offset;
+       return page->_pt_s390_gaddr + offset;
 }
 
 /**
@@ -1351,6 +1357,7 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned 
long raddr)
        /* Free page table */
        page = phys_to_page(pgt);
        list_del(&page->lru);
+       page->_pt_s390_gaddr = 0;
        page_table_free_pgste(page);
 }
 
@@ -1379,6 +1386,7 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned 
long raddr,
                /* Free page table */
                page = phys_to_page(pgt);
                list_del(&page->lru);
+               page->_pt_s390_gaddr = 0;
                page_table_free_pgste(page);
        }
 }
@@ -1409,6 +1417,7 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned 
long raddr)
        /* Free segment table */
        page = phys_to_page(sgt);
        list_del(&page->lru);
+       page->_pt_s390_gaddr = 0;
        __free_pages(page, CRST_ALLOC_ORDER);
 }
 
@@ -1437,6 +1446,7 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned 
long raddr,
                /* Free segment table */
                page = phys_to_page(sgt);
                list_del(&page->lru);
+               page->_pt_s390_gaddr = 0;
                __free_pages(page, CRST_ALLOC_ORDER);
        }
 }
@@ -1467,6 +1477,7 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned 
long raddr)
        /* Free region 3 table */
        page = phys_to_page(r3t);
        list_del(&page->lru);
+       page->_pt_s390_gaddr = 0;
        __free_pages(page, CRST_ALLOC_ORDER);
 }
 
@@ -1495,6 +1506,7 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned 
long raddr,
                /* Free region 3 table */
                page = phys_to_page(r3t);
                list_del(&page->lru);
+               page->_pt_s390_gaddr = 0;
                __free_pages(page, CRST_ALLOC_ORDER);
        }
 }
@@ -1525,6 +1537,7 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned 
long raddr)
        /* Free region 2 table */
        page = phys_to_page(r2t);
        list_del(&page->lru);
+       page->_pt_s390_gaddr = 0;
        __free_pages(page, CRST_ALLOC_ORDER);
 }
 
@@ -1557,6 +1570,7 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned 
long raddr,
                /* Free region 2 table */
                page = phys_to_page(r2t);
                list_del(&page->lru);
+               page->_pt_s390_gaddr = 0;
                __free_pages(page, CRST_ALLOC_ORDER);
        }
 }
@@ -1762,9 +1776,9 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, 
unsigned long r2t,
        page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
-       page->index = r2t & _REGION_ENTRY_ORIGIN;
+       page->_pt_s390_gaddr = r2t & _REGION_ENTRY_ORIGIN;
        if (fake)
-               page->index |= GMAP_SHADOW_FAKE_TABLE;
+               page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
        s_r2t = page_to_phys(page);
        /* Install shadow region second table */
        spin_lock(&sg->guest_table_lock);
@@ -1814,6 +1828,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, 
unsigned long r2t,
        return rc;
 out_free:
        spin_unlock(&sg->guest_table_lock);
+       page->_pt_s390_gaddr = 0;
        __free_pages(page, CRST_ALLOC_ORDER);
        return rc;
 }
@@ -1846,9 +1861,9 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, 
unsigned long r3t,
        page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
-       page->index = r3t & _REGION_ENTRY_ORIGIN;
+       page->_pt_s390_gaddr = r3t & _REGION_ENTRY_ORIGIN;
        if (fake)
-               page->index |= GMAP_SHADOW_FAKE_TABLE;
+               page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
        s_r3t = page_to_phys(page);
        /* Install shadow region second table */
        spin_lock(&sg->guest_table_lock);
@@ -1898,6 +1913,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, 
unsigned long r3t,
        return rc;
 out_free:
        spin_unlock(&sg->guest_table_lock);
+       page->_pt_s390_gaddr = 0;
        __free_pages(page, CRST_ALLOC_ORDER);
        return rc;
 }
@@ -1930,9 +1946,9 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, 
unsigned long sgt,
        page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
-       page->index = sgt & _REGION_ENTRY_ORIGIN;
+       page->_pt_s390_gaddr = sgt & _REGION_ENTRY_ORIGIN;
        if (fake)
-               page->index |= GMAP_SHADOW_FAKE_TABLE;
+               page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
        s_sgt = page_to_phys(page);
        /* Install shadow region second table */
        spin_lock(&sg->guest_table_lock);
@@ -1982,6 +1998,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, 
unsigned long sgt,
        return rc;
 out_free:
        spin_unlock(&sg->guest_table_lock);
+       page->_pt_s390_gaddr = 0;
        __free_pages(page, CRST_ALLOC_ORDER);
        return rc;
 }
@@ -2014,9 +2031,9 @@ int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long 
saddr,
        if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
                /* Shadow page tables are full pages (pte+pgste) */
                page = pfn_to_page(*table >> PAGE_SHIFT);
-               *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
+               *pgt = page->_pt_s390_gaddr & ~GMAP_SHADOW_FAKE_TABLE;
                *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
-               *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
+               *fake = !!(page->_pt_s390_gaddr & GMAP_SHADOW_FAKE_TABLE);
                rc = 0;
        } else  {
                rc = -EAGAIN;
@@ -2054,9 +2071,9 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, 
unsigned long pgt,
        page = page_table_alloc_pgste(sg->mm);
        if (!page)
                return -ENOMEM;
-       page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
+       page->_pt_s390_gaddr = pgt & _SEGMENT_ENTRY_ORIGIN;
        if (fake)
-               page->index |= GMAP_SHADOW_FAKE_TABLE;
+               page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
        s_pgt = page_to_phys(page);
        /* Install shadow page table */
        spin_lock(&sg->guest_table_lock);
@@ -2101,6 +2118,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, 
unsigned long pgt,
        return rc;
 out_free:
        spin_unlock(&sg->guest_table_lock);
+       page->_pt_s390_gaddr = 0;
        page_table_free_pgste(page);
        return rc;
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3fc9e680f174..2616d64c0e8c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -144,7 +144,7 @@ struct page {
                struct {        /* Page table pages */
                        unsigned long _pt_pad_1;        /* compound_head */
                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
-                       unsigned long _pt_pad_2;        /* mapping */
+                       unsigned long _pt_s390_gaddr;   /* mapping */
                        union {
                                struct mm_struct *pt_mm; /* x86 pgds only */
                                atomic_t pt_frag_refcount; /* powerpc */
-- 
2.39.2




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.