
[Xen-devel] [PATCH 4/7] Update sparse tree with page_alloc.c patches from 2.6.18.8



Update the sparse tree with the following patches from 2.6.18.8 (brief
context on the individual changes is given below, around the diff):

zone_reclaim: dynamic slab reclaim [1]
do not free non slab allocated per_cpu_pageset [2]
[PATCH] Reintroduce NODES_SPAN_OTHER_NODES for powerpc [3]
[PATCH] vmscan: Fix temp_priority race [4]

[1] http://git.kernel.org/?p=linux/kernel/git/stable/linux-2.6.18.y.git;a=commitdiff;h=be64642c614ee7b193a75da3731c7ee397c21b4b#patch6
[2] http://git.kernel.org/?p=linux/kernel/git/stable/linux-2.6.18.y.git;a=commitdiff;h=45aa989ccaf46972daeca0dd12e98f7d89f73fc7
[3] http://git.kernel.org/?p=linux/kernel/git/stable/linux-2.6.18.y.git;a=commitdiff;h=d940c78f8aa3e60fe1681839833567b5632fe22a#patch4
[4] http://git.kernel.org/?p=linux/kernel/git/stable/linux-2.6.18.y.git;a=commitdiff;h=252287f4e8e825fbced96f1a8bc7dc1dfead325c#patch2
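
For context on the first hunk below: with CONFIG_NODES_SPAN_OTHER_NODES set
(needed on powerpc, where one node's pfn range can contain pfns that belong
to another node), memmap_init_zone() has to skip pfns owned by a different
node, or it would initialize their struct pages with the wrong node links.
The new guard relies on the early_pfn_in_nid() helper, which as I read the
2.6.18 sources is defined roughly as follows (sketch, not part of this diff):

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
/* pfn is local only if the early node map assigns it to this node */
#define early_pfn_in_nid(pfn, nid)      (early_pfn_to_nid(pfn) == (nid))
#else
/* without overlapping nodes, every valid pfn is local to its zone's node */
#define early_pfn_in_nid(pfn, nid)      (1)
#endif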

--- a/linux-2.6-xen-sparse/mm/page_alloc.c
+++ b/linux-2.6-xen-sparse/mm/page_alloc.c
@@ -1687,6 +1687,8 @@ void __meminit memmap_init_zone(unsigned
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                if (!early_pfn_valid(pfn))
                        continue;
+               if (!early_pfn_in_nid(pfn, nid))
+                       continue;
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
                init_page_count(page);
@@ -1859,8 +1861,10 @@ static inline void free_zone_pagesets(in
        for_each_zone(zone) {
                struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
 
+               /* Free per_cpu_pageset if it is slab allocated */
+               if (pset != &boot_pageset[cpu])
+                       kfree(pset);
                zone_pcp(zone, cpu) = NULL;
-               kfree(pset);
        }
 }
 
@@ -2022,6 +2026,7 @@ static void __meminit free_area_init_cor
 #ifdef CONFIG_NUMA
                zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio)
                                                / 100;
+               zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
 #endif
                zone->name = zone_names[j];
                spin_lock_init(&zone->lock);
@@ -2030,7 +2035,7 @@ static void __meminit free_area_init_cor
                zone->zone_pgdat = pgdat;
                zone->free_pages = 0;
 
-               zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
+               zone->prev_priority = DEF_PRIORITY;
 
                zone_pcp_init(zone);
                INIT_LIST_HEAD(&zone->active_list);
@@ -2332,6 +2337,22 @@ int sysctl_min_unmapped_ratio_sysctl_han
                                sysctl_min_unmapped_ratio) / 100;
        return 0;
 }
+
+int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
+       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+       struct zone *zone;
+       int rc;
+
+       rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       if (rc)
+               return rc;
+
+       for_each_zone(zone)
+               zone->min_slab_pages = (zone->present_pages *
+                               sysctl_min_slab_ratio) / 100;
+       return 0;
+}
 #endif
 
 /*
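
For reference, the min_slab_pages floor initialized above is what
zone_reclaim() in mm/vmscan.c compares against when deciding whether to
shrink slab caches for a zone; that side of the change is in [1] but not in
this page_alloc.c diff. A simplified sketch of the decision (the actual
code loops on shrink_slab(), since it cannot report per-zone progress):

        /* reclaim slab for this zone only once its reclaimable slab
         * exceeds the floor computed from sysctl_min_slab_ratio */
        if (zone_page_state(zone, NR_SLAB_RECLAIMABLE) > zone->min_slab_pages) {
                /* ... shake the slab caches via shrink_slab() ... */
        }

With the upstream default sysctl_min_slab_ratio of 5, a zone with 262144
present pages (1 GiB at 4 KiB pages) gets min_slab_pages = 262144 * 5 / 100
= 13107 pages, i.e. slab reclaim only kicks in once reclaimable slab in the
zone exceeds roughly 51 MiB.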

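The temp_priority removal in the fourth hunk likewise has its counterpart in
mm/vmscan.c (see [4]): temp_priority was updated without locking, so a
racing reclaimer could leave a stale value that then got copied into
prev_priority. The upstream fix drops the intermediate field and updates
prev_priority directly while scanning, via a helper along these lines
(sketch, not part of this diff):

static void note_zone_scanning_priority(struct zone *zone, int priority)
{
        /* remember the most urgent (lowest) priority this zone was
         * scanned at; no racy temp_priority staging step any more */
        if (priority < zone->prev_priority)
                zone->prev_priority = priority;
}
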
-- 
S.Çağlar Onur <caglar@xxxxxxxxxxxxx>
http://cekirdek.pardus.org.tr/~caglar/

Linux is like living in a teepee. No Windows, no Gates and an Apache in house!

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

