author	Paul Gortmaker <paul.gortmaker@windriver.com>	2016-09-05 11:27:23 -0400
committer	Paul Gortmaker <paul.gortmaker@windriver.com>	2016-09-05 11:27:23 -0400
commit	26e1a018e274e1b37d55d10fae16041313868f98 (patch)
tree	bba0beb435ca9e2bb267167a4dbd1244cf406cbf
parent	273cb6c93cf422748a28834bca3f1f114afaded4 (diff)
download	4.8-rt-patches-26e1a018e274e1b37d55d10fae16041313868f98.tar.gz
mm: page_alloc refresh for context
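
Upstream mm/page_alloc.c changed in 4.8: hunk offsets moved, the
NR_ALLOC_BATCH accounting that the old context carried is gone, and
PGALLOC is now counted via __count_zid_vm_events().  Refresh the
patch so it applies cleanly; no functional change to the RT
conversion itself.

The patch continues to follow the usual RT conversion: sections that
used local_irq_save()/local_irq_restore() around the per-cpu pagesets
become a named local lock (pa_lock).  A minimal sketch of the
pattern, assuming the locallock primitives carried in the -rt series
(include/linux/locallock.h); touch_pcp_lists() is a hypothetical
helper for illustration, not a function in mm/page_alloc.c:

	#include <linux/locallock.h>

	/* One lock guards all per-cpu pageset operations. */
	static DEFINE_LOCAL_IRQ_LOCK(pa_lock);

	static void touch_pcp_lists(void)
	{
		unsigned long flags;

		/*
		 * Was: local_irq_save(flags).  On !RT this still just
		 * disables interrupts; on RT it takes a per-CPU
		 * spinlock instead, so the section stays preemptible
		 * and only excludes other pa_lock users on this CPU.
		 */
		local_lock_irqsave(pa_lock, flags);
		/* ... operate on this CPU's per_cpu_pages lists ... */
		local_unlock_irqrestore(pa_lock, flags);
	}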
-rw-r--r--	patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch	| 36
1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 8df20b35fb02bd..8eec2756168723 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -1,4 +1,4 @@
-From ccc366366e1c846e32982ccefa5bf322e9fa0e19 Mon Sep 17 00:00:00 2001
+From fc8e6af362117acb2450436f4a3d587d34378f2b Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:37 -0500
Subject: [PATCH] mm: page_alloc: rt-friendly per-cpu pages
@@ -14,7 +14,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 452513bf02ce..22368ad72e7d 100644
+index ea759b935360..6c4983d88a2e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
@@ -44,7 +44,7 @@ index 452513bf02ce..22368ad72e7d 100644
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1243,10 +1256,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+@@ -1230,10 +1243,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ index 452513bf02ce..22368ad72e7d 100644
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2236,14 +2249,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+@@ -2223,14 +2236,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
unsigned long flags;
int to_drain, batch;
@@ -74,7 +74,7 @@ index 452513bf02ce..22368ad72e7d 100644
}
#endif
-@@ -2260,7 +2273,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+@@ -2247,7 +2260,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -83,7 +83,7 @@ index 452513bf02ce..22368ad72e7d 100644
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2268,7 +2281,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+@@ -2255,7 +2268,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
@@ -92,7 +92,7 @@ index 452513bf02ce..22368ad72e7d 100644
}
/*
-@@ -2354,8 +2367,17 @@ void drain_all_pages(struct zone *zone)
+@@ -2341,8 +2354,17 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -110,7 +110,7 @@ index 452513bf02ce..22368ad72e7d 100644
}
#ifdef CONFIG_HIBERNATION
-@@ -2415,7 +2437,7 @@ void free_hot_cold_page(struct page *page, bool cold)
+@@ -2402,7 +2424,7 @@ void free_hot_cold_page(struct page *page, bool cold)
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
@@ -119,7 +119,7 @@ index 452513bf02ce..22368ad72e7d 100644
__count_vm_event(PGFREE);
/*
-@@ -2446,7 +2468,7 @@ void free_hot_cold_page(struct page *page, bool cold)
+@@ -2433,7 +2455,7 @@ void free_hot_cold_page(struct page *page, bool cold)
}
out:
@@ -128,7 +128,7 @@ index 452513bf02ce..22368ad72e7d 100644
}
/*
-@@ -2580,7 +2602,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -2570,7 +2592,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -137,7 +137,7 @@ index 452513bf02ce..22368ad72e7d 100644
do {
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
-@@ -2608,7 +2630,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -2597,7 +2619,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -146,7 +146,7 @@ index 452513bf02ce..22368ad72e7d 100644
do {
page = NULL;
-@@ -2620,12 +2642,14 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -2609,22 +2631,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
if (!page)
page = __rmqueue(zone, order, migratetype);
} while (page && check_new_pages(page, order));
@@ -156,16 +156,12 @@ index 452513bf02ce..22368ad72e7d 100644
+ spin_unlock(&zone->lock);
goto failed;
+ }
- __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
__mod_zone_freepage_state(zone, -(1 << order),
get_pcppage_migratetype(page));
+ spin_unlock(&zone->lock);
}
- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
-@@ -2634,13 +2658,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
-
- __count_zone_vm_events(PGALLOC, zone, 1 << order);
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
@@ -179,7 +175,7 @@ index 452513bf02ce..22368ad72e7d 100644
return NULL;
}
-@@ -6686,6 +6710,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+@@ -6606,6 +6630,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -187,7 +183,7 @@ index 452513bf02ce..22368ad72e7d 100644
}
/*
-@@ -7474,7 +7499,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7401,7 +7426,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -196,7 +192,7 @@ index 452513bf02ce..22368ad72e7d 100644
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7483,7 +7508,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7410,7 +7435,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
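
For reference, the net shape of the refreshed buffered_rmqueue()
hunk for the order > 0 path, reconstructed from the +/- lines above.
This is a simplified sketch, not a verbatim copy of the patched
file; the original patch takes the two locks together through the
-rt helper local_spin_lock_irqsave(pa_lock, &zone->lock, flags),
which the first two lines below spell out:

	local_lock_irqsave(pa_lock, flags);	/* was spin_lock_irqsave() */
	spin_lock(&zone->lock);
	do {
		page = NULL;
		/* ... high-order allocation attempts ... */
		if (!page)
			page = __rmqueue(zone, order, migratetype);
	} while (page && check_new_pages(page, order));
	if (!page) {
		/* failed: releases only pa_lock, so drop zone->lock here */
		spin_unlock(&zone->lock);
		goto failed;
	}
	__mod_zone_freepage_state(zone, -(1 << order),
				  get_pcppage_migratetype(page));
	spin_unlock(&zone->lock);

	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
	zone_statistics(preferred_zone, zone, gfp_flags);
	local_unlock_irqrestore(pa_lock, flags);	/* was local_irq_restore() */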