author    Andrew Morton <akpm@linux-foundation.org>  2024-04-12 17:49:42 -0700
committer Andrew Morton <akpm@linux-foundation.org>  2024-04-12 17:49:42 -0700
commit    977fa12dfee29e48452b6b1b783e1cc35acdf3cc (patch)
tree      0253cae3745b4fb9e33346ec5d7446c18ae03e3d
parent    264cbabb93355780b816628546e6465a1f615bb9 (diff)
foo
-rw-r--r--  patches/mm-add-defines-for-min-max-swappiness.patch                                   8
-rw-r--r--  patches/mm-add-swapiness=-arg-to-memoryreclaim.patch                                  6
-rw-r--r--  patches/mm-free-up-pg_slab.patch                                                      6
-rw-r--r--  patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios-v2.patch                2
-rw-r--r--  patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch                   2
-rw-r--r--  patches/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch 2
-rw-r--r--  patches/mm-memory-failure-use-folio_mapcount-in-hwpoison_user_mappings.patch          2
-rw-r--r--  patches/mm-optimization-on-page-allocation-when-cma-enabled.patch                     4
-rw-r--r--  patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch      2
-rw-r--r--  patches/mm-rename-vma_pgoff_address-back-to-vma_address.patch                         2
10 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/patches/mm-add-defines-for-min-max-swappiness.patch b/patches/mm-add-defines-for-min-max-swappiness.patch
index 34cf66178..fe36dbb4a 100644
--- a/patches/mm-add-defines-for-min-max-swappiness.patch
+++ b/patches/mm-add-defines-for-min-max-swappiness.patch
@@ -150,7 +150,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
*/
int vm_swappiness = 60;
-@@ -2428,7 +2428,7 @@ static void get_scan_count(struct lruvec
+@@ -2431,7 +2431,7 @@ static void get_scan_count(struct lruvec
ap = swappiness * (total_cost + 1);
ap /= anon_cost + 1;
@@ -159,7 +159,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fp /= file_cost + 1;
fraction[0] = ap;
-@@ -4448,7 +4448,7 @@ static int get_type_to_scan(struct lruve
+@@ -4451,7 +4451,7 @@ static int get_type_to_scan(struct lruve
{
int type, tier;
struct ctrl_pos sp, pv;
@@ -168,7 +168,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
* Compare the first tier of anon with that of file to determine which
-@@ -4495,7 +4495,7 @@ static int isolate_folios(struct lruvec
+@@ -4498,7 +4498,7 @@ static int isolate_folios(struct lruvec
type = LRU_GEN_ANON;
else if (swappiness == 1)
type = LRU_GEN_FILE;
@@ -177,7 +177,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
type = LRU_GEN_ANON;
else if (!(sc->gfp_mask & __GFP_IO))
type = LRU_GEN_FILE;
-@@ -5429,9 +5429,9 @@ static int run_cmd(char cmd, int memcg_i
+@@ -5432,9 +5432,9 @@ static int run_cmd(char cmd, int memcg_i
lruvec = get_lruvec(memcg, nid);
diff --git a/patches/mm-add-swapiness=-arg-to-memoryreclaim.patch b/patches/mm-add-swapiness=-arg-to-memoryreclaim.patch
index 315052940..9af08aeef 100644
--- a/patches/mm-add-swapiness=-arg-to-memoryreclaim.patch
+++ b/patches/mm-add-swapiness=-arg-to-memoryreclaim.patch
@@ -305,7 +305,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#endif
static void set_task_reclaim_state(struct task_struct *task,
-@@ -2352,7 +2369,7 @@ static void get_scan_count(struct lruvec
+@@ -2355,7 +2372,7 @@ static void get_scan_count(struct lruvec
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
unsigned long anon_cost, file_cost, total_cost;
@@ -314,7 +314,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
u64 fraction[ANON_AND_FILE];
u64 denominator = 0; /* gcc */
enum scan_balance scan_balance;
-@@ -2633,7 +2650,7 @@ static int get_swappiness(struct lruvec
+@@ -2636,7 +2653,7 @@ static int get_swappiness(struct lruvec
mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
return 0;
@@ -323,7 +323,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
static int get_nr_gens(struct lruvec *lruvec, int type)
-@@ -6514,12 +6531,14 @@ unsigned long mem_cgroup_shrink_node(str
+@@ -6517,12 +6534,14 @@ unsigned long mem_cgroup_shrink_node(str
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
diff --git a/patches/mm-free-up-pg_slab.patch b/patches/mm-free-up-pg_slab.patch
index 7cb00f5bb..9178048e2 100644
--- a/patches/mm-free-up-pg_slab.patch
+++ b/patches/mm-free-up-pg_slab.patch
@@ -109,7 +109,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
DEF_PAGETYPE_NAME(guard), \
--- a/mm/memory-failure.c~mm-free-up-pg_slab
+++ a/mm/memory-failure.c
-@@ -1245,7 +1245,6 @@ static int me_huge_page(struct page_stat
+@@ -1251,7 +1251,6 @@ static int me_huge_page(struct page_stat
#define mlock (1UL << PG_mlocked)
#define lru (1UL << PG_lru)
#define head (1UL << PG_head)
@@ -117,7 +117,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#define reserved (1UL << PG_reserved)
static struct page_state error_states[] = {
-@@ -1255,13 +1254,6 @@ static struct page_state error_states[]
+@@ -1261,13 +1260,6 @@ static struct page_state error_states[]
* PG_buddy pages only make a small fraction of all free pages.
*/
@@ -131,7 +131,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{ head, head, MF_MSG_HUGE, me_huge_page },
{ sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
-@@ -1288,7 +1280,6 @@ static struct page_state error_states[]
+@@ -1294,7 +1286,6 @@ static struct page_state error_states[]
#undef mlock
#undef lru
#undef head
diff --git a/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios-v2.patch b/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios-v2.patch
index 5aad29f7d..8d666ee12 100644
--- a/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios-v2.patch
+++ b/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios-v2.patch
@@ -41,7 +41,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
--- a/mm/memory-failure.c~mm-hugetlb-convert-dissolve_free_huge_pages-to-folios-v2
+++ a/mm/memory-failure.c
-@@ -172,8 +172,8 @@ static bool page_handle_poison(struct pa
+@@ -178,8 +178,8 @@ static bool page_handle_poison(struct pa
{
if (hugepage_or_freepage) {
/*
diff --git a/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch b/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch
index 915ee2eb1..bb7d53e91 100644
--- a/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch
+++ b/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch
@@ -109,7 +109,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* queue if we need to refill those.
*/
- ret = dissolve_free_huge_page(page);
-+ ret = dissolve_free_hugetlb_folio(page);
++ ret = dissolve_free_hugetlb_folio(page_folio(page));
if (!ret) {
drain_all_pages(page_zone(page));
ret = take_page_off_buddy(page);
diff --git a/patches/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch b/patches/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch
index 09c6b1225..deab04e66 100644
--- a/patches/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch
+++ b/patches/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch
@@ -119,7 +119,7 @@ lock(pcp_batch_high_lock) is already in the __page_handle_poison().
Link: https://lkml.kernel.org/r/20240407085456.2798193-1-linmiaohe@huawei.com
Fixes: a6b40850c442 ("mm: hugetlb: replace hugetlb_free_vmemmap_enabled with a static_key")
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
-Cc: Oscar Salvador <osalvador@suse.de>
+Acked-by: Oscar Salvador <osalvador@suse.de>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/patches/mm-memory-failure-use-folio_mapcount-in-hwpoison_user_mappings.patch b/patches/mm-memory-failure-use-folio_mapcount-in-hwpoison_user_mappings.patch
index 6be3a45ca..08879638d 100644
--- a/patches/mm-memory-failure-use-folio_mapcount-in-hwpoison_user_mappings.patch
+++ b/patches/mm-memory-failure-use-folio_mapcount-in-hwpoison_user_mappings.patch
@@ -34,7 +34,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/memory-failure.c~mm-memory-failure-use-folio_mapcount-in-hwpoison_user_mappings
+++ a/mm/memory-failure.c
-@@ -1628,8 +1628,8 @@ static bool hwpoison_user_mappings(struc
+@@ -1634,8 +1634,8 @@ static bool hwpoison_user_mappings(struc
unmap_success = !page_mapped(p);
if (!unmap_success)
diff --git a/patches/mm-optimization-on-page-allocation-when-cma-enabled.patch b/patches/mm-optimization-on-page-allocation-when-cma-enabled.patch
index c50a8512c..8a2cb90fb 100644
--- a/patches/mm-optimization-on-page-allocation-when-cma-enabled.patch
+++ b/patches/mm-optimization-on-page-allocation-when-cma-enabled.patch
@@ -35,7 +35,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/page_alloc.c~mm-optimization-on-page-allocation-when-cma-enabled
+++ a/mm/page_alloc.c
-@@ -2173,6 +2173,43 @@ do_steal:
+@@ -2175,6 +2175,43 @@ do_steal:
return page;
}
@@ -79,7 +79,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
* Do the hard work of removing an element from the buddy allocator.
* Call me with the zone->lock already held.
-@@ -2186,12 +2223,11 @@ __rmqueue(struct zone *zone, unsigned in
+@@ -2188,12 +2225,11 @@ __rmqueue(struct zone *zone, unsigned in
if (IS_ENABLED(CONFIG_CMA)) {
/*
* Balance movable allocations between regular and CMA areas by
diff --git a/patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch b/patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch
index 43cdba5cb..edea8e5aa 100644
--- a/patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch
+++ b/patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch
@@ -70,7 +70,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
--- a/mm/memory-failure.c~mm-record-the-migration-reason-for-struct-migration_target_control
+++ a/mm/memory-failure.c
-@@ -2663,6 +2663,7 @@ static int soft_offline_in_use_page(stru
+@@ -2669,6 +2669,7 @@ static int soft_offline_in_use_page(stru
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
diff --git a/patches/mm-rename-vma_pgoff_address-back-to-vma_address.patch b/patches/mm-rename-vma_pgoff_address-back-to-vma_address.patch
index 9b111dd21..d58a432a0 100644
--- a/patches/mm-rename-vma_pgoff_address-back-to-vma_address.patch
+++ b/patches/mm-rename-vma_pgoff_address-back-to-vma_address.patch
@@ -43,7 +43,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/memory-failure.c~mm-rename-vma_pgoff_address-back-to-vma_address
+++ a/mm/memory-failure.c
-@@ -449,7 +449,7 @@ static void __add_to_kill(struct task_st
+@@ -455,7 +455,7 @@ static void __add_to_kill(struct task_st
tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
if (is_zone_device_page(p)) {
if (fsdax_pgoff != FSDAX_INVALID_PGOFF)