Diffstat (limited to 'mm')
-rw-r--r--  mm/ksm.c             3
-rw-r--r--  mm/memory-failure.c 11
-rw-r--r--  mm/vmalloc.c         5
3 files changed, 13 insertions, 6 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
index f5138f43f0d27a..452ac8346e6e28 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2153,7 +2153,6 @@ again:
 
 	INIT_HLIST_HEAD(&stable_node_dup->hlist);
 	stable_node_dup->kpfn = kpfn;
-	folio_set_stable_node(kfolio, stable_node_dup);
 	stable_node_dup->rmap_hlist_len = 0;
 	DO_NUMA(stable_node_dup->nid = nid);
 	if (!need_chain) {
@@ -2172,6 +2171,8 @@ again:
 		stable_node_chain_add_dup(stable_node_dup, stable_node);
 	}
 
+	folio_set_stable_node(kfolio, stable_node_dup);
+
 	return stable_node_dup;
 }
 
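The ksm.c change defers folio_set_stable_node() until the new stable node has been fully initialized and, when chaining is needed, linked into its chain, so the folio is only ever pointed at a complete node. A minimal user-space sketch of that initialize-then-publish ordering follows; the structures and helpers are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures, not KSM's real layout. */
struct stable_node {
	unsigned long kpfn;
	int rmap_hlist_len;
};

struct folio {
	struct stable_node *stable_node;	/* the pointer readers follow */
};

static void folio_set_stable_node(struct folio *folio, struct stable_node *node)
{
	folio->stable_node = node;
}

static struct stable_node *stable_tree_insert(struct folio *kfolio,
					      unsigned long kpfn)
{
	struct stable_node *stable_node_dup = malloc(sizeof(*stable_node_dup));

	if (!stable_node_dup)
		return NULL;

	/* Initialize (and, in the kernel, link or chain) the node first. */
	stable_node_dup->kpfn = kpfn;
	stable_node_dup->rmap_hlist_len = 0;

	/* Publish only once the node is complete, as the patch reorders it. */
	folio_set_stable_node(kfolio, stable_node_dup);

	return stable_node_dup;
}

int main(void)
{
	struct folio folio = { .stable_node = NULL };
	struct stable_node *node = stable_tree_insert(&folio, 2153);

	if (node)
		printf("folio now points at node with kpfn=%lu\n",
		       folio.stable_node->kpfn);
	free(node);
	return 0;
}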
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 16ada4fb02b799..d3c830e817e35e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1221,7 +1221,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
 		 * subpages.
 		 */
 		folio_put(folio);
-		if (__page_handle_poison(p) >= 0) {
+		if (__page_handle_poison(p) > 0) {
 			page_ref_inc(p);
 			res = MF_RECOVERED;
 		} else {
@@ -2091,7 +2091,7 @@ retry:
 	 */
 	if (res == 0) {
 		folio_unlock(folio);
-		if (__page_handle_poison(p) >= 0) {
+		if (__page_handle_poison(p) > 0) {
 			page_ref_inc(p);
 			res = MF_RECOVERED;
 		} else {
@@ -2546,6 +2546,13 @@ int unpoison_memory(unsigned long pfn)
 		goto unlock_mutex;
 	}
 
+	if (is_huge_zero_folio(folio)) {
+		unpoison_pr_info("Unpoison: huge zero page is not supported %#lx\n",
+				 pfn, &unpoison_rs);
+		ret = -EOPNOTSUPP;
+		goto unlock_mutex;
+	}
+
 	if (!PageHWPoison(p)) {
 		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
 				 pfn, &unpoison_rs);
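The two one-character changes in memory-failure.c tighten how the return value of __page_handle_poison() is read: a strictly positive result means the raw error page was taken off the buddy allocator, zero means the huge page was dissolved but the raw page could not be taken off, and a negative value means dissolving failed, so only '> 0' may be counted as MF_RECOVERED. The new hunk additionally makes unpoison_memory() reject the huge zero folio with -EOPNOTSUPP. A hedged sketch of that tri-state convention; the function below is an illustrative stand-in, not the kernel's implementation:

#include <stdio.h>

/*
 * Illustrative tri-state return mirroring the convention the callers now
 * rely on (names and logic are stand-ins, not the kernel's):
 *   < 0  dissolving the huge page failed
 *   = 0  dissolved, but the raw page could not be taken off the buddy list
 *   > 0  fully handled; the raw error page is isolated
 */
static int page_handle_poison_sketch(int dissolve_ok, int taken_off_buddy)
{
	if (!dissolve_ok)
		return -1;
	if (!taken_off_buddy)
		return 0;
	return 1;
}

int main(void)
{
	/* Dissolved, but not taken off the buddy list. */
	int res = page_handle_poison_sketch(1, 0);

	/* The fix: '>= 0' would call this recovered; '> 0' does not. */
	if (res > 0)
		printf("MF_RECOVERED\n");
	else
		printf("MF_FAILED (res=%d)\n", res);
	return 0;
}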
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6641be0ca80b4d..5d3aa2dc88a83f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3498,7 +3498,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 {
 	unsigned int nr_allocated = 0;
 	gfp_t alloc_gfp = gfp;
-	bool nofail = false;
+	bool nofail = gfp & __GFP_NOFAIL;
 	struct page *page;
 	int i;
 
@@ -3555,12 +3555,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 		 * and compaction etc.
 		 */
 		alloc_gfp &= ~__GFP_NOFAIL;
-		nofail = true;
 	}
 
 	/* High-order pages or fallback path if "bulk" fails. */
 	while (nr_allocated < nr_pages) {
-		if (fatal_signal_pending(current))
+		if (!nofail && fatal_signal_pending(current))
 			break;
 
 		if (nid == NUMA_NO_NODE)
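The vmalloc.c change derives nofail from the caller's gfp mask on entry to vm_area_alloc_pages() rather than setting it only on the high-order path, and the single-page fallback loop now skips the fatal_signal_pending() bailout for nofail requests, so a __GFP_NOFAIL allocation can no longer return with fewer pages than requested. A simplified user-space sketch of that control flow; the flag value and helpers are stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in gfp flag; the real __GFP_NOFAIL value differs. */
#define GFP_NOFAIL (1u << 0)

static bool fatal_signal_pending(void)
{
	return true;	/* pretend a fatal signal arrived mid-allocation */
}

static unsigned int alloc_pages_loop(unsigned int gfp, unsigned int nr_pages)
{
	/* Derive nofail from the mask once, as the patch does. */
	bool nofail = gfp & GFP_NOFAIL;
	unsigned int nr_allocated = 0;

	while (nr_allocated < nr_pages) {
		/* Only bail out early when the caller tolerates failure. */
		if (!nofail && fatal_signal_pending())
			break;
		nr_allocated++;	/* stand-in for a successful page allocation */
	}
	return nr_allocated;
}

int main(void)
{
	printf("plain:  %u pages\n", alloc_pages_loop(0, 4));
	printf("nofail: %u pages\n", alloc_pages_loop(GFP_NOFAIL, 4));
	return 0;
}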