aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authoropeneuler-ci-bot <george@openeuler.sh>2024-04-29 12:18:27 +0000
committerGitee <noreply@gitee.com>2024-04-29 12:18:27 +0000
commit7aae1269eec8e4cfd9483c0ca0abd48f11dc3f45 (patch)
tree9b0bc27ba9929ef30144fb8929da26e72d0e3c3d
parent0ae85f24223376e2edbc83d727a81ac8174120e6 (diff)
parentc46c16f98ddfe6662f2cab0b60803d00e87e7091 (diff)
downloadopenEuler-kernel-openEuler-22.03-LTS-SP1.tar.gz
!6752 [sync] PR-6677: v3 olk-5.10: bugfix for mmopenEuler-22.03-LTS-SP1
Merge Pull Request from: @openeuler-sync-bot Origin pull request: https://gitee.com/openeuler/kernel/pulls/6677 PR sync from: Wupeng Ma <mawupeng1@huawei.com> https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/F2VRDUQ6NU77XVWZ7ZJ63PSSBRJBGCO4/ From: Ma Wupeng <mawupeng1@huawei.com> backport minor bugfix for mm. Changelog since v2: - fix style problem. Changelog since v1: - fix style problem. Mel Gorman (1): mm/page_alloc: always attempt to allocate at least one page during bulk allocation Miaohe Lin (1): mm/madvise: fix potential pte_unmap_unlock pte error -- 2.25.1 https://gitee.com/openeuler/kernel/issues/I9JPDJ Link: https://gitee.com/openeuler/kernel/pulls/6752 Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com> Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
-rw-r--r--mm/madvise.c8
-rw-r--r--mm/page_alloc.c7
2 files changed, 8 insertions, 7 deletions
diff --git a/mm/madvise.c b/mm/madvise.c
index 0a1d6f9d75eaa2..197b375f7d0771 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -432,12 +432,12 @@ regular_page:
if (split_huge_page(page)) {
unlock_page(page);
put_page(page);
- pte_offset_map_lock(mm, pmd, addr, &ptl);
+ orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
break;
}
unlock_page(page);
put_page(page);
- pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte--;
addr -= PAGE_SIZE;
continue;
@@ -645,12 +645,12 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
if (split_huge_page(page)) {
unlock_page(page);
put_page(page);
- pte_offset_map_lock(mm, pmd, addr, &ptl);
+ orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
goto out;
}
unlock_page(page);
put_page(page);
- pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte--;
addr -= PAGE_SIZE;
continue;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4c21965a300793..42117da9424478 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5109,7 +5109,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
struct alloc_context ac;
gfp_t alloc_gfp;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
- int nr_populated = 0;
+ int nr_populated = 0, nr_account = 0;
/*
* Skip populated array elements to determine if any pages need
@@ -5194,11 +5194,12 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
pcp, pcp_list);
if (unlikely(!page)) {
- /* Try and get at least one page */
- if (!nr_populated)
+ /* Try and allocate at least one page */
+ if (!nr_account)
goto failed_irq;
break;
}
+ nr_account++;
/*
* Ideally this would be batched but the best way to do