Commit d2136d7

Baolin Wang authored and akpm00 committed
mm: support multi-size THP numa balancing
Anonymous page allocation already supports multi-size THP (mTHP), but NUMA balancing still prohibits mTHP migration even for exclusive mappings, which is unreasonable.

Allow scanning mTHP:
Commit 859d4ad ("mm: numa: do not trap faults on shared data section pages") skips NUMA migration for shared CoW pages to avoid migrating shared data segments. In addition, commit 80d47f5 ("mm: don't try to NUMA-migrate COW pages that have other uses") changed to using page_count() to avoid migrating GUP pages, which also skips mTHP NUMA scanning. Theoretically, we can use folio_maybe_dma_pinned() to detect the GUP issue; although there is still a GUP race, the issue seems to have been resolved by commit 80d47f5. Meanwhile, use folio_likely_mapped_shared() to skip shared CoW pages, though this is not a precise sharers count. To check whether the folio is shared, ideally we would want to make sure every page is mapped by the same process, but doing that seems expensive, and the estimated mapcount works well when running the autonuma benchmark.

Allow migrating mTHP:
As mentioned in the previous thread[1], large folios (including THP) are more susceptible to false sharing among threads than 4K base pages, leading to pages ping-ponging back and forth during NUMA balancing, which is currently not easy to resolve. Therefore, as a start for mTHP NUMA balancing, follow the PMD-mapped THP strategy: reuse the 2-stage filter in should_numa_migrate_memory() to check whether the mTHP is being heavily contended among threads (by checking the CPU id and pid of the last access), which avoids false sharing to some degree. Likewise, restore all PTE mappings of a large folio upon the first hint page fault, again following the PMD-mapped THP strategy. In the future, the NUMA balancing algorithm can be further optimized to avoid false sharing with large folios as much as possible.

Performance data:
Machine environment: 2 nodes, 128 cores Intel(R) Xeon(R) Platinum
Base: 2024-03-25 mm-unstable branch
Enable mTHP to run autonuma-benchmark (lower is better)

mTHP: 16K                 Base     Patched
numa01                  224.70      143.48
numa01_THREAD_ALLOC     118.05       47.43
numa02                   13.45        9.29
numa02_SMT               14.80        7.50

mTHP: 64K                 Base     Patched
numa01                  216.15      114.40
numa01_THREAD_ALLOC     115.35       47.41
numa02                   13.24        9.25
numa02_SMT               14.67        7.34

mTHP: 128K                Base     Patched
numa01                  205.13      144.45
numa01_THREAD_ALLOC     112.93       41.88
numa02                   13.16        9.18
numa02_SMT               14.81        7.49

[1] https://lore.kernel.org/all/[email protected]/

[[email protected]: v3]
Link: https://lkml.kernel.org/r/c33a5c0b0a0323b1f8ed53772f50501f4b196e25.1712132950.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/d28d276d599c26df7f38c9de8446f60e22dd1950.1711683069.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <[email protected]>
Reviewed-by: "Huang, Ying" <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Kefeng Wang <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Ryan Roberts <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
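For readers unfamiliar with the 2-stage filter mentioned above, the sketch below is a deliberately simplified userspace model of the idea only. It is not the kernel's should_numa_migrate_memory(), which also weighs the task's preferred node, fault statistics and node affinity; the names (toy_folio, toy_should_migrate) and the cpupid encoding are hypothetical. The point it shows: a folio records the CPU id and pid of its last hint fault, and migration is approved only when the same task faults on it twice in a row, so folios bouncing between threads stay put.

#include <stdbool.h>
#include <stdio.h>

struct toy_folio {
	int last_cpupid;	/* (cpu, pid) of the last NUMA hint fault */
};

#define CPUPID_UNSET (-1)

/* Toy encoding of (cpu, pid) into one int; not the kernel's layout. */
static int encode_cpupid(int cpu, int pid)
{
	return (pid << 8) | cpu;
}

static bool toy_should_migrate(struct toy_folio *folio, int cpu, int pid)
{
	int this_cpupid = encode_cpupid(cpu, pid);
	int last_cpupid = folio->last_cpupid;

	/* Record this access; decide based on the previous one. */
	folio->last_cpupid = this_cpupid;

	/* Stage 1: no history yet, defer migration. */
	if (last_cpupid == CPUPID_UNSET)
		return false;

	/* Stage 2: migrate only if the same task faulted last time;
	 * a mismatch suggests the folio ping-pongs between threads. */
	return last_cpupid == this_cpupid;
}

int main(void)
{
	struct toy_folio f = { .last_cpupid = CPUPID_UNSET };

	printf("fault 1 (cpu 0, pid 100) -> migrate? %d\n", toy_should_migrate(&f, 0, 100)); /* 0 */
	printf("fault 2 (cpu 0, pid 100) -> migrate? %d\n", toy_should_migrate(&f, 0, 100)); /* 1 */
	printf("fault 3 (cpu 3, pid 200) -> migrate? %d\n", toy_should_migrate(&f, 3, 200)); /* 0 */
	return 0;
}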
1 parent 6b0ed7b commit d2136d7

File tree

2 files changed: +52 -13 lines changed

mm/memory.c

Lines changed: 50 additions & 12 deletions
@@ -5064,29 +5064,64 @@ int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
 }
 
 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
+					unsigned long fault_addr, pte_t *fault_pte,
 					bool writable)
 {
 	pte_t pte, old_pte;
 
-	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
+	old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
 	pte = pte_modify(old_pte, vma->vm_page_prot);
 	pte = pte_mkyoung(pte);
 	if (writable)
 		pte = pte_mkwrite(pte, vma);
-	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
-	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+	ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
+	update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
+}
+
+static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
+				       struct folio *folio, pte_t fault_pte,
+				       bool ignore_writable, bool pte_write_upgrade)
+{
+	int nr = pte_pfn(fault_pte) - folio_pfn(folio);
+	unsigned long start = max(vmf->address - nr * PAGE_SIZE, vma->vm_start);
+	unsigned long end = min(vmf->address + (folio_nr_pages(folio) - nr) * PAGE_SIZE, vma->vm_end);
+	pte_t *start_ptep = vmf->pte - (vmf->address - start) / PAGE_SIZE;
+	unsigned long addr;
+
+	/* Restore all PTEs' mapping of the large folio */
+	for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
+		pte_t ptent = ptep_get(start_ptep);
+		bool writable = false;
+
+		if (!pte_present(ptent) || !pte_protnone(ptent))
+			continue;
+
+		if (pfn_folio(pte_pfn(ptent)) != folio)
+			continue;
+
+		if (!ignore_writable) {
+			ptent = pte_modify(ptent, vma->vm_page_prot);
+			writable = pte_write(ptent);
+			if (!writable && pte_write_upgrade &&
+			    can_change_pte_writable(vma, addr, ptent))
+				writable = true;
+		}
+
+		numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
+	}
 }
 
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct folio *folio = NULL;
 	int nid = NUMA_NO_NODE;
-	bool writable = false;
+	bool writable = false, ignore_writable = false;
+	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
 	int last_cpupid;
 	int target_nid;
 	pte_t pte, old_pte;
-	int flags = 0;
+	int flags = 0, nr_pages;
 
 	/*
 	 * The pte cannot be used safely until we verify, while holding the page
@@ -5108,18 +5143,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	 * is only valid while holding the PT lock.
 	 */
 	writable = pte_write(pte);
-	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
+	if (!writable && pte_write_upgrade &&
 	    can_change_pte_writable(vma, vmf->address, pte))
 		writable = true;
 
 	folio = vm_normal_folio(vma, vmf->address, pte);
 	if (!folio || folio_is_zone_device(folio))
 		goto out_map;
 
-	/* TODO: handle PTE-mapped THP */
-	if (folio_test_large(folio))
-		goto out_map;
-
 	/*
 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
 	 * much anyway since they can be in shared cache state. This misses
@@ -5139,6 +5170,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 		flags |= TNF_SHARED;
 
 	nid = folio_nid(folio);
+	nr_pages = folio_nr_pages(folio);
 	/*
 	 * For memory tiering mode, cpupid of slow memory page is used
 	 * to record page access time. So use default value.
@@ -5155,6 +5187,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	}
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	writable = false;
+	ignore_writable = true;
 
 	/* Migrate to the requested node */
 	if (migrate_misplaced_folio(folio, vma, target_nid)) {
@@ -5175,14 +5208,19 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 
 out:
 	if (nid != NUMA_NO_NODE)
-		task_numa_fault(last_cpupid, nid, 1, flags);
+		task_numa_fault(last_cpupid, nid, nr_pages, flags);
 	return 0;
 out_map:
 	/*
 	 * Make it present again, depending on how arch implements
 	 * non-accessible ptes, some can allow access by kernel mode.
	 */
-	numa_rebuild_single_mapping(vmf, vma, writable);
+	if (folio && folio_test_large(folio))
+		numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
+					   pte_write_upgrade);
+	else
+		numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
+					    writable);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	goto out;
 }
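One subtlety in numa_rebuild_large_mapping() above is the start/end computation: a large folio may extend past the VMA on either side, so the restore loop is clamped to both the folio boundaries and [vma->vm_start, vma->vm_end). Below is a minimal userspace sketch of just that arithmetic; every address and PFN is hypothetical, and max_ul/min_ul stand in for the kernel's max()/min().

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

int main(void)
{
	/* Hypothetical layout: a 16-page (64K) folio whose first pages fall
	 * below vm_start, with the hint fault landing on the folio's 6th page. */
	unsigned long folio_pfn  = 0x1000;		/* folio_pfn(folio) */
	unsigned long nr_pages   = 16;			/* folio_nr_pages(folio) */
	unsigned long fault_pfn  = 0x1005;		/* pte_pfn(fault_pte) */
	unsigned long fault_addr = 0x7f0000005000UL;	/* vmf->address */
	unsigned long vm_start   = 0x7f0000004000UL;	/* clips the folio's first 4 pages */
	unsigned long vm_end     = 0x7f0000020000UL;

	unsigned long nr = fault_pfn - folio_pfn;	/* fault page's index in the folio */
	unsigned long start = max_ul(fault_addr - nr * PAGE_SIZE, vm_start);
	unsigned long end   = min_ul(fault_addr + (nr_pages - nr) * PAGE_SIZE, vm_end);

	printf("restore PTEs over [%#lx, %#lx): %lu of %lu pages\n",
	       start, end, (end - start) / PAGE_SIZE, nr_pages);
	return 0;
}

With these numbers the folio begins 5 pages before the fault address, but vm_start clips its first 4 pages, so the loop rebuilds only 12 of the 16 PTEs.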

mm/mprotect.c

Lines changed: 2 additions & 1 deletion
@@ -129,7 +129,8 @@ static long change_pte_range(struct mmu_gather *tlb,
 
 				/* Also skip shared copy-on-write pages */
 				if (is_cow_mapping(vma->vm_flags) &&
-				    folio_ref_count(folio) != 1)
+				    (folio_maybe_dma_pinned(folio) ||
+				     folio_likely_mapped_shared(folio)))
 					continue;
 
 				/*
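To see why the old folio_ref_count() != 1 test effectively disabled mTHP scanning here, consider the toy model below. It is not kernel code: the struct fields are hypothetical stand-ins for folio_ref_count(), folio_maybe_dma_pinned() and folio_likely_mapped_shared(), and the refcount-per-subpage assumption is a rough simplification. An exclusively mapped 16-page mTHP carries many references, so the old check always skipped it, while the new check only skips folios that are DMA-pinned or likely mapped by multiple processes.

#include <stdbool.h>
#include <stdio.h>

struct toy_folio {
	int ref_count;			/* roughly one reference per mapped subpage */
	bool maybe_dma_pinned;		/* stand-in for folio_maybe_dma_pinned() */
	bool likely_mapped_shared;	/* stand-in for folio_likely_mapped_shared() */
};

/* Old predicate: skip prot_numa on any folio with extra references. */
static bool old_skip(const struct toy_folio *f)
{
	return f->ref_count != 1;
}

/* New predicate: skip only pinned or (likely) process-shared folios. */
static bool new_skip(const struct toy_folio *f)
{
	return f->maybe_dma_pinned || f->likely_mapped_shared;
}

int main(void)
{
	/* An exclusively mapped 16-page (64K) mTHP: many refs, no pin, no sharing. */
	struct toy_folio mthp = { 16, false, false };

	printf("old check skips mTHP: %d\n", old_skip(&mthp));	/* 1: scanning blocked */
	printf("new check skips mTHP: %d\n", new_skip(&mthp));	/* 0: scanning allowed */
	return 0;
}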
