Commit 9acad7b

VMoola authored and akpm00 committed
hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()
hugetlb_no_page() and hugetlb_wp() call anon_vma_prepare(). In preparation for hugetlb to safely handle faults under the VMA lock, use vmf_anon_prepare() here instead.

Additionally, passing hugetlb_wp() the vm_fault struct from hugetlb_fault() works toward cleaning up the hugetlb code and function stack.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
Cc: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 7dac0ec commit 9acad7b
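
For context (not part of this commit's diff): vmf_anon_prepare() lives in mm/memory.c, and at the time of this series it looked roughly like the sketch below. The point of the swap is that a fault taken under the per-VMA lock cannot safely set up the VMA's anon_vma (that may require the mmap lock), so the helper releases the VMA lock and returns VM_FAULT_RETRY, letting the fault be retried under mmap_lock. Callers therefore propagate its vm_fault_t return value instead of hardcoding VM_FAULT_OOM, which is exactly the pattern the hunks below adopt.

/*
 * Sketch of vmf_anon_prepare() as implemented in mm/memory.c around
 * this series; not verbatim from this commit.
 */
vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	if (likely(vma->anon_vma))
		return 0;
	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
		/*
		 * Setting up the anon_vma is not safe under the per-VMA
		 * lock; drop it and have the fault retried under mmap_lock.
		 */
		vma_end_read(vma);
		return VM_FAULT_RETRY;
	}
	if (__anon_vma_prepare(vma))
		return VM_FAULT_OOM;
	return 0;
}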

File tree

1 file changed

+9 −9 lines changed


mm/hugetlb.c

Lines changed: 9 additions & 9 deletions
@@ -5851,7 +5851,8 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *ptep, unsigned int flags,
-		struct folio *pagecache_folio, spinlock_t *ptl)
+		struct folio *pagecache_folio, spinlock_t *ptl,
+		struct vm_fault *vmf)
 {
 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
 	pte_t pte = huge_ptep_get(ptep);
@@ -5985,10 +5986,9 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * When the original hugepage is shared one, it does not have
 	 * anon_vma prepared.
 	 */
-	if (unlikely(anon_vma_prepare(vma))) {
-		ret = VM_FAULT_OOM;
+	ret = vmf_anon_prepare(vmf);
+	if (unlikely(ret))
 		goto out_release_all;
-	}
 
 	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
 		ret = VM_FAULT_HWPOISON_LARGE;
@@ -6228,10 +6228,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			new_pagecache_folio = true;
 		} else {
 			folio_lock(folio);
-			if (unlikely(anon_vma_prepare(vma))) {
-				ret = VM_FAULT_OOM;
+
+			ret = vmf_anon_prepare(vmf);
+			if (unlikely(ret))
 				goto backout_unlocked;
-			}
 			anon_rmap = 1;
 		}
 	} else {
@@ -6298,7 +6298,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
+		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl, vmf);
 	}
 
 	spin_unlock(ptl);
@@ -6521,7 +6521,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(entry)) {
 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
-					 pagecache_folio, ptl);
+					 pagecache_folio, ptl, &vmf);
 			goto out_put_page;
 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
 			entry = huge_pte_mkdirty(entry);
