
mm: page_vma_mapped_walk: map_pte() use pte_offset_map_rw_nolock()

In the caller of map_pte(), we may modify pvmw->pte after acquiring
pvmw->ptl, so convert map_pte() to use pte_offset_map_rw_nolock().  Since
no pte_same() check is performed once pvmw->ptl is held, we must record
the pmd value at map time and perform a pmd_same() check after taking the
lock, to ensure pvmw->pmd is still stable.

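To make the locking protocol concrete, the sketch below shows the
map-then-lock-then-revalidate pattern this patch adopts.  It is an
illustration, not the kernel code verbatim: walk_one_pte() and its
calling convention are invented for this example, while
pte_offset_map_rw_nolock(), pmdp_get_lockless(), pmd_same() and
pte_unmap_unlock() are the real helpers the patch uses.

#include <linux/mm.h>		/* kernel-internal context assumed */
#include <linux/pgtable.h>

/*
 * Hypothetical wrapper illustrating the pattern: map the pte page
 * without taking the ptl, recording the pmd value seen at map time,
 * then take the ptl and confirm the pmd did not change underneath us.
 * Returns the mapped pte with *ptlp held, or NULL if the pmd no
 * longer points to a pte page.
 */
static pte_t *walk_one_pte(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

again:
	pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, ptlp);
	if (!pte)
		return NULL;

	spin_lock(*ptlp);
	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		/* The pmd changed under us; drop the stale map and retry. */
		pte_unmap_unlock(pte, *ptlp);
		goto again;
	}
	return pte;
}
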
Link: https://lkml.kernel.org/r/2620a48f34c9f19864ab0169cdbf253d31a8fcaa.1727332572.git.zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 04965da7a4
parent 838d023544
Author: Qi Zheng <zhengqi.arch@bytedance.com>
AuthorDate: 2024-09-26 14:46:23 +08:00
Commit: Andrew Morton <akpm@linux-foundation.org>

--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -13,7 +13,8 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
 	return false;
 }
 
-static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
+static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
+		    spinlock_t **ptlp)
 {
 	pte_t ptent;
 
@@ -25,6 +26,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
 		return !!pvmw->pte;
 	}
 
+again:
 	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
@@ -32,8 +34,8 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
 	 * proceeds to loop over next ptes, and finds a match later.
 	 * Though, in most cases, page lock already protects this.
 	 */
-	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
-					  pvmw->address, ptlp);
+	pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,
+					     pvmw->address, pmdvalp, ptlp);
 	if (!pvmw->pte)
 		return false;
 
@@ -67,8 +69,13 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
 	} else if (!pte_present(ptent)) {
 		return false;
 	}
+	spin_lock(*ptlp);
+	if (unlikely(!pmd_same(*pmdvalp, pmdp_get_lockless(pvmw->pmd)))) {
+		pte_unmap_unlock(pvmw->pte, *ptlp);
+		goto again;
+	}
 	pvmw->ptl = *ptlp;
-	spin_lock(pvmw->ptl);
+
 	return true;
 }
@@ -278,7 +285,7 @@ restart:
 			step_forward(pvmw, PMD_SIZE);
 			continue;
 		}
-		if (!map_pte(pvmw, &ptl)) {
+		if (!map_pte(pvmw, &pmde, &ptl)) {
 			if (!pvmw->pte)
 				goto restart;
 			goto next_pte;
@@ -305,8 +312,13 @@ next_pte:
 		} while (pte_none(ptep_get(pvmw->pte)));
 
 		if (!pvmw->ptl) {
+			spin_lock(ptl);
+			if (unlikely(!pmd_same(pmde, pmdp_get_lockless(pvmw->pmd)))) {
+				pte_unmap_unlock(pvmw->pte, ptl);
+				pvmw->pte = NULL;
+				goto restart;
+			}
 			pvmw->ptl = ptl;
-			spin_lock(pvmw->ptl);
 		}
 		goto this_pte;
 	} while (pvmw->address < end);
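
Note how the caller-side hunk mirrors the revalidation in map_pte():
before publishing pvmw->ptl in the next_pte path, page_vma_mapped_walk()
takes ptl and rechecks pmd_same() against the pmde value it read earlier.
If the pmd changed in the meantime (for example, due to a concurrent THP
collapse), the stale pte map is dropped, pvmw->pte is reset to NULL, and
the walk restarts rather than continuing against a page table that is no
longer attached to pvmw->pmd.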