Commit 3bd786f

fyin1 authored and akpm00 committed
mm: convert do_set_pte() to set_pte_range()
set_pte_range() allows setting up page table entries for a specific range. It takes advantage of batched rmap updates for large folios, and it now takes care of calling update_mmu_cache_range().

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yin Fengwei <[email protected]>
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 86f35f6 commit 3bd786f
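
Below is a minimal caller-side sketch, not part of the commit, showing the intent of the new interface; the function name map_whole_folio_sketch and the assumption that vmf->pte already points at the PTE for addr are hypothetical. Where the old interface needed one do_set_pte() plus one update_mmu_cache() per page, the new one installs a whole folio's worth of PTEs in a single call.

/*
 * Hypothetical sketch, not from the commit: map every page of a
 * file-backed folio with one set_pte_range() call.  Assumes vmf->pte
 * already points at the PTE for @addr and the folio fits in the VMA.
 */
static void map_whole_folio_sketch(struct vm_fault *vmf, struct folio *folio,
				   unsigned long addr)
{
	unsigned int nr = folio_nr_pages(folio);

	/*
	 * One call installs @nr PTEs, batches the rmap update and calls
	 * update_mmu_cache_range() internally, so the caller no longer
	 * calls update_mmu_cache() itself.
	 */
	set_pte_range(vmf, folio, folio_page(folio, 0), nr, addr);
}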

4 files changed: 28 additions & 17 deletions

Documentation/filesystems/locking.rst

Lines changed: 1 addition & 1 deletion
@@ -661,7 +661,7 @@ locked. The VM will unlock the page.
 Filesystem should find and map pages associated with offsets from "start_pgoff"
 till "end_pgoff". ->map_pages() is called with the RCU lock held and must
 not block. If it's not possible to reach a page without blocking,
-filesystem should skip it. Filesystem should use do_set_pte() to setup
+filesystem should skip it. Filesystem should use set_pte_range() to setup
 page table entry. Pointer to entry associated with the page is passed in
 "pte" field in vm_fault structure. Pointers to entries for other offsets
 should be calculated relative to "pte".
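
As context for the documentation hunk above (not part of this commit): most filesystems do not implement ->map_pages() themselves but point it at the generic filemap_map_pages(), which walks the page cache under the RCU lock and, after this change, uses set_pte_range() to install the entries. A typical hook-up looks like the sketch below; the structure name is illustrative.

/*
 * Illustrative only: a filesystem wiring its vm_operations to the
 * generic fault-around implementation, which follows the contract
 * described above.
 */
static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};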

include/linux/mm.h

Lines changed: 2 additions & 1 deletion
@@ -1322,7 +1322,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr);
 
 vm_fault_t finish_fault(struct vm_fault *vmf);
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);

mm/filemap.c

Lines changed: 1 addition & 2 deletions
@@ -3501,8 +3501,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			ret = VM_FAULT_NOPAGE;
 
 		ref_count++;
-		do_set_pte(vmf, page, addr);
-		update_mmu_cache(vma, addr, vmf->pte);
+		set_pte_range(vmf, folio, page, 1, addr);
 	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
 
 	/* Restore the vmf->pte */

mm/memory.c

Lines changed: 24 additions & 13 deletions
@@ -4330,15 +4330,24 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 }
 #endif
 
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+/**
+ * set_pte_range - Set a range of PTEs to point to pages in a folio.
+ * @vmf: Fault decription.
+ * @folio: The folio that contains @page.
+ * @page: The first page to create a PTE for.
+ * @nr: The number of PTEs to create.
+ * @addr: The first address to create a PTE for.
+ */
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
-	bool prefault = vmf->address != addr;
+	bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
 	pte_t entry;
 
-	flush_icache_page(vma, page);
+	flush_icache_pages(vma, page, nr);
 	entry = mk_pte(page, vma->vm_page_prot);
 
 	if (prefault && arch_wants_old_prefaulted_pte())
@@ -4352,14 +4361,18 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 		entry = pte_mkuffd_wp(entry);
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
-		inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-		page_add_new_anon_rmap(page, vma, addr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
+		VM_BUG_ON_FOLIO(nr != 1, folio);
+		folio_add_new_anon_rmap(folio, vma, addr);
+		folio_add_lru_vma(folio, vma);
 	} else {
-		inc_mm_counter(vma->vm_mm, mm_counter_file(page));
-		page_add_file_rmap(page, vma, false);
+		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+		folio_add_file_rmap_range(folio, page, nr, vma, false);
 	}
-	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
+	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
+
+	/* no need to invalidate: a not-present page won't be cached */
+	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
 }
 
 static bool vmf_pte_changed(struct vm_fault *vmf)
@@ -4427,11 +4440,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 
 	/* Re-check under ptl */
 	if (likely(!vmf_pte_changed(vmf))) {
-		do_set_pte(vmf, page, vmf->address);
-
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, vmf->address, vmf->pte);
+		struct folio *folio = page_folio(page);
 
+		set_pte_range(vmf, folio, page, 1, vmf->address);
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
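
For comparison, here is a hedged sketch, not code from the tree, of roughly what mapping nr file-backed pages cost before this change: one counter bump, one rmap insertion, one PTE write and one MMU-cache update per page, built from the per-page helpers the diff above removes. set_pte_range() folds these into single ranged operations (add_mm_counter(..., nr), folio_add_file_rmap_range(), set_ptes() and update_mmu_cache_range()).

/*
 * Hypothetical sketch, not from the commit: the unbatched, per-page
 * equivalent of the file-backed path, using the helpers removed above
 * (write/dirty and uffd-wp PTE flag handling omitted for brevity).
 */
static void map_file_pages_unbatched_sketch(struct vm_fault *vmf,
					    struct page *page,
					    unsigned int nr,
					    unsigned long addr)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		pte_t entry = mk_pte(page + i, vma->vm_page_prot);

		inc_mm_counter(vma->vm_mm, mm_counter_file(page + i));
		page_add_file_rmap(page + i, vma, false);
		set_pte_at(vma->vm_mm, addr + i * PAGE_SIZE,
			   vmf->pte + i, entry);
		update_mmu_cache(vma, addr + i * PAGE_SIZE, vmf->pte + i);
	}
}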
