@@ -4330,15 +4330,24 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 }
 #endif
 
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+/**
+ * set_pte_range - Set a range of PTEs to point to pages in a folio.
+ * @vmf: Fault description.
+ * @folio: The folio that contains @page.
+ * @page: The first page to create a PTE for.
+ * @nr: The number of PTEs to create.
+ * @addr: The first address to create a PTE for.
+ */
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
-	bool prefault = vmf->address != addr;
+	bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
 	pte_t entry;
 
-	flush_icache_page(vma, page);
+	flush_icache_pages(vma, page, nr);
 	entry = mk_pte(page, vma->vm_page_prot);
 
 	if (prefault && arch_wants_old_prefaulted_pte())
@@ -4352,14 +4361,18 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 		entry = pte_mkuffd_wp(entry);
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
-		inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-		page_add_new_anon_rmap(page, vma, addr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
+		VM_BUG_ON_FOLIO(nr != 1, folio);
+		folio_add_new_anon_rmap(folio, vma, addr);
+		folio_add_lru_vma(folio, vma);
 	} else {
-		inc_mm_counter(vma->vm_mm, mm_counter_file(page));
-		page_add_file_rmap(page, vma, false);
+		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+		folio_add_file_rmap_range(folio, page, nr, vma, false);
 	}
-	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
+	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
+
+	/* no need to invalidate: a not-present page won't be cached */
+	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
 }
 
 static bool vmf_pte_changed(struct vm_fault *vmf)
@@ -4427,11 +4440,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 
 	/* Re-check under ptl */
 	if (likely(!vmf_pte_changed(vmf))) {
-		do_set_pte(vmf, page, vmf->address);
-
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, vmf->address, vmf->pte);
+		struct folio *folio = page_folio(page);
 
+		set_pte_range(vmf, folio, page, 1, vmf->address);
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
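
The reason set_pte_range() takes a folio and a PTE count is batching: a caller mapping a file folio larger than one page can install every PTE for that folio in a single call, paying for one ranged flush_icache_pages() and one update_mmu_cache_range() instead of one per page. Below is a minimal, hypothetical sketch of such a caller, not part of this commit: map_whole_folio() is an invented name, the folio is assumed locked, uptodate and file-backed (the anonymous path above still asserts nr == 1), vmf->pte is assumed to already point at the PTE for addr, and locking and error handling are omitted.

/* Hypothetical sketch only; map_whole_folio() is not a kernel function. */
static void map_whole_folio(struct vm_fault *vmf, struct folio *folio,
		unsigned long addr)
{
	/* Number of base pages backed by this (possibly large) folio. */
	unsigned int nr = folio_nr_pages(folio);

	/*
	 * One call installs nr consecutive PTEs starting at addr and does a
	 * single ranged icache flush and MMU-cache update for the range.
	 */
	set_pte_range(vmf, folio, folio_page(folio, 0), nr, addr);
}

In the series this commit comes from, the filemap fault-around path is the caller intended to pass nr > 1; finish_fault() above keeps nr == 1.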