@@ -357,6 +357,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 					      unsigned long dst_start,
 					      unsigned long src_start,
 					      unsigned long len,
+					      atomic_t *mmap_changing,
 					      uffd_flags_t flags)
 {
 	struct mm_struct *dst_mm = dst_vma->vm_mm;
@@ -472,6 +473,15 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 				goto out;
 			}
 			mmap_read_lock(dst_mm);
+			/*
+			 * If memory mappings are changing because of non-cooperative
+			 * operation (e.g. mremap) running in parallel, bail out and
+			 * request the user to retry later
+			 */
+			if (mmap_changing && atomic_read(mmap_changing)) {
+				err = -EAGAIN;
+				break;
+			}
 
 			dst_vma = NULL;
 			goto retry;
@@ -506,6 +516,7 @@ extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
 				    unsigned long dst_start,
 				    unsigned long src_start,
 				    unsigned long len,
+				    atomic_t *mmap_changing,
 				    uffd_flags_t flags);
 #endif /* CONFIG_HUGETLB_PAGE */
 
@@ -622,8 +633,8 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 	 * If this is a HUGETLB vma, pass off to appropriate routine
 	 */
 	if (is_vm_hugetlb_page(dst_vma))
-		return mfill_atomic_hugetlb(dst_vma, dst_start,
-					    src_start, len, flags);
+		return mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
+					    len, mmap_changing, flags);
 
 	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
 		goto out_unlock;
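
For context, the -EAGAIN introduced above propagates out of the UFFDIO_COPY (and related) ioctls, and the userfaultfd monitor is expected to handle the pending non-cooperative event and then retry. The snippet below is a minimal, hypothetical userspace sketch of that retry built on the standard uffdio_copy UAPI; the helper name copy_with_retry and the bare retry policy are illustrative assumptions, not part of this patch (a real monitor would drain its event queue before retrying).

/*
 * Hypothetical userspace retry loop (not part of this patch): if a
 * non-cooperative operation such as mremap races with the fill, the
 * ioctl fails with EAGAIN and the monitor retries the remainder.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int copy_with_retry(int uffd, uint64_t dst, uint64_t src, uint64_t len)
{
	struct uffdio_copy copy;

	while (len) {
		memset(&copy, 0, sizeof(copy));
		copy.dst = dst;
		copy.src = src;
		copy.len = len;
		copy.mode = 0;

		if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
			return 0;	/* whole range resolved */

		if (errno != EAGAIN)
			return -1;	/* hard error, give up */

		/*
		 * EAGAIN: mappings were changing; account for any partial
		 * progress reported in copy.copy and try again (after the
		 * monitor has processed the pending event).
		 */
		if (copy.copy > 0) {
			dst += copy.copy;
			src += copy.copy;
			len -= copy.copy;
		}
	}
	return 0;
}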