Skip to content

Commit fce86ff

Browse files
djbw authored and torvalds committed
mm/huge_memory: fix vmf_insert_pfn_{pmd, pud}() crash, handle unaligned addresses
Starting with c6f3c5e ("mm/huge_memory.c: fix modifying of page protection by insert_pfn_pmd()") vmf_insert_pfn_pmd() internally calls pmdp_set_access_flags(). That helper enforces a pmd aligned @address argument via VM_BUG_ON() assertion. Update the implementation to take a 'struct vm_fault' argument directly and apply the address alignment fixup internally to fix crash signatures like: kernel BUG at arch/x86/mm/pgtable.c:515! invalid opcode: 0000 [#1] SMP NOPTI CPU: 51 PID: 43713 Comm: java Tainted: G OE 4.19.35 #1 [..] RIP: 0010:pmdp_set_access_flags+0x48/0x50 [..] Call Trace: vmf_insert_pfn_pmd+0x198/0x350 dax_iomap_fault+0xe82/0x1190 ext4_dax_huge_fault+0x103/0x1f0 ? __switch_to_asm+0x40/0x70 __handle_mm_fault+0x3f6/0x1370 ? __switch_to_asm+0x34/0x70 ? __switch_to_asm+0x40/0x70 handle_mm_fault+0xda/0x200 __do_page_fault+0x249/0x4f0 do_page_fault+0x32/0x110 ? page_fault+0x8/0x30 page_fault+0x1e/0x30 Link: http://lkml.kernel.org/r/155741946350.372037.11148198430068238140.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: c6f3c5e ("mm/huge_memory.c: fix modifying of page protection by insert_pfn_pmd()") Signed-off-by: Dan Williams <[email protected]> Reported-by: Piotr Balcer <[email protected]> Tested-by: Yan Ma <[email protected]> Tested-by: Pankaj Gupta <[email protected]> Reviewed-by: Matthew Wilcox <[email protected]> Reviewed-by: Jan Kara <[email protected]> Reviewed-by: Aneesh Kumar K.V <[email protected]> Cc: Chandan Rajendra <[email protected]> Cc: Souptick Joarder <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent a13f065 commit fce86ff

File tree

4 files changed

+16
-18
lines changed

4 files changed

+16
-18
lines changed

drivers/dax/device.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -184,8 +184,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
184184

185185
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
186186

187-
return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
188-
vmf->flags & FAULT_FLAG_WRITE);
187+
return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
189188
}
190189

191190
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -235,8 +234,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
235234

236235
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
237236

238-
return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
239-
vmf->flags & FAULT_FLAG_WRITE);
237+
return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
240238
}
241239
#else
242240
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,

fs/dax.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1575,8 +1575,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
15751575
}
15761576

15771577
trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1578-
result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
1579-
write);
1578+
result = vmf_insert_pfn_pmd(vmf, pfn, write);
15801579
break;
15811580
case IOMAP_UNWRITTEN:
15821581
case IOMAP_HOLE:
@@ -1686,8 +1685,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
16861685
ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
16871686
#ifdef CONFIG_FS_DAX_PMD
16881687
else if (order == PMD_ORDER)
1689-
ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1690-
pfn, true);
1688+
ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
16911689
#endif
16921690
else
16931691
ret = VM_FAULT_FALLBACK;

include/linux/huge_mm.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -47,10 +47,8 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
4747
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
4848
unsigned long addr, pgprot_t newprot,
4949
int prot_numa);
50-
vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
51-
pmd_t *pmd, pfn_t pfn, bool write);
52-
vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
53-
pud_t *pud, pfn_t pfn, bool write);
50+
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
51+
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
5452
enum transparent_hugepage_flag {
5553
TRANSPARENT_HUGEPAGE_FLAG,
5654
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,

mm/huge_memory.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -793,11 +793,13 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
793793
pte_free(mm, pgtable);
794794
}
795795

796-
vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
797-
pmd_t *pmd, pfn_t pfn, bool write)
796+
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
798797
{
798+
unsigned long addr = vmf->address & PMD_MASK;
799+
struct vm_area_struct *vma = vmf->vma;
799800
pgprot_t pgprot = vma->vm_page_prot;
800801
pgtable_t pgtable = NULL;
802+
801803
/*
802804
* If we had pmd_special, we could avoid all these restrictions,
803805
* but we need to be consistent with PTEs and architectures that
@@ -820,7 +822,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
820822

821823
track_pfn_insert(vma, &pgprot, pfn);
822824

823-
insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
825+
insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
824826
return VM_FAULT_NOPAGE;
825827
}
826828
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
@@ -869,10 +871,12 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
869871
spin_unlock(ptl);
870872
}
871873

872-
vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
873-
pud_t *pud, pfn_t pfn, bool write)
874+
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
874875
{
876+
unsigned long addr = vmf->address & PUD_MASK;
877+
struct vm_area_struct *vma = vmf->vma;
875878
pgprot_t pgprot = vma->vm_page_prot;
879+
876880
/*
877881
* If we had pud_special, we could avoid all these restrictions,
878882
* but we need to be consistent with PTEs and architectures that
@@ -889,7 +893,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
889893

890894
track_pfn_insert(vma, &pgprot, pfn);
891895

892-
insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
896+
insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
893897
return VM_FAULT_NOPAGE;
894898
}
895899
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);

0 commit comments

Comments
 (0)