
Commit 86cf69f

joergroedel authored and torvalds committed
x86/mm/32: implement arch_sync_kernel_mappings()
Implement the function to sync changes in vmalloc and ioremap ranges to all page-tables.

Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "Rafael J. Wysocki" <[email protected]>
Cc: Steven Rostedt (VMware) <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 8e19843 commit 86cf69f


3 files changed, +20 −9 lines changed

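For orientation (context added here, not text from the commit): ARCH_PAGE_TABLE_SYNC_MASK and arch_sync_kernel_mappings() are the two halves of the page-table sync hook defined by the generic-mm patches earlier in this series. The sketch below shows the calling convention the x86-32 implementation is written against; it assumes the pgtbl_mod_mask type and the PGTBL_*_MODIFIED flags from those patches, and populate_kernel_range() is a hypothetical stand-in for the code that actually builds the mapping.

/*
 * Sketch only, not code from this commit.  A real caller (the vmalloc/ioremap
 * mapping code) accumulates PGTBL_*_MODIFIED bits in 'mask' while it fills in
 * the kernel page tables, then lets the architecture propagate the changes.
 */
static int sketch_map_kernel_range(unsigned long start, unsigned long size)
{
        pgtbl_mod_mask mask = 0;
        int ret = populate_kernel_range(start, start + size, &mask); /* hypothetical helper */

        /*
         * Architectures that define ARCH_PAGE_TABLE_SYNC_MASK to a non-zero
         * value get called back so they can sync the change into every
         * page table in the system.
         */
        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
                arch_sync_kernel_mappings(start, start + size);

        return ret;
}

With the two header changes below, this callback only fires on 32-bit x86 when the kernel PMD is not shared between processes, which is exactly the case the fault.c implementation has to handle.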

arch/x86/include/asm/pgtable-2level_types.h

+2 −0

@@ -20,6 +20,8 @@ typedef union {
 
 #define SHARED_KERNEL_PMD      0
 
+#define ARCH_PAGE_TABLE_SYNC_MASK      PGTBL_PMD_MODIFIED
+
 /*
  * traditional i386 two-level paging structure:
  */

arch/x86/include/asm/pgtable-3level_types.h

+2 −0

@@ -27,6 +27,8 @@ typedef union {
 #define SHARED_KERNEL_PMD      (!static_cpu_has(X86_FEATURE_PTI))
 #endif
 
+#define ARCH_PAGE_TABLE_SYNC_MASK      (SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED)
+
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
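A note for orientation (not part of the diff): with 3-level paging the kernel PMD pages are shared by every pgd unless page-table isolation is active, and a shared PMD needs no per-process synchronization. An annotated restatement of the new define under that reading, for illustration only:

/*
 * Illustration only -- the two values the mask can take on 32-bit PAE:
 *
 *   PTI disabled -> SHARED_KERNEL_PMD is true  -> mask is 0
 *                   (kernel PMD pages are shared, so vmalloc/ioremap PMD
 *                    changes are visible to every process without syncing)
 *   PTI enabled  -> SHARED_KERNEL_PMD is false -> mask is PGTBL_PMD_MODIFIED
 *                   (each pgd carries its own kernel PMD entries, so PMD
 *                    changes must be pushed out via arch_sync_kernel_mappings())
 */
#define ARCH_PAGE_TABLE_SYNC_MASK      (SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED)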

arch/x86/mm/fault.c

+16 −9

@@ -190,16 +190,13 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
         return pmd_k;
 }
 
-static void vmalloc_sync(void)
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 {
-        unsigned long address;
-
-        if (SHARED_KERNEL_PMD)
-                return;
+        unsigned long addr;
 
-        for (address = VMALLOC_START & PMD_MASK;
-             address >= TASK_SIZE_MAX && address < VMALLOC_END;
-             address += PMD_SIZE) {
+        for (addr = start & PMD_MASK;
+             addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
+             addr += PMD_SIZE) {
                 struct page *page;
 
                 spin_lock(&pgd_lock);
@@ -210,13 +207,23 @@ static void vmalloc_sync(void)
                         pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
                         spin_lock(pgt_lock);
-                        vmalloc_sync_one(page_address(page), address);
+                        vmalloc_sync_one(page_address(page), addr);
                         spin_unlock(pgt_lock);
                 }
                 spin_unlock(&pgd_lock);
         }
 }
 
+static void vmalloc_sync(void)
+{
+        unsigned long address;
+
+        if (SHARED_KERNEL_PMD)
+                return;
+
+        arch_sync_kernel_mappings(VMALLOC_START, VMALLOC_END);
+}
+
 void vmalloc_sync_mappings(void)
 {
         vmalloc_sync();
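Pieced together from the two hunks above, the routines read roughly as follows after the patch; the pgd-list walk between the hunks is collapsed in the diff view and is only indicated by comments here, so this is a sketch of the result rather than a verbatim copy of the file:

void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start & PMD_MASK;
             addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
             addr += PMD_SIZE) {
                struct page *page;

                spin_lock(&pgd_lock);
                /* ... walk every pgd in the system (context collapsed in the diff) ... */
                        spin_lock(pgt_lock);
                        vmalloc_sync_one(page_address(page), addr);
                        spin_unlock(pgt_lock);
                /* ... */
                spin_unlock(&pgd_lock);
        }
}

static void vmalloc_sync(void)
{
        unsigned long address;

        if (SHARED_KERNEL_PMD)
                return;

        arch_sync_kernel_mappings(VMALLOC_START, VMALLOC_END);
}

The SHARED_KERNEL_PMD early return stays in the old vmalloc_sync() wrapper, while the range walk itself moves into arch_sync_kernel_mappings(), matching the mask-gated calling convention sketched near the top of this page.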
