
Commit cc77245

Carsten Otte authored and Martin Schwidefsky committed
[S390] fix list corruption in gmap reverse mapping
This introduces locking via mm->page_table_lock to protect the rmap
list for guest mappings from being corrupted by concurrent operations.

Signed-off-by: Carsten Otte <[email protected]>
Signed-off-by: Martin Schwidefsky <[email protected]>
1 parent a9162f2 commit cc77245
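For readers unfamiliar with this class of bug, here is a minimal userspace sketch of the pattern the commit applies. It is hypothetical demo code, not the kernel source: the pthread mutex stands in for spin_lock(&mm->page_table_lock), and the names node, insert, worker, and list_lock are invented for illustration. Two threads insert into one shared list; the lock around the pointer update is what keeps their read-modify-write sequences from interleaving and losing nodes.

/*
 * Userspace analogue of the fix (hypothetical demo, not kernel code).
 * Compile with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

static struct node *head;
/* Stands in for mm->page_table_lock in the patch. */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void insert(int val)
{
	struct node *n = malloc(sizeof(*n));

	n->val = val;
	/* Analogous to spin_lock(&mm->page_table_lock) around list_add(). */
	pthread_mutex_lock(&list_lock);
	n->next = head;
	head = n;
	pthread_mutex_unlock(&list_lock);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 100000; i++)
		insert(i);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	long count = 0;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* With the lock held across every insert, all 200000 nodes survive. */
	for (struct node *n = head; n; n = n->next)
		count++;
	printf("%ld nodes\n", count);
	return 0;
}

If the lock/unlock pair is removed, the final count routinely comes up short or the walk crashes on a dangling pointer, which is the userspace analogue of the rmap list corruption this commit fixes.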

File tree

1 file changed: +9 -0 lines changed

arch/s390/mm/pgtable.c

Lines changed: 9 additions & 0 deletions
@@ -222,6 +222,7 @@ void gmap_free(struct gmap *gmap)
 
 	/* Free all segment & region tables. */
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
 		table = (unsigned long *) page_to_phys(page);
 		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
@@ -230,6 +231,7 @@ void gmap_free(struct gmap *gmap)
 		gmap_unlink_segment(gmap, table);
 		__free_pages(page, ALLOC_ORDER);
 	}
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	list_del(&gmap->list);
 	kfree(gmap);
@@ -300,6 +302,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 
 	flush = 0;
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the guest addr space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -321,6 +324,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 		*table = _SEGMENT_ENTRY_INV;
 	}
 out:
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
@@ -351,6 +355,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 
 	flush = 0;
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -374,12 +379,14 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		flush |= gmap_unlink_segment(gmap, table);
 		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
 	}
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	return 0;
 
 out_unmap:
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	gmap_unmap_segment(gmap, to, len);
 	return -ENOMEM;
@@ -446,7 +453,9 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
 	page = pmd_page(*pmd);
 	mp = (struct gmap_pgtable *) page->index;
 	rmap->entry = table;
+	spin_lock(&mm->page_table_lock);
 	list_add(&rmap->list, &mp->mapper);
+	spin_unlock(&mm->page_table_lock);
 	/* Set gmap segment table entry to page table. */
 	*table = pmd_val(*pmd) & PAGE_MASK;
 	return vmaddr | (address & ~PMD_MASK);
