@@ -222,6 +222,7 @@ void gmap_free(struct gmap *gmap)
 
 	/* Free all segment & region tables. */
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
 		table = (unsigned long *) page_to_phys(page);
 		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
@@ -230,6 +231,7 @@ void gmap_free(struct gmap *gmap)
 		gmap_unlink_segment(gmap, table);
 		__free_pages(page, ALLOC_ORDER);
 	}
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	list_del(&gmap->list);
 	kfree(gmap);
@@ -300,6 +302,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 
 	flush = 0;
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the guest addr space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -321,6 +324,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 		*table = _SEGMENT_ENTRY_INV;
 	}
 out:
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
@@ -351,6 +355,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 
 	flush = 0;
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -374,12 +379,14 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		flush |= gmap_unlink_segment(gmap, table);
 		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
 	}
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	return 0;
 
 out_unmap:
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	gmap_unmap_segment(gmap, to, len);
 	return -ENOMEM;
@@ -446,7 +453,9 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
 	page = pmd_page(*pmd);
 	mp = (struct gmap_pgtable *) page->index;
 	rmap->entry = table;
+	spin_lock(&mm->page_table_lock);
 	list_add(&rmap->list, &mp->mapper);
+	spin_unlock(&mm->page_table_lock);
 	/* Set gmap segment table entry to page table. */
 	*table = pmd_val(*pmd) & PAGE_MASK;
 	return vmaddr | (address & ~PMD_MASK);
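
Every hunk above applies the same pattern: mmap_sem is taken for reading first, mm->page_table_lock is nested inside it around each walk or update of the gmap segment tables, and every exit path (including out: and out_unmap:) drops the spinlock before releasing the semaphore. Below is a minimal user-space sketch of that lock ordering, using a pthread rwlock in place of mmap_sem and a mutex in place of the page_table_lock spinlock; the names segment_update, map_sem, and table_lock are illustrative and not from the kernel source.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define NSLOTS 2048

static pthread_rwlock_t map_sem = PTHREAD_RWLOCK_INITIALIZER;  /* stands in for mmap_sem */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for page_table_lock */
static unsigned long table[NSLOTS];

/* Rewrite a range of table slots under both locks, mirroring the
 * rwlock-then-spinlock nesting the patch introduces. */
static int segment_update(unsigned long to, unsigned long len, unsigned long entry)
{
	unsigned long off;
	int rc = 0;

	pthread_rwlock_rdlock(&map_sem); /* outer lock first, read side */
	pthread_mutex_lock(&table_lock); /* inner lock nested inside it */
	for (off = 0; off < len; off++) {
		unsigned long idx = (to + off) & (NSLOTS - 1);
		if (table[idx] == ~0UL) { /* reserved slot: bail out */
			rc = -EINVAL;
			goto out;
		}
		table[idx] = entry;
	}
out:	/* every exit path unlocks in reverse order, like out:/out_unmap: */
	pthread_mutex_unlock(&table_lock);
	pthread_rwlock_unlock(&map_sem);
	return rc;
}

int main(void)
{
	printf("segment_update: %d\n", segment_update(0, 16, 0x2aUL));
	return 0;
}

The consistent ordering (outer semaphore, then inner spinlock, released in reverse on both the success and error paths) is what lets gmap_free(), gmap_unmap_segment(), gmap_map_segment(), and gmap_fault() all touch the same segment tables without risking a lock-order inversion.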