@@ -534,23 +534,22 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
 	volatile bitmap_t *page_bitmaps;
 	get_maps_and_indices_for_continuation((dispatch_continuation_t)page, NULL,
 			NULL, (bitmap_t **)&page_bitmaps, NULL);
-	unsigned int i;
-	for (i = 0; i < BITMAPS_PER_PAGE; i++) {
-		if (page_bitmaps[i] != 0) {
+	unsigned int last_locked;
+	for (last_locked = 0; last_locked < BITMAPS_PER_PAGE; last_locked++) {
+		if (page_bitmaps[last_locked] != 0) {
 			return;
 		}
 	}
 	// They are all unallocated, so we could madvise the page. Try to
 	// take ownership of them all.
-	int last_locked = 0;
-	do {
+	for (last_locked = 0; last_locked < BITMAPS_PER_PAGE; last_locked++) {
 		if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
 				BITMAP_ALL_ONES, relaxed)) {
 			// We didn't get one; since there is a cont allocated in
 			// the page, we can't madvise. Give up and unlock all.
 			goto unlock;
 		}
-	} while (++last_locked < (signed)BITMAPS_PER_PAGE);
+	}
 #if DISPATCH_DEBUG
 	//fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), "
 	//		"[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next,
@@ -563,10 +562,10 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
 			MADV_FREE));
 
 unlock:
-	while (last_locked > 1) {
-		page_bitmaps[--last_locked] = BITMAP_C(0);
-	}
 	if (last_locked) {
+		while (last_locked > 1) {
+			page_bitmaps[--last_locked] = BITMAP_C(0);
+		}
 		os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
 	}
 	return;
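
The two hunks above reshape, without changing behavior, the try-lock pattern used by _dispatch_alloc_maybe_madvise_page: a read-only scan that bails out if any bitmap still records live allocations, a CAS pass that claims every bitmap before the page can be handed to madvise(2), and an unlock path that rolls the claims back. Below is a minimal standalone sketch of that pattern only, not the libdispatch implementation: NBITMAPS, bitmap_t, and try_madvise_page are illustrative stand-ins, plain C11 atomics replace the os_atomic_* wrappers, and the madvise call itself is elided.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define NBITMAPS 4                      /* stand-in for BITMAPS_PER_PAGE */
typedef uint64_t bitmap_t;              /* stand-in for the real bitmap word */

// Returns true if every bitmap was claimed, i.e. the page could be madvised.
static bool
try_madvise_page(_Atomic bitmap_t *bitmaps)
{
	unsigned int last_locked;
	bool claimed_all = false;

	// Pass 1: cheap read-only scan. Any nonzero bitmap means live
	// allocations, so the page cannot be returned to the kernel.
	for (last_locked = 0; last_locked < NBITMAPS; last_locked++) {
		if (atomic_load_explicit(&bitmaps[last_locked],
				memory_order_relaxed) != 0) {
			return false;
		}
	}
	// Pass 2: claim each bitmap with a 0 -> all-ones CAS so that no
	// concurrent allocator can hand out a slot while the page is freed.
	for (last_locked = 0; last_locked < NBITMAPS; last_locked++) {
		bitmap_t expected = 0;
		if (!atomic_compare_exchange_strong_explicit(
				&bitmaps[last_locked], &expected, ~(bitmap_t)0,
				memory_order_relaxed, memory_order_relaxed)) {
			goto unlock;    // someone allocated meanwhile; roll back
		}
	}
	// All bitmaps claimed: this is where the real allocator would call
	// madvise(page, PAGE_SIZE, MADV_FREE).
	claimed_all = true;
unlock:
	// Release whatever was claimed, highest index first; index 0 is
	// cleared last, mirroring the if (last_locked) guard in the diff.
	if (last_locked) {
		while (last_locked > 1) {
			atomic_store_explicit(&bitmaps[--last_locked], 0,
					memory_order_relaxed);
		}
		atomic_store_explicit(&bitmaps[0], 0, memory_order_relaxed);
	}
	return claimed_all;
}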
@@ -654,27 +653,37 @@ _dispatch_allocator_enumerate(task_t remote_task,
 		vm_address_t zone_address, memory_reader_t reader,
 		void (^recorder)(vm_address_t, void *, size_t, bool *stop))
 {
-	const size_t heap_size = remote_dal->dal_magazine_size;
-	const size_t dc_size = remote_dal->dal_allocation_size;
-	const size_t dc_flags_offset = remote_dal->dal_allocation_isa_offset;
-	bool stop = false;
-	void *heap;
-
-	while (zone_address) {
-		// FIXME: improve this by not faulting everything and driving it through
-		// the bitmap.
-		kern_return_t kr = reader(remote_task, zone_address, heap_size, &heap);
-		size_t offs = remote_dal->dal_first_allocation_offset;
-		if (kr) return kr;
-		while (offs < heap_size) {
-			void *isa = *(void **)(heap + offs + dc_flags_offset);
-			if (isa && isa != remote_dal->dal_deferred_free_isa) {
-				recorder(zone_address + offs, heap + offs, dc_size, &stop);
-				if (stop) return KERN_SUCCESS;
+	if (zone_address)
+	{
+		const size_t heap_size = remote_dal->dal_magazine_size;
+		const size_t dc_size = remote_dal->dal_allocation_size;
+		const size_t dc_flags_offset = remote_dal->dal_allocation_isa_offset;
+		bool stop = false;
+		void *heap = NULL;
+
+		do
+		{
+			// FIXME: improve this by not faulting everything and driving it through
+			// the bitmap.
+			kern_return_t kr;
+			size_t offs;
+
+			kr = reader(remote_task, zone_address, heap_size, &heap);
+			if (kr)
+				return kr;
+
+			for (offs = remote_dal->dal_first_allocation_offset; offs < heap_size; offs += dc_size)
+			{
+				void *isa = *(void **)(heap + offs + dc_flags_offset);
+				if (isa && isa != remote_dal->dal_deferred_free_isa)
+				{
+					recorder(zone_address + offs, heap + offs, dc_size, &stop);
+					if (stop)
+						return KERN_SUCCESS;
+				}
 			}
-			offs += dc_size;
-		}
-		zone_address = (vm_address_t)((dispatch_heap_t)heap)->header.dh_next;
+			zone_address = (vm_address_t)((dispatch_heap_t)heap)->header.dh_next;
+		} while (zone_address);
 	}
 
 	return KERN_SUCCESS;
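
The rewritten _dispatch_allocator_enumerate keeps the same walk: copy each remote magazine into local memory via the supplied reader, scan its fixed-size slots, report every slot whose isa is neither NULL nor the deferred-free sentinel to the recorder block, and follow dh_next to the next heap until the chain ends. As a hedged illustration of the reader side only, the sketch below assumes the callback has the usual malloc zone introspection shape (the typedef here is an assumption, not taken from this file) and shows the degenerate same-task case, where the address is already mapped and no copy is needed.

#include <mach/mach.h>

// Assumed callback shape, mirroring the malloc zone introspection
// convention; not copied from this diff.
typedef kern_return_t memory_reader_t(task_t remote_task,
		vm_address_t remote_address, vm_size_t size, void **local_memory);

// Trivial reader for the case where the "remote" task is the current
// process: hand back the address unchanged.
static kern_return_t
_example_self_task_reader(task_t remote_task, vm_address_t remote_address,
		vm_size_t size, void **local_memory)
{
	(void)remote_task;
	(void)size;
	*local_memory = (void *)remote_address;
	return KERN_SUCCESS;
}

A reader for a genuinely remote task would instead copy heap_size bytes out of remote_task (for example with mach_vm_read_overwrite) into a locally owned buffer before returning it.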