@@ -345,6 +345,8 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
345
345
346
346
/* Allocate space for stack and thread descriptor at default address */
347
347
#ifdef NEED_SEPARATE_REGISTER_STACK
348
+ void * res_addr ;
349
+
348
350
if (attr != NULL )
349
351
{
350
352
guardsize = page_roundup (attr -> __guardsize , granularity );
@@ -371,18 +373,26 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
371
373
/* XXX Fix for floating stacks with variable sizes. */
372
374
373
375
/* First the main stack: */
374
- if (mmap ((caddr_t )((char * )(new_thread + 1 ) - stacksize / 2 ),
375
- stacksize / 2 , PROT_READ | PROT_WRITE | PROT_EXEC ,
376
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED , -1 , 0 )
377
- == MAP_FAILED )
378
- /* Bad luck, this segment is already mapped. */
379
- return -1 ;
376
+ map_addr = (caddr_t )((char * )(new_thread + 1 ) - stacksize / 2 );
377
+ res_addr = mmap (map_addr , stacksize / 2 ,
378
+ PROT_READ | PROT_WRITE | PROT_EXEC ,
379
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED , -1 , 0 );
380
+ if (res_addr != map_addr )
381
+ {
382
+ /* Bad luck, this segment is already mapped. */
383
+ if (res_addr != MAP_FAILED )
384
+ munmap (res_addr , stacksize / 2 );
385
+ return -1 ;
386
+ }
380
387
/* Then the register stack: */
381
- if (mmap ((caddr_t )new_thread_bottom , stacksize /2 ,
382
- PROT_READ | PROT_WRITE | PROT_EXEC ,
383
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED , -1 , 0 )
384
- == MAP_FAILED )
388
+ map_addr = (caddr_t )new_thread_bottom ;
389
+ res_addr = mmap (map_addr , stacksize /2 ,
390
+ PROT_READ | PROT_WRITE | PROT_EXEC ,
391
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED , -1 , 0 );
392
+ if (res_addr != map_addr )
385
393
{
394
+ if (res_addr != MAP_FAILED )
395
+ munmap (res_addr , stacksize / 2 );
386
396
munmap ((caddr_t )((char * )(new_thread + 1 ) - stacksize /2 ),
387
397
stacksize /2 );
388
398
return -1 ;
@@ -419,6 +429,8 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
419
429
new_thread_bottom = (char * ) map_addr + guardsize ;
420
430
new_thread = ((pthread_descr ) (new_thread_bottom + stacksize )) - 1 ;
421
431
# else /* !FLOATING_STACKS */
432
+ void * res_addr ;
433
+
422
434
if (attr != NULL )
423
435
{
424
436
guardsize = page_roundup (attr -> __guardsize , granularity );
@@ -434,13 +446,17 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
434
446
435
447
new_thread = default_new_thread ;
436
448
new_thread_bottom = (char * ) (new_thread + 1 ) - stacksize ;
437
- map_addr = mmap (( caddr_t )(( char * )( new_thread + 1 ) - stacksize - guardsize ),
438
- stacksize + guardsize ,
449
+ map_addr = new_thread_bottom - guardsize ;
450
+ res_addr = mmap ( map_addr , stacksize + guardsize ,
439
451
PROT_READ | PROT_WRITE | PROT_EXEC ,
440
452
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED , -1 , 0 );
441
- if (map_addr == MAP_FAILED )
442
- /* Bad luck, this segment is already mapped. */
443
- return -1 ;
453
+ if (res_addr != map_addr )
454
+ {
455
+ /* Bad luck, this segment is already mapped. */
456
+ if (res_addr != MAP_FAILED )
457
+ munmap (res_addr , stacksize + guardsize );
458
+ return -1 ;
459
+ }
444
460
445
461
/* We manage to get a stack. Protect the guard area pages if
446
462
necessary. */
0 commit comments