@@ -418,8 +418,11 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
 				  unsigned int usersize,
 				  void (*ctor)(void *))
 {
+	unsigned long mask = 0;
+	unsigned int idx;
 	kmem_buckets *b;
-	int idx;
+
+	BUILD_BUG_ON(ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]) > BITS_PER_LONG);
 
 	/*
 	 * When the separate buckets API is not built in, just return
@@ -441,7 +444,7 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
 	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
 		char *short_size, *cache_name;
 		unsigned int cache_useroffset, cache_usersize;
-		unsigned int size;
+		unsigned int size, aligned_idx;
 
 		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
 			continue;
@@ -454,29 +457,35 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
 		if (WARN_ON(!short_size))
 			goto fail;
 
-		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
-		if (WARN_ON(!cache_name))
-			goto fail;
-
 		if (useroffset >= size) {
 			cache_useroffset = 0;
 			cache_usersize = 0;
 		} else {
 			cache_useroffset = useroffset;
 			cache_usersize = min(size - cache_useroffset, usersize);
 		}
-		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
+
+		aligned_idx = __kmalloc_index(size, false);
+		if (!(*b)[aligned_idx]) {
+			cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
+			if (WARN_ON(!cache_name))
+				goto fail;
+			(*b)[aligned_idx] = kmem_cache_create_usercopy(cache_name, size,
 					0, flags, cache_useroffset,
 					cache_usersize, ctor);
-		kfree(cache_name);
-		if (WARN_ON(!(*b)[idx]))
-			goto fail;
+			kfree(cache_name);
+			if (WARN_ON(!(*b)[aligned_idx]))
+				goto fail;
+			set_bit(aligned_idx, &mask);
+		}
+		if (idx != aligned_idx)
+			(*b)[idx] = (*b)[aligned_idx];
 	}
 
 	return b;
 
 fail:
-	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
+	for_each_set_bit(idx, &mask, ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]))
 		kmem_cache_destroy((*b)[idx]);
 	kfree(b);
 
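The structure of the change is easier to see in isolation: when several bucket indices round up to the same real allocation size, the cache is created once at the canonical slot (`aligned_idx`), the remaining slots alias that pointer, and a bitmask records which slots actually own a cache so the `fail:` path destroys each one exactly once. Below is a minimal userspace sketch of that create-once/alias/bitmask-teardown pattern; `align_index()`, `fake_cache`, `NR_BUCKETS`, and the 64-byte minimum are illustrative stand-ins, not the kernel's `__kmalloc_index()` or `struct kmem_cache`.

	#include <stdio.h>
	#include <stdlib.h>

	#define NR_BUCKETS 8                    /* illustrative; not the kernel's table size */

	struct fake_cache { size_t size; };     /* stand-in for struct kmem_cache */

	/*
	 * Map a bucket index to the index of the smallest bucket whose size
	 * meets a 64-byte minimum. Several small indices collapse onto one
	 * canonical index, loosely mimicking what __kmalloc_index() yields
	 * once sizes are rounded up by alignment.
	 */
	static unsigned int align_index(unsigned int idx)
	{
		size_t size = (size_t)8 << idx; /* bucket sizes 8, 16, ..., 1024 */
		size_t aligned = size < 64 ? 64 : size;
		unsigned int a = 0;

		while (((size_t)8 << a) < aligned)
			a++;
		return a;
	}

	int main(void)
	{
		struct fake_cache *b[NR_BUCKETS] = { 0 };
		unsigned long mask = 0;         /* one ownership bit per created cache */
		unsigned int idx;

		for (idx = 0; idx < NR_BUCKETS; idx++) {
			unsigned int aligned_idx = align_index(idx);

			if (!b[aligned_idx]) {  /* create the backing cache only once */
				b[aligned_idx] = malloc(sizeof(*b[aligned_idx]));
				if (!b[aligned_idx])
					goto fail;
				b[aligned_idx]->size = (size_t)8 << aligned_idx;
				mask |= 1UL << aligned_idx;     /* set_bit() analogue */
			}
			if (idx != aligned_idx) /* alias shares the owner's pointer */
				b[idx] = b[aligned_idx];
		}

		for (idx = 0; idx < NR_BUCKETS; idx++)
			printf("bucket %u -> %zu-byte cache%s\n", idx, b[idx]->size,
			       (mask >> idx) & 1 ? " (owner)" : " (alias)");

	fail:
		/* Teardown walks only the ownership bits, as for_each_set_bit()
		 * does in the patch, so aliased slots are never double-freed. */
		for (idx = 0; idx < NR_BUCKETS; idx++)
			if ((mask >> idx) & 1)
				free(b[idx]);
		return 0;
	}

The same constraint explains the new `BUILD_BUG_ON`: the ownership mask is a single `unsigned long`, so the bucket table must not have more entries than `BITS_PER_LONG`.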