
Commit b5dce70

YuryNorov authored and notcarbide committed
lib: add fast path for find_next_*_bit()
Similarly to the bitmap functions, find_next_*_bit() users will benefit if we handle the case of bitmaps that fit into a single word inline. In the very best case, the compiler may replace a function call with a few instructions.

This is a quite typical find_next_bit() user:

	unsigned int cpumask_next(int n, const struct cpumask *srcp)
	{
		/* -1 is a legal arg here. */
		if (n != -1)
			cpumask_check(n);
		return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
	}
	EXPORT_SYMBOL(cpumask_next);

Currently, on ARM64 the generated code looks like this:

	0000000000000000 <cpumask_next>:
	   0:	a9bf7bfd	stp	x29, x30, [sp, #-16]!
	   4:	11000402	add	w2, w0, #0x1
	   8:	aa0103e0	mov	x0, x1
	   c:	d2800401	mov	x1, #0x40	// #64
	  10:	910003fd	mov	x29, sp
	  14:	93407c42	sxtw	x2, w2
	  18:	94000000	bl	0 <find_next_bit>
	  1c:	a8c17bfd	ldp	x29, x30, [sp], #16
	  20:	d65f03c0	ret
	  24:	d503201f	nop

After applying this patch:

	0000000000000140 <cpumask_next>:
	 140:	11000400	add	w0, w0, #0x1
	 144:	93407c00	sxtw	x0, w0
	 148:	f100fc1f	cmp	x0, #0x3f
	 14c:	54000168	b.hi	178 <cpumask_next+0x38>	// b.pmore
	 150:	f9400023	ldr	x3, [x1]
	 154:	92800001	mov	x1, #0xffffffffffffffff	// #-1
	 158:	9ac02020	lsl	x0, x1, x0
	 15c:	52800802	mov	w2, #0x40	// #64
	 160:	8a030001	and	x1, x0, x3
	 164:	dac00020	rbit	x0, x1
	 168:	f100003f	cmp	x1, #0x0
	 16c:	dac01000	clz	x0, x0
	 170:	1a800040	csel	w0, w2, w0, eq	// eq = none
	 174:	d65f03c0	ret
	 178:	52800800	mov	w0, #0x40	// #64
	 17c:	d65f03c0	ret

The find_next_bit() call is replaced with 6 instructions, while find_next_bit() itself is 41 instructions plus the function call overhead.

Despite the inlining, the scripts/bloat-o-meter report shows a smaller .text size after applying the series:

	add/remove: 11/9 grow/shrink: 233/176 up/down: 5780/-6768 (-988)

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yury Norov <[email protected]>
Acked-by: Rasmus Villemoes <[email protected]>
Acked-by: Andy Shevchenko <[email protected]>
Cc: Alexey Klimov <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: David Sterba <[email protected]>
Cc: Dennis Zhou <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: Jianpeng Ma <[email protected]>
Cc: Joe Perches <[email protected]>
Cc: John Paul Adrian Glaubitz <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Rich Felker <[email protected]>
Cc: Stefano Brivio <[email protected]>
Cc: Wei Yang <[email protected]>
Cc: Wolfram Sang <[email protected]>
Cc: Yoshinori Sato <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Stephen Rothwell <[email protected]>
1 parent ea81c98 commit b5dce70
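
For readers outside the kernel tree, here is a minimal user-space sketch of the single-word fast path this commit adds. It is an illustration only, not the kernel code: GENMASK() and __ffs() are re-created with plain C and a GCC/Clang builtin, and a 64-bit unsigned long is assumed, as in the ARM64 example above.

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
/* Mask with bits [l, h] set, mirroring the kernel's GENMASK(h, l). */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

/* Index of the least significant set bit; 'word' must be non-zero. */
static inline unsigned long __ffs(unsigned long word)
{
	return (unsigned long)__builtin_ctzl(word);
}

/*
 * Single-word fast path: mask off the bits below 'offset', then either
 * return the first remaining set bit or 'size' if none is left.
 */
static inline unsigned long
find_next_bit_small(const unsigned long *addr, unsigned long size,
		    unsigned long offset)
{
	unsigned long val;

	if (offset >= size)
		return size;

	val = *addr & GENMASK(size - 1, offset);
	return val ? __ffs(val) : size;
}

int main(void)
{
	unsigned long map = 0x90UL;	/* bits 4 and 7 set */

	printf("%lu\n", find_next_bit_small(&map, 64, 0));	/* 4 */
	printf("%lu\n", find_next_bit_small(&map, 64, 5));	/* 7 */
	printf("%lu\n", find_next_bit_small(&map, 64, 8));	/* 64: not found */
	return 0;
}

Because size is a compile-time constant in callers such as cpumask_next(), the whole body folds down to the handful of instructions shown in the disassembly above.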

File tree

2 files changed: +51 −0 lines changed


include/asm-generic/bitops/find.h

Lines changed: 30 additions & 0 deletions
@@ -20,6 +20,16 @@ static inline
 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = *addr & GENMASK(size - 1, offset);
+		return val ? __ffs(val) : size;
+	}
+
 	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
 }
 #endif
@@ -40,6 +50,16 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
 		const unsigned long *addr2, unsigned long size,
 		unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+		return val ? __ffs(val) : size;
+	}
+
 	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
 }
 #endif
@@ -58,6 +78,16 @@ static inline
 unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = *addr | ~GENMASK(size - 1, offset);
+		return val == ~0UL ? size : ffz(val);
+	}
+
 	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
 }
 #endif
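
The find_next_zero_bit() hunk inverts the same idea: bits outside [offset, size - 1] are forced to 1 so they can never be reported as a free (zero) bit. A standalone sketch of just that step, reusing the hypothetical GENMASK() from the sketch above and a builtin-based ffz():

/* First zero bit of 'word'; 'word' must not be all ones. */
static inline unsigned long ffz(unsigned long word)
{
	return (unsigned long)__builtin_ctzl(~word);
}

static inline unsigned long
find_next_zero_bit_small(const unsigned long *addr, unsigned long size,
			 unsigned long offset)
{
	unsigned long val;

	if (offset >= size)
		return size;

	/* Force the bits outside [offset, size - 1] to 1 so that only
	 * in-range zero bits can be found. */
	val = *addr | ~GENMASK(size - 1, offset);
	return val == ~0UL ? size : ffz(val);
}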

include/asm-generic/bitops/le.h

Lines changed: 21 additions & 0 deletions
@@ -5,6 +5,7 @@
 #include <asm-generic/bitops/find.h>
 #include <asm/types.h>
 #include <asm/byteorder.h>
+#include <linux/swab.h>

 #if defined(__LITTLE_ENDIAN)

@@ -37,6 +38,16 @@ static inline
 unsigned long find_next_zero_bit_le(const void *addr, unsigned
		long size, unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val = *(const unsigned long *)addr;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = swab(val) | ~GENMASK(size - 1, offset);
+		return val == ~0UL ? size : ffz(val);
+	}
+
 	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
 }
 #endif
@@ -46,6 +57,16 @@ static inline
 unsigned long find_next_bit_le(const void *addr, unsigned
		long size, unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val = *(const unsigned long *)addr;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = swab(val) & GENMASK(size - 1, offset);
+		return val ? __ffs(val) : size;
+	}
+
 	return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
 }
 #endif
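
The swab() in these _le hunks covers the case where a little-endian bitmap is scanned on a big-endian host: the word's bytes sit in reverse order in memory, so they are swapped before masking. A user-space sketch of that step (assumptions: a 64-bit unsigned long, swab() stood in for by a compiler builtin, and GENMASK()/__ffs() reused from the first sketch):

/* Stand-in for the kernel's swab() on a 64-bit long. */
static inline unsigned long swab_long(unsigned long w)
{
	return (unsigned long)__builtin_bswap64((unsigned long long)w);
}

static inline unsigned long
find_next_bit_le_small(const void *addr, unsigned long size,
		       unsigned long offset)
{
	unsigned long val = *(const unsigned long *)addr;

	if (offset >= size)
		return size;

	/* Byte-swap so that little-endian bit n lands in bit n of 'val',
	 * then mask and scan exactly as in the native fast path. */
	val = swab_long(val) & GENMASK(size - 1, offset);
	return val ? __ffs(val) : size;
}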
