
Commit 43aa6e6

[hwasan] Fixing false invalid-free with disabled tagging (#67169)
This problem was accidentally discovered by the internal symbolizer, but it is relevant for an external one as well; see the test.

If we just disable tagging, there may still be tagged allocations that have already been freed. After tagging is disabled, these tagged allocations can be released to the user as-is, which would later break the "invalid-free" check. We cannot simply disable the "invalid-free" check while tagging is disabled, because if tagging is re-enabled, the issue still applies to allocations created while it was disabled.

The fix is to continue tagging with zero even if tagging is disabled. This makes the "disabled" mode less efficient, but that is not the primary use case.
1 parent 7ca8c21 commit 43aa6e6
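To make the failure mode concrete, here is a minimal sketch (not part of the commit) of the scenario the message describes, built only on the public HWASan interface that the new test below also uses. Whether the second malloc actually reuses the first chunk depends on the allocator, so this is an illustration rather than a guaranteed reproducer.

// Hypothetical reproducer sketch; the authoritative regression test is the
// one added by this commit, shown further down.
#include <sanitizer/hwasan_interface.h>
#include <stdlib.h>

int main(void) {
  __hwasan_enable_allocator_tagging();
  void *p = malloc(10); // chunk memory is given a random non-zero tag
  free(p);              // chunk returns to the allocator, shadow still tagged

  __hwasan_disable_allocator_tagging();
  void *q = malloc(10); // may reuse the chunk above; before this fix the
                        // allocator skipped tagging entirely here, leaving the
                        // stale tag in shadow memory
  free(q);              // previously this free could be reported as a false
                        // "invalid-free"; with the fix the chunk is retagged
                        // with zero at allocation time, so the check passes
  return 0;
}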

File tree

2 files changed: +57 −20 lines changed


compiler-rt/lib/hwasan/hwasan_allocator.cpp

Lines changed: 15 additions & 20 deletions
@@ -234,28 +234,23 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
   }
 
   void *user_ptr = allocated;
-  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
-  // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
-  // retag to 0.
   if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
-      (flags()->tag_in_malloc || flags()->tag_in_free) &&
-      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
-    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
-      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
-      uptr tag_size = orig_size ? orig_size : 1;
-      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
-      user_ptr =
-          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
-      if (full_granule_size != tag_size) {
-        u8 *short_granule =
-            reinterpret_cast<u8 *>(allocated) + full_granule_size;
-        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
-                         tag_size % kShadowAlignment);
-        short_granule[kShadowAlignment - 1] = tag;
-      }
-    } else {
-      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
+      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
+      flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
+    tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
+    uptr tag_size = orig_size ? orig_size : 1;
+    uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
+    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
+    if (full_granule_size != tag_size) {
+      u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
+      TagMemoryAligned((uptr)short_granule, kShadowAlignment,
+                       tag_size % kShadowAlignment);
+      short_granule[kShadowAlignment - 1] = tag;
     }
+  } else {
+    // Tagging can not be completely skipped. If it's disabled, we need to tag
+    // with zeros.
+    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
   }
 
   Metadata *meta =
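A side note on the tagging path retained above: when the allocation size is not a multiple of the shadow granule, only the full granules are tagged directly; the trailing short granule records the number of bytes in use in its shadow byte, and the real tag goes into the granule's last byte. The following standalone sketch walks through that arithmetic, assuming the usual 16-byte HWASan granule (kShadowAlignment == 16) and a 10-byte allocation as in the test below; it mirrors names from the diff but is not HWASan's actual implementation.

#include <stdint.h>
#include <stdio.h>

// Illustration only; RoundDownTo mimics the helper used in the diff above.
static uintptr_t RoundDownTo(uintptr_t x, uintptr_t boundary) {
  return x & ~(boundary - 1);
}

int main(void) {
  const uintptr_t kShadowAlignment = 16; // assumed granule size
  uintptr_t orig_size = 10;              // e.g. malloc(10)
  uintptr_t tag_size = orig_size ? orig_size : 1;
  uintptr_t full_granule_size = RoundDownTo(tag_size, kShadowAlignment); // 0
  uintptr_t short_size = tag_size % kShadowAlignment;                    // 10
  // The short granule's shadow byte records 10 (bytes in use); the tag itself
  // is written to the granule's last byte, matching
  // short_granule[kShadowAlignment - 1] = tag in the code above.
  printf("full granules cover %ju bytes, short granule uses %ju bytes\n",
         (uintmax_t)full_granule_size, (uintmax_t)short_size);
  return 0;
}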
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
+// Test that disabling/enabling tagging does not trigger false reports on
+// allocations that happened in a different state.
+
+// RUN: %clang_hwasan -O1 %s -o %t && %run %t 2>&1
+
+#include <assert.h>
+#include <sanitizer/hwasan_interface.h>
+#include <stdlib.h>
+
+enum {
+  COUNT = 5,
+  SZ = 10,
+};
+void *x[COUNT];
+
+int main() {
+  __hwasan_enable_allocator_tagging();
+  for (unsigned i = 0; i < COUNT; ++i) {
+    x[i] = malloc(SZ);
+    assert(__hwasan_test_shadow(x[i], SZ) == -1);
+  }
+  for (unsigned i = 0; i < COUNT; ++i)
+    free(x[i]);
+
+  __hwasan_disable_allocator_tagging();
+  for (unsigned i = 0; i < COUNT; ++i) {
+    x[i] = malloc(SZ);
+    assert(__hwasan_tag_pointer(x[i], 0) == x[i]);
+    assert(__hwasan_test_shadow(x[i], SZ) == -1);
+  }
+  for (unsigned i = 0; i < COUNT; ++i)
+    free(x[i]);
+
+  __hwasan_enable_allocator_tagging();
+  for (unsigned i = 0; i < COUNT; ++i) {
+    x[i] = malloc(SZ);
+    assert(__hwasan_test_shadow(x[i], SZ) == -1);
+  }
+  for (unsigned i = 0; i < COUNT; ++i)
+    free(x[i]);
+  return 0;
+}
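For readers unfamiliar with the interface calls in the test: __hwasan_test_shadow(p, sz) returns -1 when the whole range is addressable under the pointer's tag (otherwise the offset of the first bad byte), and __hwasan_tag_pointer(p, 0) rewrites the pointer's tag byte, so asserting that it returns the same pointer checks that malloc handed out an untagged pointer while tagging was disabled. A small hypothetical usage sketch, separate from the test itself:

#include <assert.h>
#include <sanitizer/hwasan_interface.h>
#include <stdlib.h>

int main(void) {
  char *p = malloc(10);
  // The whole 10-byte allocation is addressable under p's own tag, so the
  // shadow probe reports no bad byte.
  assert(__hwasan_test_shadow(p, 10) == -1);
  free(p);
  return 0;
}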
