Skip to content

Commit 1326892

Browse files
committed
[Sanitizers] TSan allocator set errno on failure.
Summary:
Set proper errno codes on allocation failures and change the realloc, pvalloc, aligned_alloc, memalign and posix_memalign implementations to satisfy their man-page-specified requirements. Also modify the allocator API implementation to bring it closer to the other sanitizers' allocators.

Reviewers: dvyukov

Subscribers: llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D35690

llvm-svn: 308929
1 parent e0ba415 commit 1326892

File tree

8 files changed

+181
-44
lines changed

8 files changed

+181
-44
lines changed

compiler-rt/lib/tsan/rtl/tsan_fd.cc

+3-3
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,8 @@ static bool bogusfd(int fd) {
4848
}
4949

5050
static FdSync *allocsync(ThreadState *thr, uptr pc) {
51-
FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
52-
false);
51+
FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
52+
kDefaultAlignment, false);
5353
atomic_store(&s->rc, 1, memory_order_relaxed);
5454
return s;
5555
}
@@ -79,7 +79,7 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
7979
if (l1 == 0) {
8080
uptr size = kTableSizeL2 * sizeof(FdDesc);
8181
// We need this to reside in user memory to properly catch races on it.
82-
void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
82+
void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
8383
internal_memset(p, 0, size);
8484
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
8585
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))

compiler-rt/lib/tsan/rtl/tsan_interceptors.cc

+7-9
Original file line numberDiff line numberDiff line change
@@ -584,7 +584,7 @@ TSAN_INTERCEPTOR(void*, malloc, uptr size) {
584584

585585
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
586586
SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
587-
return user_alloc(thr, pc, sz, align);
587+
return user_memalign(thr, pc, align, sz);
588588
}
589589

590590
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
@@ -730,7 +730,7 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
730730
#if SANITIZER_LINUX
731731
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
732732
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
733-
return user_alloc(thr, pc, sz, align);
733+
return user_memalign(thr, pc, align, sz);
734734
}
735735
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
736736
#else
@@ -739,21 +739,20 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
739739

740740
#if !SANITIZER_MAC
741741
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
742-
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
743-
return user_alloc(thr, pc, sz, align);
742+
SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
743+
return user_aligned_alloc(thr, pc, align, sz);
744744
}
745745

746746
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
747747
SCOPED_INTERCEPTOR_RAW(valloc, sz);
748-
return user_alloc(thr, pc, sz, GetPageSizeCached());
748+
return user_valloc(thr, pc, sz);
749749
}
750750
#endif
751751

752752
#if SANITIZER_LINUX
753753
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
754754
SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
755-
sz = RoundUp(sz, GetPageSizeCached());
756-
return user_alloc(thr, pc, sz, GetPageSizeCached());
755+
return user_pvalloc(thr, pc, sz);
757756
}
758757
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
759758
#else
@@ -763,8 +762,7 @@ TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
763762
#if !SANITIZER_MAC
764763
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
765764
SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
766-
*memptr = user_alloc(thr, pc, sz, align);
767-
return 0;
765+
return user_posix_memalign(thr, pc, memptr, align, sz);
768766
}
769767
#endif
770768

compiler-rt/lib/tsan/rtl/tsan_libdispatch_mac.cc

+2-1
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,8 @@ static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
8686
void *orig_context,
8787
dispatch_function_t orig_work) {
8888
tsan_block_context_t *new_context =
89-
(tsan_block_context_t *)user_alloc(thr, pc, sizeof(tsan_block_context_t));
89+
(tsan_block_context_t *)user_alloc_internal(thr, pc,
90+
sizeof(tsan_block_context_t));
9091
new_context->queue = queue;
9192
new_context->orig_context = orig_context;
9293
new_context->orig_work = orig_work;

compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cc

+2-2
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ using namespace __tsan;
2626
#define COMMON_MALLOC_FORCE_UNLOCK()
2727
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
2828
void *p = \
29-
user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment)
29+
user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
3030
#define COMMON_MALLOC_MALLOC(size) \
3131
if (cur_thread()->in_symbolizer) return InternalAlloc(size); \
3232
SCOPED_INTERCEPTOR_RAW(malloc, size); \
@@ -43,7 +43,7 @@ using namespace __tsan;
4343
if (cur_thread()->in_symbolizer) \
4444
return InternalAlloc(size, nullptr, GetPageSizeCached()); \
4545
SCOPED_INTERCEPTOR_RAW(valloc, size); \
46-
void *p = user_alloc(thr, pc, size, GetPageSizeCached())
46+
void *p = user_valloc(thr, pc, size)
4747
#define COMMON_MALLOC_FREE(ptr) \
4848
if (cur_thread()->in_symbolizer) return InternalFree(ptr); \
4949
SCOPED_INTERCEPTOR_RAW(free, ptr); \

compiler-rt/lib/tsan/rtl/tsan_mman.cc

+68-18
Original file line numberDiff line numberDiff line change
@@ -149,11 +149,12 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
149149
OutputReport(thr, rep);
150150
}
151151

152-
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
152+
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
153+
bool signal) {
153154
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
154155
return Allocator::FailureHandler::OnBadRequest();
155156
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
156-
if (p == 0)
157+
if (UNLIKELY(p == 0))
157158
return 0;
158159
if (ctx && ctx->initialized)
159160
OnUserAlloc(thr, pc, (uptr)p, sz, true);
@@ -162,15 +163,6 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
162163
return p;
163164
}
164165

165-
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
166-
if (CheckForCallocOverflow(size, n))
167-
return Allocator::FailureHandler::OnBadRequest();
168-
void *p = user_alloc(thr, pc, n * size);
169-
if (p)
170-
internal_memset(p, 0, n * size);
171-
return p;
172-
}
173-
174166
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
175167
ScopedGlobalProcessor sgp;
176168
if (ctx && ctx->initialized)
@@ -180,6 +172,19 @@ void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
180172
SignalUnsafeCall(thr, pc);
181173
}
182174

175+
void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
176+
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
177+
}
178+
179+
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
180+
if (UNLIKELY(CheckForCallocOverflow(size, n)))
181+
return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
182+
void *p = user_alloc_internal(thr, pc, n * size);
183+
if (p)
184+
internal_memset(p, 0, n * size);
185+
return SetErrnoOnNull(p);
186+
}
187+
183188
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
184189
DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
185190
ctx->metamap.AllocBlock(thr, pc, p, sz);
@@ -200,15 +205,60 @@ void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
200205
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
201206
// FIXME: Handle "shrinking" more efficiently,
202207
// it seems that some software actually does this.
203-
void *p2 = user_alloc(thr, pc, sz);
204-
if (p2 == 0)
205-
return 0;
206-
if (p) {
207-
uptr oldsz = user_alloc_usable_size(p);
208-
internal_memcpy(p2, p, min(oldsz, sz));
208+
if (!p)
209+
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
210+
if (!sz) {
209211
user_free(thr, pc, p);
212+
return nullptr;
210213
}
211-
return p2;
214+
void *new_p = user_alloc_internal(thr, pc, sz);
215+
if (new_p) {
216+
uptr old_sz = user_alloc_usable_size(p);
217+
internal_memcpy(new_p, p, min(old_sz, sz));
218+
user_free(thr, pc, p);
219+
}
220+
return SetErrnoOnNull(new_p);
221+
}
222+
223+
void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
224+
if (UNLIKELY(!IsPowerOfTwo(align))) {
225+
errno = errno_EINVAL;
226+
return Allocator::FailureHandler::OnBadRequest();
227+
}
228+
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
229+
}
230+
231+
int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
232+
uptr sz) {
233+
if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
234+
Allocator::FailureHandler::OnBadRequest();
235+
return errno_EINVAL;
236+
}
237+
void *ptr = user_alloc_internal(thr, pc, sz, align);
238+
if (UNLIKELY(!ptr))
239+
return errno_ENOMEM;
240+
CHECK(IsAligned((uptr)ptr, align));
241+
*memptr = ptr;
242+
return 0;
243+
}
244+
245+
void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
246+
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
247+
errno = errno_EINVAL;
248+
return Allocator::FailureHandler::OnBadRequest();
249+
}
250+
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
251+
}
252+
253+
void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
254+
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
255+
}
256+
257+
void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
258+
uptr PageSize = GetPageSizeCached();
259+
// pvalloc(0) should allocate one page.
260+
sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
261+
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
212262
}
213263

214264
uptr user_alloc_usable_size(const void *p) {

compiler-rt/lib/tsan/rtl/tsan_mman.h

+11-4
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,20 @@ void AllocatorProcFinish(Processor *proc);
2727
void AllocatorPrintStats();
2828

2929
// For user allocations.
30-
void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
31-
uptr align = kDefaultAlignment, bool signal = true);
32-
void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
30+
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
31+
uptr align = kDefaultAlignment, bool signal = true);
3332
// Does not accept NULL.
3433
void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
34+
// Interceptor implementations.
35+
void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
36+
void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
3537
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
36-
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
38+
void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz);
39+
int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
40+
uptr sz);
41+
void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz);
42+
void *user_valloc(ThreadState *thr, uptr pc, uptr sz);
43+
void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz);
3744
uptr user_alloc_usable_size(const void *p);
3845

3946
// Invoking malloc/free hooks that may be installed by the user.

compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc

+77-6
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ TEST(Mman, UserRealloc) {
5656
// Realloc(NULL, N) is equivalent to malloc(N), thus must return
5757
// non-NULL pointer.
5858
EXPECT_NE(p, (void*)0);
59+
user_free(thr, pc, p);
5960
}
6061
{
6162
void *p = user_realloc(thr, pc, 0, 100);
@@ -67,8 +68,9 @@ TEST(Mman, UserRealloc) {
6768
void *p = user_alloc(thr, pc, 100);
6869
EXPECT_NE(p, (void*)0);
6970
memset(p, 0xde, 100);
71+
// Realloc(P, 0) is equivalent to free(P) and returns NULL.
7072
void *p2 = user_realloc(thr, pc, p, 0);
71-
EXPECT_NE(p2, (void*)0);
73+
EXPECT_EQ(p2, (void*)0);
7274
}
7375
{
7476
void *p = user_realloc(thr, pc, 0, 100);
@@ -135,12 +137,28 @@ TEST(Mman, Stats) {
135137
EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
136138
}
137139

140+
TEST(Mman, Valloc) {
141+
ThreadState *thr = cur_thread();
142+
143+
void *p = user_valloc(thr, 0, 100);
144+
EXPECT_NE(p, (void*)0);
145+
user_free(thr, 0, p);
146+
147+
p = user_pvalloc(thr, 0, 100);
148+
EXPECT_NE(p, (void*)0);
149+
user_free(thr, 0, p);
150+
151+
p = user_pvalloc(thr, 0, 0);
152+
EXPECT_NE(p, (void*)0);
153+
EXPECT_EQ(GetPageSizeCached(), __sanitizer_get_allocated_size(p));
154+
user_free(thr, 0, p);
155+
}
156+
157+
#if !SANITIZER_DEBUG
158+
// EXPECT_DEATH clones a thread with 4K stack,
159+
// which is overflowed by tsan memory access functions in debug mode.
160+
138161
TEST(Mman, CallocOverflow) {
139-
#if SANITIZER_DEBUG
140-
// EXPECT_DEATH clones a thread with 4K stack,
141-
// which is overflown by tsan memory accesses functions in debug mode.
142-
return;
143-
#endif
144162
ThreadState *thr = cur_thread();
145163
uptr pc = 0;
146164
size_t kArraySize = 4096;
@@ -152,4 +170,57 @@ TEST(Mman, CallocOverflow) {
152170
EXPECT_EQ(0L, p);
153171
}
154172

173+
TEST(Mman, Memalign) {
174+
ThreadState *thr = cur_thread();
175+
176+
void *p = user_memalign(thr, 0, 8, 100);
177+
EXPECT_NE(p, (void*)0);
178+
user_free(thr, 0, p);
179+
180+
p = NULL;
181+
EXPECT_DEATH(p = user_memalign(thr, 0, 7, 100),
182+
"allocator is terminating the process instead of returning 0");
183+
EXPECT_EQ(0L, p);
184+
}
185+
186+
TEST(Mman, PosixMemalign) {
187+
ThreadState *thr = cur_thread();
188+
189+
void *p = NULL;
190+
int res = user_posix_memalign(thr, 0, &p, 8, 100);
191+
EXPECT_NE(p, (void*)0);
192+
EXPECT_EQ(res, 0);
193+
user_free(thr, 0, p);
194+
195+
p = NULL;
196+
// Alignment is not a power of two, although it is a multiple of sizeof(void*).
197+
EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 3 * sizeof(p), 100),
198+
"allocator is terminating the process instead of returning 0");
199+
EXPECT_EQ(0L, p);
200+
// Alignment is not a multiple of sizeof(void*), although it is a power of 2.
201+
EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 2, 100),
202+
"allocator is terminating the process instead of returning 0");
203+
EXPECT_EQ(0L, p);
204+
}
205+
206+
TEST(Mman, AlignedAlloc) {
207+
ThreadState *thr = cur_thread();
208+
209+
void *p = user_aligned_alloc(thr, 0, 8, 64);
210+
EXPECT_NE(p, (void*)0);
211+
user_free(thr, 0, p);
212+
213+
p = NULL;
214+
// Alignment is not a power of 2.
215+
EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 7, 100),
216+
"allocator is terminating the process instead of returning 0");
217+
EXPECT_EQ(0L, p);
218+
// Size is not a multiple of alignment.
219+
EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 8, 100),
220+
"allocator is terminating the process instead of returning 0");
221+
EXPECT_EQ(0L, p);
222+
}
223+
224+
#endif
225+
155226
} // namespace __tsan

0 commit comments

Comments
 (0)