@@ -149,11 +149,12 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
   OutputReport(thr, rep);
 }
 
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
+                          bool signal) {
   if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
     return Allocator::FailureHandler::OnBadRequest();
   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
-  if (p == 0)
+  if (UNLIKELY(p == 0))
     return 0;
   if (ctx && ctx->initialized)
     OnUserAlloc(thr, pc, (uptr)p, sz, true);
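The rename makes user_alloc_internal the single raw-allocation path; the public malloc-family entry points added further down all funnel their result through SetErrnoOnNull so that a failed allocation sets errno, as POSIX requires. A minimal sketch of the pass-through contract that helper is assumed to provide (illustrative name and body, not the sanitizer_common implementation):

    #include <errno.h>

    // Illustrative sketch: forward the allocation result unchanged,
    // but set errno = ENOMEM when it is null, as POSIX requires of
    // malloc-family functions.
    static void *set_errno_on_null(void *ptr) {
      if (!ptr)
        errno = ENOMEM;
      return ptr;
    }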
@@ -162,15 +163,6 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
   return p;
 }
 
-void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
-  if (CheckForCallocOverflow(size, n))
-    return Allocator::FailureHandler::OnBadRequest();
-  void *p = user_alloc(thr, pc, n * size);
-  if (p)
-    internal_memset(p, 0, n * size);
-  return p;
-}
-
 void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
   ScopedGlobalProcessor sgp;
   if (ctx && ctx->initialized)
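(user_calloc is not dropped for good: the next hunk reintroduces it after user_free, rebuilt on user_alloc_internal with errno handling.)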
@@ -180,6 +172,19 @@ void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
     SignalUnsafeCall(thr, pc);
 }
 
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
+}
+
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+  if (UNLIKELY(CheckForCallocOverflow(size, n)))
+    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+  void *p = user_alloc_internal(thr, pc, n * size);
+  if (p)
+    internal_memset(p, 0, n * size);
+  return SetErrnoOnNull(p);
+}
+
 void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
   DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
   ctx->metamap.AllocBlock(thr, pc, p, sz);
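user_calloc still checks n * size for overflow before multiplying, now under UNLIKELY and with errno set on the bad-request path. A self-contained sketch of the property CheckForCallocOverflow is assumed to test (illustrative code, not the sanitizer_common implementation):

    #include <stdint.h>

    // Assumed behavior: true when n * size would wrap an
    // address-sized integer and the product cannot be represented.
    static bool calloc_overflows(uintptr_t size, uintptr_t n) {
      if (size == 0)
        return false;                 // 0 * n can never overflow
      return n > UINTPTR_MAX / size;  // i.e. n * size > UINTPTR_MAX
    }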
@@ -200,15 +205,60 @@ void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
 void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
   // FIXME: Handle "shrinking" more efficiently,
   // it seems that some software actually does this.
-  void *p2 = user_alloc(thr, pc, sz);
-  if (p2 == 0)
-    return 0;
-  if (p) {
-    uptr oldsz = user_alloc_usable_size(p);
-    internal_memcpy(p2, p, min(oldsz, sz));
+  if (!p)
+    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
+  if (!sz) {
     user_free(thr, pc, p);
+    return nullptr;
   }
-  return p2;
+  void *new_p = user_alloc_internal(thr, pc, sz);
+  if (new_p) {
+    uptr old_sz = user_alloc_usable_size(p);
+    internal_memcpy(new_p, p, min(old_sz, sz));
+    user_free(thr, pc, p);
+  }
+  return SetErrnoOnNull(new_p);
+}
+
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+  if (UNLIKELY(!IsPowerOfTwo(align))) {
+    errno = errno_EINVAL;
+    return Allocator::FailureHandler::OnBadRequest();
+  }
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+                        uptr sz) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
+    Allocator::FailureHandler::OnBadRequest();
+    return errno_EINVAL;
+  }
+  void *ptr = user_alloc_internal(thr, pc, sz, align);
+  if (UNLIKELY(!ptr))
+    return errno_ENOMEM;
+  CHECK(IsAligned((uptr)ptr, align));
+  *memptr = ptr;
+  return 0;
+}
+
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
+    errno = errno_EINVAL;
+    return Allocator::FailureHandler::OnBadRequest();
+  }
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
+}
+
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
+  uptr PageSize = GetPageSizeCached();
+  // pvalloc(0) should allocate one page.
+  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
 }
 
 uptr user_alloc_usable_size(const void *p) {
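Taken together, the new entry points give the interceptors POSIX-conformant failure behavior: malloc, calloc, realloc, memalign, aligned_alloc, valloc and pvalloc set errno to ENOMEM on failure (EINVAL for a bad alignment), while posix_memalign reports errors through its return value and leaves errno alone. A small caller-side sketch of the observable semantics this hunk encodes, written against the standard C API that these user_* functions back:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
      // realloc(NULL, n) is handled as a plain allocation.
      void *p = realloc(NULL, 64);

      // As implemented above, realloc(p, 0) frees p and returns
      // null without touching errno.
      p = realloc(p, 0);

      // posix_memalign reports failure via its return value, not errno.
      void *aligned = NULL;
      int err = posix_memalign(&aligned, 64, 256);
      if (err != 0)
        fprintf(stderr, "posix_memalign: error %d\n", err);
      else
        free(aligned);
      return 0;
    }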