22
22
#endif
23
23
#include <windows.h>
24
24
#include <intrin.h>
25
+ #elif defined(__Fuchsia__)
26
+ #include <unistd.h>
27
+ #include <zircon/process.h>
28
+ #include <zircon/syscalls.h>
25
29
#else
26
30
#include <errno.h>
27
31
#include <sys/mman.h>
@@ -172,10 +176,17 @@ void deallocate(void *memory)
172
176
#endif
173
177
}
174
178
179
+ // Rounds |x| up to a multiple of |m|, where |m| is a power of 2.
180
+ inline uintptr_t roundUp (uintptr_t x, uintptr_t m)
181
+ {
182
+ ASSERT (m > 0 && (m & (m - 1 )) == 0 ); // |m| must be a power of 2.
183
+ return (x + m - 1 ) & ~(m - 1 );
184
+ }
185
+
175
186
void *allocateExecutable (size_t bytes)
176
187
{
177
188
size_t pageSize = memoryPageSize ();
178
- size_t length = (bytes + pageSize - 1 ) & ~(pageSize - 1 );
189
+ size_t length = roundUp (bytes, pageSize);
179
190
void *mapping;
180
191
181
192
#if defined(LINUX_ENABLE_NAMED_MMAP)
@@ -198,6 +209,39 @@ void *allocateExecutable(size_t bytes)
198
209
{
199
210
mapping = nullptr ;
200
211
}
212
+ #elif defined(__Fuchsia__)
213
+ zx_handle_t vmo;
214
+ if (zx_vmo_create (length, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK) {
215
+ return nullptr ;
216
+ }
217
+ zx_vaddr_t reservation;
218
+ zx_status_t status = zx_vmar_map (
219
+ zx_vmar_root_self (), ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE,
220
+ 0 , vmo, 0 , length, &reservation);
221
+ zx_handle_close (vmo);
222
+ if (status != ZX_OK) {
223
+ return nullptr ;
224
+ }
225
+
226
+ zx_vaddr_t alignedReservation = roundUp (reservation, pageSize);
227
+ mapping = reinterpret_cast <void *>(alignedReservation);
228
+
229
+ // Unmap extra memory reserved before the block.
230
+ if (alignedReservation != reservation) {
231
+ size_t prefix_size = alignedReservation - reservation;
232
+ status =
233
+ zx_vmar_unmap (zx_vmar_root_self (), reservation, prefix_size);
234
+ ASSERT (status == ZX_OK);
235
+ length -= prefix_size;
236
+ }
237
+
238
+ // Unmap extra memory at the end.
239
+ if (length > bytes) {
240
+ status = zx_vmar_unmap (
241
+ zx_vmar_root_self (), alignedReservation + bytes,
242
+ length - bytes);
243
+ ASSERT (status == ZX_OK);
244
+ }
201
245
#else
202
246
mapping = allocate (length, pageSize);
203
247
#endif
@@ -210,6 +254,11 @@ void markExecutable(void *memory, size_t bytes)
210
254
#if defined(_WIN32)
211
255
unsigned long oldProtection;
212
256
VirtualProtect (memory, bytes, PAGE_EXECUTE_READ, &oldProtection);
257
+ #elif defined(__Fuchsia__)
258
+ zx_status_t status = zx_vmar_protect (
259
+ zx_vmar_root_self (), ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_EXECUTE,
260
+ reinterpret_cast <zx_vaddr_t >(memory), bytes);
261
+ ASSERT (status == ZX_OK);
213
262
#else
214
263
mprotect (memory, bytes, PROT_READ | PROT_EXEC);
215
264
#endif
@@ -225,6 +274,9 @@ void deallocateExecutable(void *memory, size_t bytes)
225
274
size_t pageSize = memoryPageSize ();
226
275
size_t length = (bytes + pageSize - 1 ) & ~(pageSize - 1 );
227
276
munmap (memory, length);
277
+ #elif defined(__Fuchsia__)
278
+ zx_vmar_unmap (zx_vmar_root_self (), reinterpret_cast <zx_vaddr_t >(memory),
279
+ bytes);
228
280
#else
229
281
mprotect (memory, bytes, PROT_READ | PROT_WRITE);
230
282
deallocate (memory);
0 commit comments