
Commit 24e7192

[Fuchsia] Implement ExecutableMemory support on Fuchsia.
Added Fuchsia-specific versions of allocateExecutable() and markExecutable().

Bug: chromium:778467
Change-Id: I027f1409f7b0343e102f98fcc34f2e93c2d626e4
Reviewed-on: https://swiftshader-review.googlesource.com/c/23408
Reviewed-by: Alexis Hétu <[email protected]>
Tested-by: Sergey Ulanov <[email protected]>
Parent: f6d56f1
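The routines touched here form the executable-memory lifecycle Reactor uses for JIT-generated code: allocateExecutable() hands back writable pages, markExecutable() flips them to read/execute, and deallocateExecutable() releases them. Below is a minimal sketch of that lifecycle; the runJitCode() wrapper and its arguments are hypothetical, and only the three ExecutableMemory functions come from this file.

#include <cstddef>
#include <cstring>

// Declarations for the routines in src/Reactor/ExecutableMemory.cpp
// (normally pulled in through its header; namespace omitted here).
void *allocateExecutable(size_t bytes);
void markExecutable(void *memory, size_t bytes);
void deallocateExecutable(void *memory, size_t bytes);

// Hypothetical helper: copy already-generated machine code into fresh
// executable memory, run it once, and release it again.
void runJitCode(const void *code, size_t codeSize)
{
	void *memory = allocateExecutable(codeSize);   // mapped read/write
	if(!memory)
	{
		return;
	}

	memcpy(memory, code, codeSize);      // fill while the pages are still writable
	markExecutable(memory, codeSize);    // then switch them to read/execute

	using EntryPoint = void (*)();
	reinterpret_cast<EntryPoint>(memory)();   // jump into the generated code

	deallocateExecutable(memory, codeSize);
}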

1 file changed: +53 -1


src/Reactor/ExecutableMemory.cpp (53 additions, 1 deletion)
@@ -22,6 +22,10 @@
 	#endif
 	#include <windows.h>
 	#include <intrin.h>
+#elif defined(__Fuchsia__)
+	#include <unistd.h>
+	#include <zircon/process.h>
+	#include <zircon/syscalls.h>
 #else
 	#include <errno.h>
 	#include <sys/mman.h>
@@ -172,10 +176,17 @@ void deallocate(void *memory)
 #endif
 }
 
+// Rounds |x| up to a multiple of |m|, where |m| is a power of 2.
+inline uintptr_t roundUp(uintptr_t x, uintptr_t m)
+{
+	ASSERT(m > 0 && (m & (m - 1)) == 0); // |m| must be a power of 2.
+	return (x + m - 1) & ~(m - 1);
+}
+
 void *allocateExecutable(size_t bytes)
 {
 	size_t pageSize = memoryPageSize();
-	size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
+	size_t length = roundUp(bytes, pageSize);
 	void *mapping;
 
 #if defined(LINUX_ENABLE_NAMED_MMAP)
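As a quick check of the new roundUp() helper, with 4096 used purely as an illustrative page size (the real value comes from memoryPageSize()):

// roundUp(x, m) with m = 4096, a power of two:
//   roundUp(1, 4096)    == 4096   // (1 + 4095) & ~4095
//   roundUp(4096, 4096) == 4096   // already a multiple, left unchanged
//   roundUp(5000, 4096) == 8192   // rounded up to the next 4 KiB boundary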
@@ -198,6 +209,39 @@ void *allocateExecutable(size_t bytes)
 	{
 		mapping = nullptr;
 	}
+#elif defined(__Fuchsia__)
+	zx_handle_t vmo;
+	if (zx_vmo_create(length, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK) {
+		return nullptr;
+	}
+	zx_vaddr_t reservation;
+	zx_status_t status = zx_vmar_map(
+		zx_vmar_root_self(), ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE,
+		0, vmo, 0, length, &reservation);
+	zx_handle_close(vmo);
+	if (status != ZX_OK) {
+		return nullptr;
+	}
+
+	zx_vaddr_t alignedReservation = roundUp(reservation, pageSize);
+	mapping = reinterpret_cast<void*>(alignedReservation);
+
+	// Unmap extra memory reserved before the block.
+	if (alignedReservation != reservation) {
+		size_t prefix_size = alignedReservation - reservation;
+		status =
+			zx_vmar_unmap(zx_vmar_root_self(), reservation, prefix_size);
+		ASSERT(status == ZX_OK);
+		length -= prefix_size;
+	}
+
+	// Unmap extra memory at the end.
+	if (length > bytes) {
+		status = zx_vmar_unmap(
+			zx_vmar_root_self(), alignedReservation + bytes,
+			length - bytes);
+		ASSERT(status == ZX_OK);
+	}
 #else
 	mapping = allocate(length, pageSize);
 #endif
@@ -210,6 +254,11 @@ void markExecutable(void *memory, size_t bytes)
 #if defined(_WIN32)
 	unsigned long oldProtection;
 	VirtualProtect(memory, bytes, PAGE_EXECUTE_READ, &oldProtection);
+#elif defined(__Fuchsia__)
+	zx_status_t status = zx_vmar_protect(
+		zx_vmar_root_self(), ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_EXECUTE,
+		reinterpret_cast<zx_vaddr_t>(memory), bytes);
+	ASSERT(status == ZX_OK);
 #else
 	mprotect(memory, bytes, PROT_READ | PROT_EXEC);
 #endif
@@ -225,6 +274,9 @@ void deallocateExecutable(void *memory, size_t bytes)
 	size_t pageSize = memoryPageSize();
 	size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
 	munmap(memory, length);
+#elif defined(__Fuchsia__)
+	zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory),
+		bytes);
 #else
 	mprotect(memory, bytes, PROT_READ | PROT_WRITE);
 	deallocate(memory);
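Taken together, the Fuchsia branches follow the same map-writable-then-protect-executable flow as the other platforms, expressed through Zircon's VMO/VMAR syscalls. Below is a condensed sketch of that flow using the same calls and flags as the diff above; the helper names mapWritable() and makeExecutable() are made up for illustration, and error handling is compressed.

#include <cstddef>
#include <zircon/process.h>
#include <zircon/syscalls.h>

// Map `length` bytes (assumed page-aligned by the caller) as read/write
// memory backed by a fresh VMO. Returns nullptr on failure.
void *mapWritable(size_t length)
{
	zx_handle_t vmo;
	if (zx_vmo_create(length, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK)
	{
		return nullptr;
	}

	zx_vaddr_t address;
	zx_status_t status = zx_vmar_map(
		zx_vmar_root_self(), ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE,
		0, vmo, 0, length, &address);
	zx_handle_close(vmo);   // the mapping keeps the pages alive

	return (status == ZX_OK) ? reinterpret_cast<void*>(address) : nullptr;
}

// Flip an existing mapping from read/write to read/execute.
bool makeExecutable(void *memory, size_t length)
{
	zx_status_t status = zx_vmar_protect(
		zx_vmar_root_self(), ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_EXECUTE,
		reinterpret_cast<zx_vaddr_t>(memory), length);
	return status == ZX_OK;
}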
