@@ -26,9 +26,22 @@ namespace testing {
namespace {

+#ifdef __x86_64__
#define GET_XMM(__var, __xmm) \
  asm volatile("movq %%" #__xmm ", %0" : "=r"(__var))
#define SET_XMM(__var, __xmm) asm volatile("movq %0, %%" #__xmm : : "r"(__var))
+#define GET_FP0(__var) GET_XMM(__var, xmm0)
+#define SET_FP0(__var) SET_XMM(__var, xmm0)
+#elif __aarch64__
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
+#define GET_FPREG(var, regname) \
+  asm volatile("str " __stringify(regname) ", %0" : "=m"(var))
+#define SET_FPREG(var, regname) \
+  asm volatile("ldr " __stringify(regname) ", %0" : : "m"(var))
+#define GET_FP0(var) GET_FPREG(var, d0)
+#define SET_FP0(var) SET_FPREG(var, d0)
+#endif

int pid;
int tid;
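Note on the new aarch64 macros: __stringify pastes the register token into the asm template, so GET_FPREG(var, d0) expands to asm volatile("str d0, %0" : "=m"(var)), spilling the low 64 bits of v0 to memory, and SET_FPREG loads them back with ldr. A minimal standalone sketch of the round trip (the main() harness is illustrative only, not part of this change):

    // aarch64-only sketch, assuming the macros exactly as added above
    // (with the corrected input constraint on SET_FPREG).
    #include <cstdint>
    #include <cstdio>

    #define __stringify_1(x...) #x
    #define __stringify(x...) __stringify_1(x)
    #define GET_FPREG(var, regname) \
      asm volatile("str " __stringify(regname) ", %0" : "=m"(var))
    #define SET_FPREG(var, regname) \
      asm volatile("ldr " __stringify(regname) ", %0" : : "m"(var))

    int main() {
      uint64_t in = 0x1122334455667788, out = 0;
      SET_FPREG(in, d0);   // ldr d0, %0: load 64 bits into FP register d0
      GET_FPREG(out, d0);  // str d0, %0: store d0 back to memory
      std::printf("%s\n", in == out ? "ok" : "mismatch");
      return 0;
    }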
@@ -40,20 +53,21 @@ void sigusr2(int s, siginfo_t* siginfo, void* _uc) {
  uint64_t val = SIGUSR2;

  // Record the value of %xmm0 on entry and then clobber it.
-  GET_XMM(entryxmm[1], xmm0);
-  SET_XMM(val, xmm0);
-  GET_XMM(exitxmm[1], xmm0);
+  GET_FP0(entryxmm[1]);
+  SET_FP0(val);
+  GET_FP0(exitxmm[1]);
}

void sigusr1(int s, siginfo_t* siginfo, void* _uc) {
  uint64_t val = SIGUSR1;

  // Record the value of %xmm0 on entry and then clobber it.
-  GET_XMM(entryxmm[0], xmm0);
-  SET_XMM(val, xmm0);
+  GET_FP0(entryxmm[0]);
+  SET_FP0(val);

  // Send a SIGUSR2 to ourselves. The signal mask is configured such that
  // the SIGUSR2 handler will run before this handler returns.
+#ifdef __x86_64__
  asm volatile(
      "movl %[killnr], %%eax;"
      "movl %[pid], %%edi;"
@@ -66,10 +80,19 @@ void sigusr1(int s, siginfo_t* siginfo, void* _uc) {
      : "rax", "rdi", "rsi", "rdx",
        // Clobbered by syscall.
        "rcx", "r11");
+#elif __aarch64__
+  asm volatile(
+      "mov x8, %0\n"
+      "mov x0, %1\n"
+      "mov x1, %2\n"
+      "mov x2, %3\n"
+      "svc #0\n" ::"r"(__NR_tgkill),
+      "r"(pid), "r"(tid), "r"(SIGUSR2));
+#endif

  // Record value of %xmm0 again to verify that the nested signal handler
  // does not clobber it.
-  GET_XMM(exitxmm[0], xmm0);
+  GET_FP0(exitxmm[0]);
}

TEST(FPSigTest, NestedSignals) {
@@ -98,8 +121,9 @@ TEST(FPSigTest, NestedSignals) {
  // to signal the current thread ensures that this is the clobbered thread.

  uint64_t expected = 0xdeadbeeffacefeed;
-  SET_XMM(expected, xmm0);
+  SET_FP0(expected);

+#ifdef __x86_64__
  asm volatile(
      "movl %[killnr], %%eax;"
      "movl %[pid], %%edi;"
@@ -112,9 +136,18 @@ TEST(FPSigTest, NestedSignals) {
      : "rax", "rdi", "rsi", "rdx",
        // Clobbered by syscall.
        "rcx", "r11");
+#elif __aarch64__
+  asm volatile(
+      "mov x8, %0\n"
+      "mov x0, %1\n"
+      "mov x1, %2\n"
+      "mov x2, %3\n"
+      "svc #0\n" ::"r"(__NR_tgkill),
+      "r"(pid), "r"(tid), "r"(SIGUSR1));
+#endif

  uint64_t got;
-  GET_XMM(got, xmm0);
+  GET_FP0(got);

  //
  // The checks below verify the following:
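Both architectures issue the tgkill(2) syscall through hand-written assembly, presumably so the test pins down exactly which registers the signal-raising sequence touches. The aarch64 blocks follow the kernel calling convention: syscall number in x8, arguments in x0-x2, trap via svc #0. For comparison, a sketch of the same call through the portable libc wrapper (helper name is illustrative, not from this change):

    // Same tgkill(2) invocation via syscall(2); functionally equivalent,
    // but the compiler decides which registers get clobbered.
    #include <csignal>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    void SendSigusr2(pid_t pid, pid_t tid) {
      // Kernel entry is the same as the inline asm above:
      // x8 = __NR_tgkill, x0 = pid, x1 = tid, x2 = SIGUSR2, svc #0.
      syscall(__NR_tgkill, pid, tid, SIGUSR2);
    }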