@@ -157,7 +157,16 @@ defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
 defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
 defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;
 
-let Predicates = [HasStdExtA] in {
+defm : AMOPat<"atomic_swap_64", "AMOSWAP_D", i64, [IsRV64]>;
+defm : AMOPat<"atomic_load_add_64", "AMOADD_D", i64, [IsRV64]>;
+defm : AMOPat<"atomic_load_and_64", "AMOAND_D", i64, [IsRV64]>;
+defm : AMOPat<"atomic_load_or_64", "AMOOR_D", i64, [IsRV64]>;
+defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D", i64, [IsRV64]>;
+defm : AMOPat<"atomic_load_max_64", "AMOMAX_D", i64, [IsRV64]>;
+defm : AMOPat<"atomic_load_min_64", "AMOMIN_D", i64, [IsRV64]>;
+defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D", i64, [IsRV64]>;
+defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D", i64, [IsRV64]>;
+
 
 /// Pseudo AMOs
 
@@ -169,21 +178,6 @@ class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
   let hasSideEffects = 0;
 }
 
-let Size = 20 in
-def PseudoAtomicLoadNand32 : PseudoAMO;
-// Ordering constants must be kept in sync with the AtomicOrdering enum in
-// AtomicOrdering.h.
-def : Pat<(XLenVT (atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
-def : Pat<(XLenVT (atomic_load_nand_32_acquire GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
-def : Pat<(XLenVT (atomic_load_nand_32_release GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
-def : Pat<(XLenVT (atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
-def : Pat<(XLenVT (atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
-
 class PseudoMaskedAMO
     : Pseudo<(outs GPR:$res, GPR:$scratch),
              (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
@@ -224,6 +218,23 @@ class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
             (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
              timm:$ordering)>;
 
+let Predicates = [HasStdExtA] in {
+
+let Size = 20 in
+def PseudoAtomicLoadNand32 : PseudoAMO;
+// Ordering constants must be kept in sync with the AtomicOrdering enum in
+// AtomicOrdering.h.
+def : Pat<(XLenVT (atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
+def : Pat<(XLenVT (atomic_load_nand_32_acquire GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
+def : Pat<(XLenVT (atomic_load_nand_32_release GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
+def : Pat<(XLenVT (atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
+def : Pat<(XLenVT (atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
+
 let Size = 28 in
 def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
@@ -256,6 +267,43 @@ let Size = 36 in
 def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                          PseudoMaskedAtomicLoadUMin32>;
+} // Predicates = [HasStdExtA]
+
+let Predicates = [HasStdExtA, IsRV64] in {
+
+let Size = 20 in
+def PseudoAtomicLoadNand64 : PseudoAMO;
+// Ordering constants must be kept in sync with the AtomicOrdering enum in
+// AtomicOrdering.h.
+def : Pat<(i64 (atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
+def : Pat<(i64 (atomic_load_nand_64_acquire GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
+def : Pat<(i64 (atomic_load_nand_64_release GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
+def : Pat<(i64 (atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
+def : Pat<(i64 (atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr)),
+          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;
+
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
+                         PseudoMaskedAtomicSwap32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
+                         PseudoMaskedAtomicLoadAdd32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
+                         PseudoMaskedAtomicLoadSub32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
+                         PseudoMaskedAtomicLoadNand32>;
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
+                               PseudoMaskedAtomicLoadMax32>;
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
+                               PseudoMaskedAtomicLoadMin32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
+                         PseudoMaskedAtomicLoadUMax32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
+                         PseudoMaskedAtomicLoadUMin32>;
+} // Predicates = [HasStdExtA, IsRV64]
+
 
 /// Compare and exchange
 
@@ -285,6 +333,8 @@ multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
             (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
 }
 
+let Predicates = [HasStdExtA] in {
+
 def PseudoCmpXchg32 : PseudoCmpXchg;
 defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;
 
@@ -303,57 +353,10 @@ def : Pat<(int_riscv_masked_cmpxchg_i32
             GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
           (PseudoMaskedCmpXchg32
             GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
-
 } // Predicates = [HasStdExtA]
 
-defm : AMOPat<"atomic_swap_64", "AMOSWAP_D", i64, [IsRV64]>;
-defm : AMOPat<"atomic_load_add_64", "AMOADD_D", i64, [IsRV64]>;
-defm : AMOPat<"atomic_load_and_64", "AMOAND_D", i64, [IsRV64]>;
-defm : AMOPat<"atomic_load_or_64", "AMOOR_D", i64, [IsRV64]>;
-defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D", i64, [IsRV64]>;
-defm : AMOPat<"atomic_load_max_64", "AMOMAX_D", i64, [IsRV64]>;
-defm : AMOPat<"atomic_load_min_64", "AMOMIN_D", i64, [IsRV64]>;
-defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D", i64, [IsRV64]>;
-defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D", i64, [IsRV64]>;
-
 let Predicates = [HasStdExtA, IsRV64] in {
 
-/// 64-bit pseudo AMOs
-
-let Size = 20 in
-def PseudoAtomicLoadNand64 : PseudoAMO;
-// Ordering constants must be kept in sync with the AtomicOrdering enum in
-// AtomicOrdering.h.
-def : Pat<(i64 (atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
-def : Pat<(i64 (atomic_load_nand_64_acquire GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
-def : Pat<(i64 (atomic_load_nand_64_release GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
-def : Pat<(i64 (atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
-def : Pat<(i64 (atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr)),
-          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;
-
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
-                         PseudoMaskedAtomicSwap32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
-                         PseudoMaskedAtomicLoadAdd32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
-                         PseudoMaskedAtomicLoadSub32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
-                         PseudoMaskedAtomicLoadNand32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
-                               PseudoMaskedAtomicLoadMax32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
-                               PseudoMaskedAtomicLoadMin32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
-                         PseudoMaskedAtomicLoadUMax32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
-                         PseudoMaskedAtomicLoadUMin32>;
-
-/// 64-bit compare and exchange
-
 def PseudoCmpXchg64 : PseudoCmpXchg;
 defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64, i64>;
 
@@ -408,6 +411,7 @@ defm : AMOPat2<"atomic_load_min_32", "AMOMIN_W", i32>;
 defm : AMOPat2<"atomic_load_umax_32", "AMOMAXU_W", i32>;
 defm : AMOPat2<"atomic_load_umin_32", "AMOMINU_W", i32>;
 
+let Predicates = [HasStdExtA, IsRV64] in
 defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32, i32>;
 
 let Predicates = [HasAtomicLdSt] in {
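For reference, the immediate operands 2, 4, 5, 6, and 7 used by the pseudo AMO and cmpxchg patterns in this diff are the ordering constants that the in-file comments say must stay in sync with the AtomicOrdering enum. The sketch below restates that mapping; it assumes the enum values from llvm/ADT/AtomicOrdering.h and is only a reminder, not part of the patch.

// Sketch of the ordering values the TableGen patterns above encode as
// immediates; assumed to mirror llvm::AtomicOrdering (llvm/ADT/AtomicOrdering.h).
enum class AtomicOrdering : unsigned {
  NotAtomic = 0,
  Unordered = 1,
  Monotonic = 2,              // immediate 2 in the *_monotonic patterns
  // 3 is unused
  Acquire = 4,                // immediate 4 in the *_acquire patterns
  Release = 5,                // immediate 5 in the *_release patterns
  AcquireRelease = 6,         // immediate 6 in the *_acq_rel patterns
  SequentiallyConsistent = 7  // immediate 7 in the *_seq_cst patterns
};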