
Commit c0744ff

intrinsics.fmuladdf{32,64}: expose llvm.fmuladd.* semantics
Add intrinsics `fmuladd{f32,f64}`. These compute `(a * b) + c`, fused if the code generator determines that (i) the target instruction set has support for a fused operation, and (ii) the fused operation is more efficient than the equivalent, separate pair of `mul` and `add` instructions. See https://llvm.org/docs/LangRef.html#llvm-fmuladd-intrinsic.

codegen_cranelift uses the `fma` function from libc, which is a correct implementation but lacks the desired performance semantics; I think this requires an update to Cranelift to expose a suitable instruction in its IR. I have not tested with codegen_gcc, but it should behave the same way (it also uses `fma` from libc).
1 parent ae9f501 commit c0744ff
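
A minimal usage sketch (not part of this commit), assuming a nightly toolchain with `#![feature(core_intrinsics)]`; the `dot` helper is hypothetical and only illustrates the semantics described above: each call may be lowered either to a single fused multiply-add or to a separate multiply and add with intermediate rounding, whichever the backend considers faster for the target.

#![feature(core_intrinsics)]
use std::intrinsics::fmuladdf64;

// Dot product in which every step is a candidate for fusion; the result may
// differ in the last bit from the plain `x * y + acc` form, depending on
// whether the backend emits an fma instruction.
fn dot(xs: &[f64], ys: &[f64]) -> f64 {
    let mut acc = 0.0;
    for (&x, &y) in xs.iter().zip(ys) {
        acc = unsafe { fmuladdf64(x, y, acc) };
    }
    acc
}

fn main() {
    // Small integers round identically whether fused or not, so this is exact.
    assert_eq!(dot(&[1.0, 2.0, 3.0], &[4.0, 5.0, 6.0]), 32.0);
}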

File tree: 11 files changed (+196 −1 lines)

Diff for: compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs

+4 −1

@@ -328,6 +328,9 @@ fn codegen_float_intrinsic_call<'tcx>(
         sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64, types::F64),
         sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32, types::F32),
         sym::fmaf64 => ("fma", 3, fx.tcx.types.f64, types::F64),
+        // FIXME: calling `fma` from libc without the FMA target feature uses expensive software emulation
+        sym::fmuladdf32 => ("fmaf", 3, fx.tcx.types.f32, types::F32), // TODO: use cranelift intrinsic analogous to llvm.fmuladd.f32
+        sym::fmuladdf64 => ("fma", 3, fx.tcx.types.f64, types::F64), // TODO: use cranelift intrinsic analogous to llvm.fmuladd.f64
         sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32, types::F32),
         sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64, types::F64),
         sym::floorf32 => ("floorf", 1, fx.tcx.types.f32, types::F32),

@@ -381,7 +384,7 @@ fn codegen_float_intrinsic_call<'tcx>(

     let layout = fx.layout_of(ty);
     let res = match intrinsic {
-        sym::fmaf32 | sym::fmaf64 => {
+        sym::fmaf32 | sym::fmaf64 | sym::fmuladdf32 | sym::fmuladdf64 => {
             CValue::by_val(fx.bcx.ins().fma(args[0], args[1], args[2]), layout)
         }
         sym::copysignf32 | sym::copysignf64 => {

Diff for: compiler/rustc_codegen_gcc/src/intrinsic/mod.rs

+3

@@ -66,6 +66,9 @@ fn get_simple_intrinsic<'gcc, 'tcx>(
         sym::log2f64 => "log2",
         sym::fmaf32 => "fmaf",
         sym::fmaf64 => "fma",
+        // FIXME: calling `fma` from libc without the FMA target feature uses expensive software emulation
+        sym::fmuladdf32 => "fmaf", // TODO: use gcc intrinsic analogous to llvm.fmuladd.f32
+        sym::fmuladdf64 => "fma", // TODO: use gcc intrinsic analogous to llvm.fmuladd.f64
         sym::fabsf32 => "fabsf",
         sym::fabsf64 => "fabs",
         sym::minnumf32 => "fminf",

Diff for: compiler/rustc_codegen_llvm/src/context.rs

+5

@@ -863,6 +863,11 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
         ifn!("llvm.fma.f128", fn(t_f128, t_f128, t_f128) -> t_f128);

+        ifn!("llvm.fmuladd.f16", fn(t_f16, t_f16, t_f16) -> t_f16);
+        ifn!("llvm.fmuladd.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
+        ifn!("llvm.fmuladd.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
+        ifn!("llvm.fmuladd.f128", fn(t_f128, t_f128, t_f128) -> t_f128);
+
         ifn!("llvm.fabs.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);

Diff for: compiler/rustc_codegen_llvm/src/intrinsic.rs

+5

@@ -86,6 +86,11 @@ fn get_simple_intrinsic<'ll>(
         sym::fmaf64 => "llvm.fma.f64",
         sym::fmaf128 => "llvm.fma.f128",

+        sym::fmuladdf16 => "llvm.fmuladd.f16",
+        sym::fmuladdf32 => "llvm.fmuladd.f32",
+        sym::fmuladdf64 => "llvm.fmuladd.f64",
+        sym::fmuladdf128 => "llvm.fmuladd.f128",
+
         sym::fabsf16 => "llvm.fabs.f16",
         sym::fabsf32 => "llvm.fabs.f32",
         sym::fabsf64 => "llvm.fabs.f64",

Diff for: compiler/rustc_hir_analysis/src/check/intrinsic.rs

+13

@@ -359,6 +359,19 @@ pub fn check_intrinsic_type(
             (0, 0, vec![tcx.types.f128, tcx.types.f128, tcx.types.f128], tcx.types.f128)
         }

+        sym::fmuladdf16 => {
+            (0, 0, vec![tcx.types.f16, tcx.types.f16, tcx.types.f16], tcx.types.f16)
+        }
+        sym::fmuladdf32 => {
+            (0, 0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32)
+        }
+        sym::fmuladdf64 => {
+            (0, 0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64)
+        }
+        sym::fmuladdf128 => {
+            (0, 0, vec![tcx.types.f128, tcx.types.f128, tcx.types.f128], tcx.types.f128)
+        }
+
         sym::fabsf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
         sym::fabsf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
         sym::fabsf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),

Diff for: compiler/rustc_span/src/symbol.rs

+4

@@ -896,6 +896,10 @@ symbols! {
         fmt,
         fmul_algebraic,
         fmul_fast,
+        fmuladdf128,
+        fmuladdf16,
+        fmuladdf32,
+        fmuladdf64,
         fn_align,
         fn_delegation,
         fn_must_use,

Diff for: library/core/src/intrinsics.rs

+27

@@ -1803,6 +1803,33 @@ extern "rust-intrinsic" {
     #[rustc_nounwind]
     pub fn fmaf128(a: f128, b: f128, c: f128) -> f128;

+    /// Returns `a * b + c` for `f32` values, non-deterministically executing
+    /// either a fused multiply-add or two operations with rounding of the
+    /// intermediate result.
+    ///
+    /// The operation is fused if the code generator determines that the target
+    /// instruction set has support for a fused operation, and that the fused
+    /// operation is more efficient than the equivalent, separate pair of mul
+    /// and add instructions. It is unspecified whether or not a fused operation
+    /// is selected, and that may depend on optimization level and context, for
+    /// example.
+    #[rustc_nounwind]
+    #[cfg(not(bootstrap))]
+    pub fn fmuladdf32(a: f32, b: f32, c: f32) -> f32;
+    /// Returns `a * b + c` for `f64` values, non-deterministically executing
+    /// either a fused multiply-add or two operations with rounding of the
+    /// intermediate result.
+    ///
+    /// The operation is fused if the code generator determines that the target
+    /// instruction set has support for a fused operation, and that the fused
+    /// operation is more efficient than the equivalent, separate pair of mul
+    /// and add instructions. It is unspecified whether or not a fused operation
+    /// is selected, and that may depend on optimization level and context, for
+    /// example.
+    #[rustc_nounwind]
+    #[cfg(not(bootstrap))]
+    pub fn fmuladdf64(a: f64, b: f64, c: f64) -> f64;
+
     /// Returns the absolute value of an `f16`.
     ///
     /// The stabilized version of this intrinsic is
Diff for: src/tools/miri/src/intrinsics/mod.rs

+31

@@ -308,6 +308,37 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 this.write_scalar(res, dest)?;
             }

+            "fmuladdf32" => {
+                let [a, b, c] = check_arg_count(args)?;
+                let a = this.read_scalar(a)?.to_f32()?;
+                let b = this.read_scalar(b)?.to_f32()?;
+                let c = this.read_scalar(c)?.to_f32()?;
+                let fuse: bool = this.machine.rng.get_mut().gen();
+                let res = if fuse {
+                    // FIXME: Using host floats, to work around https://github.com/rust-lang/rustc_apfloat/issues/11
+                    a.to_host().mul_add(b.to_host(), c.to_host()).to_soft()
+                } else {
+                    ((a * b).value + c).value
+                };
+                let res = this.adjust_nan(res, &[a, b, c]);
+                this.write_scalar(res, dest)?;
+            }
+            "fmuladdf64" => {
+                let [a, b, c] = check_arg_count(args)?;
+                let a = this.read_scalar(a)?.to_f64()?;
+                let b = this.read_scalar(b)?.to_f64()?;
+                let c = this.read_scalar(c)?.to_f64()?;
+                let fuse: bool = this.machine.rng.get_mut().gen();
+                let res = if fuse {
+                    // FIXME: Using host floats, to work around https://github.com/rust-lang/rustc_apfloat/issues/11
+                    a.to_host().mul_add(b.to_host(), c.to_host()).to_soft()
+                } else {
+                    ((a * b).value + c).value
+                };
+                let res = this.adjust_nan(res, &[a, b, c]);
+                this.write_scalar(res, dest)?;
+            }
+
             "powf32" => {
                 let [f1, f2] = check_arg_count(args)?;
                 let f1 = this.read_scalar(f1)?.to_f32()?;

Diff for: src/tools/miri/tests/pass/float.rs

+18

@@ -30,6 +30,7 @@ fn main() {
     libm();
     test_fast();
     test_algebraic();
+    test_fmuladd();
 }

 trait Float: Copy + PartialEq + Debug {

@@ -1041,3 +1042,20 @@ fn test_algebraic() {
     test_operations_f32(11., 2.);
     test_operations_f32(10., 15.);
 }
+
+fn test_fmuladd() {
+    use std::intrinsics::{fmuladdf32, fmuladdf64};
+
+    #[inline(never)]
+    pub fn test_operations_f32(a: f32, b: f32, c: f32) {
+        assert_approx_eq!(unsafe { fmuladdf32(a, b, c) }, a * b + c);
+    }
+
+    #[inline(never)]
+    pub fn test_operations_f64(a: f64, b: f64, c: f64) {
+        assert_approx_eq!(unsafe { fmuladdf64(a, b, c) }, a * b + c);
+    }
+
+    test_operations_f32(0.1, 0.2, 0.3);
+    test_operations_f64(1.1, 1.2, 1.3);
+}
@@ -0,0 +1,44 @@
+#![feature(core_intrinsics)]
+use std::intrinsics::{fmuladdf32, fmuladdf64};
+
+fn main() {
+    let mut saw_zero = false;
+    let mut saw_nonzero = false;
+    for _ in 0..50 {
+        let a = std::hint::black_box(0.1_f64);
+        let b = std::hint::black_box(0.2);
+        let c = std::hint::black_box(-a * b);
+        // It is unspecified whether the following operation is fused or not. The
+        // following evaluates to 0.0 if unfused, and nonzero (-1.66e-18) if fused.
+        let x = unsafe { fmuladdf64(a, b, c) };
+        if x == 0.0 {
+            saw_zero = true;
+        } else {
+            saw_nonzero = true;
+        }
+    }
+    assert!(
+        saw_zero && saw_nonzero,
+        "`fmuladdf64` failed to be evaluated as both fused and unfused"
+    );
+
+    let mut saw_zero = false;
+    let mut saw_nonzero = false;
+    for _ in 0..50 {
+        let a = std::hint::black_box(0.1_f32);
+        let b = std::hint::black_box(0.2);
+        let c = std::hint::black_box(-a * b);
+        // It is unspecified whether the following operation is fused or not. The
+        // following evaluates to 0.0 if unfused, and nonzero (-8.1956386e-10) if fused.
+        let x = unsafe { fmuladdf32(a, b, c) };
+        if x == 0.0 {
+            saw_zero = true;
+        } else {
+            saw_nonzero = true;
+        }
+    }
+    assert!(
+        saw_zero && saw_nonzero,
+        "`fmuladdf32` failed to be evaluated as both fused and unfused"
+    );
+}

Diff for: tests/ui/intrinsics/intrinsic-fmuladd.rs

+42

@@ -0,0 +1,42 @@
+//@ run-pass
+#![feature(core_intrinsics)]
+
+use std::intrinsics::*;
+
+macro_rules! assert_approx_eq {
+    ($a:expr, $b:expr) => {{
+        let (a, b) = (&$a, &$b);
+        assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b);
+    }};
+}
+
+fn main() {
+    unsafe {
+        let nan: f32 = f32::NAN;
+        let inf: f32 = f32::INFINITY;
+        let neg_inf: f32 = f32::NEG_INFINITY;
+        assert_approx_eq!(fmuladdf32(12.3, 4.5, 6.7), 62.05);
+        assert_approx_eq!(fmuladdf32(-12.3, -4.5, -6.7), 48.65);
+        assert_approx_eq!(fmuladdf32(0.0, 8.9, 1.2), 1.2);
+        assert_approx_eq!(fmuladdf32(3.4, -0.0, 5.6), 5.6);
+        assert!(fmuladdf32(nan, 7.8, 9.0).is_nan());
+        assert_eq!(fmuladdf32(inf, 7.8, 9.0), inf);
+        assert_eq!(fmuladdf32(neg_inf, 7.8, 9.0), neg_inf);
+        assert_eq!(fmuladdf32(8.9, inf, 3.2), inf);
+        assert_eq!(fmuladdf32(-3.2, 2.4, neg_inf), neg_inf);
+    }
+    unsafe {
+        let nan: f64 = f64::NAN;
+        let inf: f64 = f64::INFINITY;
+        let neg_inf: f64 = f64::NEG_INFINITY;
+        assert_approx_eq!(fmuladdf64(12.3, 4.5, 6.7), 62.05);
+        assert_approx_eq!(fmuladdf64(-12.3, -4.5, -6.7), 48.65);
+        assert_approx_eq!(fmuladdf64(0.0, 8.9, 1.2), 1.2);
+        assert_approx_eq!(fmuladdf64(3.4, -0.0, 5.6), 5.6);
+        assert!(fmuladdf64(nan, 7.8, 9.0).is_nan());
+        assert_eq!(fmuladdf64(inf, 7.8, 9.0), inf);
+        assert_eq!(fmuladdf64(neg_inf, 7.8, 9.0), neg_inf);
+        assert_eq!(fmuladdf64(8.9, inf, 3.2), inf);
+        assert_eq!(fmuladdf64(-3.2, 2.4, neg_inf), neg_inf);
+    }
+}
