diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index 20a3482aaa27f..2052a6aa21597 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -64,11 +64,6 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         if type_is_pointer(typ) { self.context.new_null(typ) } else { self.const_int(typ, 0) }
     }
 
-    fn is_undef(&self, _val: RValue<'gcc>) -> bool {
-        // FIXME: actually check for undef
-        false
-    }
-
     fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
         let local = self.current_func.borrow().expect("func").new_local(None, typ, "undefined");
         if typ.is_struct().is_some() {
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 78b3a7f85417b..35ac6158e9772 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -126,10 +126,6 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         unsafe { llvm::LLVMGetUndef(t) }
     }
 
-    fn is_undef(&self, v: &'ll Value) -> bool {
-        unsafe { llvm::LLVMIsUndef(v) == True }
-    }
-
     fn const_poison(&self, t: &'ll Type) -> &'ll Value {
         unsafe { llvm::LLVMGetPoison(t) }
     }
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 3b0187b9d37b1..7f6f82eae48f2 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1035,7 +1035,6 @@ unsafe extern "C" {
     pub(crate) fn LLVMMetadataTypeInContext(C: &Context) -> &Type;
 
     // Operations on all values
-    pub(crate) fn LLVMIsUndef(Val: &Value) -> Bool;
     pub(crate) fn LLVMTypeOf(Val: &Value) -> &Type;
     pub(crate) fn LLVMGetValueName2(Val: &Value, Length: *mut size_t) -> *const c_char;
     pub(crate) fn LLVMSetValueName2(Val: &Value, Name: *const c_char, NameLen: size_t);
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 9ca7d4f8f0047..19101ec2d1ba3 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -204,30 +204,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let alloc_align = alloc.inner().align;
         assert!(alloc_align >= layout.align.abi);
 
-        // Returns `None` when the value is partially undefined or any byte of it has provenance.
-        // Otherwise returns the value or (if the entire value is undef) returns an undef.
         let read_scalar = |start, size, s: abi::Scalar, ty| {
-            let range = alloc_range(start, size);
             match alloc.0.read_scalar(
                 bx,
-                range,
+                alloc_range(start, size),
                 /*read_provenance*/ matches!(s.primitive(), abi::Primitive::Pointer(_)),
             ) {
-                Ok(val) => Some(bx.scalar_to_backend(val, s, ty)),
-                Err(_) => {
-                    // We may have failed due to partial provenance or unexpected provenance,
-                    // continue down the normal code path if so.
-                    if alloc.0.provenance().range_empty(range, &bx.tcx())
-                        // Since `read_scalar` failed, but there were no relocations involved, the
-                        // bytes must be partially or fully uninitialized. Thus we can now unwrap the
-                        // information about the range of uninit bytes and check if it's the full range.
-                        && alloc.0.init_mask().is_range_initialized(range).unwrap_err() == range
-                    {
-                        Some(bx.const_undef(ty))
-                    } else {
-                        None
-                    }
-                }
+                Ok(val) => bx.scalar_to_backend(val, s, ty),
+                Err(_) => bx.const_poison(ty),
             }
         };
 
@@ -238,14 +222,16 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         // check that walks over the type of `mplace` to make sure it is truly correct to treat this
         // like a `Scalar` (or `ScalarPair`).
         match layout.backend_repr {
-            BackendRepr::Scalar(s) => {
+            BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
-                if let Some(val) = read_scalar(offset, size, s, bx.immediate_backend_type(layout)) {
-                    return OperandRef { val: OperandValue::Immediate(val), layout };
-                }
+                let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
+                OperandRef { val: OperandValue::Immediate(val), layout }
             }
-            BackendRepr::ScalarPair(a, b) => {
+            BackendRepr::ScalarPair(
+                a @ abi::Scalar::Initialized { .. },
+                b @ abi::Scalar::Initialized { .. },
+            ) => {
                 let (a_size, b_size) = (a.size(bx), b.size(bx));
                 let b_offset = (offset + a_size).align_to(b.align(bx).abi);
                 assert!(b_offset.bytes() > 0);
@@ -261,21 +247,20 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                     b,
                     bx.scalar_pair_element_backend_type(layout, 1, true),
                 );
-                if let (Some(a_val), Some(b_val)) = (a_val, b_val) {
-                    return OperandRef { val: OperandValue::Pair(a_val, b_val), layout };
-                }
+                OperandRef { val: OperandValue::Pair(a_val, b_val), layout }
+            }
+            _ if layout.is_zst() => OperandRef::zero_sized(layout),
+            _ => {
+                // Neither a scalar nor scalar pair. Load from a place
+                // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
+                // same `ConstAllocation`?
+                let init = bx.const_data_from_alloc(alloc);
+                let base_addr = bx.static_addr_of(init, alloc_align, None);
+
+                let llval = bx.const_ptr_byte_offset(base_addr, offset);
+                bx.load_operand(PlaceRef::new_sized(llval, layout))
             }
-            _ if layout.is_zst() => return OperandRef::zero_sized(layout),
-            _ => {}
         }
-        // Neither a scalar nor scalar pair. Load from a place
-        // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
-        // same `ConstAllocation`?
-        let init = bx.const_data_from_alloc(alloc);
-        let base_addr = bx.static_addr_of(init, alloc_align, None);
-
-        let llval = bx.const_ptr_byte_offset(base_addr, offset);
-        bx.load_operand(PlaceRef::new_sized(llval, layout))
     }
 
     /// Asserts that this operand refers to a scalar and returns
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 4c5b183cfe92a..63f421243acfe 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -8,7 +8,7 @@ use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
 use rustc_middle::{bug, mir, span_bug};
 use rustc_session::config::OptLevel;
 use rustc_span::{DUMMY_SP, Span};
-use tracing::{debug, instrument, trace};
+use tracing::{debug, instrument};
 
 use super::operand::{OperandRef, OperandValue};
 use super::place::PlaceRef;
@@ -93,8 +93,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 return;
             }
 
-            // If `v` is an integer constant whose value is just a single byte repeated N times,
-            // emit a `memset` filling the entire `dest` with that byte.
             let try_init_all_same = |bx: &mut Bx, v| {
                 let start = dest.val.llval;
                 let size = bx.const_usize(dest.layout.size.bytes());
@@ -119,33 +117,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 false
             };
 
-            trace!(?cg_elem.val);
             match cg_elem.val {
                 OperandValue::Immediate(v) => {
                     if try_init_all_same(bx, v) {
                         return;
                     }
                 }
-                OperandValue::Pair(a, b) => {
-                    let a_is_undef = bx.cx().is_undef(a);
-                    match (a_is_undef, bx.cx().is_undef(b)) {
-                        // Can happen for uninit unions
-                        (true, true) => {
-                            // FIXME: can we produce better output here?
-                        }
-                        (false, true) | (true, false) => {
-                            let val = if a_is_undef { b } else { a };
-                            if try_init_all_same(bx, val) {
-                                return;
-                            }
-                        }
-                        (false, false) => {
-                            // FIXME: if both are the same value, use try_init_all_same
-                        }
-                    }
-                }
-                OperandValue::ZeroSized => unreachable!("checked above"),
-                OperandValue::Ref(..) => {}
+                _ => (),
             }
 
             let count = self
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
index 5cfb56ebacee8..dc6b68ceff7d8 100644
--- a/compiler/rustc_codegen_ssa/src/traits/consts.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -9,7 +9,6 @@ pub trait ConstCodegenMethods<'tcx>: BackendTypes {
     /// Generate an uninitialized value (matching uninitialized memory in MIR).
     /// Whether memory is initialized or not is tracked byte-for-byte.
     fn const_undef(&self, t: Self::Type) -> Self::Value;
-    fn is_undef(&self, v: Self::Value) -> bool;
     /// Generate a fake value. Poison always affects the entire value, even if just a single byte is
     /// poison. This can only be used in codepaths that are already UB, i.e., UB-free Rust code
    /// (including code that e.g. copies uninit memory with `MaybeUninit`) can never encounter a
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index 95bc9b71fe0ad..697a0e6592df0 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -222,7 +222,7 @@ impl AllocError {
 }
 
 /// The information that makes up a memory access: offset and size.
-#[derive(Copy, Clone, PartialEq)]
+#[derive(Copy, Clone)]
 pub struct AllocRange {
     pub start: Size,
     pub size: Size,
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
index e9c19331e4a0b..98ca71b86be07 100644
--- a/compiler/rustc_middle/src/ty/instance.rs
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -111,8 +111,9 @@ pub enum InstanceKind<'tcx> {
 
     /// Dynamic dispatch to `<dyn Trait as Trait>::fn`.
     ///
-    /// This `InstanceKind` does not have callable MIR. Calls to `Virtual` instances must be
-    /// codegen'd as virtual calls through the vtable.
+    /// This `InstanceKind` may have a callable MIR as the default implementation.
+    /// Calls to `Virtual` instances must be codegen'd as virtual calls through the vtable.
+    /// *This means we might not know exactly what is being called.*
     ///
     /// If this is reified to a `fn` pointer, a `ReifyShim` is used (see `ReifyShim` above for more
     /// details on that).
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index c5d2d0afbfad2..606f0b5396ee2 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -309,15 +309,11 @@ fn fn_abi_of_fn_ptr<'tcx>(
     query: ty::PseudoCanonicalInput<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
     let ty::PseudoCanonicalInput { typing_env, value: (sig, extra_args) } = query;
-
-    let cx = LayoutCx::new(tcx, typing_env);
     fn_abi_new_uncached(
-        &cx,
+        &LayoutCx::new(tcx, typing_env),
         tcx.instantiate_bound_regions_with_erased(sig),
         extra_args,
         None,
-        None,
-        false,
     )
 }
 
@@ -326,19 +322,11 @@ fn fn_abi_of_instance<'tcx>(
     query: ty::PseudoCanonicalInput<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
     let ty::PseudoCanonicalInput { typing_env, value: (instance, extra_args) } = query;
-
-    let sig = fn_sig_for_fn_abi(tcx, instance, typing_env);
-
-    let caller_location =
-        instance.def.requires_caller_location(tcx).then(|| tcx.caller_location_ty());
-
     fn_abi_new_uncached(
         &LayoutCx::new(tcx, typing_env),
-        sig,
+        fn_sig_for_fn_abi(tcx, instance, typing_env),
         extra_args,
-        caller_location,
-        Some(instance.def_id()),
-        matches!(instance.def, ty::InstanceKind::Virtual(..)),
+        Some(instance),
     )
 }
 
@@ -549,19 +537,25 @@ fn fn_abi_sanity_check<'tcx>(
     fn_arg_sanity_check(cx, fn_abi, spec_abi, &fn_abi.ret);
 }
 
-// FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
-// arguments of this method, into a separate `struct`.
-#[tracing::instrument(level = "debug", skip(cx, caller_location, fn_def_id, force_thin_self_ptr))]
+#[tracing::instrument(level = "debug", skip(cx, instance))]
 fn fn_abi_new_uncached<'tcx>(
     cx: &LayoutCx<'tcx>,
     sig: ty::FnSig<'tcx>,
     extra_args: &[Ty<'tcx>],
-    caller_location: Option<Ty<'tcx>>,
-    fn_def_id: Option<DefId>,
-    // FIXME(eddyb) replace this with something typed, like an `enum`.
-    force_thin_self_ptr: bool,
+    instance: Option<ty::Instance<'tcx>>,
 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
     let tcx = cx.tcx();
+    let (caller_location, determined_fn_def_id, is_virtual_call) = if let Some(instance) = instance
+    {
+        let is_virtual_call = matches!(instance.def, ty::InstanceKind::Virtual(..));
+        (
+            instance.def.requires_caller_location(tcx).then(|| tcx.caller_location_ty()),
+            if is_virtual_call { None } else { Some(instance.def_id()) },
+            is_virtual_call,
+        )
+    } else {
+        (None, None, false)
+    };
     let sig = tcx.normalize_erasing_regions(cx.typing_env, sig);
 
     let conv = conv_from_spec_abi(cx.tcx(), sig.abi, sig.c_variadic);
@@ -570,16 +564,11 @@ fn fn_abi_new_uncached<'tcx>(
     let extra_args = if sig.abi == ExternAbi::RustCall {
         assert!(!sig.c_variadic && extra_args.is_empty());
 
-        if let Some(input) = sig.inputs().last() {
-            if let ty::Tuple(tupled_arguments) = input.kind() {
-                inputs = &sig.inputs()[0..sig.inputs().len() - 1];
-                tupled_arguments
-            } else {
-                bug!(
-                    "argument to function with \"rust-call\" ABI \
-                     is not a tuple"
-                );
-            }
+        if let Some(input) = sig.inputs().last()
+            && let ty::Tuple(tupled_arguments) = input.kind()
+        {
+            inputs = &sig.inputs()[0..sig.inputs().len() - 1];
+            tupled_arguments
         } else {
             bug!(
                 "argument to function with \"rust-call\" ABI \
                  is not a tuple"
             );
@@ -592,7 +581,7 @@ fn fn_abi_new_uncached<'tcx>(
     };
 
     let is_drop_in_place =
-        fn_def_id.is_some_and(|def_id| tcx.is_lang_item(def_id, LangItem::DropInPlace));
+        determined_fn_def_id.is_some_and(|def_id| tcx.is_lang_item(def_id, LangItem::DropInPlace));
 
     let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, &'tcx FnAbiError<'tcx>> {
         let span = tracing::debug_span!("arg_of");
@@ -605,7 +594,7 @@ fn fn_abi_new_uncached<'tcx>(
         });
         let layout =
             cx.layout_of(ty).map_err(|err| &*tcx.arena.alloc(FnAbiError::Layout(*err)))?;
-        let layout = if force_thin_self_ptr && arg_idx == Some(0) {
+        let layout = if is_virtual_call && arg_idx == Some(0) {
             // Don't pass the vtable, it's not an argument of the virtual fn.
             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
             // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
@@ -648,9 +637,22 @@ fn fn_abi_new_uncached<'tcx>(
         c_variadic: sig.c_variadic,
         fixed_count: inputs.len() as u32,
         conv,
-        can_unwind: fn_can_unwind(cx.tcx(), fn_def_id, sig.abi),
+        can_unwind: fn_can_unwind(
+            tcx,
+            // Since `#[rustc_nounwind]` can change unwinding, we cannot infer unwinding from `fn_def_id` for a virtual call.
+            determined_fn_def_id,
+            sig.abi,
+        ),
     };
-    fn_abi_adjust_for_abi(cx, &mut fn_abi, sig.abi, fn_def_id);
+    fn_abi_adjust_for_abi(
+        cx,
+        &mut fn_abi,
+        sig.abi,
+        // If this is a virtual call, we cannot pass the `fn_def_id`, as it might call other
+        // functions from the vtable. Internally, `deduced_param_attrs` attempts to infer attributes by
+        // visiting the function body.
+        determined_fn_def_id,
+    );
     debug!("fn_abi_new_uncached = {:?}", fn_abi);
     fn_abi_sanity_check(cx, &fn_abi, sig.abi);
     Ok(tcx.arena.alloc(fn_abi))
diff --git a/src/ci/docker/scripts/rfl-build.sh b/src/ci/docker/scripts/rfl-build.sh
index 3eb85ab215e0b..22468d84678f9 100755
--- a/src/ci/docker/scripts/rfl-build.sh
+++ b/src/ci/docker/scripts/rfl-build.sh
@@ -8,16 +8,10 @@ LINUX_VERSION=50e57739141b41f731ab31f8380821c7969f9dc4
 ../x.py build --stage 2 library rustdoc clippy rustfmt
 ../x.py build --stage 0 cargo
 
-# Install rustup so that we can use the built toolchain easily, and also
-# install bindgen in an easy way.
-curl --proto '=https' --tlsv1.2 -sSf -o rustup.sh https://sh.rustup.rs
-sh rustup.sh -y --default-toolchain none
+BUILD_DIR=$(realpath ./build/x86_64-unknown-linux-gnu)
 
-source /cargo/env
-
-BUILD_DIR=$(realpath ./build)
-rustup toolchain link local "${BUILD_DIR}"/x86_64-unknown-linux-gnu/stage2
-rustup default local
+# Provide path to rustc, rustdoc, clippy-driver and rustfmt to RfL
+export PATH=${PATH}:${BUILD_DIR}/stage2/bin
 
 mkdir -p rfl
 cd rfl
@@ -33,10 +27,14 @@ git -C linux fetch --depth 1 origin ${LINUX_VERSION}
 git -C linux checkout FETCH_HEAD
 
 # Install bindgen
-"${BUILD_DIR}"/x86_64-unknown-linux-gnu/stage0/bin/cargo install \
+"${BUILD_DIR}"/stage0/bin/cargo install \
     --version $(linux/scripts/min-tool-version.sh bindgen) \
+    --root ${BUILD_DIR}/bindgen \
     bindgen-cli
 
+# Provide path to bindgen to RfL
+export PATH=${PATH}:${BUILD_DIR}/bindgen/bin
+
 # Configure Rust for Linux
 cat <<EOF > linux/kernel/configs/rfl-for-rust-ci.config
 # CONFIG_WERROR is not set
diff --git a/tests/codegen/slice-init.rs b/tests/codegen/slice-init.rs
index b36a5b5de3d41..950e0b0c10db7 100644
--- a/tests/codegen/slice-init.rs
+++ b/tests/codegen/slice-init.rs
@@ -2,8 +2,6 @@
 
 #![crate_type = "lib"]
 
-use std::mem::MaybeUninit;
-
 // CHECK-LABEL: @zero_sized_elem
 #[no_mangle]
 pub fn zero_sized_elem() {
@@ -78,64 +76,31 @@ pub fn u16_init_one_bytes<const N: usize>() -> [u16; N] {
     [const { u16::from_be_bytes([1, 1]) }; N]
 }
 
+// FIXME: undef bytes can just be initialized with the same value as the
+// defined bytes, if the defined bytes are all the same.
 // CHECK-LABEL: @option_none_init
 #[no_mangle]
 pub fn option_none_init<const N: usize>() -> [Option<u16>; N] {
-    // CHECK-NOT: select
-    // CHECK-NOT: br
-    // CHECK-NOT: switch
-    // CHECK-NOT: icmp
-    // CHECK: call void @llvm.memset.p0
-    [const { None }; N]
-}
-
-// If there is partial provenance or some bytes are initialized and some are not,
-// we can't really do better than initialize bytes or groups of bytes together.
-// CHECK-LABEL: @option_maybe_uninit_init
-#[no_mangle]
-pub fn option_maybe_uninit_init<const N: usize>() -> [MaybeUninit<u16>; N] {
     // CHECK-NOT: select
     // CHECK: br label %repeat_loop_header{{.*}}
     // CHECK-NOT: switch
     // CHECK: icmp
     // CHECK-NOT: call void @llvm.memset.p0
-    [const {
-        let mut val: MaybeUninit<u16> = MaybeUninit::uninit();
-        let ptr = val.as_mut_ptr() as *mut u8;
-        unsafe {
-            ptr.write(0);
-        }
-        val
-    }; N]
+    [None; N]
 }
 
-#[repr(packed)]
-struct Packed {
-    start: u8,
-    ptr: &'static (),
-    rest: u16,
-    rest2: u8,
-}
+use std::mem::MaybeUninit;
 
-// If there is partial provenance or some bytes are initialized and some are not,
-// we can't really do better than initialize bytes or groups of bytes together.
-// CHECK-LABEL: @option_maybe_uninit_provenance
+// FIXME: This could be optimized into a memset.
+// Regression test for .
 #[no_mangle]
-pub fn option_maybe_uninit_provenance<const N: usize>() -> [MaybeUninit<Packed>; N] {
+pub fn half_uninit<const N: usize>() -> [(u128, MaybeUninit<u128>); N] {
     // CHECK-NOT: select
     // CHECK: br label %repeat_loop_header{{.*}}
     // CHECK-NOT: switch
     // CHECK: icmp
     // CHECK-NOT: call void @llvm.memset.p0
-    [const {
-        let mut val: MaybeUninit<Packed> = MaybeUninit::uninit();
-        unsafe {
-            let ptr = &raw mut (*val.as_mut_ptr()).ptr;
-            static HAS_ADDR: () = ();
-            ptr.write_unaligned(&HAS_ADDR);
-        }
-        val
-    }; N]
+    [const { (0, MaybeUninit::uninit()) }; N]
 }
 
 // Use an opaque function to prevent rustc from removing useless drops.
diff --git a/tests/codegen/virtual-call-attrs-issue-137646.rs b/tests/codegen/virtual-call-attrs-issue-137646.rs
new file mode 100644
index 0000000000000..5e453947f27db
--- /dev/null
+++ b/tests/codegen/virtual-call-attrs-issue-137646.rs
@@ -0,0 +1,37 @@
+//! Regression test for https://github.com/rust-lang/rust/issues/137646.
+//! Since we don't know the exact implementation behind a virtual call, it might write
+//! to its parameters, so we can't infer the `readonly` attribute.
+//@ compile-flags: -C opt-level=3 -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(rustc_attrs)]
+
+pub trait Trait {
+    #[rustc_nounwind]
+    fn m(&self, _: (i32, i32, i32)) {}
+}
+
+#[no_mangle]
+pub fn foo(trait_: &dyn Trait) {
+    // CHECK-LABEL: @foo(
+    // CHECK: call void
+    // CHECK-NOT: readonly
+    trait_.m((1, 1, 1));
+}
+
+#[no_mangle]
+#[rustc_nounwind]
+pub fn foo_nounwind(trait_: &dyn Trait) {
+    // CHECK-LABEL: @foo_nounwind(
+    // FIXME: This should be an invoke.
+    // COM: CHECK: invoke
+    trait_.m((1, 1, 1));
+}
+
+#[no_mangle]
+pub extern "C" fn c_nounwind(trait_: &dyn Trait) {
+    // CHECK-LABEL: @c_nounwind(
+    // FIXME: This should be an invoke.
+    // COM: CHECK: invoke
+    trait_.m((1, 1, 1));
+}
diff --git a/tests/ui/virtual-call-attrs-issue-137646.rs b/tests/ui/virtual-call-attrs-issue-137646.rs
new file mode 100644
index 0000000000000..e80bd5768a429
--- /dev/null
+++ b/tests/ui/virtual-call-attrs-issue-137646.rs
@@ -0,0 +1,45 @@
+//! Regression test for https://github.com/rust-lang/rust/issues/137646.
+//! The parameter value at all calls to `check` should be `(1, 1, 1)`.
+
+//@ run-pass
+
+use std::hint::black_box;
+
+type T = (i32, i32, i32);
+
+pub trait Trait {
+    fn m(&self, _: T, _: T) {}
+}
+
+impl Trait for () {
+    fn m(&self, mut _v1: T, v2: T) {
+        _v1 = (0, 0, 0);
+        check(v2);
+    }
+}
+
+pub fn run_1(trait_: &dyn Trait) {
+    let v1 = (1, 1, 1);
+    let v2 = (1, 1, 1);
+    trait_.m(v1, v2);
+}
+
+pub fn run_2(trait_: &dyn Trait) {
+    let v1 = (1, 1, 1);
+    let v2 = (1, 1, 1);
+    trait_.m(v1, v2);
+    check(v1);
+    check(v2);
+}
+
+#[inline(never)]
+fn check(v: T) {
+    assert_eq!(v, (1, 1, 1));
+}
+
+fn main() {
+    black_box(run_1 as fn(&dyn Trait));
+    black_box(run_2 as fn(&dyn Trait));
+    run_1(&());
+    run_2(&());
+}