From 826e1f5c76e78a79edde0e4be5d83d81f7186cc0 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sat, 27 Aug 2022 15:53:45 -0700 Subject: [PATCH 1/3] enable formatting for rustc_codegen_gcc --- compiler/rustc_codegen_gcc/.rustfmt.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compiler/rustc_codegen_gcc/.rustfmt.toml b/compiler/rustc_codegen_gcc/.rustfmt.toml index c7ad93bafe36c..cdb28d6d3630d 100644 --- a/compiler/rustc_codegen_gcc/.rustfmt.toml +++ b/compiler/rustc_codegen_gcc/.rustfmt.toml @@ -1 +1 @@ -disable_all_formatting = true +disable_all_formatting = false From 47bf17d9633ab6d901668c6d561432be1da864cc Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sat, 27 Aug 2022 16:34:42 -0700 Subject: [PATCH 2/3] format rustc_codegen_gcc --- compiler/rustc_codegen_gcc/src/abi.rs | 119 +- compiler/rustc_codegen_gcc/src/allocator.rs | 70 +- compiler/rustc_codegen_gcc/src/archive.rs | 49 +- compiler/rustc_codegen_gcc/src/asm.rs | 258 ++-- compiler/rustc_codegen_gcc/src/back/write.rs | 40 +- compiler/rustc_codegen_gcc/src/base.rs | 39 +- compiler/rustc_codegen_gcc/src/builder.rs | 1159 +++++++++++------ compiler/rustc_codegen_gcc/src/callee.rs | 79 +- compiler/rustc_codegen_gcc/src/common.rs | 210 ++- compiler/rustc_codegen_gcc/src/consts.rs | 299 +++-- compiler/rustc_codegen_gcc/src/context.rs | 178 ++- .../rustc_codegen_gcc/src/coverageinfo.rs | 23 +- compiler/rustc_codegen_gcc/src/debuginfo.rs | 55 +- compiler/rustc_codegen_gcc/src/declare.rs | 151 ++- compiler/rustc_codegen_gcc/src/int.rs | 702 +++++----- .../rustc_codegen_gcc/src/intrinsic/llvm.rs | 48 +- .../rustc_codegen_gcc/src/intrinsic/mod.rs | 1155 ++++++++-------- .../rustc_codegen_gcc/src/intrinsic/simd.rs | 339 +++-- compiler/rustc_codegen_gcc/src/lib.rs | 198 ++- compiler/rustc_codegen_gcc/src/mono_item.rs | 18 +- compiler/rustc_codegen_gcc/src/type_.rs | 80 +- compiler/rustc_codegen_gcc/src/type_of.rs | 184 ++- .../tests/lang_tests_common.rs | 44 +- 23 files changed, 3371 insertions(+), 2126 deletions(-) diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs index 848c34211ff61..18e60ca7d3b19 100644 --- a/compiler/rustc_codegen_gcc/src/abi.rs +++ b/compiler/rustc_codegen_gcc/src/abi.rs @@ -18,17 +18,16 @@ impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { fn get_param(&mut self, index: usize) -> Self::Value { let func = self.current_func(); let param = func.get_param(index as i32); - let on_stack = - if let Some(on_stack_param_indices) = self.on_stack_function_params.borrow().get(&func) { - on_stack_param_indices.contains(&index) - } - else { - false - }; + let on_stack = if let Some(on_stack_param_indices) = + self.on_stack_function_params.borrow().get(&func) + { + on_stack_param_indices.contains(&index) + } else { + false + }; if on_stack { param.to_lvalue().get_address(None) - } - else { + } else { param.to_rvalue() } } @@ -37,13 +36,14 @@ impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { impl GccType for CastTarget { fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> { let rest_gcc_unit = self.rest.unit.gcc_type(cx); - let (rest_count, rem_bytes) = - if self.rest.unit.size.bytes() == 0 { - (0, 0) - } - else { - (self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes()) - }; + let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 { + (0, 0) + } else { + ( + self.rest.total.bytes() / self.rest.unit.size.bytes(), + self.rest.total.bytes() % 
self.rest.unit.size.bytes(),
+            )
+        };
 
     if self.prefix.iter().all(|x| x.is_none()) {
         // Simplify to a single unit when there is no prefix and size <= unit size
@@ -61,9 +61,7 @@ impl GccType for CastTarget {
         let mut args: Vec<_> = self
             .prefix
             .iter()
-            .flat_map(|option_reg| {
-                option_reg.map(|reg| reg.gcc_type(cx))
-            })
+            .flat_map(|option_reg| option_reg.map(|reg| reg.gcc_type(cx)))
             .chain((0..rest_count).map(|_| rest_gcc_unit))
             .collect();
 
@@ -86,12 +84,10 @@ impl GccType for Reg {
     fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
         match self.kind {
             RegKind::Integer => cx.type_ix(self.size.bits()),
-            RegKind::Float => {
-                match self.size.bits() {
-                    32 => cx.type_f32(),
-                    64 => cx.type_f64(),
-                    _ => bug!("unsupported float: {:?}", self),
-                }
+            RegKind::Float => match self.size.bits() {
+                32 => cx.type_f32(),
+                64 => cx.type_f64(),
+                _ => bug!("unsupported float: {:?}", self),
             },
             RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()),
         }
     }
@@ -100,29 +96,39 @@ impl GccType for Reg {
 
 pub trait FnAbiGccExt<'gcc, 'tcx> {
     // TODO(antoyo): return a function pointer type instead?
-    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>);
+    fn gcc_type(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+    ) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>);
     fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
 }
 
 impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
-    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) {
+    fn gcc_type(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+    ) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) {
         let mut on_stack_param_indices = FxHashSet::default();
 
         // This capacity calculation is approximate.
         let mut argument_tys = Vec::with_capacity(
-            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 }
+            self.args.len()
+                + if let PassMode::Indirect { .. } = self.ret.mode {
+                    1
+                } else {
+                    0
+                },
         );
 
-        let return_ty =
-            match self.ret.mode {
-                PassMode::Ignore => cx.type_void(),
-                PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
-                PassMode::Cast(ref cast, _) => cast.gcc_type(cx),
-                PassMode::Indirect { .. } => {
-                    argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
-                    cx.type_void()
-                }
-            };
+        let return_ty = match self.ret.mode {
+            PassMode::Ignore => cx.type_void(),
+            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
+            PassMode::Cast(ref cast, _) => cast.gcc_type(cx),
+            PassMode::Indirect { .. } => {
+                argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                cx.type_void()
+            }
+        };
 
         for arg in self.args.iter() {
             let arg_ty = match arg.mode {
@@ -133,7 +139,10 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true));
                 continue;
             }
-            PassMode::Indirect { extra_attrs: Some(_), .. } => {
+            PassMode::Indirect {
+                extra_attrs: Some(_),
+                ..
+            } => {
                 unimplemented!();
             }
             PassMode::Cast(ref cast, pad_i32) => {
@@ -143,22 +152,42 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 }
                 cast.gcc_type(cx)
             }
-            PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
+            PassMode::Indirect {
+                extra_attrs: None,
+                on_stack: true,
+                ..
+            } => {
                 on_stack_param_indices.insert(argument_tys.len());
                 arg.memory_ty(cx)
-            },
-            PassMode::Indirect { extra_attrs: None, on_stack: false, ..
} => cx.type_ptr_to(arg.memory_ty(cx)),
+            }
+            PassMode::Indirect {
+                extra_attrs: None,
+                on_stack: false,
+                ..
+            } => cx.type_ptr_to(arg.memory_ty(cx)),
         };
         argument_tys.push(arg_ty);
     }
 
-        (return_ty, argument_tys, self.c_variadic, on_stack_param_indices)
+        (
+            return_ty,
+            argument_tys,
+            self.c_variadic,
+            on_stack_param_indices,
+        )
     }
 
     fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
         let (return_type, params, variadic, on_stack_param_indices) = self.gcc_type(cx);
-        let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
-        cx.on_stack_params.borrow_mut().insert(pointer_type.dyncast_function_ptr_type().expect("function ptr type"), on_stack_param_indices);
+        let pointer_type =
+            cx.context
+                .new_function_pointer_type(None, return_type, &params, variadic);
+        cx.on_stack_params.borrow_mut().insert(
+            pointer_type
+                .dyncast_function_ptr_type()
+                .expect("function ptr type"),
+            on_stack_param_indices,
+        );
         pointer_type
     }
 }
diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
index 58efb81e80011..bd89eb2c79a1d 100644
--- a/compiler/rustc_codegen_gcc/src/allocator.rs
+++ b/compiler/rustc_codegen_gcc/src/allocator.rs
@@ -7,15 +7,20 @@ use rustc_span::symbol::sym;
 
 use crate::GccContext;
 
-pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) {
+pub(crate) unsafe fn codegen(
+    tcx: TyCtxt<'_>,
+    mods: &mut GccContext,
+    _module_name: &str,
+    kind: AllocatorKind,
+    has_alloc_error_handler: bool,
+) {
     let context = &mods.context;
-    let usize =
-        match tcx.sess.target.pointer_width {
-            16 => context.new_type::<u16>(),
-            32 => context.new_type::<u32>(),
-            64 => context.new_type::<u64>(),
-            tws => bug!("Unsupported target word size for int: {}", tws),
-        };
+    let usize = match tcx.sess.target.pointer_width {
+        16 => context.new_type::<u16>(),
+        32 => context.new_type::<u32>(),
+        64 => context.new_type::<u64>(),
+        tws => bug!("Unsupported target word size for int: {}", tws),
+    };
     let i8 = context.new_type::<i8>();
     let i8p = i8.make_pointer();
     let void = context.new_type::<()>();
@@ -44,10 +49,19 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam
         };
         let name = format!("__rust_{}", method.name);
 
-        let args: Vec<_> = types.iter().enumerate()
+        let args: Vec<_> = types
+            .iter()
+            .enumerate()
             .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
             .collect();
-        let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
+        let func = context.new_function(
+            None,
+            FunctionType::Exported,
+            output.unwrap_or(void),
+            &args,
+            name,
+            false,
+        );
 
         if tcx.sess.target.options.default_hidden_visibility {
             // TODO(antoyo): set visibility.
@@ -57,10 +71,19 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam
         }
 
         let callee = kind.fn_name(method.name);
-        let args: Vec<_> = types.iter().enumerate()
+        let args: Vec<_> = types
+            .iter()
+            .enumerate()
            .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
            .collect();
-        let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
+        let callee = context.new_function(
+            None,
+            FunctionType::Extern,
+            output.unwrap_or(void),
+            &args,
+            callee,
+            false,
+        );
         // TODO(antoyo): set visibility.
let block = func.new_block("entry"); @@ -74,8 +97,7 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam //llvm::LLVMSetTailCall(ret, True); if output.is_some() { block.end_with_return(None, ret); - } - else { + } else { block.end_with_void_return(None); } @@ -85,20 +107,22 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam let types = [usize, usize]; let name = "__rust_alloc_error_handler".to_string(); - let args: Vec<_> = types.iter().enumerate() + let args: Vec<_> = types + .iter() + .enumerate() .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) .collect(); let func = context.new_function(None, FunctionType::Exported, void, &args, name, false); - let kind = - if has_alloc_error_handler { - AllocatorKind::Global - } - else { - AllocatorKind::Default - }; + let kind = if has_alloc_error_handler { + AllocatorKind::Global + } else { + AllocatorKind::Default + }; let callee = kind.fn_name(sym::oom); - let args: Vec<_> = types.iter().enumerate() + let args: Vec<_> = types + .iter() + .enumerate() .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) .collect(); let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false); diff --git a/compiler/rustc_codegen_gcc/src/archive.rs b/compiler/rustc_codegen_gcc/src/archive.rs index f863abdcc97ec..4d9d3ea0a08c3 100644 --- a/compiler/rustc_codegen_gcc/src/archive.rs +++ b/compiler/rustc_codegen_gcc/src/archive.rs @@ -80,8 +80,13 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { let file_name = String::from_utf8(entry.header().identifier().to_vec()) .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?; if !skip(&file_name) { - self.entries - .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i })); + self.entries.push(( + file_name, + ArchiveEntry::FromArchive { + archive_index, + entry_index: i, + }, + )); } i += 1; } @@ -156,20 +161,22 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { } } } - ArchiveEntry::File(file) => - match builder { - BuilderKind::Bsd(ref mut builder) => { - builder - .append_file(entry_name.as_bytes(), &mut File::open(file).expect("file for bsd builder")) - .unwrap() - }, - BuilderKind::Gnu(ref mut builder) => { - builder - .append_file(entry_name.as_bytes(), &mut File::open(&file).expect(&format!("file {:?} for gnu builder", file))) - .unwrap() - }, - BuilderKind::NativeAr(archive_file) => add_file_using_ar(archive_file, &file), - }, + ArchiveEntry::File(file) => match builder { + BuilderKind::Bsd(ref mut builder) => builder + .append_file( + entry_name.as_bytes(), + &mut File::open(file).expect("file for bsd builder"), + ) + .unwrap(), + BuilderKind::Gnu(ref mut builder) => builder + .append_file( + entry_name.as_bytes(), + &mut File::open(&file) + .expect(&format!("file {:?} for gnu builder", file)), + ) + .unwrap(), + BuilderKind::NativeAr(archive_file) => add_file_using_ar(archive_file, &file), + }, } } @@ -177,11 +184,15 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { std::mem::drop(builder); // Run ranlib to be able to link the archive - let status = - std::process::Command::new("ranlib").arg(output).status().expect("Couldn't run ranlib"); + let status = std::process::Command::new("ranlib") + .arg(output) + .status() + .expect("Couldn't run ranlib"); if !status.success() { - self.config.sess.fatal(&format!("Ranlib exited with code {:?}", status.code())); + self.config + .sess + 
.fatal(&format!("Ranlib exited with code {:?}", status.code()));
         }
 
         any_members
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
index 52fd66af0659d..f5674de12b5fb 100644
--- a/compiler/rustc_codegen_gcc/src/asm.rs
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -2,7 +2,10 @@ use gccjit::{LValue, RValue, ToRValue, Type};
 use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_codegen_ssa::mir::operand::OperandValue;
 use rustc_codegen_ssa::mir::place::PlaceRef;
-use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+use rustc_codegen_ssa::traits::{
+    AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef,
+    InlineAsmOperandRef,
+};
 use rustc_middle::{bug, ty::Instance};
 use rustc_span::Span;
 
@@ -11,10 +14,9 @@ use rustc_target::asm::*;
 use std::borrow::Cow;
 
 use crate::builder::Builder;
+use crate::callee::get_fn;
 use crate::context::CodegenCx;
 use crate::type_of::LayoutGccExt;
-use crate::callee::get_fn;
-
 
 // Rust asm! and GCC Extended Asm semantics differ substantially.
 //
@@ -67,7 +69,6 @@ use crate::callee::get_fn;
 const ATT_SYNTAX_INS: &str = ".att_syntax noprefix\n\t";
 const INTEL_SYNTAX_INS: &str = "\n\t.intel_syntax noprefix";
 
-
 struct AsmOutOperand<'a, 'tcx, 'gcc> {
     rust_idx: usize,
     constraint: &'a str,
@@ -75,13 +76,13 @@ struct AsmOutOperand<'a, 'tcx, 'gcc> {
     readwrite: bool,
 
     tmp_var: LValue<'gcc>,
-    out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>
+    out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>,
 }
 
 struct AsmInOperand<'a, 'tcx> {
     rust_idx: usize,
     constraint: Cow<'a, str>,
-    val: RValue<'tcx>
+    val: RValue<'tcx>,
 }
 
 impl AsmOutOperand<'_, '_, '_> {
@@ -101,15 +102,25 @@ impl AsmOutOperand<'_, '_, '_> {
 
 enum ConstraintOrRegister {
     Constraint(&'static str),
-    Register(&'static str)
+    Register(&'static str),
 }
 
-
 impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], _instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
+    fn codegen_inline_asm(
+        &mut self,
+        template: &[InlineAsmTemplatePiece],
+        rust_operands: &[InlineAsmOperandRef<'tcx, Self>],
+        options: InlineAsmOptions,
+        span: &[Span],
+        _instance: Instance<'_>,
+        _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
+    ) {
         if options.contains(InlineAsmOptions::MAY_UNWIND) {
             self.sess()
-                .struct_span_err(span[0], "GCC backend does not support unwinding from inline asm")
+                .struct_span_err(
+                    span[0],
+                    "GCC backend does not support unwinding from inline asm",
+                )
                 .emit();
             return;
         }
@@ -156,32 +167,38 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     use ConstraintOrRegister::*;
 
                     let (constraint, ty) = match (reg_to_gcc(reg), place) {
-                        (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx, false)),
+                        (Constraint(constraint), Some(place)) => {
+                            (constraint, place.layout.gcc_type(self.cx, false))
+                        }
                         // When `reg` is a class and not an explicit register but the out place is not specified,
                         // we need to create an unused output variable to assign the output to. This var
                        // needs to be of a type that's "compatible" with the register class, but specific type
                        // doesn't matter.
- (Constraint(constraint), None) => (constraint, dummy_output_type(self.cx, reg.reg_class())), + (Constraint(constraint), None) => { + (constraint, dummy_output_type(self.cx, reg.reg_class())) + } (Register(_), Some(_)) => { // left for the next pass - continue - }, + continue; + } (Register(reg_name), None) => { // `clobber_abi` can add lots of clobbers that are not supported by the target, // such as AVX-512 registers, so we just ignore unsupported registers - let is_target_supported = reg.reg_class().supported_types(asm_arch).iter() - .any(|&(_, feature)| { - if let Some(feature) = feature { - self.tcx.sess.target_features.contains(&feature) - } else { - true // Register class is unconditionally supported - } - }); + let is_target_supported = + reg.reg_class().supported_types(asm_arch).iter().any( + |&(_, feature)| { + if let Some(feature) = feature { + self.tcx.sess.target_features.contains(&feature) + } else { + true // Register class is unconditionally supported + } + }, + ); if is_target_supported && !clobbers.contains(®_name) { clobbers.push(reg_name); } - continue + continue; } }; @@ -192,7 +209,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { late, readwrite: false, tmp_var, - out_place: place + out_place: place, }); } @@ -201,23 +218,27 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { inputs.push(AsmInOperand { constraint: Cow::Borrowed(constraint), rust_idx, - val: value.immediate() + val: value.immediate(), }); - } - else { + } else { // left for the next pass - continue + continue; } } - InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => { - let constraint = if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) { - constraint - } - else { - // left for the next pass - continue - }; + InlineAsmOperandRef::InOut { + reg, + late, + in_value, + out_place, + } => { + let constraint = + if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) { + constraint + } else { + // left for the next pass + continue; + }; // Rustc frontend guarantees that input and output types are "compatible", // so we can just use input var's type for the output variable. @@ -248,7 +269,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { inputs.push(AsmInOperand { constraint, rust_idx, - val: in_value.immediate() + val: in_value.immediate(), }); } } @@ -266,7 +287,11 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { InlineAsmOperandRef::SymStatic { def_id } => { // TODO(@Amanieu): Additional mangling is needed on // some targets to add a leading underscore (Mach-O). 
- constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len(); + constants_len += self + .tcx + .symbol_name(Instance::mono(self.tcx, def_id)) + .name + .len(); } } } @@ -279,10 +304,9 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { let out_place = if let Some(place) = place { place - } - else { + } else { // processed in the previous pass - continue + continue; }; let ty = out_place.layout.gcc_type(self.cx, false); @@ -295,7 +319,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { late, readwrite: false, tmp_var, - out_place: Some(out_place) + out_place: Some(out_place), }); } @@ -313,7 +337,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { inputs.push(AsmInOperand { constraint: "r".into(), rust_idx, - val: reg_var.to_rvalue() + val: reg_var.to_rvalue(), }); } @@ -321,7 +345,12 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } // `inout("explicit register") in_var => out_var` - InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => { + InlineAsmOperandRef::InOut { + reg, + late, + in_value, + out_place, + } => { if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { // See explanation in the first pass. let ty = in_value.layout.gcc_type(self.cx, false); @@ -341,7 +370,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { inputs.push(AsmInOperand { constraint, rust_idx, - val: in_value.immediate() + val: in_value.immediate(), }); } @@ -352,7 +381,9 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { inputs.push(AsmInOperand { constraint: "X".into(), rust_idx, - val: self.cx.rvalue_as_function(get_fn(self.cx, instance)) + val: self + .cx + .rvalue_as_function(get_fn(self.cx, instance)) .get_address(None), }); } @@ -373,7 +404,11 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { // 3. Build the template string - let mut template_str = String::with_capacity(estimate_template_length(template, constants_len, att_dialect)); + let mut template_str = String::with_capacity(estimate_template_length( + template, + constants_len, + att_dialect, + )); if att_dialect { template_str.push_str(ATT_SYNTAX_INS); } @@ -392,7 +427,11 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { template_str.push_str(s); } } - InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => { + InlineAsmTemplatePiece::Placeholder { + operand_idx, + modifier, + span: _, + } => { let mut push_to_template = |modifier, gcc_idx| { use std::fmt::Write; @@ -404,9 +443,10 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { }; match rust_operands[operand_idx] { - InlineAsmOperandRef::Out { reg, .. } => { + InlineAsmOperandRef::Out { reg, .. } => { let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); - let gcc_index = outputs.iter() + let gcc_index = outputs + .iter() .position(|op| operand_idx == op.rust_idx) .expect("wrong rust index"); push_to_template(modifier, gcc_index); @@ -414,7 +454,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { InlineAsmOperandRef::In { reg, .. 
} => { let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); - let in_gcc_index = inputs.iter() + let in_gcc_index = inputs + .iter() .position(|op| operand_idx == op.rust_idx) .expect("wrong rust index"); let gcc_index = in_gcc_index + outputs.len(); @@ -425,7 +466,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); // The input register is tied to the output, so we can just use the index of the output register - let gcc_index = outputs.iter() + let gcc_index = outputs + .iter() .position(|op| operand_idx == op.rust_idx) .expect("wrong rust index"); push_to_template(modifier, gcc_index); @@ -496,7 +538,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } if options.contains(InlineAsmOptions::NORETURN) { let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable"); - let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) }; + let builtin_unreachable: RValue<'gcc> = + unsafe { std::mem::transmute(builtin_unreachable) }; self.call(self.type_void(), builtin_unreachable, &[], None); } @@ -514,23 +557,26 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place); } } - } } -fn estimate_template_length(template: &[InlineAsmTemplatePiece], constants_len: usize, att_dialect: bool) -> usize { - let len: usize = template.iter().map(|piece| { - match *piece { - InlineAsmTemplatePiece::String(ref string) => { - string.len() - } - InlineAsmTemplatePiece::Placeholder { .. } => { - // '%' + 1 char modifier + 1 char index - 3 +fn estimate_template_length( + template: &[InlineAsmTemplatePiece], + constants_len: usize, + att_dialect: bool, +) -> usize { + let len: usize = template + .iter() + .map(|piece| { + match *piece { + InlineAsmTemplatePiece::String(ref string) => string.len(), + InlineAsmTemplatePiece::Placeholder { .. 
} => { + // '%' + 1 char modifier + 1 char index + 3 + } } - } - }) - .sum(); + }) + .sum(); // increase it by 5% to account for possible '%' signs that'll be duplicated // I pulled the number out of blue, but should be fair enough @@ -563,7 +609,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { _ => unimplemented!(), } - }, + } InlineAsmRegOrRegClass::RegClass(reg) => match reg { InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(), InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => unimplemented!(), @@ -593,7 +639,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { unreachable!("clobber-only") - }, + } InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(), @@ -607,7 +653,9 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => unimplemented!(), InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(), InlineAsmRegClass::X86( - X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg, + X86InlineAsmRegClass::x87_reg + | X86InlineAsmRegClass::mmx_reg + | X86InlineAsmRegClass::tmm_reg, ) => unreachable!("clobber-only"), InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("GCC backend does not support SPIR-V") @@ -615,7 +663,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(), InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(), InlineAsmRegClass::Err => unreachable!(), - } + }, }; ConstraintOrRegister::Constraint(constraint) @@ -631,7 +679,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => { unimplemented!() } - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)=> cx.type_i32(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(), InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) @@ -657,7 +705,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { unreachable!("clobber-only") - }, + } InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(), @@ -675,7 +723,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(), InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") - }, + } InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(), InlineAsmRegClass::Err => unreachable!(), @@ -683,7 +731,13 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl } 
impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
-    fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef<'tcx>], options: InlineAsmOptions, _line_spans: &[Span]) {
+    fn codegen_global_asm(
+        &self,
+        template: &[InlineAsmTemplatePiece],
+        operands: &[GlobalAsmOperandRef<'tcx>],
+        options: InlineAsmOptions,
+        _line_spans: &[Span],
+    ) {
         let asm_arch = self.tcx.sess.asm_arch.unwrap();
 
         // Default to Intel syntax on x86
@@ -697,18 +751,20 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
                 InlineAsmTemplatePiece::String(ref string) => {
                     for line in string.lines() {
                         // NOTE: gcc does not allow inline comment, so remove them.
-                        let line =
-                            if let Some(index) = line.rfind("//") {
-                                &line[..index]
-                            }
-                            else {
-                                line
-                            };
+                        let line = if let Some(index) = line.rfind("//") {
+                            &line[..index]
+                        } else {
+                            line
+                        };
                         template_str.push_str(line);
                         template_str.push('\n');
                     }
-                },
-                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+                }
+                InlineAsmTemplatePiece::Placeholder {
+                    operand_idx,
+                    modifier: _,
+                    span: _,
+                } => {
                     match operands[operand_idx] {
                         GlobalAsmOperandRef::Const { ref string } => {
                             // Const operands get injected directly into the
@@ -737,20 +793,22 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
             }
         }
 
-        let template_str =
-            if att_dialect {
-                format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str)
-            }
-            else {
-                template_str
-            };
+        let template_str = if att_dialect {
+            format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str)
+        } else {
+            template_str
+        };
         // NOTE: seems like gcc will put the asm in the wrong section, so set it to .text manually.
         let template_str = format!(".pushsection .text\n{}\n.popsection", template_str);
         self.context.add_top_level_asm(None, &template_str);
     }
 }
 
-fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
+fn modifier_to_gcc(
+    arch: InlineAsmArch,
+    reg: InlineAsmRegClass,
+    modifier: Option<char>,
+) -> Option<char> {
     match reg {
         InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
         InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => modifier,
@@ -758,7 +816,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
         InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
         | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
             unimplemented!()
         }
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)  => unimplemented!(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(),
         InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
         | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => unimplemented!(),
         InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
@@ -781,7 +839,13 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
         InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
-            None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') },
+            None => {
+                if arch == InlineAsmArch::X86_64 {
+                    Some('q')
+                } else {
+                    Some('k')
+                }
+            }
             Some('l') => Some('b'),
             Some('h') => Some('h'),
             Some('x') => Some('w'),
@@ -803,13 +867,17 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
         },
         InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
         InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => None,
-        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg
| X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg) => {
+        InlineAsmRegClass::X86(
+            X86InlineAsmRegClass::x87_reg
+            | X86InlineAsmRegClass::mmx_reg
+            | X86InlineAsmRegClass::tmm_reg,
+        ) => {
             unreachable!("clobber-only")
         }
         InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
         InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
             bug!("LLVM backend does not support SPIR-V")
-        },
+        }
         InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(),
         InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(),
         InlineAsmRegClass::Err => unreachable!(),
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
index efcf18d31eb09..8627211ed5954 100644
--- a/compiler/rustc_codegen_gcc/src/back/write.rs
+++ b/compiler/rustc_codegen_gcc/src/back/write.rs
@@ -1,8 +1,8 @@
 use std::{env, fs};
 
 use gccjit::OutputKind;
-use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
 use rustc_codegen_ssa::back::write::{CodegenContext, EmitObj, ModuleConfig};
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
 use rustc_errors::Handler;
 use rustc_session::config::OutputType;
 use rustc_span::fatal_error::FatalError;
@@ -10,16 +10,27 @@ use rustc_target::spec::SplitDebuginfo;
 
 use crate::{GccCodegenBackend, GccContext};
 
-pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
-    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+pub(crate) unsafe fn codegen(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    _diag_handler: &Handler,
+    module: ModuleCodegen<GccContext>,
+    config: &ModuleConfig,
+) -> Result<CompiledModule, FatalError> {
+    let _timer = cgcx
+        .prof
+        .generic_activity_with_arg("LLVM_module_codegen", &*module.name);
     {
         let context = &module.module_llvm.context;
 
         let module_name = module.name.clone();
         let module_name = Some(&module_name[..]);
 
-        let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
-        let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+        let _bc_out = cgcx
+            .output_filenames
+            .temp_path(OutputType::Bitcode, module_name);
+        let obj_out = cgcx
+            .output_filenames
+            .temp_path(OutputType::Object, module_name);
 
         if config.bitcode_needed() {
             // TODO(antoyo)
@@ -33,7 +44,9 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
             let _timer = cgcx
                 .prof
                 .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
-            let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+            let path = cgcx
+                .output_filenames
+                .temp_path(OutputType::Assembly, module_name);
             context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
         }
 
@@ -45,7 +58,9 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
                     if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
                         println!("Module {}", module.name);
                     }
-                    if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1") || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
+                    if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1")
+                        || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name)
+                    {
                         println!("Dumping reproducer {}", module.name);
                         let _ = fs::create_dir("/tmp/reproducers");
                         // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
@@ -59,7 +74,10 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
                         let path = &format!("/tmp/gccjit_dumps/{}.c", module.name);
                         context.dump_to_file(path, true);
                     }
-
context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
+            context.compile_to_file(
+                OutputKind::ObjectFile,
+                obj_out.to_str().expect("path to str"),
+            );
         }
 
         EmitObj::Bitcode => {
@@ -78,6 +96,10 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
     ))
 }
 
-pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
+pub(crate) fn link(
+    _cgcx: &CodegenContext<GccCodegenBackend>,
+    _diag_handler: &Handler,
+    mut _modules: Vec<ModuleCodegen<GccContext>>,
+) -> Result<ModuleCodegen<GccContext>, FatalError> {
     unimplemented!();
 }
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
index 8f9f6f98faf81..da3dbb62542b0 100644
--- a/compiler/rustc_codegen_gcc/src/base.rs
+++ b/compiler/rustc_codegen_gcc/src/base.rs
@@ -1,24 +1,20 @@
 use std::env;
 use std::time::Instant;
 
-use gccjit::{
-    Context,
-    FunctionType,
-    GlobalKind,
-};
-use rustc_middle::dep_graph;
-use rustc_middle::ty::TyCtxt;
-use rustc_middle::mir::mono::Linkage;
-use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use gccjit::{Context, FunctionType, GlobalKind};
 use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
 use rustc_codegen_ssa::mono_item::MonoItemExt;
 use rustc_codegen_ssa::traits::DebugInfoMethods;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_middle::dep_graph;
+use rustc_middle::mir::mono::Linkage;
+use rustc_middle::ty::TyCtxt;
 use rustc_session::config::DebugInfo;
 use rustc_span::Symbol;
 
-use crate::GccContext;
 use crate::builder::Builder;
 use crate::context::CodegenCx;
+use crate::GccContext;
 
 pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
     match linkage {
@@ -52,7 +48,11 @@ pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
     }
 }
 
-pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen<GccContext>, u64) {
+pub fn compile_codegen_unit<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    cgu_name: Symbol,
+    supports_128bit_integers: bool,
+) -> (ModuleCodegen<GccContext>, u64) {
     let prof_timer = tcx.prof.generic_activity("codegen_module");
     let start_time = Instant::now();
 
@@ -71,7 +71,10 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_
     // the time we needed for codegenning it.
     let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
 
-    fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, supports_128bit_integers): (Symbol, bool)) -> ModuleCodegen<GccContext> {
+    fn module_codegen(
+        tcx: TyCtxt<'_>,
+        (cgu_name, supports_128bit_integers): (Symbol, bool),
+    ) -> ModuleCodegen<GccContext> {
         let cgu = tcx.codegen_unit(cgu_name);
         // Instantiate monomorphizations without filling out definitions yet...
         //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
@@ -96,7 +99,13 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_
         // NOTE: Rust relies on LLVM not doing TBAA (https://github.com/rust-lang/unsafe-code-guidelines/issues/292).
context.add_command_line_option("-fno-strict-aliasing"); - if tcx.sess.opts.unstable_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) { + if tcx + .sess + .opts + .unstable_opts + .function_sections + .unwrap_or(tcx.sess.target.function_sections) + { context.add_command_line_option("-ffunction-sections"); context.add_command_line_option("-fdata-sections"); } @@ -143,9 +152,7 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_ ModuleCodegen { name: cgu_name.to_string(), - module_llvm: GccContext { - context - }, + module_llvm: GccContext { context }, kind: ModuleKind::Regular, } } diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 4d40dd0994dd2..e27fe0ef8c954 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -4,49 +4,32 @@ use std::convert::TryFrom; use std::ops::Deref; use gccjit::{ - BinaryOp, - Block, - ComparisonOp, - Context, - Function, - LValue, - RValue, - ToRValue, - Type, - UnaryOp, + BinaryOp, Block, ComparisonOp, Context, Function, LValue, RValue, ToRValue, Type, UnaryOp, +}; +use rustc_codegen_ssa::common::{ + AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, }; -use rustc_codegen_ssa::MemFlags; -use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope}; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ - BackendTypes, - BaseTypeMethods, - BuilderMethods, - ConstMethods, - DerivedTypeMethods, - LayoutTypeMethods, - HasCodegen, - OverflowOp, - StaticBuilderMethods, + BackendTypes, BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods, HasCodegen, + LayoutTypeMethods, OverflowOp, StaticBuilderMethods, }; +use rustc_codegen_ssa::MemFlags; use rustc_data_structures::fx::FxHashSet; +use rustc_middle::ty::layout::{ + FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, + TyAndLayout, +}; use rustc_middle::ty::{ParamEnv, Ty, TyCtxt}; -use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout}; -use rustc_span::Span; use rustc_span::def_id::DefId; +use rustc_span::Span; use rustc_target::abi::{ - self, - call::FnAbi, - Align, - HasDataLayout, - Size, - TargetDataLayout, - WrappingRange, + self, call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout, WrappingRange, }; use rustc_target::spec::{HasTargetSpec, Target}; -use crate::common::{SignType, TypeReflection, type_is_pointer}; +use crate::common::{type_is_pointer, SignType, TypeReflection}; use crate::context::CodegenCx; use crate::intrinsic::llvm; use crate::type_of::LayoutGccExt; @@ -77,22 +60,30 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } } - fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> { + fn atomic_extremum( + &mut self, + operation: ExtremumOperation, + dst: RValue<'gcc>, + src: RValue<'gcc>, + order: AtomicOrdering, + ) -> RValue<'gcc> { let size = src.get_type().get_size(); let func = self.current_func(); - let load_ordering = - match order { - // TODO(antoyo): does this make sense? 
- AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire, - _ => order, - }; - let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size)); + let load_ordering = match order { + // TODO(antoyo): does this make sense? + AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire, + _ => order, + }; + let previous_value = + self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size)); let previous_var = func.new_local(None, previous_value.get_type(), "previous_value"); let return_value = func.new_local(None, previous_value.get_type(), "return_value"); - self.llbb().add_assignment(None, previous_var, previous_value); - self.llbb().add_assignment(None, return_value, previous_var.to_rvalue()); + self.llbb() + .add_assignment(None, previous_var, previous_value); + self.llbb() + .add_assignment(None, return_value, previous_var.to_rvalue()); let while_block = func.new_block("while"); let after_block = func.new_block("after_while"); @@ -102,16 +93,32 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // state need to be updated. self.switch_to_block(while_block); - let comparison_operator = - match operation { - ExtremumOperation::Max => ComparisonOp::LessThan, - ExtremumOperation::Min => ComparisonOp::GreaterThan, - }; - - let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type())); - let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false); - let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange); - let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2); + let comparison_operator = match operation { + ExtremumOperation::Max => ComparisonOp::LessThan, + ExtremumOperation::Min => ComparisonOp::GreaterThan, + }; + + let cond1 = self.context.new_comparison( + None, + comparison_operator, + previous_var.to_rvalue(), + self.context.new_cast(None, src, previous_value.get_type()), + ); + let compare_exchange = + self.compare_exchange(dst, previous_var, src, order, load_ordering, false); + let cond2 = self.cx.context.new_unary_op( + None, + UnaryOp::LogicalNegate, + compare_exchange.get_type(), + compare_exchange, + ); + let cond = self.cx.context.new_binary_op( + None, + BinaryOp::LogicalAnd, + self.cx.bool_type, + cond1, + cond2, + ); while_block.end_with_conditional(None, cond, while_block, after_block); @@ -122,29 +129,56 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { return_value.to_rvalue() } - fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> { + fn compare_exchange( + &self, + dst: RValue<'gcc>, + cmp: LValue<'gcc>, + src: RValue<'gcc>, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: bool, + ) -> RValue<'gcc> { let size = src.get_type().get_size(); - let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size)); - let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); - let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc()); - let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32); + let compare_exchange = self + .context + .get_builtin_function(&format!("__atomic_compare_exchange_{}", size)); + let 
order = self + .context + .new_rvalue_from_int(self.i32_type, order.to_gcc()); + let failure_order = self + .context + .new_rvalue_from_int(self.i32_type, failure_order.to_gcc()); + let weak = self + .context + .new_rvalue_from_int(self.bool_type, weak as i32); let void_ptr_type = self.context.new_type::<*mut ()>(); let volatile_void_ptr_type = void_ptr_type.make_volatile(); let dst = self.context.new_cast(None, dst, volatile_void_ptr_type); - let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type); + let expected = self + .context + .new_cast(None, cmp.get_address(None), void_ptr_type); // NOTE: not sure why, but we have the wrong type here. let int_type = compare_exchange.get_param(2).to_rvalue().get_type(); let src = self.context.new_cast(None, src, int_type); - self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order]) + self.context.new_call( + None, + compare_exchange, + &[dst, expected, src, weak, order, failure_order], + ) } pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) { self.llbb().add_assignment(None, lvalue, value); } - fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> { + fn check_call<'b>( + &mut self, + _typ: &str, + func: Function<'gcc>, + args: &'b [RValue<'gcc>], + ) -> Cow<'b, [RValue<'gcc>]> { let mut all_args_match = true; let mut param_types = vec![]; let param_count = func.get_param_count(); @@ -169,8 +203,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let actual_ty = actual_val.get_type(); if expected_ty != actual_ty { self.bitcast(actual_val, expected_ty) - } - else { + } else { actual_val } }) @@ -179,10 +212,18 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { Cow::Owned(casted_args) } - fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> { + fn check_ptr_call<'b>( + &mut self, + _typ: &str, + func_ptr: RValue<'gcc>, + args: &'b [RValue<'gcc>], + ) -> Cow<'b, [RValue<'gcc>]> { let mut all_args_match = true; let mut param_types = vec![]; - let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr"); + let gcc_func = func_ptr + .get_type() + .dyncast_function_ptr_type() + .expect("function ptr"); for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) { let param = gcc_func.get_param_type(index); if param != arg.get_type() { @@ -213,19 +254,31 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let actual_ty = actual_val.get_type(); if expected_ty != actual_ty { - if !actual_ty.is_vector() && !expected_ty.is_vector() && actual_ty.is_integral() && expected_ty.is_integral() && actual_ty.get_size() != expected_ty.get_size() { + if !actual_ty.is_vector() + && !expected_ty.is_vector() + && actual_ty.is_integral() + && expected_ty.is_integral() + && actual_ty.get_size() != expected_ty.get_size() + { self.context.new_cast(None, actual_val, expected_ty) - } - else if on_stack_param_indices.contains(&index) { + } else if on_stack_param_indices.contains(&index) { actual_val.dereference(None).to_rvalue() - } - else { - assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index); + } else { + assert!( + !((actual_ty.is_vector() && !expected_ty.is_vector()) + || (!actual_ty.is_vector() && expected_ty.is_vector())), + "{:?} ({}) 
-> {:?} ({}), index: {:?}[{}]", + actual_ty, + actual_ty.is_vector(), + expected_ty, + expected_ty.is_vector(), + func_ptr, + index + ); // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. self.bitcast(actual_val, expected_ty) } - } - else { + } else { actual_val } }) @@ -241,8 +294,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { if dest_ptr_ty == stored_ptr_ty { ptr - } - else { + } else { self.bitcast(ptr, stored_ptr_ty) } } @@ -251,7 +303,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { self.block.get_function() } - fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { + fn function_call( + &mut self, + func: RValue<'gcc>, + args: &[RValue<'gcc>], + _funclet: Option<&Funclet>, + ) -> RValue<'gcc> { // TODO(antoyo): remove when the API supports a different type for functions. let func: Function<'gcc> = self.cx.rvalue_as_function(func); let args = self.check_call("call", func, args); @@ -263,62 +320,105 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let current_func = self.block.get_function(); if return_type != void_type { unsafe { RETURN_VALUE_COUNT += 1 }; - let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT })); - self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args)); + let result = current_func.new_local( + None, + return_type, + &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }), + ); + self.block + .add_assignment(None, result, self.cx.context.new_call(None, func, &args)); result.to_rvalue() - } - else { - self.block.add_eval(None, self.cx.context.new_call(None, func, &args)); + } else { + self.block + .add_eval(None, self.cx.context.new_call(None, func, &args)); // Return dummy value when not having return value. self.context.new_rvalue_from_long(self.isize_type, 0) } } - fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { + fn function_ptr_call( + &mut self, + func_ptr: RValue<'gcc>, + args: &[RValue<'gcc>], + _funclet: Option<&Funclet>, + ) -> RValue<'gcc> { let args = self.check_ptr_call("call", func_ptr, args); // gccjit requires to use the result of functions, even when it's not used. // That's why we assign the result to a local or call add_eval(). 
-        let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
+        let gcc_func = func_ptr
+            .get_type()
+            .dyncast_function_ptr_type()
+            .expect("function ptr");
         let return_type = gcc_func.get_return_type();
         let void_type = self.context.new_type::<()>();
         let current_func = self.block.get_function();
 
         if return_type != void_type {
             unsafe { RETURN_VALUE_COUNT += 1 };
-            let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+            let result = current_func.new_local(
+                None,
+                return_type,
+                &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }),
+            );
             let func_name = format!("{:?}", func_ptr);
             let args = llvm::adjust_intrinsic_arguments(&self, gcc_func, args, &func_name);
-            self.block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+            self.block.add_assignment(
+                None,
+                result,
+                self.cx.context.new_call_through_ptr(None, func_ptr, &args),
+            );
             result.to_rvalue()
-        }
-        else {
-            #[cfg(not(feature="master"))]
+        } else {
+            #[cfg(not(feature = "master"))]
             if gcc_func.get_param_count() == 0 {
                 // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
-                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
-            }
-            else {
-                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+                self.block.add_eval(
+                    None,
+                    self.cx.context.new_call_through_ptr(None, func_ptr, &[]),
+                );
+            } else {
+                self.block.add_eval(
+                    None,
+                    self.cx.context.new_call_through_ptr(None, func_ptr, &args),
+                );
             }
-            #[cfg(feature="master")]
-            self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+            #[cfg(feature = "master")]
+            self.block.add_eval(
+                None,
+                self.cx.context.new_call_through_ptr(None, func_ptr, &args),
+            );
 
             // Return dummy value when not having return value.
-            let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
-            self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+            let result =
+                current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
+            self.block.add_assignment(
+                None,
+                result,
+                self.context.new_rvalue_from_long(self.isize_type, 0),
+            );
             result.to_rvalue()
         }
     }
 
-    pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    pub fn overflow_call(
+        &self,
+        func: Function<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         // gccjit requires to use the result of functions, even when it's not used.
         // That's why we assign the result to a local.
         let return_type = self.context.new_type::<bool>();
         let current_func = self.block.get_function();
         // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
unsafe { RETURN_VALUE_COUNT += 1 };
-        let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
-        self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+        let result = current_func.new_local(
+            None,
+            return_type,
+            &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }),
+        );
+        self.block
+            .add_assignment(None, result, self.cx.context.new_call(None, func, &args));
         result.to_rvalue()
     }
 }
@@ -410,15 +510,13 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn ret(&mut self, value: RValue<'gcc>) {
-        let value =
-            if self.structs_as_pointer.borrow().contains(&value) {
-                // NOTE: hack to workaround a limitation of the rustc API: see comment on
-                // CodegenCx.structs_as_pointer
-                value.dereference(None).to_rvalue()
-            }
-            else {
-                value
-            };
+        let value = if self.structs_as_pointer.borrow().contains(&value) {
+            // NOTE: hack to workaround a limitation of the rustc API: see comment on
+            // CodegenCx.structs_as_pointer
+            value.dereference(None).to_rvalue()
+        } else {
+            value
+        };
         self.llbb().end_with_return(None, value);
     }
 
     fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
-        self.llbb().end_with_conditional(None, cond, then_block, else_block)
+        self.llbb()
+            .end_with_conditional(None, cond, then_block, else_block)
     }
 
-    fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
+    fn switch(
+        &mut self,
+        value: RValue<'gcc>,
+        default_block: Block<'gcc>,
+        cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>,
+    ) {
         let mut gcc_cases = vec![];
         let typ = self.val_ty(value);
         for (on_val, dest) in cases {
             let on_val = self.const_uint_big(typ, on_val);
             gcc_cases.push(self.context.new_case(on_val, on_val, dest));
         }
-        self.block.end_with_switch(None, value, default_block, &gcc_cases);
+        self.block
+            .end_with_switch(None, value, default_block, &gcc_cases);
     }
 
-    fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    fn invoke(
+        &mut self,
+        typ: Type<'gcc>,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        then: Block<'gcc>,
+        catch: Block<'gcc>,
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         // TODO(bjorn3): Properly implement unwinding.
let call_site = self.call(typ, func, args, None); let condition = self.context.new_rvalue_from_int(self.bool_type, 1); - self.llbb().end_with_conditional(None, condition, then, catch); + self.llbb() + .end_with_conditional(None, condition, then, catch); call_site } fn unreachable(&mut self) { let func = self.context.get_builtin_function("__builtin_unreachable"); - self.block.add_eval(None, self.context.new_call(None, func, &[])); + self.block + .add_eval(None, self.context.new_call(None, func, &[])); let return_type = self.block.get_function().get_return_type(); let void_type = self.context.new_type::<()>(); if return_type == void_type { self.block.end_with_void_return(None) - } - else { - let return_value = self.current_func() - .new_local(None, return_type, "unreachableReturn"); + } else { + let return_value = + self.current_func() + .new_local(None, return_type, "unreachableReturn"); self.block.end_with_return(None, return_value) } } @@ -568,7 +683,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> { - self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a) + self.cx + .context + .new_unary_op(None, UnaryOp::Minus, a.get_type(), a) } fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> { @@ -620,23 +737,33 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { unimplemented!(); } - fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) { + fn checked_binop( + &mut self, + oop: OverflowOp, + typ: Ty<'_>, + lhs: Self::Value, + rhs: Self::Value, + ) -> (Self::Value, Self::Value) { self.gcc_checked_binop(oop, typ, lhs, rhs) } fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> { // FIXME(antoyo): this check that we don't call get_aligned() a second time on a type. // Ideally, we shouldn't need to do this check. - let aligned_type = - if ty == self.cx.u128_type || ty == self.cx.i128_type { - ty - } - else { - ty.get_aligned(align.bytes()) - }; + let aligned_type = if ty == self.cx.u128_type || ty == self.cx.i128_type { + ty + } else { + ty.get_aligned(align.bytes()) + }; // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial. self.stack_var_count.set(self.stack_var_count.get() + 1); - self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None) + self.current_func() + .new_local( + None, + aligned_type, + &format!("stack_var_{}", self.stack_var_count.get()), + ) + .get_address(None) } fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> { @@ -657,39 +784,66 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let ptr = self.context.new_cast(None, ptr, pointee_ty.make_pointer()); let deref = ptr.dereference(None).to_rvalue(); unsafe { RETURN_VALUE_COUNT += 1 }; - let loaded_value = function.new_local(None, pointee_ty, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT })); + let loaded_value = function.new_local( + None, + pointee_ty, + &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }), + ); block.add_assignment(None, loaded_value, deref); loaded_value.to_rvalue() } fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> { // TODO(antoyo): use ty. 
- let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile()); + let ptr = self + .context + .new_cast(None, ptr, ptr.get_type().make_volatile()); ptr.dereference(None).to_rvalue() } - fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> { + fn atomic_load( + &mut self, + _ty: Type<'gcc>, + ptr: RValue<'gcc>, + order: AtomicOrdering, + size: Size, + ) -> RValue<'gcc> { // TODO(antoyo): use ty. // TODO(antoyo): handle alignment. - let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes())); - let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); - - let volatile_const_void_ptr_type = self.context.new_type::<()>() + let atomic_load = self + .context + .get_builtin_function(&format!("__atomic_load_{}", size.bytes())); + let ordering = self + .context + .new_rvalue_from_int(self.i32_type, order.to_gcc()); + + let volatile_const_void_ptr_type = self + .context + .new_type::<()>() .make_const() .make_volatile() .make_pointer(); - let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type); + let ptr = self + .context + .new_cast(None, ptr, volatile_const_void_ptr_type); self.context.new_call(None, atomic_load, &[ptr, ordering]) } - fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> { + fn load_operand( + &mut self, + place: PlaceRef<'tcx, RValue<'gcc>>, + ) -> OperandRef<'tcx, RValue<'gcc>> { assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { return OperandRef::new_zst(self, place.layout); } - fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) { + fn scalar_load_metadata<'a, 'gcc, 'tcx>( + bx: &mut Builder<'a, 'gcc, 'tcx>, + load: RValue<'gcc>, + scalar: &abi::Scalar, + ) { let vr = scalar.valid_range(bx); match scalar.primitive() { abi::Int(..) 
=> { @@ -704,46 +858,50 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } } - let val = - if let Some(llextra) = place.llextra { - OperandValue::Ref(place.llval, Some(llextra), place.align) + let val = if let Some(llextra) = place.llextra { + OperandValue::Ref(place.llval, Some(llextra), place.align) + } else if place.layout.is_gcc_immediate() { + let load = self.load(place.layout.gcc_type(self, false), place.llval, place.align); + if let abi::Abi::Scalar(ref scalar) = place.layout.abi { + scalar_load_metadata(self, load, scalar); } - else if place.layout.is_gcc_immediate() { - let load = self.load( - place.layout.gcc_type(self, false), - place.llval, - place.align, - ); - if let abi::Abi::Scalar(ref scalar) = place.layout.abi { - scalar_load_metadata(self, load, scalar); + OperandValue::Immediate(self.to_immediate(load, place.layout)) + } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + let b_offset = a.size(self).align_to(b.align(self).abi); + let pair_type = place.layout.gcc_type(self, false); + + let mut load = |i, scalar: &abi::Scalar, align| { + let llptr = self.struct_gep(pair_type, place.llval, i as u64); + let llty = place.layout.scalar_pair_element_gcc_type(self, i, false); + let load = self.load(llty, llptr, align); + scalar_load_metadata(self, load, scalar); + if scalar.is_bool() { + self.trunc(load, self.type_i1()) + } else { + load } - OperandValue::Immediate(self.to_immediate(load, place.layout)) - } - else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { - let b_offset = a.size(self).align_to(b.align(self).abi); - let pair_type = place.layout.gcc_type(self, false); - - let mut load = |i, scalar: &abi::Scalar, align| { - let llptr = self.struct_gep(pair_type, place.llval, i as u64); - let llty = place.layout.scalar_pair_element_gcc_type(self, i, false); - let load = self.load(llty, llptr, align); - scalar_load_metadata(self, load, scalar); - if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load } - }; - - OperandValue::Pair( - load(0, a, place.align), - load(1, b, place.align.restrict_for_offset(b_offset)), - ) - } - else { - OperandValue::Ref(place.llval, None, place.align) }; - OperandRef { val, layout: place.layout } + OperandValue::Pair( + load(0, a, place.align), + load(1, b, place.align.restrict_for_offset(b_offset)), + ) + } else { + OperandValue::Ref(place.llval, None, place.align) + }; + + OperandRef { + val, + layout: place.layout, + } } - fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self { + fn write_operand_repeatedly( + mut self, + cg_elem: OperandRef<'tcx, RValue<'gcc>>, + count: u64, + dest: PlaceRef<'tcx, RValue<'gcc>>, + ) -> Self { let zero = self.const_usize(0); let count = self.const_usize(count); let start = dest.project_index(&mut self, zero).llval; @@ -754,7 +912,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let next_bb = self.append_sibling_block("repeat_loop_next"); let ptr_type = start.get_type(); - let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var"); + let current = self + .llbb() + .get_function() + .new_local(None, ptr_type, "loop_var"); let current_val = current.to_rvalue(); self.assign(current, start); @@ -765,10 +926,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.cond_br(keep_going, body_bb, next_bb); self.switch_to_block(body_bb); - let align = 
dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); - cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align)); - - let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]); + let align = dest + .align + .restrict_for_offset(dest.layout.field(self.cx(), 0).size); + cg_elem.val.store( + &mut self, + PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align), + ); + + let next = self.inbounds_gep( + self.backend_type(cg_elem.layout), + current.to_rvalue(), + &[self.const_usize(1)], + ); self.llbb().add_assignment(None, current, next); self.br(header_bb); @@ -788,7 +958,13 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.store_with_flags(val, ptr, align, MemFlags::empty()) } - fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align, _flags: MemFlags) -> RValue<'gcc> { + fn store_with_flags( + &mut self, + val: RValue<'gcc>, + ptr: RValue<'gcc>, + align: Align, + _flags: MemFlags, + ) -> RValue<'gcc> { let ptr = self.check_store(val, ptr); let destination = ptr.dereference(None); // NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast @@ -803,43 +979,74 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.cx.context.new_rvalue_zero(self.type_i32()) } - fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) { + fn atomic_store( + &mut self, + value: RValue<'gcc>, + ptr: RValue<'gcc>, + order: AtomicOrdering, + size: Size, + ) { // TODO(antoyo): handle alignment. - let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes())); - let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); - let volatile_const_void_ptr_type = self.context.new_type::<()>() - .make_volatile() - .make_pointer(); - let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type); + let atomic_store = self + .context + .get_builtin_function(&format!("__atomic_store_{}", size.bytes())); + let ordering = self + .context + .new_rvalue_from_int(self.i32_type, order.to_gcc()); + let volatile_const_void_ptr_type = + self.context.new_type::<()>().make_volatile().make_pointer(); + let ptr = self + .context + .new_cast(None, ptr, volatile_const_void_ptr_type); // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because // the following cast is required to avoid this error: // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4)))) let int_type = atomic_store.get_param(1).to_rvalue().get_type(); let value = self.context.new_cast(None, value, int_type); - self.llbb() - .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering])); + self.llbb().add_eval( + None, + self.context + .new_call(None, atomic_store, &[ptr, value, ordering]), + ); } - fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { + fn gep( + &mut self, + _typ: Type<'gcc>, + ptr: RValue<'gcc>, + indices: &[RValue<'gcc>], + ) -> RValue<'gcc> { let mut result = ptr; for index in indices { - result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue(); + result = self + .context + .new_array_access(None, result, *index) + .get_address(None) + 
.to_rvalue(); } result } - fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { + fn inbounds_gep( + &mut self, + _typ: Type<'gcc>, + ptr: RValue<'gcc>, + indices: &[RValue<'gcc>], + ) -> RValue<'gcc> { // FIXME(antoyo): would be safer if doing the same thing (loop) as gep. // TODO(antoyo): specify inbounds somehow. match indices.len() { - 1 => { - self.context.new_array_access(None, ptr, indices[0]).get_address(None) - }, + 1 => self + .context + .new_array_access(None, ptr, indices[0]) + .get_address(None), 2 => { let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0? - self.context.new_array_access(None, array, indices[1]).get_address(None) - }, + self.context + .new_array_access(None, array, indices[1]) + .get_address(None) + } _ => unimplemented!(), } } @@ -850,21 +1057,23 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let value = ptr.dereference(None).to_rvalue(); if value_type.dyncast_array().is_some() { - let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); + let index = self + .context + .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); let element = self.context.new_array_access(None, value, index); element.get_address(None) - } - else if let Some(vector_type) = value_type.dyncast_vector() { + } else if let Some(vector_type) = value_type.dyncast_vector() { let array_type = vector_type.get_element_type().make_pointer(); let array = self.bitcast(ptr, array_type); - let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); + let index = self + .context + .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); let element = self.context.new_array_access(None, array, index); element.get_address(None) - } - else if let Some(struct_type) = value_type.is_struct() { - ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None) - } - else { + } else if let Some(struct_type) = value_type.is_struct() { + ptr.dereference_field(None, struct_type.get_field(idx as i32)) + .get_address(None) + } else { panic!("Unexpected type {:?}", value_type); } } @@ -923,7 +1132,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.cx.const_bitcast(value, dest_ty) } - fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> { + fn intcast( + &mut self, + value: RValue<'gcc>, + dest_typ: Type<'gcc>, + _is_signed: bool, + ) -> RValue<'gcc> { // NOTE: is_signed is for value, not dest_typ. self.gcc_int_cast(value, dest_typ) } @@ -934,12 +1148,16 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { (false, true) => { // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to // a pointer, which is not supported by gccjit. - return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty); - }, + return self.cx.context.new_cast( + None, + self.inttoptr(value, val_type.make_pointer()), + dest_ty, + ); + } (false, false) => { // When they are not pointers, we want a transmute (or reinterpret_cast). 
self.bitcast(value, dest_ty) - }, + } (true, true) => self.cx.context.new_cast(None, value, dest_ty), (true, false) => unimplemented!(), } @@ -951,25 +1169,50 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { - self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) + self.context + .new_comparison(None, op.to_gcc_comparison(), lhs, rhs) } /* Miscellaneous instructions */ - fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) { - assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported"); + fn memcpy( + &mut self, + dst: RValue<'gcc>, + _dst_align: Align, + src: RValue<'gcc>, + _src_align: Align, + size: RValue<'gcc>, + flags: MemFlags, + ) { + assert!( + !flags.contains(MemFlags::NONTEMPORAL), + "non-temporal memcpy not supported" + ); let size = self.intcast(size, self.type_size_t(), false); let _is_volatile = flags.contains(MemFlags::VOLATILE); let dst = self.pointercast(dst, self.type_i8p()); let src = self.pointercast(src, self.type_ptr_to(self.type_void())); let memcpy = self.context.get_builtin_function("memcpy"); // TODO(antoyo): handle aligns and is_volatile. - self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size])); + self.block + .add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size])); } - fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) { + fn memmove( + &mut self, + dst: RValue<'gcc>, + dst_align: Align, + src: RValue<'gcc>, + src_align: Align, + size: RValue<'gcc>, + flags: MemFlags, + ) { if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memmove. - let val = self.load(src.get_type().get_pointee().expect("get_pointee"), src, src_align); + let val = self.load( + src.get_type().get_pointee().expect("get_pointee"), + src, + src_align, + ); let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); self.store_with_flags(val, ptr, dst_align, flags); return; @@ -981,26 +1224,45 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let memmove = self.context.get_builtin_function("memmove"); // TODO(antoyo): handle is_volatile. - self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size])); + self.block.add_eval( + None, + self.context.new_call(None, memmove, &[dst, src, size]), + ); } - fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) { + fn memset( + &mut self, + ptr: RValue<'gcc>, + fill_byte: RValue<'gcc>, + size: RValue<'gcc>, + _align: Align, + flags: MemFlags, + ) { let _is_volatile = flags.contains(MemFlags::VOLATILE); let ptr = self.pointercast(ptr, self.type_i8p()); let memset = self.context.get_builtin_function("memset"); // TODO(antoyo): handle align and is_volatile. 
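The memory intrinsics in this hunk are lowered to calls to the corresponding libc builtins (`memcpy`, `memmove`, `memset`), with the size operand cast to `size_t` first. The std-pointer equivalents below show the same operations in plain Rust; this is an illustrative sketch, not the backend's code path:

    use std::ptr;

    // memcpy equivalent: byte copy between non-overlapping buffers.
    unsafe fn copy_bytes(dst: *mut u8, src: *const u8, n: usize) {
        ptr::copy_nonoverlapping(src, dst, n);
    }

    // memset equivalent: fill n bytes; as with the builtin call,
    // only the low byte of the fill value matters.
    unsafe fn fill_bytes(dst: *mut u8, fill: u8, n: usize) {
        ptr::write_bytes(dst, fill, n);
    }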
let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type); let size = self.intcast(size, self.type_size_t(), false); - self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size])); + self.block.add_eval( + None, + self.context.new_call(None, memset, &[ptr, fill_byte, size]), + ); } - fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> { + fn select( + &mut self, + cond: RValue<'gcc>, + then_val: RValue<'gcc>, + mut else_val: RValue<'gcc>, + ) -> RValue<'gcc> { let func = self.current_func(); let variable = func.new_local(None, then_val.get_type(), "selectVar"); let then_block = func.new_block("then"); let else_block = func.new_block("else"); let after_block = func.new_block("after"); - self.llbb().end_with_conditional(None, cond, then_block, else_block); + self.llbb() + .end_with_conditional(None, cond, then_block, else_block); then_block.add_assignment(None, variable, then_val); then_block.end_with_jump(None, after_block); @@ -1037,57 +1299,60 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let value_type = aggregate_value.get_type(); if value_type.dyncast_array().is_some() { - let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); + let index = self + .context + .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); let element = self.context.new_array_access(None, aggregate_value, index); element.get_address(None) - } - else if value_type.dyncast_vector().is_some() { + } else if value_type.dyncast_vector().is_some() { panic!(); - } - else if let Some(pointer_type) = value_type.get_pointee() { + } else if let Some(pointer_type) = value_type.get_pointee() { if let Some(struct_type) = pointer_type.is_struct() { // NOTE: hack to workaround a limitation of the rustc API: see comment on // CodegenCx.structs_as_pointer - aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue() - } - else { + aggregate_value + .dereference_field(None, struct_type.get_field(idx as i32)) + .to_rvalue() + } else { panic!("Unexpected type {:?}", value_type); } - } - else if let Some(struct_type) = value_type.is_struct() { - aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue() - } - else { + } else if let Some(struct_type) = value_type.is_struct() { + aggregate_value + .access_field(None, struct_type.get_field(idx as i32)) + .to_rvalue() + } else { panic!("Unexpected type {:?}", value_type); } } - fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> { + fn insert_value( + &mut self, + aggregate_value: RValue<'gcc>, + value: RValue<'gcc>, + idx: u64, + ) -> RValue<'gcc> { // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays. 
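gccjit has no select instruction, so the `select` implementation above builds a diamond in the CFG: a conditional branch into two blocks that each assign a shared local, joined by an `after` block that reads it back. The same shape at the source level, as a plain-Rust sketch:

    // The "selectVar" local is written by both arms of the diamond.
    fn select_i32(cond: bool, then_val: i32, else_val: i32) -> i32 {
        let variable;
        if cond {
            variable = then_val; // then_block
        } else {
            variable = else_val; // else_block
        }
        variable // after_block reads the joined value
    }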
assert_eq!(idx as usize as u64, idx); let value_type = aggregate_value.get_type(); - let lvalue = - if value_type.dyncast_array().is_some() { - let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); - self.context.new_array_access(None, aggregate_value, index) - } - else if value_type.dyncast_vector().is_some() { - panic!(); - } - else if let Some(pointer_type) = value_type.get_pointee() { - if let Some(struct_type) = pointer_type.is_struct() { - // NOTE: hack to workaround a limitation of the rustc API: see comment on - // CodegenCx.structs_as_pointer - aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)) - } - else { - panic!("Unexpected type {:?}", value_type); - } - } - else { + let lvalue = if value_type.dyncast_array().is_some() { + let index = self + .context + .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); + self.context.new_array_access(None, aggregate_value, index) + } else if value_type.dyncast_vector().is_some() { + panic!(); + } else if let Some(pointer_type) = value_type.get_pointee() { + if let Some(struct_type) = pointer_type.is_struct() { + // NOTE: hack to workaround a limitation of the rustc API: see comment on + // CodegenCx.structs_as_pointer + aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)) + } else { panic!("Unexpected type {:?}", value_type); - }; + } + } else { + panic!("Unexpected type {:?}", value_type); + }; let lvalue_type = lvalue.to_rvalue().get_type(); let value = @@ -1109,10 +1374,17 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn cleanup_landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>) -> RValue<'gcc> { - let field1 = self.context.new_field(None, self.u8_type.make_pointer(), "landing_pad_field_1"); - let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_1"); - let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]); - self.current_func().new_local(None, struct_type.as_type(), "landing_pad") + let field1 = + self.context + .new_field(None, self.u8_type.make_pointer(), "landing_pad_field_1"); + let field2 = self + .context + .new_field(None, self.i32_type, "landing_pad_field_1"); + let struct_type = self + .context + .new_struct_type(None, "landing_pad", &[field1, field2]); + self.current_func() + .new_local(None, struct_type.as_type(), "landing_pad") .to_rvalue() // TODO(antoyo): Properly implement unwinding. 
// the above is just to make the compilation work as it seems @@ -1146,47 +1418,87 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } // Atomic Operations - fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> { - let expected = self.current_func().new_local(None, cmp.get_type(), "expected"); + fn atomic_cmpxchg( + &mut self, + dst: RValue<'gcc>, + cmp: RValue<'gcc>, + src: RValue<'gcc>, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: bool, + ) -> RValue<'gcc> { + let expected = self + .current_func() + .new_local(None, cmp.get_type(), "expected"); self.llbb().add_assignment(None, expected, cmp); let success = self.compare_exchange(dst, expected, src, order, failure_order, weak); - let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false); - let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result"); + let pair_type = self + .cx + .type_struct(&[src.get_type(), self.bool_type], false); + let result = self + .current_func() + .new_local(None, pair_type, "atomic_cmpxchg_result"); let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align. let value_type = result.to_rvalue().get_type(); if let Some(struct_type) = value_type.is_struct() { - self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align); + self.store( + success, + result + .access_field(None, struct_type.get_field(1)) + .get_address(None), + align, + ); // NOTE: since success contains the call to the intrinsic, it must be stored before // expected so that we store expected after the call. - self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align); + self.store( + expected.to_rvalue(), + result + .access_field(None, struct_type.get_field(0)) + .get_address(None), + align, + ); } // TODO(antoyo): handle when value is not a struct. 
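`atomic_cmpxchg` above must hand rustc a (value, success) pair, but the `__atomic_compare_exchange` builtin reports success as its return value and leaves the observed old value in `expected`; that is why both are stored into a synthesized two-field struct, flag first, since the builtin call is embedded in `success`. A sketch of the pair it produces, using std atomics instead of the builtins:

    use std::sync::atomic::{AtomicU32, Ordering};

    fn cmpxchg_pair(a: &AtomicU32, current: u32, new: u32) -> (u32, bool) {
        match a.compare_exchange(current, new, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(old) => (old, true),   // swapped; old == current
            Err(old) => (old, false), // not swapped; old is the observed value
        }
    }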
result.to_rvalue() } - fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> { + fn atomic_rmw( + &mut self, + op: AtomicRmwBinOp, + dst: RValue<'gcc>, + src: RValue<'gcc>, + order: AtomicOrdering, + ) -> RValue<'gcc> { let size = src.get_type().get_size(); - let name = - match op { - AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size), - AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size), - AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size), - AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size), - AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size), - AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size), - AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size), - AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order), - AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order), - AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order), - AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order), - }; - + let name = match op { + AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size), + AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size), + AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size), + AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size), + AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size), + AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size), + AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size), + AtomicRmwBinOp::AtomicMax => { + return self.atomic_extremum(ExtremumOperation::Max, dst, src, order) + } + AtomicRmwBinOp::AtomicMin => { + return self.atomic_extremum(ExtremumOperation::Min, dst, src, order) + } + AtomicRmwBinOp::AtomicUMax => { + return self.atomic_extremum(ExtremumOperation::Max, dst, src, order) + } + AtomicRmwBinOp::AtomicUMin => { + return self.atomic_extremum(ExtremumOperation::Min, dst, src, order) + } + }; let atomic_function = self.context.get_builtin_function(name); - let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); + let order = self + .context + .new_rvalue_from_int(self.i32_type, order.to_gcc()); let void_ptr_type = self.context.new_type::<*mut ()>(); let volatile_void_ptr_type = void_ptr_type.make_volatile(); @@ -1194,19 +1506,23 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { // FIXME(antoyo): not sure why, but we have the wrong type here. 
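In `atomic_rmw` above, each read-modify-write op selects a sized libatomic builtin by name; only min and max lack direct builtins and divert to the compare-exchange-based `atomic_extremum`. A sketch of the name construction, with a hypothetical op enum standing in for rustc's `AtomicRmwBinOp`:

    enum RmwOp { Xchg, Add, Sub, And, Nand, Or, Xor }

    fn builtin_name(op: RmwOp, size_in_bytes: u64) -> String {
        let stem = match op {
            RmwOp::Xchg => "exchange",
            RmwOp::Add => "fetch_add",
            RmwOp::Sub => "fetch_sub",
            RmwOp::And => "fetch_and",
            RmwOp::Nand => "fetch_nand",
            RmwOp::Or => "fetch_or",
            RmwOp::Xor => "fetch_xor",
        };
        // e.g. "__atomic_fetch_add_4" for a 4-byte operand
        format!("__atomic_{}_{}", stem, size_in_bytes)
    }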
let new_src_type = atomic_function.get_param(1).to_rvalue().get_type(); let src = self.context.new_cast(None, src, new_src_type); - let res = self.context.new_call(None, atomic_function, &[dst, src, order]); + let res = self + .context + .new_call(None, atomic_function, &[dst, src, order]); self.context.new_cast(None, res, src.get_type()) } fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) { - let name = - match scope { - SynchronizationScope::SingleThread => "__atomic_signal_fence", - SynchronizationScope::CrossThread => "__atomic_thread_fence", - }; + let name = match scope { + SynchronizationScope::SingleThread => "__atomic_signal_fence", + SynchronizationScope::CrossThread => "__atomic_thread_fence", + }; let thread_fence = self.context.get_builtin_function(name); - let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); - self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order])); + let order = self + .context + .new_rvalue_from_int(self.i32_type, order.to_gcc()); + self.llbb() + .add_eval(None, self.context.new_call(None, thread_fence, &[order])); } fn set_invariant_load(&mut self, load: RValue<'gcc>) { @@ -1223,13 +1539,24 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { // TODO(antoyo) } - fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> { + fn call( + &mut self, + _typ: Type<'gcc>, + func: RValue<'gcc>, + args: &[RValue<'gcc>], + funclet: Option<&Funclet>, + ) -> RValue<'gcc> { // FIXME(antoyo): remove when having a proper API. let gcc_func = unsafe { std::mem::transmute(func) }; - if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() { + if self + .functions + .borrow() + .values() + .find(|value| **value == gcc_func) + .is_some() + { self.function_call(func, args, funclet) - } - else { + } else { // If it's a not function that was defined, it's a function pointer. self.function_ptr_call(func, args, funclet) } @@ -1258,8 +1585,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn from_immediate(&mut self, val: Self::Value) -> Self::Value { if self.cx().val_ty(val) == self.cx().type_i1() { self.zext(val, self.cx().type_i8()) - } - else { + } else { val } } @@ -1279,38 +1605,55 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { None } - fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) { + fn instrprof_increment( + &mut self, + _fn_name: RValue<'gcc>, + _hash: RValue<'gcc>, + _num_counters: RValue<'gcc>, + _index: RValue<'gcc>, + ) { unimplemented!(); } } impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { - #[cfg(feature="master")] - pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> { + #[cfg(feature = "master")] + pub fn shuffle_vector( + &mut self, + v1: RValue<'gcc>, + v2: RValue<'gcc>, + mask: RValue<'gcc>, + ) -> RValue<'gcc> { let struct_type = mask.get_type().is_struct().expect("mask of struct type"); // TODO(antoyo): use a recursive unqualified() here. 
- let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type"); + let vector_type = v1 + .get_type() + .unqualified() + .dyncast_vector() + .expect("vector type"); let element_type = vector_type.get_element_type(); let vec_num_units = vector_type.get_num_units(); let mask_num_units = struct_type.get_field_count(); let mut vector_elements = vec![]; - let mask_element_type = - if element_type.is_integral() { - element_type + let mask_element_type = if element_type.is_integral() { + element_type + } else { + #[cfg(feature = "master")] + { + self.cx.type_ix(element_type.get_size() as u64 * 8) } - else { - #[cfg(feature="master")] - { - self.cx.type_ix(element_type.get_size() as u64 * 8) - } - #[cfg(not(feature="master"))] - self.int_type - }; + #[cfg(not(feature = "master"))] + self.int_type + }; for i in 0..mask_num_units { let field = struct_type.get_field(i as i32); - vector_elements.push(self.context.new_cast(None, mask.access_field(None, field).to_rvalue(), mask_element_type)); + vector_elements.push(self.context.new_cast( + None, + mask.access_field(None, field).to_rvalue(), + mask_element_type, + )); } // NOTE: the mask needs to be the same length as the input vectors, so add the missing @@ -1319,35 +1662,61 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { vector_elements.push(self.context.new_rvalue_zero(mask_element_type)); } - let array_type = self.context.new_array_type(None, element_type, vec_num_units as i32); - let result_type = self.context.new_vector_type(element_type, mask_num_units as u64); - let (v1, v2) = - if vec_num_units < mask_num_units { - // NOTE: the mask needs to be the same length as the input vectors, so join the 2 - // vectors and create a dummy second vector. - // TODO(antoyo): switch to using new_vector_access. - let array = self.context.new_bitcast(None, v1, array_type); - let mut elements = vec![]; - for i in 0..vec_num_units { - elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); - } - // TODO(antoyo): switch to using new_vector_access. - let array = self.context.new_bitcast(None, v2, array_type); - for i in 0..(mask_num_units - vec_num_units) { - elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); - } - let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements); - let zero = self.context.new_rvalue_zero(element_type); - let v2 = self.context.new_rvalue_from_vector(None, result_type, &vec![zero; mask_num_units]); - (v1, v2) + let array_type = self + .context + .new_array_type(None, element_type, vec_num_units as i32); + let result_type = self + .context + .new_vector_type(element_type, mask_num_units as u64); + let (v1, v2) = if vec_num_units < mask_num_units { + // NOTE: the mask needs to be the same length as the input vectors, so join the 2 + // vectors and create a dummy second vector. + // TODO(antoyo): switch to using new_vector_access. + let array = self.context.new_bitcast(None, v1, array_type); + let mut elements = vec![]; + for i in 0..vec_num_units { + elements.push( + self.context + .new_array_access( + None, + array, + self.context.new_rvalue_from_int(self.int_type, i as i32), + ) + .to_rvalue(), + ); } - else { - (v1, v2) - }; + // TODO(antoyo): switch to using new_vector_access. 
+ let array = self.context.new_bitcast(None, v2, array_type); + for i in 0..(mask_num_units - vec_num_units) { + elements.push( + self.context + .new_array_access( + None, + array, + self.context.new_rvalue_from_int(self.int_type, i as i32), + ) + .to_rvalue(), + ); + } + let v1 = self + .context + .new_rvalue_from_vector(None, result_type, &elements); + let zero = self.context.new_rvalue_zero(element_type); + let v2 = + self.context + .new_rvalue_from_vector(None, result_type, &vec![zero; mask_num_units]); + (v1, v2) + } else { + (v1, v2) + }; let new_mask_num_units = std::cmp::max(mask_num_units, vec_num_units); - let mask_type = self.context.new_vector_type(mask_element_type, new_mask_num_units as u64); - let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements); + let mask_type = self + .context + .new_vector_type(mask_element_type, new_mask_num_units as u64); + let mask = self + .context + .new_rvalue_from_vector(None, mask_type, &vector_elements); let result = self.context.new_rvalue_vector_perm(None, v1, v2, mask); if vec_num_units != mask_num_units { @@ -1357,63 +1726,100 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // TODO(antoyo): switch to using new_vector_access. let array = self.context.new_bitcast(None, result, array_type); for i in 0..mask_num_units { - elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + elements.push( + self.context + .new_array_access( + None, + array, + self.context.new_rvalue_from_int(self.int_type, i as i32), + ) + .to_rvalue(), + ); } - self.context.new_rvalue_from_vector(None, result_type, &elements) - } - else { + self.context + .new_rvalue_from_vector(None, result_type, &elements) + } else { result } } - #[cfg(not(feature="master"))] - pub fn shuffle_vector(&mut self, _v1: RValue<'gcc>, _v2: RValue<'gcc>, _mask: RValue<'gcc>) -> RValue<'gcc> { + #[cfg(not(feature = "master"))] + pub fn shuffle_vector( + &mut self, + _v1: RValue<'gcc>, + _v2: RValue<'gcc>, + _mask: RValue<'gcc>, + ) -> RValue<'gcc> { unimplemented!(); } - #[cfg(feature="master")] + #[cfg(feature = "master")] pub fn vector_reduce(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc> - where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc> + where + F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>, { - let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let vector_type = src + .get_type() + .unqualified() + .dyncast_vector() + .expect("vector type"); let element_count = vector_type.get_num_units(); let mut vector_elements = vec![]; for i in 0..element_count { vector_elements.push(i); } - let mask_type = self.context.new_vector_type(self.int_type, element_count as u64); + let mask_type = self + .context + .new_vector_type(self.int_type, element_count as u64); let mut shift = 1; let mut res = src; while shift < element_count { - let vector_elements: Vec<_> = - vector_elements.iter() - .map(|i| self.context.new_rvalue_from_int(self.int_type, ((i + shift) % element_count) as i32)) - .collect(); - let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements); + let vector_elements: Vec<_> = vector_elements + .iter() + .map(|i| { + self.context + .new_rvalue_from_int(self.int_type, ((i + shift) % element_count) as i32) + }) + .collect(); + let mask = self + .context + .new_rvalue_from_vector(None, mask_type, &vector_elements); let shifted = self.context.new_rvalue_vector_perm(None, res, 
res, mask); shift *= 2; res = op(res, shifted, &self.context); } - self.context.new_vector_access(None, res, self.context.new_rvalue_zero(self.int_type)) + self.context + .new_vector_access(None, res, self.context.new_rvalue_zero(self.int_type)) .to_rvalue() } - #[cfg(not(feature="master"))] + #[cfg(not(feature = "master"))] pub fn vector_reduce(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc> - where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc> + where + F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>, { unimplemented!(); } pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> { - self.vector_reduce(src, |a, b, context| context.new_binary_op(None, op, a.get_type(), a, b)) + self.vector_reduce(src, |a, b, context| { + context.new_binary_op(None, op, a.get_type(), a, b) + }) } - pub fn vector_reduce_fadd_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + pub fn vector_reduce_fadd_fast( + &mut self, + _acc: RValue<'gcc>, + _src: RValue<'gcc>, + ) -> RValue<'gcc> { unimplemented!(); } - pub fn vector_reduce_fmul_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + pub fn vector_reduce_fmul_fast( + &mut self, + _acc: RValue<'gcc>, + _src: RValue<'gcc>, + ) -> RValue<'gcc> { unimplemented!(); } @@ -1433,16 +1839,26 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { }) } - pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> { + pub fn vector_select( + &mut self, + cond: RValue<'gcc>, + then_val: RValue<'gcc>, + else_val: RValue<'gcc>, + ) -> RValue<'gcc> { // cond is a vector of integers, not of bools. let cond_type = cond.get_type(); - let vector_type = cond_type.unqualified().dyncast_vector().expect("vector type"); + let vector_type = cond_type + .unqualified() + .dyncast_vector() + .expect("vector type"); let num_units = vector_type.get_num_units(); let element_type = vector_type.get_element_type(); let zeros = vec![self.context.new_rvalue_zero(element_type); num_units]; let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros); - let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros); + let masks = self + .context + .new_comparison(None, ComparisonOp::NotEquals, cond, zeros); let then_vals = masks & then_val; let ones = vec![self.context.new_rvalue_one(element_type); num_units]; @@ -1451,14 +1867,20 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // NOTE: sometimes, the type of else_val can be different than the type of then_val in // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND // operation to work. 
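The `vector_reduce` just completed combines lanes in log2(n) rounds: rotate the vector by a doubling shift via a permutation mask of `(i + shift) % n`, apply the op lane-wise, and finally read lane 0. A plain-Rust sketch of the same schedule for addition, assuming a power-of-two lane count:

    fn reduce_add(mut v: [i32; 8]) -> i32 {
        let n = v.len();
        let mut shift = 1;
        while shift < n {
            // The rotated copy stands in for new_rvalue_vector_perm
            // with mask element (i + shift) % n.
            let mut rotated = [0i32; 8];
            for i in 0..n {
                rotated[i] = v[(i + shift) % n];
            }
            for i in 0..n {
                v[i] += rotated[i]; // op(res, shifted)
            }
            shift *= 2;
        }
        v[0] // every lane now holds the total; lane 0 is extracted
    }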
- let else_val = self.context.new_bitcast(None, else_val, then_val.get_type()); + let else_val = self + .context + .new_bitcast(None, else_val, then_val.get_type()); let else_vals = inverted_masks & else_val; then_vals | else_vals } } -fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> { +fn difference_or_zero<'gcc>( + a: RValue<'gcc>, + b: RValue<'gcc>, + context: &'gcc Context<'gcc>, +) -> RValue<'gcc> { let difference = a - b; let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, b, a); difference & masks @@ -1547,15 +1969,14 @@ impl ToGccOrdering for AtomicOrdering { fn to_gcc(self) -> i32 { use MemOrdering::*; - let ordering = - match self { - AtomicOrdering::Unordered => __ATOMIC_RELAXED, - AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same. - AtomicOrdering::Acquire => __ATOMIC_ACQUIRE, - AtomicOrdering::Release => __ATOMIC_RELEASE, - AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL, - AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST, - }; + let ordering = match self { + AtomicOrdering::Unordered => __ATOMIC_RELAXED, + AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same. + AtomicOrdering::Acquire => __ATOMIC_ACQUIRE, + AtomicOrdering::Release => __ATOMIC_RELEASE, + AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL, + AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST, + }; ordering as i32 } } diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs index c1041125ecce7..a3cc5854ce3bd 100644 --- a/compiler/rustc_codegen_gcc/src/callee.rs +++ b/compiler/rustc_codegen_gcc/src/callee.rs @@ -1,7 +1,7 @@ use gccjit::{FunctionType, RValue}; use rustc_codegen_ssa::traits::BaseTypeMethods; -use rustc_middle::ty::{self, Instance, TypeVisitable}; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; +use rustc_middle::ty::{self, Instance, TypeVisitable}; use crate::abi::FnAbiGccExt; use crate::context::CodegenCx; @@ -27,49 +27,46 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); - let func = - if let Some(func) = cx.get_declared_value(&sym) { - // Create a fn pointer with the new signature. - let ptrty = fn_abi.ptr_to_gcc_type(cx); + let func = if let Some(func) = cx.get_declared_value(&sym) { + // Create a fn pointer with the new signature. + let ptrty = fn_abi.ptr_to_gcc_type(cx); - // This is subtle and surprising, but sometimes we have to bitcast - // the resulting fn pointer. The reason has to do with external - // functions. If you have two crates that both bind the same C - // library, they may not use precisely the same types: for - // example, they will probably each declare their own structs, - // which are distinct types from LLVM's point of view (nominal - // types). - // - // Now, if those two crates are linked into an application, and - // they contain inlined code, you can wind up with a situation - // where both of those functions wind up being loaded into this - // application simultaneously. In that case, the same function - // (from LLVM's point of view) requires two types. But of course - // LLVM won't allow one function to have two types. - // - // What we currently do, therefore, is declare the function with - // one of the two types (whichever happens to come first) and then - // bitcast as needed when the function is referenced to make sure - // it has the type we expect. 
- // - // This can occur on either a crate-local or crate-external - // reference. It also occurs when testing libcore and in some - // other weird situations. Annoying. - if cx.val_ty(func) != ptrty { - // TODO(antoyo): cast the pointer. - func - } - else { - func - } + // This is subtle and surprising, but sometimes we have to bitcast + // the resulting fn pointer. The reason has to do with external + // functions. If you have two crates that both bind the same C + // library, they may not use precisely the same types: for + // example, they will probably each declare their own structs, + // which are distinct types from LLVM's point of view (nominal + // types). + // + // Now, if those two crates are linked into an application, and + // they contain inlined code, you can wind up with a situation + // where both of those functions wind up being loaded into this + // application simultaneously. In that case, the same function + // (from LLVM's point of view) requires two types. But of course + // LLVM won't allow one function to have two types. + // + // What we currently do, therefore, is declare the function with + // one of the two types (whichever happens to come first) and then + // bitcast as needed when the function is referenced to make sure + // it has the type we expect. + // + // This can occur on either a crate-local or crate-external + // reference. It also occurs when testing libcore and in some + // other weird situations. Annoying. + if cx.val_ty(func) != ptrty { + // TODO(antoyo): cast the pointer. + func + } else { + func } - else { - cx.linkage.set(FunctionType::Extern); - let func = cx.declare_fn(&sym, &fn_abi); + } else { + cx.linkage.set(FunctionType::Extern); + let func = cx.declare_fn(&sym, &fn_abi); - // TODO(antoyo): set linkage and attributes. - func - }; + // TODO(antoyo): set linkage and attributes. 
+ func + }; cx.function_instances.borrow_mut().insert(instance, func); diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index ccb6cbbc2c8a7..978d8ebca58d5 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -1,16 +1,12 @@ use gccjit::LValue; -use gccjit::{RValue, Type, ToRValue}; +use gccjit::{RValue, ToRValue, Type}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ - BaseTypeMethods, - ConstMethods, - DerivedTypeMethods, - MiscMethods, - StaticMethods, + BaseTypeMethods, ConstMethods, DerivedTypeMethods, MiscMethods, StaticMethods, }; -use rustc_middle::mir::Mutability; -use rustc_middle::ty::layout::{TyAndLayout, LayoutOf}; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; +use rustc_middle::mir::Mutability; +use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_target::abi::{self, HasDataLayout, Pointer, Size}; use crate::consts::const_alloc_to_gcc; @@ -37,8 +33,8 @@ pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> { let context = &cx.context; let byte_type = context.new_type::<u8>(); let typ = context.new_array_type(None, byte_type, bytes.len() as i32); - let elements: Vec<_> = - bytes.iter() + let elements: Vec<_> = bytes + .iter() .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)) .collect(); context.new_array_constructor(None, typ, &elements) @@ -52,14 +48,16 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> { if type_is_pointer(typ) { self.context.new_null(typ) - } - else { + } else { self.const_int(typ, 0) } } fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> { - let local = self.current_func.borrow().expect("func") + let local = self + .current_func + .borrow() + .expect("func") .new_local(None, typ, "undefined"); if typ.is_struct().is_some() { // NOTE: hack to workaround a limitation of the rustc API: see comment on let pointer = local.get_address(None); self.structs_as_pointer.borrow_mut().insert(pointer); pointer - } - else { + } else { local.to_rvalue() } } @@ -132,20 +129,20 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { .or_insert_with(|| (s.to_owned(), self.global_string(s))) .1; let len = s.len(); - let cs = self.const_ptrcast(str_global.get_address(None), + let cs = self.const_ptrcast( + str_global.get_address(None), self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)), ); (cs, self.const_usize(len as u64)) } fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> { - let fields: Vec<_> = values.iter() - .map(|value| value.get_type()) - .collect(); + let fields: Vec<_> = values.iter().map(|value| value.get_type()).collect(); // TODO(antoyo): cache the type? It's anonymous, so probably not.
let typ = self.type_struct(&fields, packed); let struct_type = typ.is_struct().expect("struct type"); - self.context.new_struct_constructor(None, struct_type.as_type(), None, values) + self.context + .new_struct_constructor(None, struct_type.as_type(), None, values) } fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> { @@ -163,7 +160,11 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> { - let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() }; + let bitsize = if layout.is_bool() { + 1 + } else { + layout.size(self).bits() + }; match cv { Scalar::Int(int) => { let data = int.assert_bits(layout.size(self)); @@ -171,10 +172,13 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code // the paths for floating-point values. if ty == self.float_type { - return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64); - } - else if ty == self.double_type { - return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64)); + return self + .context + .new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64); + } else if ty == self.double_type { + return self + .context + .new_rvalue_from_double(ty, f64::from_bits(data as u64)); } let value = self.const_uint_big(self.type_ix(bitsize), data); @@ -183,42 +187,42 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } Scalar::Ptr(ptr, _size) => { let (alloc_id, offset) = ptr.into_parts(); - let base_addr = - match self.tcx.global_alloc(alloc_id) { - GlobalAlloc::Memory(alloc) => { - let init = const_alloc_to_gcc(self, alloc); - let alloc = alloc.inner(); - let value = - match alloc.mutability { - Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), - _ => self.static_addr_of(init, alloc.align, None), - }; - if !self.sess().fewer_names() { - // TODO(antoyo): set value name. - } - value - }, - GlobalAlloc::Function(fn_instance) => { - self.get_fn_addr(fn_instance) - }, - GlobalAlloc::VTable(ty, trait_ref) => { - let alloc = self.tcx.global_alloc(self.tcx.vtable_allocation((ty, trait_ref))).unwrap_memory(); - let init = const_alloc_to_gcc(self, alloc); - self.static_addr_of(init, alloc.inner().align, None) + let base_addr = match self.tcx.global_alloc(alloc_id) { + GlobalAlloc::Memory(alloc) => { + let init = const_alloc_to_gcc(self, alloc); + let alloc = alloc.inner(); + let value = match alloc.mutability { + Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), + _ => self.static_addr_of(init, alloc.align, None), + }; + if !self.sess().fewer_names() { + // TODO(antoyo): set value name.
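`scalar_to_backend` above splits into a raw-bits path for float constants and a base-plus-offset path for relocated pointers. Two plain-Rust fragments showing the arithmetic involved, as an illustrative sketch only:

    // Float constants arrive as raw bit patterns; the f32 path widens to
    // f64 because new_rvalue_from_double takes a double.
    fn f32_const(data: u128) -> f64 {
        f32::from_bits(data as u32) as f64
    }

    // Relocated pointers: integer addition of the byte offset onto the base
    // address, then cast back (the bitcasts through usize_type in the diff).
    fn reloc_ptr(base: *const u8, offset_bytes: usize) -> *const u8 {
        (base as usize).wrapping_add(offset_bytes) as *const u8
    }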
} - GlobalAlloc::Static(def_id) => { - assert!(self.tcx.is_static(def_id)); - self.get_static(def_id).get_address(None) - }, - }; + value + } + GlobalAlloc::Function(fn_instance) => self.get_fn_addr(fn_instance), + GlobalAlloc::VTable(ty, trait_ref) => { + let alloc = self + .tcx + .global_alloc(self.tcx.vtable_allocation((ty, trait_ref))) + .unwrap_memory(); + let init = const_alloc_to_gcc(self, alloc); + self.static_addr_of(init, alloc.inner().align, None) + } + GlobalAlloc::Static(def_id) => { + assert!(self.tcx.is_static(def_id)); + self.get_static(def_id).get_address(None) + } + }; let ptr_type = base_addr.get_type(); let base_addr = self.const_bitcast(base_addr, self.usize_type); - let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64); + let offset = self + .context + .new_rvalue_from_long(self.usize_type, offset.bytes() as i64); let ptr = self.const_bitcast(base_addr + offset, ptr_type); if layout.primitive() != Pointer { self.const_bitcast(ptr.dereference(None).to_rvalue(), ty) - } - else { + } else { self.const_bitcast(ptr, ty) } } @@ -229,22 +233,28 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { const_alloc_to_gcc(self, alloc) } - fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: ConstAllocation<'tcx>, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> { + fn from_const_alloc( + &self, + layout: TyAndLayout<'tcx>, + alloc: ConstAllocation<'tcx>, + offset: Size, + ) -> PlaceRef<'tcx, RValue<'gcc>> { assert_eq!(alloc.inner().align, layout.align.abi); let ty = self.type_ptr_to(layout.gcc_type(self, true)); - let value = - if layout.size == Size::ZERO { - let value = self.const_usize(alloc.inner().align.bytes()); - self.context.new_cast(None, value, ty) - } - else { - let init = const_alloc_to_gcc(self, alloc); - let base_addr = self.static_addr_of(init, alloc.inner().align, None); - - let array = self.const_bitcast(base_addr, self.type_i8p()); - let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None); - self.const_bitcast(value, ty) - }; + let value = if layout.size == Size::ZERO { + let value = self.const_usize(alloc.inner().align.bytes()); + self.context.new_cast(None, value, ty) + } else { + let init = const_alloc_to_gcc(self, alloc); + let base_addr = self.static_addr_of(init, alloc.inner().align, None); + + let array = self.const_bitcast(base_addr, self.type_i8p()); + let value = self + .context + .new_array_access(None, array, self.const_usize(offset.bytes())) + .get_address(None); + self.const_bitcast(value, ty) + }; PlaceRef::new_sized(value, layout) } @@ -272,35 +282,25 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> { fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { if self.is_u8(cx) { cx.i8_type - } - else if self.is_u16(cx) { + } else if self.is_u16(cx) { cx.i16_type - } - else if self.is_u32(cx) { + } else if self.is_u32(cx) { cx.i32_type - } - else if self.is_u64(cx) { + } else if self.is_u64(cx) { cx.i64_type - } - else if self.is_u128(cx) { + } else if self.is_u128(cx) { cx.i128_type - } - else if self.is_uchar(cx) { + } else if self.is_uchar(cx) { cx.char_type - } - else if self.is_ushort(cx) { + } else if self.is_ushort(cx) { cx.short_type - } - else if self.is_uint(cx) { + } else if self.is_uint(cx) { cx.int_type - } - else if self.is_ulong(cx) { + } else if self.is_ulong(cx) { cx.long_type - } - else if self.is_ulonglong(cx) { + } else if self.is_ulonglong(cx) { cx.longlong_type - } - else { + } else { self.clone() } } @@ -308,41 
+308,31 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> { fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { if self.is_i8(cx) { cx.u8_type - } - else if self.is_i16(cx) { + } else if self.is_i16(cx) { cx.u16_type - } - else if self.is_i32(cx) { + } else if self.is_i32(cx) { cx.u32_type - } - else if self.is_i64(cx) { + } else if self.is_i64(cx) { cx.u64_type - } - else if self.is_i128(cx) { + } else if self.is_i128(cx) { cx.u128_type - } - else if self.is_char(cx) { + } else if self.is_char(cx) { cx.uchar_type - } - else if self.is_short(cx) { + } else if self.is_short(cx) { cx.ushort_type - } - else if self.is_int(cx) { + } else if self.is_int(cx) { cx.uint_type - } - else if self.is_long(cx) { + } else if self.is_long(cx) { cx.ulong_type - } - else if self.is_longlong(cx) { + } else if self.is_longlong(cx) { cx.ulonglong_type - } - else { + } else { self.clone() } } } -pub trait TypeReflection<'gcc, 'tcx> { +pub trait TypeReflection<'gcc, 'tcx> { fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs index c0b8d21818f85..2682d1c622ad1 100644 --- a/compiler/rustc_codegen_gcc/src/consts.rs +++ b/compiler/rustc_codegen_gcc/src/consts.rs @@ -2,14 +2,16 @@ use gccjit::{GlobalKind, LValue, RValue, ToRValue, Type}; use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods}; use rustc_hir as hir; use rustc_hir::Node; -use rustc_middle::{bug, span_bug}; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; +use rustc_middle::mir::interpret::{ + self, read_target_uint, ConstAllocation, ErrorHandled, Scalar as InterpScalar, +}; use rustc_middle::mir::mono::MonoItem; -use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::ty::layout::LayoutOf; -use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint}; -use rustc_span::Span; +use rustc_middle::ty::{self, Instance, Ty}; +use rustc_middle::{bug, span_bug}; use rustc_span::def_id::DefId; +use rustc_span::Span; use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange}; use crate::base; @@ -48,7 +50,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { } let global_value = self.static_addr_of_mut(cv, align, kind); #[cfg(feature = "master")] - self.global_lvalues.borrow().get(&global_value) + self.global_lvalues + .borrow() + .get(&global_value) .expect("`static_addr_of_mut` did not add the global to `self.global_lvalues`") .global_set_readonly(); self.const_globals.borrow_mut().insert(cv, global_value); @@ -58,25 +62,22 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { fn codegen_static(&self, def_id: DefId, is_mutable: bool) { let attrs = self.tcx.codegen_fn_attrs(def_id); - let value = - match codegen_static_initializer(&self, def_id) { - Ok((value, _)) => value, - // Error has already been reported - Err(_) => return, - }; + let value = match codegen_static_initializer(&self, def_id) { + Ok((value, _)) => value, + // Error has already been reported + Err(_) => return, + }; let global = self.get_static(def_id); // boolean SSA values are i1, but they have to be stored in i8 slots, // otherwise some LLVM optimization passes don't work as expected let val_llty = self.val_ty(value); - let value = - if val_llty == self.type_i1() { - unimplemented!(); - 
} - else { - value - }; + let value = if val_llty == self.type_i1() { + unimplemented!(); + } else { + value + }; let instance = Instance::mono(self.tcx, def_id); let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); @@ -141,7 +142,14 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { // Wasm statics with custom link sections get special treatment as they // go into custom sections of the wasm executable. - if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { + if self + .tcx + .sess + .opts + .target_triple + .triple() + .starts_with("wasm32") + { if let Some(_section) = attrs.link_section { unimplemented!(); } @@ -149,7 +157,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { // TODO(antoyo): set link section. } - if attrs.flags.contains(CodegenFnAttrFlags::USED) || attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) { + if attrs.flags.contains(CodegenFnAttrFlags::USED) + || attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) + { self.add_used_global(global.to_rvalue()); } } @@ -165,23 +175,27 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { } impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { - pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { - let global = - match kind { - Some(kind) if !self.tcx.sess.fewer_names() => { - let name = self.generate_local_symbol_name(kind); - // TODO(antoyo): check if it's okay that no link_section is set. - - let typ = self.val_ty(cv).get_aligned(align.bytes()); - let global = self.declare_private_global(&name[..], typ); - global - } - _ => { - let typ = self.val_ty(cv).get_aligned(align.bytes()); - let global = self.declare_unnamed_global(typ); - global - }, - }; + pub fn static_addr_of_mut( + &self, + cv: RValue<'gcc>, + align: Align, + kind: Option<&str>, + ) -> RValue<'gcc> { + let global = match kind { + Some(kind) if !self.tcx.sess.fewer_names() => { + let name = self.generate_local_symbol_name(kind); + // TODO(antoyo): check if it's okay that no link_section is set. + + let typ = self.val_ty(cv).get_aligned(align.bytes()); + let global = self.declare_private_global(&name[..], typ); + global + } + _ => { + let typ = self.val_ty(cv).get_aligned(align.bytes()); + let global = self.declare_unnamed_global(typ); + global + } + }; global.global_set_initializer_rvalue(cv); // TODO(antoyo): set unnamed address. let rvalue = global.get_address(None); @@ -196,8 +210,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { return global; } - let defined_in_current_codegen_unit = - self.codegen_unit.items().contains_key(&MonoItem::Static(def_id)); + let defined_in_current_codegen_unit = self + .codegen_unit + .items() + .contains_key(&MonoItem::Static(def_id)); assert!( !defined_in_current_codegen_unit, "consts::get_static() should always hit the cache for \ @@ -208,85 +224,87 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); let sym = self.tcx.symbol_name(instance).name; - let global = - if let Some(def_id) = def_id.as_local() { - let id = self.tcx.hir().local_def_id_to_hir_id(def_id); - let llty = self.layout_of(ty).gcc_type(self, true); - // FIXME: refactor this to work without accessing the HIR - let global = match self.tcx.hir().get(id) { - Node::Item(&hir::Item { span, kind: hir::ItemKind::Static(..), .. 
}) => { - if let Some(global) = self.get_declared_value(&sym) { - if self.val_ty(global) != self.type_ptr_to(llty) { - span_bug!(span, "Conflicting types for static"); - } - } - - let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); - let global = self.declare_global( - &sym, - llty, - GlobalKind::Exported, - is_tls, - fn_attrs.link_section, - ); - - if !self.tcx.is_reachable_non_generic(def_id) { - // TODO(antoyo): set visibility. + let global = if let Some(def_id) = def_id.as_local() { + let id = self.tcx.hir().local_def_id_to_hir_id(def_id); + let llty = self.layout_of(ty).gcc_type(self, true); + // FIXME: refactor this to work without accessing the HIR + let global = match self.tcx.hir().get(id) { + Node::Item(&hir::Item { + span, + kind: hir::ItemKind::Static(..), + .. + }) => { + if let Some(global) = self.get_declared_value(&sym) { + if self.val_ty(global) != self.type_ptr_to(llty) { + span_bug!(span, "Conflicting types for static"); } - - global } - Node::ForeignItem(&hir::ForeignItem { - span, - kind: hir::ForeignItemKind::Static(..), - .. - }) => { - let fn_attrs = self.tcx.codegen_fn_attrs(def_id); - check_and_apply_linkage(&self, &fn_attrs, ty, sym, span) + let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); + let global = self.declare_global( + &sym, + llty, + GlobalKind::Exported, + is_tls, + fn_attrs.link_section, + ); + + if !self.tcx.is_reachable_non_generic(def_id) { + // TODO(antoyo): set visibility. } - item => bug!("get_static: expected static, found {:?}", item), - }; + global + } - global - } - else { - // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? - //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id)); - - let attrs = self.tcx.codegen_fn_attrs(def_id); - let span = self.tcx.def_span(def_id); - let global = check_and_apply_linkage(&self, &attrs, ty, sym, span); - - let needs_dll_storage_attr = false; // TODO(antoyo) - - // If this assertion triggers, there's something wrong with commandline - // argument validation. - debug_assert!( - !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() - && self.tcx.sess.target.options.is_like_msvc - && self.tcx.sess.opts.cg.prefer_dynamic) - ); - - if needs_dll_storage_attr { - // This item is external but not foreign, i.e., it originates from an external Rust - // crate. Since we don't know whether this crate will be linked dynamically or - // statically in the final application, we always mark such symbols as 'dllimport'. - // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs - // to make things work. - // - // However, in some scenarios we defer emission of statics to downstream - // crates, so there are cases where a static with an upstream DefId - // is actually present in the current crate. We can find out via the - // is_codegened_item query. - if !self.tcx.is_codegened_item(def_id) { - unimplemented!(); - } + Node::ForeignItem(&hir::ForeignItem { + span, + kind: hir::ForeignItemKind::Static(..), + .. + }) => { + let fn_attrs = self.tcx.codegen_fn_attrs(def_id); + check_and_apply_linkage(&self, &fn_attrs, ty, sym, span) } - global + + item => bug!("get_static: expected static, found {:?}", item), }; + global + } else { + // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? 
+ //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id)); + + let attrs = self.tcx.codegen_fn_attrs(def_id); + let span = self.tcx.def_span(def_id); + let global = check_and_apply_linkage(&self, &attrs, ty, sym, span); + + let needs_dll_storage_attr = false; // TODO(antoyo) + + // If this assertion triggers, there's something wrong with commandline + // argument validation. + debug_assert!( + !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() + && self.tcx.sess.target.options.is_like_msvc + && self.tcx.sess.opts.cg.prefer_dynamic) + ); + + if needs_dll_storage_attr { + // This item is external but not foreign, i.e., it originates from an external Rust + // crate. Since we don't know whether this crate will be linked dynamically or + // statically in the final application, we always mark such symbols as 'dllimport'. + // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs + // to make things work. + // + // However, in some scenarios we defer emission of statics to downstream + // crates, so there are cases where a static with an upstream DefId + // is actually present in the current crate. We can find out via the + // is_codegened_item query. + if !self.tcx.is_codegened_item(def_id) { + unimplemented!(); + } + } + global + }; + // TODO(antoyo): set dll storage class. self.instances.borrow_mut().insert(instance, global); @@ -294,7 +312,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } -pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> { +pub fn const_alloc_to_gcc<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + alloc: ConstAllocation<'tcx>, +) -> RValue<'gcc> { let alloc = alloc.inner(); let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1); let dl = cx.data_layout(); @@ -315,21 +336,24 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset); llvals.push(cx.const_bytes(bytes)); } - let ptr_offset = - read_target_uint( dl.endian, - // This `inspect` is okay since it is within the bounds of the allocation, it doesn't - // affect interpreter execution (we inspect the result after interpreter execution), - // and we properly interpret the relocation as a relocation pointer offset. - alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)), - ) - .expect("const_alloc_to_llvm: could not read relocation pointer") + let ptr_offset = read_target_uint( + dl.endian, + // This `inspect` is okay since it is within the bounds of the allocation, it doesn't + // affect interpreter execution (we inspect the result after interpreter execution), + // and we properly interpret the relocation as a relocation pointer offset. 
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)), + ) + .expect("const_alloc_to_llvm: could not read relocation pointer") as u64; llvals.push(cx.scalar_to_backend( InterpScalar::from_pointer( interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)), &cx.tcx, ), - abi::Scalar::Initialized { value: Primitive::Pointer, valid_range: WrappingRange::full(dl.pointer_size) }, + abi::Scalar::Initialized { + value: Primitive::Pointer, + valid_range: WrappingRange::full(dl.pointer_size), + }, cx.type_i8p(), )); next_offset = offset + pointer_size; @@ -349,12 +373,21 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl cx.const_struct(&llvals, true) } -pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> { +pub fn codegen_static_initializer<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + def_id: DefId, +) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> { let alloc = cx.tcx.eval_static_initializer(def_id)?; Ok((const_alloc_to_gcc(cx, alloc), alloc)) } -fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> LValue<'gcc> { +fn check_and_apply_linkage<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + attrs: &CodegenFnAttrs, + ty: Ty<'tcx>, + sym: &str, + span: Span, +) -> LValue<'gcc> { let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); let llty = cx.layout_of(ty).gcc_type(cx, true); if let Some(linkage) = attrs.linkage { @@ -363,18 +396,17 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg // extern "C" fn() from being non-null, so we can't just declare a // static and call it a day. Some linkages (like weak) will make it such // that the static actually has a null value. - let llty2 = - if let ty::RawPtr(ref mt) = ty.kind() { - cx.layout_of(mt.ty).gcc_type(cx, true) - } - else { - cx.sess().span_fatal( - span, - "must have type `*const T` or `*mut T` due to `#[linkage]` attribute", - ) - }; + let llty2 = if let ty::RawPtr(ref mt) = ty.kind() { + cx.layout_of(mt.ty).gcc_type(cx, true) + } else { + cx.sess().span_fatal( + span, + "must have type `*const T` or `*mut T` due to `#[linkage]` attribute", + ) + }; // Declare a symbol `foo` with the desired linkage. - let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage)); + let global1 = + cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage)); // Declare an internal global `extern_with_linkage_foo` which // is initialized with the address of `foo`. If `foo` is @@ -389,8 +421,7 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg global2.global_set_initializer_rvalue(global1.get_address(None)); // TODO(antoyo): use global_set_initializer() when it will work. global2 - } - else { + } else { // Generate an external declaration. 
// FIXME(nagisa): investigate whether it can be changed into define_global diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs index 478f6d893dd0f..ac4de1054ba8c 100644 --- a/compiler/rustc_codegen_gcc/src/context.rs +++ b/compiler/rustc_codegen_gcc/src/context.rs @@ -1,20 +1,24 @@ use std::cell::{Cell, RefCell}; -use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Struct, Type}; -use rustc_codegen_ssa::base::wants_msvc_seh; -use rustc_codegen_ssa::traits::{ - BackendTypes, - MiscMethods, +use gccjit::{ + Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Struct, Type, }; +use rustc_codegen_ssa::base::wants_msvc_seh; +use rustc_codegen_ssa::traits::{BackendTypes, MiscMethods}; use rustc_data_structures::base_n; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_middle::span_bug; use rustc_middle::mir::mono::CodegenUnit; +use rustc_middle::span_bug; +use rustc_middle::ty::layout::{ + FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, + TyAndLayout, +}; use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt}; -use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers}; use rustc_session::Session; use rustc_span::Span; -use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx}; +use rustc_target::abi::{ + call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx, +}; use rustc_target::spec::{HasTargetSpec, Target, TlsModel}; use crate::callee::get_fn; @@ -85,7 +89,8 @@ pub struct CodegenCx<'gcc, 'tcx> { /// Cache function instances of monomorphic and polymorphic items pub function_instances: RefCell, RValue<'gcc>>>, /// Cache generated vtables - pub vtables: RefCell, Option>), RValue<'gcc>>>, + pub vtables: + RefCell, Option>), RValue<'gcc>>>, // TODO(antoyo): improve the SSA API to not require those. // Mapping from function pointer type to indexes of on stack parameters. 
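The `CodegenCx::new` hunk below reflows the 128-bit fallback: when gccjit reports no native 128-bit support, `i128` and `u128` are modeled as two-element arrays of 64-bit limbs. A minimal stand-alone sketch of that two-limb representation, with illustrative names rather than the backend's own types (limb order in the backend is target-dependent, as the endianness TODOs further down note):

    // Sketch: a u128 carried as two u64 limbs, analogous to the
    // [u64; 2] array type the backend falls back to.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct U128Limbs {
        low: u64,
        high: u64,
    }

    impl U128Limbs {
        fn from_u128(n: u128) -> Self {
            Self { low: n as u64, high: (n >> 64) as u64 }
        }

        fn to_u128(self) -> u128 {
            ((self.high as u128) << 64) | self.low as u128
        }
    }

    fn main() {
        let x = U128Limbs::from_u128(1u128 << 100);
        assert_eq!(x.to_u128(), 1u128 << 100); // survives the limb round trip
    }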
@@ -122,7 +127,12 @@ pub struct CodegenCx<'gcc, 'tcx> { } impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { - pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self { + pub fn new( + context: &'gcc Context<'gcc>, + codegen_unit: &'tcx CodegenUnit<'tcx>, + tcx: TyCtxt<'tcx>, + supports_128bit_integers: bool, + ) -> Self { let check_overflow = tcx.sess.overflow_checks(); let i8_type = context.new_c_type(CType::Int8t); @@ -134,17 +144,15 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let u32_type = context.new_c_type(CType::UInt32t); let u64_type = context.new_c_type(CType::UInt64t); - let (i128_type, u128_type) = - if supports_128bit_integers { - let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?; - let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?; - (i128_type, u128_type) - } - else { - let i128_type = context.new_array_type(None, i64_type, 2); - let u128_type = context.new_array_type(None, u64_type, 2); - (i128_type, u128_type) - }; + let (i128_type, u128_type) = if supports_128bit_integers { + let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?; + let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?; + (i128_type, u128_type) + } else { + let i128_type = context.new_array_type(None, i64_type, 2); + let u128_type = context.new_array_type(None, u64_type, 2); + (i128_type, u128_type) + }; let tls_model = to_gcc_tls_mode(tcx.sess.tls_model()); @@ -173,14 +181,64 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let mut functions = FxHashMap::default(); let builtins = [ - "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow", - "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/ - "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow", - "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow", - "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos", - "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf", - "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf", - "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round", + "__builtin_unreachable", + "abort", + "__builtin_expect", + "__builtin_add_overflow", + "__builtin_mul_overflow", + "__builtin_saddll_overflow", + /*"__builtin_sadd_overflow",*/ + "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/ + "__builtin_ssubll_overflow", + /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", + "__builtin_uaddll_overflow", + "__builtin_uadd_overflow", + "__builtin_umulll_overflow", + "__builtin_umul_overflow", + "__builtin_usubll_overflow", + "__builtin_usub_overflow", + "sqrtf", + "sqrt", + "__builtin_powif", + "__builtin_powi", + "sinf", + "sin", + "cosf", + "cos", + "powf", + "pow", + "expf", + "exp", + "exp2f", + "exp2", + "logf", + "log", + "log10f", + "log10", + "log2f", + "log2", + "fmaf", + "fma", + "fabsf", + "fabs", + "fminf", + "fmin", + "fmaxf", + "fmax", + "copysignf", + 
"copysign", + "floorf", + "floor", + "ceilf", + "ceil", + "truncf", + "trunc", + "rintf", + "rint", + "nearbyintf", + "nearbyint", + "roundf", + "round", "__builtin_expect_with_probability", ]; @@ -253,8 +311,16 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> { let function: Function<'gcc> = unsafe { std::mem::transmute(value) }; - debug_assert!(self.functions.borrow().values().find(|value| **value == function).is_some(), - "{:?} ({:?}) is not a function", value, value.get_type()); + debug_assert!( + self.functions + .borrow() + .values() + .find(|value| **value == function) + .is_some(), + "{:?} ({:?}) is not a function", + value, + value.get_type() + ); function } @@ -276,13 +342,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } - self.supports_128bit_integers && - (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ)) + self.supports_128bit_integers + && (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ)) } pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool { - !self.supports_128bit_integers && - (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ)) + !self.supports_128bit_integers + && (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ)) } pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool { @@ -290,18 +356,23 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool { - self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ.is_compatible_with(self.bool_type) + self.is_native_int_type(typ) + || self.is_non_native_int_type(typ) + || typ.is_compatible_with(self.bool_type) } pub fn sess(&self) -> &Session { &self.tcx.sess } - pub fn bitcast_if_needed(&self, value: RValue<'gcc>, expected_type: Type<'gcc>) -> RValue<'gcc> { + pub fn bitcast_if_needed( + &self, + value: RValue<'gcc>, + expected_type: Type<'gcc>, + ) -> RValue<'gcc> { if value.get_type() != expected_type { self.context.new_bitcast(None, value, expected_type) - } - else { + } else { value } } @@ -321,7 +392,9 @@ impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> { } impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { - fn vtables(&self) -> &RefCell, Option>), RValue<'gcc>>> { + fn vtables( + &self, + ) -> &RefCell, Option>), RValue<'gcc>>> { &self.vtables } @@ -334,14 +407,12 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> { let func_name = self.tcx.symbol_name(instance).name; - let func = - if self.intrinsics.borrow().contains_key(func_name) { - self.intrinsics.borrow()[func_name].clone() - } - else { - let func = get_fn(self, instance); - self.rvalue_as_function(func) - }; + let func = if self.intrinsics.borrow().contains_key(func_name) { + self.intrinsics.borrow()[func_name].clone() + } else { + let func = get_fn(self, instance); + self.rvalue_as_function(func) + }; let ptr = func.get_address(None); // TODO(antoyo): don't do this twice: i.e. in declare_fn and here. 
@@ -385,7 +456,8 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { def_id, tcx.intern_substs(&[]), ) - .unwrap().unwrap(), + .unwrap() + .unwrap(), ), _ => { let _name = if wants_msvc_seh(self.sess()) { @@ -435,8 +507,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn declare_c_main(&self, fn_type: Self::Type) -> Option { if self.get_declared_value("main").is_none() { Some(self.declare_cfn("main", fn_type)) - } - else { + } else { // If the symbol already exists, it is an error: for example, the user wrote // #[no_mangle] extern "C" fn main(..) {..} // instead of #[start] @@ -507,7 +578,10 @@ impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { err ); } - FnAbiRequest::OfInstance { instance, extra_args } => { + FnAbiRequest::OfInstance { + instance, + extra_args, + } => { span_bug!( span, "`fn_abi_of_instance({}, {:?})` failed: {}", diff --git a/compiler/rustc_codegen_gcc/src/coverageinfo.rs b/compiler/rustc_codegen_gcc/src/coverageinfo.rs index 872fc2472e223..9a48dc1926a7b 100644 --- a/compiler/rustc_codegen_gcc/src/coverageinfo.rs +++ b/compiler/rustc_codegen_gcc/src/coverageinfo.rs @@ -2,11 +2,7 @@ use gccjit::RValue; use rustc_codegen_ssa::traits::{CoverageInfoBuilderMethods, CoverageInfoMethods}; use rustc_hir::def_id::DefId; use rustc_middle::mir::coverage::{ - CodeRegion, - CounterValueReference, - ExpressionOperandId, - InjectedExpressionId, - Op, + CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op, }; use rustc_middle::ty::Instance; @@ -22,12 +18,25 @@ impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx unimplemented!(); } - fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool { + fn add_coverage_counter( + &mut self, + _instance: Instance<'tcx>, + _id: CounterValueReference, + _region: CodeRegion, + ) -> bool { // TODO(antoyo) false } - fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option) -> bool { + fn add_coverage_counter_expression( + &mut self, + _instance: Instance<'tcx>, + _id: InjectedExpressionId, + _lhs: ExpressionOperandId, + _op: Op, + _rhs: ExpressionOperandId, + _region: Option, + ) -> bool { // TODO(antoyo) false } diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs index 266759ed6cfa1..d7dc0ce76251f 100644 --- a/compiler/rustc_codegen_gcc/src/debuginfo.rs +++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs @@ -4,8 +4,8 @@ use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoMethods}; use rustc_middle::mir; use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty}; use rustc_span::{SourceFile, Span, Symbol}; -use rustc_target::abi::Size; use rustc_target::abi::call::FnAbi; +use rustc_target::abi::Size; use crate::builder::Builder; use crate::context::CodegenCx; @@ -13,7 +13,14 @@ use crate::context::CodegenCx; impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { // FIXME(eddyb) find a common convention for all of the debuginfo-related // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.). 
- fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) { + fn dbg_var_addr( + &mut self, + _dbg_var: Self::DIVariable, + _scope_metadata: Self::DIScope, + _variable_alloca: Self::Value, + _direct_offset: Size, + _indirect_offsets: &[Size], + ) { unimplemented!(); } @@ -31,16 +38,31 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { } impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { - fn create_vtable_debuginfo(&self, _ty: Ty<'tcx>, _trait_ref: Option>, _vtable: Self::Value) { + fn create_vtable_debuginfo( + &self, + _ty: Ty<'tcx>, + _trait_ref: Option>, + _vtable: Self::Value, + ) { // TODO(antoyo) } - fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option> { + fn create_function_debug_context( + &self, + _instance: Instance<'tcx>, + _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + _llfn: RValue<'gcc>, + _mir: &mir::Body<'tcx>, + ) -> Option> { // TODO(antoyo) None } - fn extend_scope_to_file(&self, _scope_metadata: Self::DIScope, _file: &SourceFile) -> Self::DIScope { + fn extend_scope_to_file( + &self, + _scope_metadata: Self::DIScope, + _file: &SourceFile, + ) -> Self::DIScope { unimplemented!(); } @@ -48,15 +70,32 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { // TODO(antoyo) } - fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable { + fn create_dbg_var( + &self, + _variable_name: Symbol, + _variable_type: Ty<'tcx>, + _scope_metadata: Self::DIScope, + _variable_kind: VariableKind, + _span: Span, + ) -> Self::DIVariable { unimplemented!(); } - fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option>) -> Self::DIScope { + fn dbg_scope_fn( + &self, + _instance: Instance<'tcx>, + _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + _maybe_definition_llfn: Option>, + ) -> Self::DIScope { unimplemented!(); } - fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option, _span: Span) -> Self::DILocation { + fn dbg_loc( + &self, + _scope: Self::DIScope, + _inlined_at: Option, + _span: Span, + ) -> Self::DILocation { unimplemented!(); } } diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs index a619e2f771252..a6923aed1992d 100644 --- a/compiler/rustc_codegen_gcc/src/declare.rs +++ b/compiler/rustc_codegen_gcc/src/declare.rs @@ -9,10 +9,18 @@ use crate::context::CodegenCx; use crate::intrinsic::llvm; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { - pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> LValue<'gcc> { + pub fn get_or_insert_global( + &self, + name: &str, + ty: Type<'gcc>, + is_tls: bool, + link_section: Option, + ) -> LValue<'gcc> { if self.globals.borrow().contains_key(name) { let typ = self.globals.borrow()[name].get_type(); - let global = self.context.new_global(None, GlobalKind::Imported, typ, name); + let global = self + .context + .new_global(None, GlobalKind::Imported, typ, name); if is_tls { global.set_tls_model(self.tls_model); } @@ -20,21 +28,28 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { global.set_link_section(link_section.as_str()); } global - } - else { + } else { self.declare_global(name, ty, GlobalKind::Exported, is_tls, link_section) } } pub fn 
declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> { let name = self.generate_local_symbol_name("global"); - self.context.new_global(None, GlobalKind::Internal, ty, &name) + self.context + .new_global(None, GlobalKind::Internal, ty, &name) } - pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> { + pub fn declare_global_with_linkage( + &self, + name: &str, + ty: Type<'gcc>, + linkage: GlobalKind, + ) -> LValue<'gcc> { let global = self.context.new_global(None, linkage, ty, name); let global_address = global.get_address(None); - self.globals.borrow_mut().insert(name.to_string(), global_address); + self.globals + .borrow_mut() + .insert(name.to_string(), global_address); global } @@ -45,7 +60,14 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { unsafe { std::mem::transmute(func) } }*/ - pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option) -> LValue<'gcc> { + pub fn declare_global( + &self, + name: &str, + ty: Type<'gcc>, + global_kind: GlobalKind, + is_tls: bool, + link_section: Option, + ) -> LValue<'gcc> { let global = self.context.new_global(None, global_kind, ty, name); if is_tls { global.set_tls_model(self.tls_model); @@ -54,14 +76,20 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { global.set_link_section(link_section.as_str()); } let global_address = global.get_address(None); - self.globals.borrow_mut().insert(name.to_string(), global_address); + self.globals + .borrow_mut() + .insert(name.to_string(), global_address); global } pub fn declare_private_global(&self, name: &str, ty: Type<'gcc>) -> LValue<'gcc> { - let global = self.context.new_global(None, GlobalKind::Internal, ty, name); + let global = self + .context + .new_global(None, GlobalKind::Internal, ty, name); let global_address = global.get_address(None); - self.globals.borrow_mut().insert(name.to_string(), global_address); + self.globals + .borrow_mut() + .insert(name.to_string(), global_address); global } @@ -71,7 +99,14 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let return_type = self.type_i32(); let variadic = false; self.linkage.set(FunctionType::Exported); - let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, &[self.type_i32(), const_string], variadic); + let func = declare_raw_fn( + self, + name, + (), /*llvm::CCallConv*/ + return_type, + &[self.type_i32(), const_string], + variadic, + ); // NOTE: it is needed to set the current_func here as well, because get_fn() is not called // for the main function. *self.current_func.borrow_mut() = Some(func); @@ -81,13 +116,28 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> { let (return_type, params, variadic, on_stack_param_indices) = fn_abi.gcc_type(self); - let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, ¶ms, variadic); - self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices); + let func = declare_raw_fn( + self, + name, + (), /*fn_abi.llvm_cconv()*/ + return_type, + ¶ms, + variadic, + ); + self.on_stack_function_params + .borrow_mut() + .insert(func, on_stack_param_indices); // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API. 
unsafe { std::mem::transmute(func) } } - pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> LValue<'gcc> { + pub fn define_global( + &self, + name: &str, + ty: Type<'gcc>, + is_tls: bool, + link_section: Option, + ) -> LValue<'gcc> { self.get_or_insert_global(name, ty, is_tls, link_section) } @@ -101,24 +151,43 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. -fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> { +fn declare_raw_fn<'gcc>( + cx: &CodegenCx<'gcc, '_>, + name: &str, + _callconv: (), /*llvm::CallConv*/ + return_type: Type<'gcc>, + param_types: &[Type<'gcc>], + variadic: bool, +) -> Function<'gcc> { if name.starts_with("llvm.") { let intrinsic = llvm::intrinsic(name, cx); - cx.intrinsics.borrow_mut().insert(name.to_string(), intrinsic); + cx.intrinsics + .borrow_mut() + .insert(name.to_string(), intrinsic); return intrinsic; } - let func = - if cx.functions.borrow().contains_key(name) { - cx.functions.borrow()[name] - } - else { - let params: Vec<_> = param_types.into_iter().enumerate() - .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name. - .collect(); - let func = cx.context.new_function(None, cx.linkage.get(), return_type, ¶ms, mangle_name(name), variadic); - cx.functions.borrow_mut().insert(name.to_string(), func); - func - }; + let func = if cx.functions.borrow().contains_key(name) { + cx.functions.borrow()[name] + } else { + let params: Vec<_> = param_types + .into_iter() + .enumerate() + .map(|(index, param)| { + cx.context + .new_parameter(None, *param, &format!("param{}", index)) + }) // TODO(antoyo): set name. + .collect(); + let func = cx.context.new_function( + None, + cx.linkage.get(), + return_type, + ¶ms, + mangle_name(name), + variadic, + ); + cx.functions.borrow_mut().insert(name.to_string(), func); + func + }; // TODO(antoyo): set function calling convention. // TODO(antoyo): set unnamed address. @@ -133,13 +202,19 @@ fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*ll // FIXME(antoyo): this is a hack because libgccjit currently only supports alpha, num and _. // Unsupported characters: `$` and `.`. 
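The reflowed `mangle_name` just below replaces every character that is neither alphanumeric nor `_` with `_`, debug-asserting that only `$` and `.` ever occur. Stripped of that assertion, the same rule and a hypothetical use:

    // Same replacement rule as mangle_name, minus the debug assertion.
    fn mangle_name(name: &str) -> String {
        name.replace(|c: char| !c.is_alphanumeric() && c != '_', "_")
    }

    fn main() {
        // Dotted LLVM-style intrinsic names are the typical offenders.
        assert_eq!(mangle_name("llvm.ctpop.i8"), "llvm_ctpop_i8");
        assert_eq!(mangle_name("plain_name"), "plain_name");
    }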
pub fn mangle_name(name: &str) -> String { - name.replace(|char: char| { - if !char.is_alphanumeric() && char != '_' { - debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char); - true - } - else { - false - } - }, "_") + name.replace( + |char: char| { + if !char.is_alphanumeric() && char != '_' { + debug_assert!( + "$.".contains(char), + "Unsupported char in function name: {}", + char + ); + true + } else { + false + } + }, + "_", + ) } diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs index 0c5dab0046684..b8873aa8ad3b0 100644 --- a/compiler/rustc_codegen_gcc/src/int.rs +++ b/compiler/rustc_codegen_gcc/src/int.rs @@ -4,13 +4,17 @@ use std::convert::TryFrom; -use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp}; +use gccjit::{BinaryOp, ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp}; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp}; use rustc_middle::ty::Ty; use crate::builder::ToGccComp; -use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx}; +use crate::{ + builder::Builder, + common::{SignType, TypeReflection}, + context::CodegenCx, +}; impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { @@ -26,21 +30,28 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> { let typ = a.get_type(); if self.is_native_int_type_or_bool(typ) { - let operation = - if typ.is_bool() { - UnaryOp::LogicalNegate - } - else { - UnaryOp::BitwiseNegate - }; + let operation = if typ.is_bool() { + UnaryOp::LogicalNegate + } else { + UnaryOp::BitwiseNegate + }; self.cx.context.new_unary_op(None, operation, typ, a) - } - else { + } else { // TODO(antoyo): use __negdi2 and __negti2 instead? 
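The branch continuing below negates a non-native 128-bit value limb by limb; bitwise NOT involves no carries, so it distributes cleanly over the two 64-bit halves. The identity, checked on native integers:

    fn main() {
        let n: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
        let (low, high) = (n as u64, (n >> 64) as u64);
        // Bitwise NOT acts independently on each 64-bit limb.
        let recombined = ((!high as u128) << 64) | !low as u128;
        assert_eq!(recombined, !n);
    }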
let element_type = typ.dyncast_array().expect("element type"); let values = [ - self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)), - self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)), + self.cx.context.new_unary_op( + None, + UnaryOp::BitwiseNegate, + element_type, + self.low(a), + ), + self.cx.context.new_unary_op( + None, + UnaryOp::BitwiseNegate, + element_type, + self.high(a), + ), ]; self.cx.context.new_array_constructor(None, typ, &values) } @@ -49,11 +60,19 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> { let a_type = a.get_type(); if self.is_native_int_type(a_type) { - self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a) - } - else { + self.cx + .context + .new_unary_op(None, UnaryOp::Minus, a.get_type(), a) + } else { let param_a = self.context.new_parameter(None, a_type, "a"); - let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false); + let func = self.context.new_function( + None, + FunctionType::Extern, + a_type, + &[param_a], + "__negti2", + false, + ); self.context.new_call(None, func, &[a]) } } @@ -73,15 +92,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { if a_type.is_signed(self) != b_type.is_signed(self) { let b = self.context.new_cast(None, b, a_type); a >> b - } - else { + } else { a >> b } - } - else if a_native && !b_native { + } else if a_native && !b_native { self.gcc_lshr(a, self.gcc_int_cast(b, a_type)) - } - else { + } else { // NOTE: we cannot use the lshr builtin because it's calling hi() (to get the most // significant half of the number) which uses lshr. @@ -101,22 +117,18 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let zero = self.gcc_zero(native_int_type); let b = self.gcc_int_cast(b, native_int_type); let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero); - self.llbb().end_with_conditional(None, condition, then_block, else_block); + self.llbb() + .end_with_conditional(None, condition, then_block, else_block); // TODO(antoyo): take endianness into account. 
let shift_value = self.gcc_sub(b, sixty_four); let high = self.high(a); - let sign = - if a_type.is_signed(self) { - high >> sixty_three - } - else { - zero - }; - let values = [ - high >> shift_value, - sign, - ]; + let sign = if a_type.is_signed(self) { + high >> sixty_three + } else { + zero + }; + let values = [high >> shift_value, sign]; let array_value = self.context.new_array_constructor(None, a_type, &values); then_block.add_assignment(None, result, array_value); then_block.end_with_jump(None, after_block); @@ -133,10 +145,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let casted_low = self.context.new_cast(None, self.low(a), unsigned_type); let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type); let shifted_low = self.context.new_cast(None, shifted_low, native_int_type); - let values = [ - (high << shift_value) | shifted_low, - high >> b, - ]; + let values = [(high << shift_value) | shifted_low, high >> b]; let array_value = self.context.new_array_constructor(None, a_type, &values); actual_else_block.add_assignment(None, result, array_value); actual_else_block.end_with_jump(None, after_block); @@ -149,7 +158,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } } - fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> { + fn additive_operation( + &self, + operation: BinaryOp, + a: RValue<'gcc>, + mut b: RValue<'gcc>, + ) -> RValue<'gcc> { let a_type = a.get_type(); let b_type = b.get_type(); if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) { @@ -158,26 +172,30 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // Vector types need to be bitcast. // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. b = self.context.new_bitcast(None, b, a.get_type()); - } - else { + } else { b = self.context.new_cast(None, b, a.get_type()); } } self.context.new_binary_op(None, operation, a_type, a, b) - } - else { + } else { let signed = a_type.is_compatible_with(self.i128_type); - let func_name = - match (operation, signed) { - (BinaryOp::Plus, true) => "__rust_i128_add", - (BinaryOp::Plus, false) => "__rust_u128_add", - (BinaryOp::Minus, true) => "__rust_i128_sub", - (BinaryOp::Minus, false) => "__rust_u128_sub", - _ => unreachable!("unexpected additive operation {:?}", operation), - }; + let func_name = match (operation, signed) { + (BinaryOp::Plus, true) => "__rust_i128_add", + (BinaryOp::Plus, false) => "__rust_u128_add", + (BinaryOp::Minus, true) => "__rust_i128_sub", + (BinaryOp::Minus, false) => "__rust_u128_sub", + _ => unreachable!("unexpected additive operation {:?}", operation), + }; let param_a = self.context.new_parameter(None, a_type, "a"); let param_b = self.context.new_parameter(None, b_type, "b"); - let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false); + let func = self.context.new_function( + None, + FunctionType::Extern, + a_type, + &[param_a, param_b], + func_name, + false, + ); self.context.new_call(None, func, &[a, b]) } } @@ -194,24 +212,31 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { self.additive_operation(BinaryOp::Minus, a, b) } - fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + fn multiplicative_operation( + &self, + operation: BinaryOp, + operation_name: &str, + signed: bool, + a: RValue<'gcc>, + b: RValue<'gcc>, + ) -> RValue<'gcc> { let a_type = a.get_type(); let b_type = 
b.get_type(); if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) { self.context.new_binary_op(None, operation, a_type, a, b) - } - else { - let sign = - if signed { - "" - } - else { - "u" - }; + } else { + let sign = if signed { "" } else { "u" }; let func_name = format!("__{}{}ti3", sign, operation_name); let param_a = self.context.new_parameter(None, a_type, "a"); let param_b = self.context.new_parameter(None, b_type, "b"); - let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false); + let func = self.context.new_function( + None, + FunctionType::Extern, + a_type, + &[param_a, param_b], + func_name, + false, + ); self.context.new_call(None, func, &[a, b]) } } @@ -228,124 +253,129 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b) } - pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: ::Value, rhs: ::Value) -> (::Value, ::Value) { + pub fn gcc_checked_binop( + &self, + oop: OverflowOp, + typ: Ty<'_>, + lhs: ::Value, + rhs: ::Value, + ) -> (::Value, ::Value) { use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*}; - let new_kind = - match typ.kind() { - Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)), - Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)), - t @ (Uint(_) | Int(_)) => t.clone(), - _ => panic!("tried to get overflow intrinsic for op applied to non-int type"), - }; + let new_kind = match typ.kind() { + Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)), + Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)), + t @ (Uint(_) | Int(_)) => t.clone(), + _ => panic!("tried to get overflow intrinsic for op applied to non-int type"), + }; // TODO(antoyo): remove duplication with intrinsic? 
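The restructured table that follows picks one of GCC's `__builtin_*_overflow` entry points per operation, width, and signedness; each such builtin produces the wrapped result together with a flag reporting whether the value wrapped. As an illustration, the same `(result, overflow)` contract expressed in plain Rust:

    // Mirrors the observable contract of __builtin_sadd_overflow and
    // friends: the wrapping result plus an overflow flag.
    fn add_overflow_i32(a: i32, b: i32) -> (i32, bool) {
        a.overflowing_add(b)
    }

    fn main() {
        assert_eq!(add_overflow_i32(1, 2), (3, false));
        assert_eq!(add_overflow_i32(i32::MAX, 1), (i32::MIN, true)); // wraps, and says so
    }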
- let name = - if self.is_native_int_type(lhs.get_type()) { - match oop { - OverflowOp::Add => - match new_kind { - Int(I8) => "__builtin_add_overflow", - Int(I16) => "__builtin_add_overflow", - Int(I32) => "__builtin_sadd_overflow", - Int(I64) => "__builtin_saddll_overflow", - Int(I128) => "__builtin_add_overflow", - - Uint(U8) => "__builtin_add_overflow", - Uint(U16) => "__builtin_add_overflow", - Uint(U32) => "__builtin_uadd_overflow", - Uint(U64) => "__builtin_uaddll_overflow", - Uint(U128) => "__builtin_add_overflow", - + let name = if self.is_native_int_type(lhs.get_type()) { + match oop { + OverflowOp::Add => match new_kind { + Int(I8) => "__builtin_add_overflow", + Int(I16) => "__builtin_add_overflow", + Int(I32) => "__builtin_sadd_overflow", + Int(I64) => "__builtin_saddll_overflow", + Int(I128) => "__builtin_add_overflow", + + Uint(U8) => "__builtin_add_overflow", + Uint(U16) => "__builtin_add_overflow", + Uint(U32) => "__builtin_uadd_overflow", + Uint(U64) => "__builtin_uaddll_overflow", + Uint(U128) => "__builtin_add_overflow", + + _ => unreachable!(), + }, + OverflowOp::Sub => match new_kind { + Int(I8) => "__builtin_sub_overflow", + Int(I16) => "__builtin_sub_overflow", + Int(I32) => "__builtin_ssub_overflow", + Int(I64) => "__builtin_ssubll_overflow", + Int(I128) => "__builtin_sub_overflow", + + Uint(U8) => "__builtin_sub_overflow", + Uint(U16) => "__builtin_sub_overflow", + Uint(U32) => "__builtin_usub_overflow", + Uint(U64) => "__builtin_usubll_overflow", + Uint(U128) => "__builtin_sub_overflow", + + _ => unreachable!(), + }, + OverflowOp::Mul => match new_kind { + Int(I8) => "__builtin_mul_overflow", + Int(I16) => "__builtin_mul_overflow", + Int(I32) => "__builtin_smul_overflow", + Int(I64) => "__builtin_smulll_overflow", + Int(I128) => "__builtin_mul_overflow", + + Uint(U8) => "__builtin_mul_overflow", + Uint(U16) => "__builtin_mul_overflow", + Uint(U32) => "__builtin_umul_overflow", + Uint(U64) => "__builtin_umulll_overflow", + Uint(U128) => "__builtin_mul_overflow", + + _ => unreachable!(), + }, + } + } else { + match new_kind { + Int(I128) | Uint(U128) => { + let func_name = match oop { + OverflowOp::Add => match new_kind { + Int(I128) => "__rust_i128_addo", + Uint(U128) => "__rust_u128_addo", _ => unreachable!(), }, - OverflowOp::Sub => - match new_kind { - Int(I8) => "__builtin_sub_overflow", - Int(I16) => "__builtin_sub_overflow", - Int(I32) => "__builtin_ssub_overflow", - Int(I64) => "__builtin_ssubll_overflow", - Int(I128) => "__builtin_sub_overflow", - - Uint(U8) => "__builtin_sub_overflow", - Uint(U16) => "__builtin_sub_overflow", - Uint(U32) => "__builtin_usub_overflow", - Uint(U64) => "__builtin_usubll_overflow", - Uint(U128) => "__builtin_sub_overflow", - + OverflowOp::Sub => match new_kind { + Int(I128) => "__rust_i128_subo", + Uint(U128) => "__rust_u128_subo", _ => unreachable!(), }, - OverflowOp::Mul => - match new_kind { - Int(I8) => "__builtin_mul_overflow", - Int(I16) => "__builtin_mul_overflow", - Int(I32) => "__builtin_smul_overflow", - Int(I64) => "__builtin_smulll_overflow", - Int(I128) => "__builtin_mul_overflow", - - Uint(U8) => "__builtin_mul_overflow", - Uint(U16) => "__builtin_mul_overflow", - Uint(U32) => "__builtin_umul_overflow", - Uint(U64) => "__builtin_umulll_overflow", - Uint(U128) => "__builtin_mul_overflow", - + OverflowOp::Mul => match new_kind { + Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead? 
+ Uint(U128) => "__rust_u128_mulo", _ => unreachable!(), }, + }; + let a_type = lhs.get_type(); + let b_type = rhs.get_type(); + let param_a = self.context.new_parameter(None, a_type, "a"); + let param_b = self.context.new_parameter(None, b_type, "b"); + let result_field = self.context.new_field(None, a_type, "result"); + let overflow_field = self.context.new_field(None, self.bool_type, "overflow"); + let return_type = self.context.new_struct_type( + None, + "result_overflow", + &[result_field, overflow_field], + ); + let func = self.context.new_function( + None, + FunctionType::Extern, + return_type.as_type(), + &[param_a, param_b], + func_name, + false, + ); + let result = self.context.new_call(None, func, &[lhs, rhs]); + let overflow = result.access_field(None, overflow_field); + let int_result = result.access_field(None, result_field); + return (int_result, overflow); } - } - else { - match new_kind { - Int(I128) | Uint(U128) => { - let func_name = - match oop { - OverflowOp::Add => - match new_kind { - Int(I128) => "__rust_i128_addo", - Uint(U128) => "__rust_u128_addo", - _ => unreachable!(), - }, - OverflowOp::Sub => - match new_kind { - Int(I128) => "__rust_i128_subo", - Uint(U128) => "__rust_u128_subo", - _ => unreachable!(), - }, - OverflowOp::Mul => - match new_kind { - Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead? - Uint(U128) => "__rust_u128_mulo", - _ => unreachable!(), - }, - }; - let a_type = lhs.get_type(); - let b_type = rhs.get_type(); - let param_a = self.context.new_parameter(None, a_type, "a"); - let param_b = self.context.new_parameter(None, b_type, "b"); - let result_field = self.context.new_field(None, a_type, "result"); - let overflow_field = self.context.new_field(None, self.bool_type, "overflow"); - let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]); - let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false); - let result = self.context.new_call(None, func, &[lhs, rhs]); - let overflow = result.access_field(None, overflow_field); - let int_result = result.access_field(None, result_field); - return (int_result, overflow); + _ => match oop { + OverflowOp::Mul => match new_kind { + Int(I32) => "__mulosi4", + Int(I64) => "__mulodi4", + _ => unreachable!(), }, - _ => { - match oop { - OverflowOp::Mul => - match new_kind { - Int(I32) => "__mulosi4", - Int(I64) => "__mulodi4", - _ => unreachable!(), - }, - _ => unimplemented!("overflow operation for {:?}", new_kind), - } - } - } - }; + _ => unimplemented!("overflow operation for {:?}", new_kind), + }, + } + }; let intrinsic = self.context.get_builtin_function(&name); - let res = self.current_func() + let res = self + .current_func() // TODO(antoyo): is it correct to use rhs type instead of the parameter typ? 
.new_local(None, rhs.get_type(), "binopResult") .get_address(None); @@ -353,57 +383,83 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { (res.dereference(None).to_rvalue(), overflow) } - pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> { + pub fn gcc_icmp( + &self, + op: IntPredicate, + mut lhs: RValue<'gcc>, + mut rhs: RValue<'gcc>, + ) -> RValue<'gcc> { let a_type = lhs.get_type(); let b_type = rhs.get_type(); if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) { let signed = a_type.is_compatible_with(self.i128_type); - let sign = - if signed { - "" - } - else { - "u" - }; + let sign = if signed { "" } else { "u" }; let func_name = format!("__{}cmpti2", sign); let param_a = self.context.new_parameter(None, a_type, "a"); let param_b = self.context.new_parameter(None, b_type, "b"); - let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false); + let func = self.context.new_function( + None, + FunctionType::Extern, + self.int_type, + &[param_a, param_b], + func_name, + false, + ); let cmp = self.context.new_call(None, func, &[lhs, rhs]); - let (op, limit) = - match op { - IntPredicate::IntEQ => { - return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type)); - }, - IntPredicate::IntNE => { - return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type)); - }, - IntPredicate::IntUGT => (ComparisonOp::Equals, 2), - IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1), - IntPredicate::IntULT => (ComparisonOp::Equals, 0), - IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1), - IntPredicate::IntSGT => (ComparisonOp::Equals, 2), - IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1), - IntPredicate::IntSLT => (ComparisonOp::Equals, 0), - IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1), - }; - self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit)) - } - else { + let (op, limit) = match op { + IntPredicate::IntEQ => { + return self.context.new_comparison( + None, + ComparisonOp::Equals, + cmp, + self.context.new_rvalue_one(self.int_type), + ); + } + IntPredicate::IntNE => { + return self.context.new_comparison( + None, + ComparisonOp::NotEquals, + cmp, + self.context.new_rvalue_one(self.int_type), + ); + } + IntPredicate::IntUGT => (ComparisonOp::Equals, 2), + IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1), + IntPredicate::IntULT => (ComparisonOp::Equals, 0), + IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1), + IntPredicate::IntSGT => (ComparisonOp::Equals, 2), + IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1), + IntPredicate::IntSLT => (ComparisonOp::Equals, 0), + IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1), + }; + self.context.new_comparison( + None, + op, + cmp, + self.context.new_rvalue_from_int(self.int_type, limit), + ) + } else { let left_type = lhs.get_type(); let right_type = rhs.get_type(); if left_type != right_type { // NOTE: because libgccjit cannot compare function pointers. 
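The lines that follow implement the workaround for that limitation: both operands are first cast to a common pointer type, and the comparison is emitted on the casts. The analogous move in plain Rust is to compare addresses rather than typed function pointers, illustratively:

    fn callback() {}

    fn main() {
        let p: fn() = callback;
        let q: fn() = callback;
        // Reduce both sides to a common integral type before comparing,
        // much as the hunk casts both operands to a shared pointer type.
        assert_eq!(p as usize, q as usize);
    }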
- if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() { - lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer()); - rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer()); + if left_type.dyncast_function_ptr_type().is_some() + && right_type.dyncast_function_ptr_type().is_some() + { + lhs = self + .context + .new_cast(None, lhs, self.usize_type.make_pointer()); + rhs = self + .context + .new_cast(None, rhs, self.usize_type.make_pointer()); } // NOTE: hack because we try to cast a vector type to the same vector type. else if format!("{:?}", left_type) != format!("{:?}", right_type) { rhs = self.context.new_cast(None, rhs, left_type); } } - self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) + self.context + .new_comparison(None, op.to_gcc_comparison(), lhs, rhs) } } @@ -412,12 +468,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let b_type = b.get_type(); if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) { a ^ b - } - else { - let values = [ - self.low(a) ^ self.low(b), - self.high(a) ^ self.high(b), - ]; + } else { + let values = [self.low(a) ^ self.low(b), self.high(a) ^ self.high(b)]; self.context.new_array_constructor(None, a_type, &values) } } @@ -433,19 +485,15 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let a = self.context.new_cast(None, a, b_type); let result = a << b; self.context.new_cast(None, result, a_type) - } - else if a_type.is_signed(self) && b_type.is_unsigned(self) { + } else if a_type.is_signed(self) && b_type.is_unsigned(self) { let b = self.context.new_cast(None, b, a_type); a << b - } - else { + } else { a << b } - } - else if a_native && !b_native { + } else if a_native && !b_native { self.gcc_shl(a, self.gcc_int_cast(b, a_type)) - } - else { + } else { // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl. let native_int_type = a_type.dyncast_array().expect("get element type"); @@ -462,13 +510,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let sixty_four = self.gcc_int(native_int_type, 64); let zero = self.gcc_zero(native_int_type); let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero); - self.llbb().end_with_conditional(None, condition, then_block, else_block); + self.llbb() + .end_with_conditional(None, condition, then_block, else_block); // TODO(antoyo): take endianness into account. 
- let values = [ - zero, - self.low(a) << (b - sixty_four), - ]; + let values = [zero, self.low(a) << (b - sixty_four)]; let array_value = self.context.new_array_constructor(None, a_type, &values); then_block.add_assignment(None, result, array_value); then_block.end_with_jump(None, after_block); @@ -483,11 +529,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let unsigned_type = native_int_type.to_unsigned(&self.cx); let casted_low = self.context.new_cast(None, self.low(a), unsigned_type); let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type); - let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type); - let values = [ - self.low(a) << b, - (self.high(a) << b) | high_low, - ]; + let high_low = self + .context + .new_cast(None, casted_low >> shift_value, native_int_type); + let values = [self.low(a) << b, (self.high(a) << b) | high_low]; let array_value = self.context.new_array_constructor(None, a_type, &values); actual_else_block.add_assignment(None, result, array_value); @@ -505,21 +550,40 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let arg_type = arg.get_type(); if !self.is_native_int_type(arg_type) { let native_int_type = arg_type.dyncast_array().expect("get element type"); - let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue(); + let lsb = self + .context + .new_array_access( + None, + arg, + self.context.new_rvalue_from_int(self.int_type, 0), + ) + .to_rvalue(); let swapped_lsb = self.gcc_bswap(lsb, width / 2); let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type); - let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue(); + let msb = self + .context + .new_array_access( + None, + arg, + self.context.new_rvalue_from_int(self.int_type, 1), + ) + .to_rvalue(); let swapped_msb = self.gcc_bswap(msb, width / 2); let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type); // NOTE: we also need to swap the two elements here, in addition to swapping inside // the elements themselves like done above. - return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]); + return self + .context + .new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]); } // TODO(antoyo): check if it's faster to use string literals and a // match instead of format!. - let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width)); + let bswap = self + .cx + .context + .get_builtin_function(&format!("__builtin_bswap{}", width)); // FIXME(antoyo): this cast should not be necessary. Remove // when having proper sized integer types. let param_type = bswap.get_param(0).to_rvalue().get_type(); @@ -533,9 +597,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> { if self.is_native_int_type_or_bool(typ) { - self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from")) - } - else { + self.context + .new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from")) + } else { // NOTE: set the sign in high. 
self.from_low_high(typ, int, -(int.is_negative() as i64)) } @@ -543,9 +607,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> { if self.is_native_int_type_or_bool(typ) { - self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64) - } - else { + self.context + .new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64) + } else { self.from_low_high(typ, int as i64, 0) } } @@ -562,16 +626,15 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let sixty_four = self.context.new_rvalue_from_long(typ, 64); let shift = high << sixty_four; shift | self.context.new_cast(None, low, typ) - } - else { + } else { self.from_low_high(typ, low as i64, high as i64) } - } - else if typ.is_i128(self) { - let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64); + } else if typ.is_i128(self) { + let num = self + .context + .new_rvalue_from_long(self.u64_type, num as u64 as i64); self.gcc_int_cast(num, typ) - } - else { + } else { self.gcc_uint(typ, num as u64) } } @@ -579,8 +642,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> { if self.is_native_int_type_or_bool(typ) { self.context.new_rvalue_zero(typ) - } - else { + } else { self.from_low_high(typ, 0, 0) } } @@ -588,33 +650,50 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 { if self.is_native_int_type_or_bool(typ) { typ.get_size() as u64 * 8 - } - else { + } else { // NOTE: the only unsupported types are u128 and i128. 128 } } - fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> { + fn bitwise_operation( + &self, + operation: BinaryOp, + a: RValue<'gcc>, + mut b: RValue<'gcc>, + ) -> RValue<'gcc> { let a_type = a.get_type(); let b_type = b.get_type(); let a_native = self.is_native_int_type_or_bool(a_type); let b_native = self.is_native_int_type_or_bool(b_type); if a_type.is_vector() && b_type.is_vector() { self.context.new_binary_op(None, operation, a_type, a, b) - } - else if a_native && b_native { + } else if a_native && b_native { if a_type != b_type { b = self.context.new_cast(None, b, a_type); } self.context.new_binary_op(None, operation, a_type, a, b) - } - else { - assert!(!a_native && !b_native, "both types should either be native or non-native for or operation"); + } else { + assert!( + !a_native && !b_native, + "both types should either be native or non-native for or operation" + ); let native_int_type = a_type.dyncast_array().expect("get element type"); let values = [ - self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)), - self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)), + self.context.new_binary_op( + None, + operation, + native_int_type, + self.low(a), + self.low(b), + ), + self.context.new_binary_op( + None, + operation, + native_int_type, + self.high(a), + self.high(b), + ), ]; self.context.new_array_constructor(None, a_type, &values) } @@ -627,26 +706,27 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead? 
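// Illustrative sketch (not part of this patch): the non-native u128/i128
// representation used throughout is a two-element [low, high] array of 64-bit
// words. A round-trip of the split performed above, with hypothetical names:
fn split_u128_sketch(num: u128) -> (u64, u64) {
    (num as u64, (num >> 64) as u64) // (low, high)
}
fn join_u128_sketch(low: u64, high: u64) -> u128 {
    ((high as u128) << 64) | low as u128
}
// join_u128_sketch of split_u128_sketch is the identity for every value.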
pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { let value_type = value.get_type(); - if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) { + if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) + { self.context.new_cast(None, value, dest_typ) - } - else if self.is_native_int_type_or_bool(dest_typ) { + } else if self.is_native_int_type_or_bool(dest_typ) { self.context.new_cast(None, self.low(value), dest_typ) - } - else if self.is_native_int_type_or_bool(value_type) { + } else if self.is_native_int_type_or_bool(value_type) { let dest_element_type = dest_typ.dyncast_array().expect("get element type"); // NOTE: set the sign of the value. let zero = self.context.new_rvalue_zero(value_type); - let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero); + let is_negative = + self.context + .new_comparison(None, ComparisonOp::LessThan, value, zero); let is_negative = self.gcc_int_cast(is_negative, dest_element_type); let values = [ self.context.new_cast(None, value, dest_element_type), - self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative), + self.context + .new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative), ]; self.context.new_array_constructor(None, dest_typ, &values) - } - else { + } else { // Since u128 and i128 are the only types that can be unsupported, we know the type of // value and the destination type have the same size, so a bitcast is fine. @@ -655,28 +735,33 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } - fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { + fn int_to_float_cast( + &self, + signed: bool, + value: RValue<'gcc>, + dest_typ: Type<'gcc>, + ) -> RValue<'gcc> { let value_type = value.get_type(); if self.is_native_int_type_or_bool(value_type) { return self.context.new_cast(None, value, dest_typ); } - let name_suffix = - match self.type_kind(dest_typ) { - TypeKind::Float => "tisf", - TypeKind::Double => "tidf", - kind => panic!("cannot cast a non-native integer to type {:?}", kind), - }; - let sign = - if signed { - "" - } - else { - "un" - }; + let name_suffix = match self.type_kind(dest_typ) { + TypeKind::Float => "tisf", + TypeKind::Double => "tidf", + kind => panic!("cannot cast a non-native integer to type {:?}", kind), + }; + let sign = if signed { "" } else { "un" }; let func_name = format!("__float{}{}", sign, name_suffix); let param = self.context.new_parameter(None, value_type, "n"); - let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false); + let func = self.context.new_function( + None, + FunctionType::Extern, + dest_typ, + &[param], + func_name, + false, + ); self.context.new_call(None, func, &[value]) } @@ -684,32 +769,41 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { self.int_to_float_cast(true, value, dest_typ) } - pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { + pub fn gcc_uint_to_float_cast( + &self, + value: RValue<'gcc>, + dest_typ: Type<'gcc>, + ) -> RValue<'gcc> { self.int_to_float_cast(false, value, dest_typ) } - fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { + fn float_to_int_cast( + &self, + signed: bool, + value: RValue<'gcc>, + dest_typ: Type<'gcc>, + ) -> RValue<'gcc> { let value_type = value.get_type(); if self.is_native_int_type_or_bool(dest_typ) { return 
self.context.new_cast(None, value, dest_typ); } - let name_suffix = - match self.type_kind(value_type) { - TypeKind::Float => "sfti", - TypeKind::Double => "dfti", - kind => panic!("cannot cast a {:?} to non-native integer", kind), - }; - let sign = - if signed { - "" - } - else { - "uns" - }; + let name_suffix = match self.type_kind(value_type) { + TypeKind::Float => "sfti", + TypeKind::Double => "dfti", + kind => panic!("cannot cast a {:?} to non-native integer", kind), + }; + let sign = if signed { "" } else { "uns" }; let func_name = format!("__fix{}{}", sign, name_suffix); let param = self.context.new_parameter(None, value_type, "n"); - let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false); + let func = self.context.new_function( + None, + FunctionType::Extern, + dest_typ, + &[param], + func_name, + false, + ); self.context.new_call(None, func, &[value]) } @@ -717,17 +811,31 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { self.float_to_int_cast(true, value, dest_typ) } - pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { + pub fn gcc_float_to_uint_cast( + &self, + value: RValue<'gcc>, + dest_typ: Type<'gcc>, + ) -> RValue<'gcc> { self.float_to_int_cast(false, value, dest_typ) } fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> { - self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 1)) + self.context + .new_array_access( + None, + value, + self.context.new_rvalue_from_int(self.int_type, 1), + ) .to_rvalue() } fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> { - self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 0)) + self.context + .new_array_access( + None, + value, + self.context.new_rvalue_from_int(self.int_type, 0), + ) .to_rvalue() } diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs index 1b089f08f764a..f89c05138db60 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs @@ -2,9 +2,14 @@ use std::borrow::Cow; use gccjit::{Function, FunctionPtrType, RValue, ToRValue}; -use crate::{context::CodegenCx, builder::Builder}; +use crate::{builder::Builder, context::CodegenCx}; -pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str) -> Cow<'b, [RValue<'gcc>]> { +pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( + builder: &Builder<'a, 'gcc, 'tcx>, + gcc_func: FunctionPtrType<'gcc>, + mut args: Cow<'b, [RValue<'gcc>]>, + func_name: &str, +) -> Cow<'b, [RValue<'gcc>]> { // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing // arguments here. if gcc_func.get_param_count() != args.len() { @@ -143,17 +148,26 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool { // last argument type check. // FIXME(antoyo): find a way to refactor in order to avoid this hack. 
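// Illustrative use of this predicate (not part of this patch; the caller shape
// below is an assumption, not the crate's actual call site): skip casting the
// flagged argument index when lowering the intrinsic call.
//     for (index, arg) in args.iter().enumerate() {
//         if !ignore_arg_cast(func_name, index, args.len()) {
//             /* cast `arg` to the corresponding GCC parameter type */
//         }
//     }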
match func_name { - "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" - | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" | "__builtin_ia32_sqrtps512_mask" - | "__builtin_ia32_sqrtpd512_mask" | "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask" - | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask" - | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask" - | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" - | "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => { - if index == args_len - 1 { - return true; - } - }, + "__builtin_ia32_maxps512_mask" + | "__builtin_ia32_maxpd512_mask" + | "__builtin_ia32_minps512_mask" + | "__builtin_ia32_minpd512_mask" + | "__builtin_ia32_sqrtps512_mask" + | "__builtin_ia32_sqrtpd512_mask" + | "__builtin_ia32_addps512_mask" + | "__builtin_ia32_addpd512_mask" + | "__builtin_ia32_subps512_mask" + | "__builtin_ia32_subpd512_mask" + | "__builtin_ia32_mulps512_mask" + | "__builtin_ia32_mulpd512_mask" + | "__builtin_ia32_divps512_mask" + | "__builtin_ia32_divpd512_mask" + | "__builtin_ia32_vfmaddsubps512_mask" + | "__builtin_ia32_vfmaddsubpd512_mask" => { + if index == args_len - 1 { + return true; + } + } "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => { // Since there are two LLVM intrinsics that map to each of these GCC builtins and only // one of them has a missing parameter before the last one, we check the number of @@ -161,14 +175,14 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool { if args_len == 4 && index == args_len - 1 { return true; } - }, + } _ => (), } false } -#[cfg(not(feature="master"))] +#[cfg(not(feature = "master"))] pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> { match name { "llvm.x86.xgetbv" => { @@ -176,12 +190,12 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function let func = cx.context.get_builtin_function(gcc_name); cx.functions.borrow_mut().insert(gcc_name.to_string(), func); return func; - }, + } _ => unimplemented!("unsupported LLVM intrinsic {}", name), } } -#[cfg(feature="master")] +#[cfg(feature = "master")] pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> { let gcc_name = match name { "llvm.x86.xgetbv" => "__builtin_ia32_xgetbv", diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 352fe7568eff0..cf60bcac15515 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -1,29 +1,34 @@ pub mod llvm; mod simd; -use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType}; -use rustc_codegen_ssa::MemFlags; +use gccjit::{ComparisonOp, Function, FunctionType, RValue, ToRValue, Type, UnaryOp}; use rustc_codegen_ssa::base::wants_msvc_seh; -use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error}; +use rustc_codegen_ssa::common::{span_invalid_monomorphization_error, IntPredicate}; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; -use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; +use rustc_codegen_ssa::traits::{ + ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods, +}; +use rustc_codegen_ssa::MemFlags; use rustc_middle::bug; -use 
rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::ty::layout::LayoutOf; -use rustc_span::{Span, Symbol, symbol::kw, sym}; -use rustc_target::abi::HasDataLayout; +use rustc_middle::ty::{self, Instance, Ty}; +use rustc_span::{sym, symbol::kw, Span, Symbol}; use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode}; +use rustc_target::abi::HasDataLayout; use rustc_target::spec::PanicStrategy; use crate::abi::GccType; use crate::builder::Builder; use crate::common::{SignType, TypeReflection}; use crate::context::CodegenCx; -use crate::type_of::LayoutGccExt; use crate::intrinsic::simd::generic_simd_intrinsic; +use crate::type_of::LayoutGccExt; -fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option> { +fn get_simple_intrinsic<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + name: Symbol, +) -> Option> { let gcc_name = match name { sym::sqrtf32 => "sqrtf", sym::sqrtf64 => "sqrt", @@ -74,7 +79,14 @@ fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> } impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { - fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) { + fn codegen_intrinsic_call( + &mut self, + instance: Instance<'tcx>, + fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, RValue<'gcc>>], + llresult: RValue<'gcc>, + span: Span, + ) { let tcx = self.tcx; let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all()); @@ -94,238 +106,241 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout); let simple = get_simple_intrinsic(self, name); - let llval = - match name { - _ if simple.is_some() => { - // FIXME(antoyo): remove this cast when the API supports function. - let func = unsafe { std::mem::transmute(simple.expect("simple")) }; - self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::>(), None) - }, - sym::likely => { - self.expect(args[0].immediate(), true) - } - sym::unlikely => { - self.expect(args[0].immediate(), false) - } - kw::Try => { - try_intrinsic( - self, - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - llresult, - ); - return; - } - sym::breakpoint => { - unimplemented!(); - } - sym::va_copy => { - unimplemented!(); - } - sym::va_arg => { - unimplemented!(); - } + let llval = match name { + _ if simple.is_some() => { + // FIXME(antoyo): remove this cast when the API supports function. + let func = unsafe { std::mem::transmute(simple.expect("simple")) }; + self.call( + self.type_void(), + func, + &args.iter().map(|arg| arg.immediate()).collect::>(), + None, + ) + } + sym::likely => self.expect(args[0].immediate(), true), + sym::unlikely => self.expect(args[0].immediate(), false), + kw::Try => { + try_intrinsic( + self, + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + llresult, + ); + return; + } + sym::breakpoint => { + unimplemented!(); + } + sym::va_copy => { + unimplemented!(); + } + sym::va_arg => { + unimplemented!(); + } - sym::volatile_load | sym::unaligned_volatile_load => { - let tp_ty = substs.type_at(0); - let mut ptr = args[0].immediate(); - if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { - ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self))); - } - let load = self.volatile_load(ptr.get_type(), ptr); - // TODO(antoyo): set alignment. 
- self.to_immediate(load, self.layout_of(tp_ty)) - } - sym::volatile_store => { - let dst = args[0].deref(self.cx()); - args[1].val.volatile_store(self, dst); - return; + sym::volatile_load | sym::unaligned_volatile_load => { + let tp_ty = substs.type_at(0); + let mut ptr = args[0].immediate(); + if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { + ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self))); } - sym::unaligned_volatile_store => { - let dst = args[0].deref(self.cx()); - args[1].val.unaligned_volatile_store(self, dst); - return; - } - sym::prefetch_read_data - | sym::prefetch_write_data - | sym::prefetch_read_instruction - | sym::prefetch_write_instruction => { - unimplemented!(); - } - sym::ctlz - | sym::ctlz_nonzero - | sym::cttz - | sym::cttz_nonzero - | sym::ctpop - | sym::bswap - | sym::bitreverse - | sym::rotate_left - | sym::rotate_right - | sym::saturating_add - | sym::saturating_sub => { - let ty = arg_tys[0]; - match int_type_width_signed(ty, self) { - Some((width, signed)) => match name { - sym::ctlz | sym::cttz => { - let func = self.current_func.borrow().expect("func"); - let then_block = func.new_block("then"); - let else_block = func.new_block("else"); - let after_block = func.new_block("after"); - - let arg = args[0].immediate(); - let result = func.new_local(None, arg.get_type(), "zeros"); - let zero = self.cx.gcc_zero(arg.get_type()); - let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero); - self.llbb().end_with_conditional(None, cond, then_block, else_block); - - let zero_result = self.cx.gcc_uint(arg.get_type(), width); - then_block.add_assignment(None, result, zero_result); - then_block.end_with_jump(None, after_block); - - // NOTE: since jumps were added in a place - // count_leading_zeroes() does not expect, the current block - // in the state need to be updated. - self.switch_to_block(else_block); - - let zeros = - match name { - sym::ctlz => self.count_leading_zeroes(width, arg), - sym::cttz => self.count_trailing_zeroes(width, arg), - _ => unreachable!(), - }; - self.llbb().add_assignment(None, result, zeros); - self.llbb().end_with_jump(None, after_block); - - // NOTE: since jumps were added in a place rustc does not - // expect, the current block in the state need to be updated. - self.switch_to_block(after_block); - - result.to_rvalue() - } - sym::ctlz_nonzero => { - self.count_leading_zeroes(width, args[0].immediate()) - }, - sym::cttz_nonzero => { - self.count_trailing_zeroes(width, args[0].immediate()) - } - sym::ctpop => self.pop_count(args[0].immediate()), - sym::bswap => { - if width == 8 { - args[0].immediate() // byte swap a u8/i8 is just a no-op - } - else { - self.gcc_bswap(args[0].immediate(), width) - } - }, - sym::bitreverse => self.bit_reverse(width, args[0].immediate()), - sym::rotate_left | sym::rotate_right => { - // TODO(antoyo): implement using algorithm from: - // https://blog.regehr.org/archives/1063 - // for other platforms. 
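// Illustrative sketch (not part of this patch): the UB-free rotate from the
// linked blog post, which the rotate_left/rotate_right helpers further below
// also follow; shown for u32 with a hypothetical name.
fn rotate_left_u32_sketch(x: u32, n: u32) -> u32 {
    let n = n % 32; // keep the shift amount in range
    // Masking with 31 turns the n == 0 case into a shift by 0 instead of 32.
    (x << n) | (x >> ((32 - n) & 31))
}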
- let is_left = name == sym::rotate_left; - let val = args[0].immediate(); - let raw_shift = args[1].immediate(); - if is_left { - self.rotate_left(val, raw_shift, width) - } - else { - self.rotate_right(val, raw_shift, width) - } - }, - sym::saturating_add => { - self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width) - }, - sym::saturating_sub => { - self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width) - }, - _ => bug!(), - }, - None => { - span_invalid_monomorphization_error( - tcx.sess, - span, - &format!( - "invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", - name, ty - ), - ); - return; + let load = self.volatile_load(ptr.get_type(), ptr); + // TODO(antoyo): set alignment. + self.to_immediate(load, self.layout_of(tp_ty)) + } + sym::volatile_store => { + let dst = args[0].deref(self.cx()); + args[1].val.volatile_store(self, dst); + return; + } + sym::unaligned_volatile_store => { + let dst = args[0].deref(self.cx()); + args[1].val.unaligned_volatile_store(self, dst); + return; + } + sym::prefetch_read_data + | sym::prefetch_write_data + | sym::prefetch_read_instruction + | sym::prefetch_write_instruction => { + unimplemented!(); + } + sym::ctlz + | sym::ctlz_nonzero + | sym::cttz + | sym::cttz_nonzero + | sym::ctpop + | sym::bswap + | sym::bitreverse + | sym::rotate_left + | sym::rotate_right + | sym::saturating_add + | sym::saturating_sub => { + let ty = arg_tys[0]; + match int_type_width_signed(ty, self) { + Some((width, signed)) => match name { + sym::ctlz | sym::cttz => { + let func = self.current_func.borrow().expect("func"); + let then_block = func.new_block("then"); + let else_block = func.new_block("else"); + let after_block = func.new_block("after"); + + let arg = args[0].immediate(); + let result = func.new_local(None, arg.get_type(), "zeros"); + let zero = self.cx.gcc_zero(arg.get_type()); + let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero); + self.llbb() + .end_with_conditional(None, cond, then_block, else_block); + + let zero_result = self.cx.gcc_uint(arg.get_type(), width); + then_block.add_assignment(None, result, zero_result); + then_block.end_with_jump(None, after_block); + + // NOTE: since jumps were added in a place + // count_leading_zeroes() does not expect, the current block + // in the state need to be updated. + self.switch_to_block(else_block); + + let zeros = match name { + sym::ctlz => self.count_leading_zeroes(width, arg), + sym::cttz => self.count_trailing_zeroes(width, arg), + _ => unreachable!(), + }; + self.llbb().add_assignment(None, result, zeros); + self.llbb().end_with_jump(None, after_block); + + // NOTE: since jumps were added in a place rustc does not + // expect, the current block in the state need to be updated. + self.switch_to_block(after_block); + + result.to_rvalue() + } + sym::ctlz_nonzero => self.count_leading_zeroes(width, args[0].immediate()), + sym::cttz_nonzero => self.count_trailing_zeroes(width, args[0].immediate()), + sym::ctpop => self.pop_count(args[0].immediate()), + sym::bswap => { + if width == 8 { + args[0].immediate() // byte swap a u8/i8 is just a no-op + } else { + self.gcc_bswap(args[0].immediate(), width) } } - } - - sym::raw_eq => { - use rustc_target::abi::Abi::*; - let tp_ty = substs.type_at(0); - let layout = self.layout_of(tp_ty).layout; - let _use_integer_compare = match layout.abi() { - Scalar(_) | ScalarPair(_, _) => true, - Uninhabited | Vector { .. } => false, - Aggregate { .. 
} => { - // For rusty ABIs, small aggregates are actually passed - // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), - // so we re-use that same threshold here. - layout.size() <= self.data_layout().pointer_size * 2 + sym::bitreverse => self.bit_reverse(width, args[0].immediate()), + sym::rotate_left | sym::rotate_right => { + // TODO(antoyo): implement using algorithm from: + // https://blog.regehr.org/archives/1063 + // for other platforms. + let is_left = name == sym::rotate_left; + let val = args[0].immediate(); + let raw_shift = args[1].immediate(); + if is_left { + self.rotate_left(val, raw_shift, width) + } else { + self.rotate_right(val, raw_shift, width) + } } - }; - - let a = args[0].immediate(); - let b = args[1].immediate(); - if layout.size().bytes() == 0 { - self.const_bool(true) + sym::saturating_add => self.saturating_add( + args[0].immediate(), + args[1].immediate(), + signed, + width, + ), + sym::saturating_sub => self.saturating_sub( + args[0].immediate(), + args[1].immediate(), + signed, + width, + ), + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, + span, + &format!( + "invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", + name, ty + ), + ); + return; } - /*else if use_integer_compare { - let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits. - let ptr_ty = self.type_ptr_to(integer_ty); - let a_ptr = self.bitcast(a, ptr_ty); - let a_val = self.load(integer_ty, a_ptr, layout.align.abi); - let b_ptr = self.bitcast(b, ptr_ty); - let b_val = self.load(integer_ty, b_ptr, layout.align.abi); - self.icmp(IntPredicate::IntEQ, a_val, b_val) - }*/ - else { - let void_ptr_type = self.context.new_type::<*const ()>(); - let a_ptr = self.bitcast(a, void_ptr_type); - let b_ptr = self.bitcast(b, void_ptr_type); - let n = self.context.new_cast(None, self.const_usize(layout.size().bytes()), self.sizet_type); - let builtin = self.context.get_builtin_function("memcmp"); - let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]); - self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)) + } + } + + sym::raw_eq => { + use rustc_target::abi::Abi::*; + let tp_ty = substs.type_at(0); + let layout = self.layout_of(tp_ty).layout; + let _use_integer_compare = match layout.abi() { + Scalar(_) | ScalarPair(_, _) => true, + Uninhabited | Vector { .. } => false, + Aggregate { .. } => { + // For rusty ABIs, small aggregates are actually passed + // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), + // so we re-use that same threshold here. + layout.size() <= self.data_layout().pointer_size * 2 } + }; + + let a = args[0].immediate(); + let b = args[1].immediate(); + if layout.size().bytes() == 0 { + self.const_bool(true) } + /*else if use_integer_compare { + let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits. 
+ let ptr_ty = self.type_ptr_to(integer_ty); + let a_ptr = self.bitcast(a, ptr_ty); + let a_val = self.load(integer_ty, a_ptr, layout.align.abi); + let b_ptr = self.bitcast(b, ptr_ty); + let b_val = self.load(integer_ty, b_ptr, layout.align.abi); + self.icmp(IntPredicate::IntEQ, a_val, b_val) + }*/ + else { + let void_ptr_type = self.context.new_type::<*const ()>(); + let a_ptr = self.bitcast(a, void_ptr_type); + let b_ptr = self.bitcast(b, void_ptr_type); + let n = self.context.new_cast( + None, + self.const_usize(layout.size().bytes()), + self.sizet_type, + ); + let builtin = self.context.get_builtin_function("memcmp"); + let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]); + self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)) + } + } - sym::black_box => { - args[0].val.store(self, result); + sym::black_box => { + args[0].val.store(self, result); - let block = self.llbb(); - let extended_asm = block.add_extended_asm(None, ""); - extended_asm.add_input_operand(None, "r", result.llval); - extended_asm.add_clobber("memory"); - extended_asm.set_volatile_flag(true); + let block = self.llbb(); + let extended_asm = block.add_extended_asm(None, ""); + extended_asm.add_input_operand(None, "r", result.llval); + extended_asm.add_clobber("memory"); + extended_asm.set_volatile_flag(true); - // We have copied the value to `result` already. - return; - } + // We have copied the value to `result` already. + return; + } - _ if name_str.starts_with("simd_") => { - match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { - Ok(llval) => llval, - Err(()) => return, - } + _ if name_str.starts_with("simd_") => { + match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { + Ok(llval) => llval, + Err(()) => return, } + } - _ => bug!("unknown intrinsic '{}'", name), - }; + _ => bug!("unknown intrinsic '{}'", name), + }; if !fn_abi.ret.is_ignore() { if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { let ptr_llty = self.type_ptr_to(ty.gcc_type(self)); let ptr = self.pointercast(result.llval, ptr_llty); self.store(llval, ptr, result.align); - } - else { + } else { OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) .val .store(self, result); @@ -376,11 +391,21 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { - fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) { + fn store_fn_arg( + &mut self, + arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, + idx: &mut usize, + dst: PlaceRef<'tcx, Self::Value>, + ) { arg_abi.store_fn_arg(self, idx, dst) } - fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) { + fn store_arg( + &mut self, + arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, + val: RValue<'gcc>, + dst: PlaceRef<'tcx, RValue<'gcc>>, + ) { arg_abi.store(self, val, dst) } @@ -391,8 +416,18 @@ impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { pub trait ArgAbiExt<'gcc, 'tcx> { fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; - fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>); - fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>); + fn store( + &self, + bx: &mut Builder<'_, 'gcc, 'tcx>, + val: RValue<'gcc>, + dst: PlaceRef<'tcx, RValue<'gcc>>, + ); + fn store_fn_arg( + &self, + bx: &mut Builder<'_, 
'gcc, 'tcx>, + idx: &mut usize, + dst: PlaceRef<'tcx, RValue<'gcc>>, + ); } impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { @@ -406,17 +441,20 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { /// place for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) { + fn store( + &self, + bx: &mut Builder<'_, 'gcc, 'tcx>, + val: RValue<'gcc>, + dst: PlaceRef<'tcx, RValue<'gcc>>, + ) { if self.is_ignore() { return; } if self.is_sized_indirect() { OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst) - } - else if self.is_unsized_indirect() { + } else if self.is_unsized_indirect() { bug!("unsized `ArgAbi` must be handled through `store_fn_arg`"); - } - else if let PassMode::Cast(ref cast, _) = self.mode { + } else if let PassMode::Cast(ref cast, _) = self.mode { // FIXME(eddyb): Figure out when the simpler Store is safe, clang // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; @@ -424,8 +462,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx)); let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty); bx.store(val, cast_dst, self.layout.align.abi); - } - else { + } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The // code that follows is the only reliable way I have @@ -461,35 +498,49 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { bx.lifetime_end(llscratch, scratch_size); } - } - else { + } else { OperandValue::Immediate(val).store(bx, dst); } } - fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) { + fn store_fn_arg<'a>( + &self, + bx: &mut Builder<'a, 'gcc, 'tcx>, + idx: &mut usize, + dst: PlaceRef<'tcx, RValue<'gcc>>, + ) { let mut next = || { let val = bx.current_func().get_param(*idx as i32); *idx += 1; val.to_rvalue() }; match self.mode { - PassMode::Ignore => {}, + PassMode::Ignore => {} PassMode::Pair(..) => { OperandValue::Pair(next(), next()).store(bx, dst); - }, - PassMode::Indirect { extra_attrs: Some(_), .. } => { + } + PassMode::Indirect { + extra_attrs: Some(_), + .. + } => { OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); - }, - PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(..) => { + } + PassMode::Direct(_) + | PassMode::Indirect { + extra_attrs: None, .. + } + | PassMode::Cast(..) 
=> { let next_arg = next(); self.store(bx, next_arg, dst); - }, + } } } } -fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> { +fn int_type_width_signed<'gcc, 'tcx>( + ty: Ty<'tcx>, + cx: &CodegenCx<'gcc, 'tcx>, +) -> Option<(u64, bool)> { match ty.kind() { ty::Int(t) => Some(( match t { @@ -522,172 +573,172 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let result_type = value.get_type(); let typ = result_type.to_unsigned(self.cx); - let value = - if result_type.is_signed(self.cx) { - self.gcc_int_cast(value, typ) - } - else { - value - }; + let value = if result_type.is_signed(self.cx) { + self.gcc_int_cast(value, typ) + } else { + value + }; let context = &self.cx.context; - let result = - match width { - 8 => { - // First step. - let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0)); - let left = self.lshr(left, context.new_rvalue_from_int(typ, 4)); - let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F)); - let right = self.shl(right, context.new_rvalue_from_int(typ, 4)); - let step1 = self.or(left, right); - - // Second step. - let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC)); - let left = self.lshr(left, context.new_rvalue_from_int(typ, 2)); - let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33)); - let right = self.shl(right, context.new_rvalue_from_int(typ, 2)); - let step2 = self.or(left, right); - - // Third step. - let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA)); - let left = self.lshr(left, context.new_rvalue_from_int(typ, 1)); - let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55)); - let right = self.shl(right, context.new_rvalue_from_int(typ, 1)); - let step3 = self.or(left, right); - - step3 - }, - 16 => { - // First step. - let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555)); - let left = self.shl(left, context.new_rvalue_from_int(typ, 1)); - let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA)); - let right = self.lshr(right, context.new_rvalue_from_int(typ, 1)); - let step1 = self.or(left, right); - - // Second step. - let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333)); - let left = self.shl(left, context.new_rvalue_from_int(typ, 2)); - let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC)); - let right = self.lshr(right, context.new_rvalue_from_int(typ, 2)); - let step2 = self.or(left, right); - - // Third step. - let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F)); - let left = self.shl(left, context.new_rvalue_from_int(typ, 4)); - let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0)); - let right = self.lshr(right, context.new_rvalue_from_int(typ, 4)); - let step3 = self.or(left, right); - - // Fourth step. - let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF)); - let left = self.shl(left, context.new_rvalue_from_int(typ, 8)); - let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00)); - let right = self.lshr(right, context.new_rvalue_from_int(typ, 8)); - let step4 = self.or(left, right); - - step4 - }, - 32 => { - // TODO(antoyo): Refactor with other implementations. - // First step. 
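// Illustrative sketch (not part of this patch): each "step" below swaps bit
// groups at one granularity, and composing the granularities yields a full
// reversal. The same mask-shift-or scheme for u8 (nibbles, pairs, single bits):
fn bit_reverse_u8_sketch(v: u8) -> u8 {
    let v = ((v & 0xF0) >> 4) | ((v & 0x0F) << 4); // swap the two nibbles
    let v = ((v & 0xCC) >> 2) | ((v & 0x33) << 2); // swap bit pairs
    ((v & 0xAA) >> 1) | ((v & 0x55) << 1) // swap adjacent bits
}
// bit_reverse_u8_sketch(0b1000_0000) == 0b0000_0001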
- let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 1)); - let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 1)); - let step1 = self.or(left, right); - - // Second step. - let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 2)); - let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 2)); - let step2 = self.or(left, right); - - // Third step. - let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 4)); - let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 4)); - let step3 = self.or(left, right); - - // Fourth step. - let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 8)); - let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 8)); - let step4 = self.or(left, right); - - // Fifth step. - let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 16)); - let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 16)); - let step5 = self.or(left, right); - - step5 - }, - 64 => { - // First step. - let left = self.shl(value, context.new_rvalue_from_long(typ, 32)); - let right = self.lshr(value, context.new_rvalue_from_long(typ, 32)); - let step1 = self.or(left, right); - - // Second step. - let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 15)); - let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead? - let right = self.lshr(right, context.new_rvalue_from_long(typ, 17)); - let step2 = self.or(left, right); - - // Third step. - let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10)); - let left = self.xor(step2, left); - let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F)); - - let left = self.shl(temp, context.new_rvalue_from_long(typ, 10)); - let left = self.or(temp, left); - let step3 = self.xor(left, step2); - - // Fourth step. - let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4)); - let left = self.xor(step3, left); - let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421)); - - let left = self.shl(temp, context.new_rvalue_from_long(typ, 4)); - let left = self.or(temp, left); - let step4 = self.xor(left, step3); - - // Fifth step. - let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2)); - let left = self.xor(step4, left); - let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842)); - - let left = self.shl(temp, context.new_rvalue_from_long(typ, 2)); - let left = self.or(temp, left); - let step5 = self.xor(left, step4); - - step5 - }, - 128 => { - // TODO(antoyo): find a more efficient implementation? 
- let sixty_four = self.gcc_int(typ, 64); - let right_shift = self.gcc_lshr(value, sixty_four); - let high = self.gcc_int_cast(right_shift, self.u64_type); - let low = self.gcc_int_cast(value, self.u64_type); - - let reversed_high = self.bit_reverse(64, high); - let reversed_low = self.bit_reverse(64, low); - - let new_low = self.gcc_int_cast(reversed_high, typ); - let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four); - - self.gcc_or(new_low, new_high) - }, - _ => { - panic!("cannot bit reverse with width = {}", width); - }, - }; + let result = match width { + 8 => { + // First step. + let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 4)); + let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 4)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 1)); + let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 1)); + let step3 = self.or(left, right); + + step3 + } + 16 => { + // First step. + let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 1)); + let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 1)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 4)); + let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 4)); + let step3 = self.or(left, right); + + // Fourth step. + let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 8)); + let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 8)); + let step4 = self.or(left, right); + + step4 + } + 32 => { + // TODO(antoyo): Refactor with other implementations. + // First step. + let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 1)); + let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 1)); + let step1 = self.or(left, right); + + // Second step. 
+ let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 4)); + let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 4)); + let step3 = self.or(left, right); + + // Fourth step. + let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 8)); + let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 8)); + let step4 = self.or(left, right); + + // Fifth step. + let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 16)); + let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 16)); + let step5 = self.or(left, right); + + step5 + } + 64 => { + // First step. + let left = self.shl(value, context.new_rvalue_from_long(typ, 32)); + let right = self.lshr(value, context.new_rvalue_from_long(typ, 32)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 15)); + let right = self.and( + step1, + context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64), + ); // TODO(antoyo): transmute the number instead? + let right = self.lshr(right, context.new_rvalue_from_long(typ, 17)); + let step2 = self.or(left, right); + + // Third step. + let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10)); + let left = self.xor(step2, left); + let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F)); + + let left = self.shl(temp, context.new_rvalue_from_long(typ, 10)); + let left = self.or(temp, left); + let step3 = self.xor(left, step2); + + // Fourth step. + let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4)); + let left = self.xor(step3, left); + let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421)); + + let left = self.shl(temp, context.new_rvalue_from_long(typ, 4)); + let left = self.or(temp, left); + let step4 = self.xor(left, step3); + + // Fifth step. + let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2)); + let left = self.xor(step4, left); + let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842)); + + let left = self.shl(temp, context.new_rvalue_from_long(typ, 2)); + let left = self.or(temp, left); + let step5 = self.xor(left, step4); + + step5 + } + 128 => { + // TODO(antoyo): find a more efficient implementation? 
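// Illustrative sketch (not part of this patch): the 128-bit strategy used just
// below, in plain Rust — reverse each 64-bit half, then swap the halves; std's
// reverse_bits stands in here for the recursive bit_reverse call.
fn bit_reverse_u128_sketch(v: u128) -> u128 {
    let low = v as u64;
    let high = (v >> 64) as u64;
    ((low.reverse_bits() as u128) << 64) | high.reverse_bits() as u128
}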
+ let sixty_four = self.gcc_int(typ, 64); + let right_shift = self.gcc_lshr(value, sixty_four); + let high = self.gcc_int_cast(right_shift, self.u64_type); + let low = self.gcc_int_cast(value, self.u64_type); + + let reversed_high = self.bit_reverse(64, high); + let reversed_low = self.bit_reverse(64, low); + + let new_low = self.gcc_int_cast(reversed_high, typ); + let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four); + + self.gcc_or(new_low, new_high) + } + _ => { + panic!("cannot bit reverse with width = {}", width); + } + }; self.gcc_int_cast(result, result_type) } @@ -769,14 +820,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> { let result_type = arg.get_type(); - let arg = - if result_type.is_signed(self.cx) { - let new_type = result_type.to_unsigned(self.cx); - self.gcc_int_cast(arg, new_type) - } - else { - arg - }; + let arg = if result_type.is_signed(self.cx) { + let new_type = result_type.to_unsigned(self.cx); + self.gcc_int_cast(arg, new_type) + } else { + arg + }; let arg_type = arg.get_type(); let (count_trailing_zeroes, expected_type) = // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here @@ -851,13 +900,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { return self.context.new_cast(None, res, result_type); }; let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes); - let arg = - if arg_type != expected_type { - self.context.new_cast(None, arg, expected_type) - } - else { - arg - }; + let arg = if arg_type != expected_type { + self.context.new_cast(None, arg, expected_type) + } else { + arg + }; let res = self.context.new_call(None, count_trailing_zeroes, &[arg]); self.context.new_cast(None, res, result_type) } @@ -867,13 +914,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let result_type = value.get_type(); let value_type = result_type.to_unsigned(self.cx); - let value = - if result_type.is_signed(self.cx) { - self.gcc_int_cast(value, value_type) - } - else { - value - }; + let value = if result_type.is_signed(self.cx) { + self.gcc_int_cast(value, value_type) + } else { + value + }; if value_type.is_u128(&self.cx) { // TODO(antoyo): implement in the normal algorithm below to have a more efficient @@ -890,21 +935,27 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } // First step. - let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555); + let mask = self + .context + .new_rvalue_from_long(value_type, 0x5555555555555555); let left = value & mask; let shifted = value >> self.context.new_rvalue_from_int(value_type, 1); let right = shifted & mask; let value = left + right; // Second step. - let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333); + let mask = self + .context + .new_rvalue_from_long(value_type, 0x3333333333333333); let left = value & mask; let shifted = value >> self.context.new_rvalue_from_int(value_type, 2); let right = shifted & mask; let value = left + right; // Third step. - let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F); + let mask = self + .context + .new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F); let left = value & mask; let shifted = value >> self.context.new_rvalue_from_int(value_type, 4); let right = shifted & mask; @@ -915,7 +966,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } // Fourth step. 
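// Illustrative sketch (not part of this patch): the mask-and-add population
// count assembled step by step here, written out for u64 with the same masks:
fn pop_count_u64_sketch(v: u64) -> u32 {
    let v = (v & 0x5555555555555555) + ((v >> 1) & 0x5555555555555555); // 2-bit sums
    let v = (v & 0x3333333333333333) + ((v >> 2) & 0x3333333333333333); // 4-bit sums
    let v = (v & 0x0F0F0F0F0F0F0F0F) + ((v >> 4) & 0x0F0F0F0F0F0F0F0F); // 8-bit sums
    let v = (v & 0x00FF00FF00FF00FF) + ((v >> 8) & 0x00FF00FF00FF00FF); // 16-bit sums
    let v = (v & 0x0000FFFF0000FFFF) + ((v >> 16) & 0x0000FFFF0000FFFF); // 32-bit sums
    ((v & 0x00000000FFFFFFFF) + ((v >> 32) & 0x00000000FFFFFFFF)) as u32
}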
- let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF); + let mask = self + .context + .new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF); let left = value & mask; let shifted = value >> self.context.new_rvalue_from_int(value_type, 8); let right = shifted & mask; @@ -926,7 +979,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } // Fifth step. - let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF); + let mask = self + .context + .new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF); let left = value & mask; let shifted = value >> self.context.new_rvalue_from_int(value_type, 16); let right = shifted & mask; @@ -937,7 +992,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } // Sixth step. - let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF); + let mask = self + .context + .new_rvalue_from_long(value_type, 0x00000000FFFFFFFF); let left = value & mask; let shifted = value >> self.context.new_rvalue_from_int(value_type, 32); let right = shifted & mask; @@ -947,74 +1004,89 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } // Algorithm from: https://blog.regehr.org/archives/1063 - fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> { + fn rotate_left( + &mut self, + value: RValue<'gcc>, + shift: RValue<'gcc>, + width: u64, + ) -> RValue<'gcc> { let max = self.const_uint(shift.get_type(), width); let shift = self.urem(shift, max); let lhs = self.shl(value, shift); let result_neg = self.neg(shift); - let result_and = - self.and( - result_neg, - self.const_uint(shift.get_type(), width - 1), - ); + let result_and = self.and(result_neg, self.const_uint(shift.get_type(), width - 1)); let rhs = self.lshr(value, result_and); self.or(lhs, rhs) } // Algorithm from: https://blog.regehr.org/archives/1063 - fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> { + fn rotate_right( + &mut self, + value: RValue<'gcc>, + shift: RValue<'gcc>, + width: u64, + ) -> RValue<'gcc> { let max = self.const_uint(shift.get_type(), width); let shift = self.urem(shift, max); let lhs = self.lshr(value, shift); let result_neg = self.neg(shift); - let result_and = - self.and( - result_neg, - self.const_uint(shift.get_type(), width - 1), - ); + let result_and = self.and(result_neg, self.const_uint(shift.get_type(), width - 1)); let rhs = self.shl(value, result_and); self.or(lhs, rhs) } - fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> { + fn saturating_add( + &mut self, + lhs: RValue<'gcc>, + rhs: RValue<'gcc>, + signed: bool, + width: u64, + ) -> RValue<'gcc> { let result_type = lhs.get_type(); if signed { // Based on algorithm from: https://stackoverflow.com/a/56531252/389119 let func = self.current_func.borrow().expect("func"); let res = func.new_local(None, result_type, "saturating_sum"); let supports_native_type = self.is_native_int_type(result_type); - let overflow = - if supports_native_type { - let func_name = - match width { - 8 => "__builtin_add_overflow", - 16 => "__builtin_add_overflow", - 32 => "__builtin_sadd_overflow", - 64 => "__builtin_saddll_overflow", - 128 => "__builtin_add_overflow", - _ => unreachable!(), - }; - let overflow_func = self.context.get_builtin_function(func_name); - self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None) - } - else { - let func_name = - match width { - 128 => "__rust_i128_addo", - _ => unreachable!(), - }; - let param_a = 
self.context.new_parameter(None, result_type, "a"); - let param_b = self.context.new_parameter(None, result_type, "b"); - let result_field = self.context.new_field(None, result_type, "result"); - let overflow_field = self.context.new_field(None, self.bool_type, "overflow"); - let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]); - let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false); - let result = self.context.new_call(None, func, &[lhs, rhs]); - let overflow = result.access_field(None, overflow_field); - let int_result = result.access_field(None, result_field); - self.llbb().add_assignment(None, res, int_result); - overflow + let overflow = if supports_native_type { + let func_name = match width { + 8 => "__builtin_add_overflow", + 16 => "__builtin_add_overflow", + 32 => "__builtin_sadd_overflow", + 64 => "__builtin_saddll_overflow", + 128 => "__builtin_add_overflow", + _ => unreachable!(), + }; + let overflow_func = self.context.get_builtin_function(func_name); + self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None) + } else { + let func_name = match width { + 128 => "__rust_i128_addo", + _ => unreachable!(), }; + let param_a = self.context.new_parameter(None, result_type, "a"); + let param_b = self.context.new_parameter(None, result_type, "b"); + let result_field = self.context.new_field(None, result_type, "result"); + let overflow_field = self.context.new_field(None, self.bool_type, "overflow"); + let return_type = self.context.new_struct_type( + None, + "result_overflow", + &[result_field, overflow_field], + ); + let func = self.context.new_function( + None, + FunctionType::Extern, + return_type.as_type(), + &[param_a, param_b], + func_name, + false, + ); + let result = self.context.new_call(None, func, &[lhs, rhs]); + let overflow = result.access_field(None, overflow_field); + let int_result = result.access_field(None, result_field); + self.llbb().add_assignment(None, res, int_result); + overflow + }; let then_block = func.new_block("then"); let after_block = func.new_block("after"); @@ -1022,21 +1094,28 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // Return `result_type`'s maximum or minimum value on overflow // NOTE: convert the type to unsigned to have an unsigned shift. let unsigned_type = result_type.to_unsigned(&self.cx); - let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1)); + let shifted = self.gcc_lshr( + self.gcc_int_cast(lhs, unsigned_type), + self.gcc_int(unsigned_type, width as i64 - 1), + ); let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0)); let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1)); - then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type)); + then_block.add_assignment( + None, + res, + self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type), + ); then_block.end_with_jump(None, after_block); - self.llbb().end_with_conditional(None, overflow, then_block, after_block); + self.llbb() + .end_with_conditional(None, overflow, then_block, after_block); // NOTE: since jumps were added in a place rustc does not // expect, the current block in the state need to be updated. 
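// Illustrative sketch (not part of this patch): the overflow branch constructed
// above clamps toward the sign of `lhs`; the equivalent in plain Rust, with a
// hypothetical name (std's i64::saturating_add has the same behavior):
fn saturating_add_i64_sketch(lhs: i64, rhs: i64) -> i64 {
    match lhs.checked_add(rhs) {
        Some(sum) => sum,
        // The sign bit of lhs plus i64::MAX gives MAX for a positive lhs and
        // wraps to MIN for a negative one — the same `shifted + int_max` trick.
        None => (((lhs as u64) >> 63) + i64::MAX as u64) as i64,
    }
}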
self.switch_to_block(after_block); res.to_rvalue() - } - else { + } else { // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/ let res = self.gcc_add(lhs, rhs); let cond = self.gcc_icmp(IntPredicate::IntULT, res, lhs); @@ -1046,45 +1125,58 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/ - fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> { + fn saturating_sub( + &mut self, + lhs: RValue<'gcc>, + rhs: RValue<'gcc>, + signed: bool, + width: u64, + ) -> RValue<'gcc> { let result_type = lhs.get_type(); if signed { // Based on algorithm from: https://stackoverflow.com/a/56531252/389119 let func = self.current_func.borrow().expect("func"); let res = func.new_local(None, result_type, "saturating_diff"); let supports_native_type = self.is_native_int_type(result_type); - let overflow = - if supports_native_type { - let func_name = - match width { - 8 => "__builtin_sub_overflow", - 16 => "__builtin_sub_overflow", - 32 => "__builtin_ssub_overflow", - 64 => "__builtin_ssubll_overflow", - 128 => "__builtin_sub_overflow", - _ => unreachable!(), - }; - let overflow_func = self.context.get_builtin_function(func_name); - self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None) - } - else { - let func_name = - match width { - 128 => "__rust_i128_subo", - _ => unreachable!(), - }; - let param_a = self.context.new_parameter(None, result_type, "a"); - let param_b = self.context.new_parameter(None, result_type, "b"); - let result_field = self.context.new_field(None, result_type, "result"); - let overflow_field = self.context.new_field(None, self.bool_type, "overflow"); - let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]); - let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false); - let result = self.context.new_call(None, func, &[lhs, rhs]); - let overflow = result.access_field(None, overflow_field); - let int_result = result.access_field(None, result_field); - self.llbb().add_assignment(None, res, int_result); - overflow + let overflow = if supports_native_type { + let func_name = match width { + 8 => "__builtin_sub_overflow", + 16 => "__builtin_sub_overflow", + 32 => "__builtin_ssub_overflow", + 64 => "__builtin_ssubll_overflow", + 128 => "__builtin_sub_overflow", + _ => unreachable!(), }; + let overflow_func = self.context.get_builtin_function(func_name); + self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None) + } else { + let func_name = match width { + 128 => "__rust_i128_subo", + _ => unreachable!(), + }; + let param_a = self.context.new_parameter(None, result_type, "a"); + let param_b = self.context.new_parameter(None, result_type, "b"); + let result_field = self.context.new_field(None, result_type, "result"); + let overflow_field = self.context.new_field(None, self.bool_type, "overflow"); + let return_type = self.context.new_struct_type( + None, + "result_overflow", + &[result_field, overflow_field], + ); + let func = self.context.new_function( + None, + FunctionType::Extern, + return_type.as_type(), + &[param_a, param_b], + func_name, + false, + ); + let result = self.context.new_call(None, func, &[lhs, rhs]); + let overflow = result.access_field(None, overflow_field); + let int_result = result.access_field(None, result_field); + self.llbb().add_assignment(None, res, int_result); + overflow + }; 
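// Illustrative sketch (not part of this patch): the unsigned fallback in the
// else branch below follows the linked locklessinc mask trick; in plain Rust:
fn saturating_sub_u64_sketch(lhs: u64, rhs: u64) -> u64 {
    let res = lhs.wrapping_sub(rhs);
    let no_underflow = (res <= lhs) as u64; // 1 if the subtraction did not wrap
    res & no_underflow.wrapping_neg() // all-ones mask keeps res; zero clamps to 0
}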
let then_block = func.new_block("then"); let after_block = func.new_block("after"); @@ -1092,21 +1184,28 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // Return `result_type`'s maximum or minimum value on overflow // NOTE: convert the type to unsigned to have an unsigned shift. let unsigned_type = result_type.to_unsigned(&self.cx); - let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1)); + let shifted = self.gcc_lshr( + self.gcc_int_cast(lhs, unsigned_type), + self.gcc_int(unsigned_type, width as i64 - 1), + ); let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0)); let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1)); - then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type)); + then_block.add_assignment( + None, + res, + self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type), + ); then_block.end_with_jump(None, after_block); - self.llbb().end_with_conditional(None, overflow, then_block, after_block); + self.llbb() + .end_with_conditional(None, overflow, then_block, after_block); // NOTE: since jumps were added in a place rustc does not // expect, the current block in the state need to be updated. self.switch_to_block(after_block); res.to_rvalue() - } - else { + } else { let res = self.gcc_sub(lhs, rhs); let comparison = self.gcc_icmp(IntPredicate::IntULE, res, lhs); let value = self.gcc_neg(self.gcc_int_cast(comparison, result_type)); @@ -1115,7 +1214,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } } -fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) { +fn try_intrinsic<'gcc, 'tcx>( + bx: &mut Builder<'_, 'gcc, 'tcx>, + try_func: RValue<'gcc>, + data: RValue<'gcc>, + _catch_func: RValue<'gcc>, + dest: RValue<'gcc>, +) { // NOTE: the `|| true` here is to use the panic=abort strategy with panic=unwind too if bx.sess().panic_strategy() == PanicStrategy::Abort || true { // TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done. @@ -1124,11 +1229,9 @@ fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue< // we can never unwind. 
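(For reference: the `IntULT`/`IntULE` comparisons in the saturating hunks above are the branch-free pattern from the locklessinc article cited in the comments. A minimal standalone sketch of the unsigned cases in plain Rust, independent of gccjit and with function names invented for the example:

    // Unsigned saturating add: on wraparound the sum is smaller than either
    // operand, so `res < a` detects overflow; negating the flag yields an
    // all-ones mask that forces the result to u32::MAX.
    fn sat_add_u32(a: u32, b: u32) -> u32 {
        let res = a.wrapping_add(b);
        let mask = ((res < a) as u32).wrapping_neg();
        res | mask
    }

    // Unsigned saturating sub: `res <= a` holds exactly when no wraparound
    // occurred, so the mask keeps the difference or clamps it to zero.
    fn sat_sub_u32(a: u32, b: u32) -> u32 {
        let res = a.wrapping_sub(b);
        let mask = ((res <= a) as u32).wrapping_neg();
        res & mask
    }

    fn main() {
        assert_eq!(sat_add_u32(u32::MAX - 1, 5), u32::MAX);
        assert_eq!(sat_sub_u32(3, 5), 0);
        assert_eq!(sat_add_u32(2, 3), 2u32.saturating_add(3));
        assert_eq!(sat_sub_u32(5, 3), 5u32.saturating_sub(3));
    }

The signed branch cannot use this trick directly; as the hunks above show, it detects overflow with the `__builtin_*_overflow` family (or the `__rust_i128_*o` fallbacks) and then selects the type's minimum or maximum via the sign-bit shift assigned in `then_block`.)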
let ret_align = bx.tcx.data_layout.i32_align.abi; bx.store(bx.const_i32(0), dest, ret_align); - } - else if wants_msvc_seh(bx.sess()) { + } else if wants_msvc_seh(bx.sess()) { unimplemented!(); - } - else { + } else { unimplemented!(); } } diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs index 2401f3350186e..431662825822f 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -1,8 +1,8 @@ use std::cmp::Ordering; -use gccjit::{BinaryOp, RValue, Type, ToRValue}; +use gccjit::{BinaryOp, RValue, ToRValue, Type}; use rustc_codegen_ssa::base::compare_simd_types; -use rustc_codegen_ssa::common::{TypeKind, span_invalid_monomorphization_error}; +use rustc_codegen_ssa::common::{span_invalid_monomorphization_error, TypeKind}; use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods}; @@ -10,13 +10,21 @@ use rustc_hir as hir; use rustc_middle::span_bug; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::{self, Ty}; -use rustc_span::{Span, Symbol, sym}; +use rustc_span::{sym, Span, Symbol}; use rustc_target::abi::Align; use crate::builder::Builder; use crate::intrinsic; -pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> { +pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( + bx: &mut Builder<'a, 'gcc, 'tcx>, + name: Symbol, + callee_ty: Ty<'tcx>, + args: &[OperandRef<'tcx, RValue<'gcc>>], + ret_ty: Ty<'tcx>, + llret_ty: Type<'gcc>, + span: Span, +) -> Result<RValue<'gcc>, ()> { // macros for error handling: #[allow(unused_macro_rules)] macro_rules! emit_error { @@ -50,7 +58,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, macro_rules! require_simd { ($ty: expr, $position: expr) => { - require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty) + require!( + $ty.is_simd(), + "expected SIMD {} type, found non-SIMD `{}`", + $position, + $ty + ) }; } @@ -91,7 +104,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let arg1 = args[1].immediate(); let arg1_type = arg1.get_type(); - let arg1_vector_type = arg1_type.unqualified().dyncast_vector().expect("vector type"); + let arg1_vector_type = arg1_type + .unqualified() + .dyncast_vector() + .expect("vector type"); let arg1_element_type = arg1_vector_type.get_element_type(); let mut elements = vec![]; @@ -101,7 +117,9 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, elements.push(element); mask = mask >> one; } - let vector_mask = bx.context.new_rvalue_from_vector(None, arg1_type, &elements); + let vector_mask = bx + .context + .new_rvalue_from_vector(None, arg1_type, &elements); return Ok(bx.vector_select(vector_mask, arg1, args[2].immediate())); } @@ -152,27 +170,28 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, } if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") { - let n: u64 = - if stripped.is_empty() { - // Make sure this is actually an array, since typeck only checks the length-suffixed - // version of this intrinsic.
- match args[2].layout.ty.kind() { - ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { - len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| { - span_bug!(span, "could not evaluate shuffle index array length") - }) - } - _ => return_error!( - "simd_shuffle index must be an array of `u32`, got `{}`", - args[2].layout.ty - ), - } + let n: u64 = if stripped.is_empty() { + // Make sure this is actually an array, since typeck only checks the length-suffixed + // version of this intrinsic. + match args[2].layout.ty.kind() { + ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len + .try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()) + .unwrap_or_else(|| { + span_bug!(span, "could not evaluate shuffle index array length") + }), + _ => return_error!( + "simd_shuffle index must be an array of `u32`, got `{}`", + args[2].layout.ty + ), } - else { - stripped.parse().unwrap_or_else(|_| { - span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?") - }) - }; + } else { + stripped.parse().unwrap_or_else(|_| { + span_bug!( + span, + "bad `simd_shuffle` instruction only caught in codegen?" + ) + }) + }; require_simd!(ret_ty, "return"); @@ -196,14 +215,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let vector = args[2].immediate(); - return Ok(bx.shuffle_vector( - args[0].immediate(), - args[1].immediate(), - vector, - )); + return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), vector)); } - #[cfg(feature="master")] + #[cfg(feature = "master")] if name == sym::simd_insert { require!( in_elem == arg_tys[2], @@ -216,50 +231,54 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let index = args[1].immediate(); let value = args[2].immediate(); // TODO(antoyo): use a recursive unqualified() here. - let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type"); + let vector_type = vector + .get_type() + .unqualified() + .dyncast_vector() + .expect("vector type"); let element_type = vector_type.get_element_type(); // NOTE: we cannot cast to an array and assign to its element here because the value might // not be an l-value. So, call a builtin to set the element. // TODO(antoyo): perhaps we could create a new vector or maybe there's a GIMPLE instruction for that? // TODO(antoyo): don't use target specific builtins here. - let func_name = - match in_len { - 2 => { - if element_type == bx.i64_type { - "__builtin_ia32_vec_set_v2di" - } - else { - unimplemented!(); - } - }, - 4 => { - if element_type == bx.i32_type { - "__builtin_ia32_vec_set_v4si" - } - else { - unimplemented!(); - } - }, - 8 => { - if element_type == bx.i16_type { - "__builtin_ia32_vec_set_v8hi" - } - else { - unimplemented!(); - } - }, - _ => unimplemented!("Len: {}", in_len), - }; + let func_name = match in_len { + 2 => { + if element_type == bx.i64_type { + "__builtin_ia32_vec_set_v2di" + } else { + unimplemented!(); + } + } + 4 => { + if element_type == bx.i32_type { + "__builtin_ia32_vec_set_v4si" + } else { + unimplemented!(); + } + } + 8 => { + if element_type == bx.i16_type { + "__builtin_ia32_vec_set_v8hi" + } else { + unimplemented!(); + } + } + _ => unimplemented!("Len: {}", in_len), + }; let builtin = bx.context.get_target_builtin_function(func_name); let param1_type = builtin.get_param(0).to_rvalue().get_type(); // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. 
let vector = bx.cx.bitcast_if_needed(vector, param1_type); - let result = bx.context.new_call(None, builtin, &[vector, value, bx.context.new_cast(None, index, bx.int_type)]); + let result = bx.context.new_call( + None, + builtin, + &[vector, value, bx.context.new_cast(None, index, bx.int_type)], + ); // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. return Ok(bx.context.new_bitcast(None, result, vector.get_type())); } - #[cfg(feature="master")] + #[cfg(feature = "master")] if name == sym::simd_extract { require!( ret_ty == in_elem, @@ -269,7 +288,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, ret_ty ); let vector = args[0].immediate(); - return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue()); + return Ok(bx + .context + .new_vector_access(None, vector, args[1].immediate()) + .to_rvalue()); } if name == sym::simd_select { @@ -287,7 +309,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, ty::Int(_) => {} _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty), } - return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate())); + return Ok(bx.vector_select( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + )); } if name == sym::simd_cast { @@ -318,11 +344,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, // disallowed before here, so this unwrap is safe. ty::Int(i) => ( Style::Int(true), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + i.normalize(bx.tcx().sess.target.pointer_width) + .bit_width() + .unwrap(), ), ty::Uint(u) => ( Style::Int(false), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + u.normalize(bx.tcx().sess.target.pointer_width) + .bit_width() + .unwrap(), ), ty::Float(f) => (Style::Float, f.bit_width()), _ => (Style::Unsupported, 0), @@ -330,11 +360,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let (out_style, out_width) = match out_elem.kind() { ty::Int(i) => ( Style::Int(true), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + i.normalize(bx.tcx().sess.target.pointer_width) + .bit_width() + .unwrap(), ), ty::Uint(u) => ( Style::Int(false), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + u.normalize(bx.tcx().sess.target.pointer_width) + .bit_width() + .unwrap(), ), ty::Float(f) => (Style::Float, f.bit_width()), _ => (Style::Unsupported, 0), @@ -349,19 +383,27 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, let cast_vec_element = |index| { let index = bx.context.new_rvalue_from_int(bx.int_type, index); - bx.context.new_cast(None, bx.context.new_array_access(None, array, index).to_rvalue(), out_type) + bx.context.new_cast( + None, + bx.context.new_array_access(None, array, index).to_rvalue(), + out_type, + ) }; - bx.context.new_rvalue_from_vector(None, vector_type, &[ - cast_vec_element(0), - cast_vec_element(1), - cast_vec_element(2), - cast_vec_element(3), - cast_vec_element(4), - cast_vec_element(5), - cast_vec_element(6), - cast_vec_element(7), - ]) + bx.context.new_rvalue_from_vector( + None, + vector_type, + &[ + cast_vec_element(0), + cast_vec_element(1), + cast_vec_element(2), + cast_vec_element(3), + cast_vec_element(4), + cast_vec_element(5), + cast_vec_element(6), + cast_vec_element(7), + ], + ) }; match (in_style, out_style) { @@ -465,51 +507,53 @@ pub fn 
generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, } } - let (elem_ty_str, elem_ty) = - if let ty::Float(f) = in_elem.kind() { - let elem_ty = bx.cx.type_float_from_ty(*f); - match f.bit_width() { - 32 => ("f32", elem_ty), - 64 => ("f64", elem_ty), - _ => { - return_error!( - "unsupported element type `{}` of floating-point vector `{}`", - f.name_str(), - in_ty - ); - } + let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() { + let elem_ty = bx.cx.type_float_from_ty(*f); + match f.bit_width() { + 32 => ("f32", elem_ty), + 64 => ("f64", elem_ty), + _ => { + return_error!( + "unsupported element type `{}` of floating-point vector `{}`", + f.name_str(), + in_ty + ); } } - else { - return_error!("`{}` is not a floating-point type", in_ty); - }; + } else { + return_error!("`{}` is not a floating-point type", in_ty); + }; let vec_ty = bx.cx.type_vector(elem_ty, in_len); - let (intr_name, fn_ty) = - match name { - sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), // TODO(antoyo): pand with 170141183420855150465331762880109871103 - sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), - sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)), - sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)), - sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)), - _ => return_error!("unrecognized intrinsic `{}`", name), - }; + let (intr_name, fn_ty) = match name { + sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), // TODO(antoyo): pand with 170141183420855150465331762880109871103 + sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), + sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)), + sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)), + sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)), + _ => return_error!("unrecognized intrinsic `{}`", name), + }; let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx); let function: RValue<'gcc> = unsafe { 
std::mem::transmute(function) }; - let c = bx.call(fn_ty, function, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None); + let c = bx.call( + fn_ty, + function, + &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), + None, + ); Ok(c) } @@ -569,15 +613,23 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, simd_neg: Int => neg, Float => fneg; } - #[cfg(feature="master")] + #[cfg(feature = "master")] if name == sym::simd_saturating_add || name == sym::simd_saturating_sub { let lhs = args[0].immediate(); let rhs = args[1].immediate(); let is_add = name == sym::simd_saturating_add; let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _; let (signed, elem_width, elem_ty) = match *in_elem.kind() { - ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)), - ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)), + ty::Int(i) => ( + true, + i.bit_width().unwrap_or(ptr_bits), + bx.cx.type_int_from_ty(i), + ), + ty::Uint(i) => ( + false, + i.bit_width().unwrap_or(ptr_bits), + bx.cx.type_uint_from_ty(i), + ), _ => { return_error!( "expected element type `{}` of vector type `{}` \ @@ -587,18 +639,23 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, ); } }; - let builtin_name = - match (signed, is_add, in_len, elem_width) { - (true, true, 32, 8) => "__builtin_ia32_paddsb256", // TODO(antoyo): cast arguments to unsigned. - (false, true, 32, 8) => "__builtin_ia32_paddusb256", - (true, true, 16, 16) => "__builtin_ia32_paddsw256", - (false, true, 16, 16) => "__builtin_ia32_paddusw256", - (true, false, 16, 16) => "__builtin_ia32_psubsw256", - (false, false, 16, 16) => "__builtin_ia32_psubusw256", - (true, false, 32, 8) => "__builtin_ia32_psubsb256", - (false, false, 32, 8) => "__builtin_ia32_psubusb256", - _ => unimplemented!("signed: {}, is_add: {}, in_len: {}, elem_width: {}", signed, is_add, in_len, elem_width), - }; + let builtin_name = match (signed, is_add, in_len, elem_width) { + (true, true, 32, 8) => "__builtin_ia32_paddsb256", // TODO(antoyo): cast arguments to unsigned.
+ (false, true, 32, 8) => "__builtin_ia32_paddusb256", + (true, true, 16, 16) => "__builtin_ia32_paddsw256", + (false, true, 16, 16) => "__builtin_ia32_paddusw256", + (true, false, 16, 16) => "__builtin_ia32_psubsw256", + (false, false, 16, 16) => "__builtin_ia32_psubusw256", + (true, false, 32, 8) => "__builtin_ia32_psubsb256", + (false, false, 32, 8) => "__builtin_ia32_psubusb256", + _ => unimplemented!( + "signed: {}, is_add: {}, in_len: {}, elem_width: {}", + signed, + is_add, + in_len, + elem_width + ), + }; let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64); let func = bx.context.get_target_builtin_function(builtin_name); @@ -629,8 +686,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, // if overflow occurs, the result is the // mathematical result modulo 2^n: Ok(bx.$op(args[1].immediate(), r)) - } - else { + } else { Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op)) } } @@ -639,8 +695,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, // ordered arithmetic reductions take an accumulator let acc = args[1].immediate(); Ok(bx.$float_reduce(acc, args[0].immediate())) - } - else { + } else { Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op)) } } @@ -682,7 +737,9 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, ret_ty ); return match in_elem.kind() { - ty::Int(_) | ty::Uint(_) | ty::Float(_) => Ok(bx.$reduction(args[0].immediate())), + ty::Int(_) | ty::Uint(_) | ty::Float(_) => { + Ok(bx.$reduction(args[0].immediate())) + } _ => return_error!( "unsupported {} from `{}` with element `{}` to `{}`", sym::$name, @@ -730,7 +787,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, return match in_elem.kind() { ty::Int(_) | ty::Uint(_) => { let r = bx.vector_reduce_op(input, $op); - Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) }) + Ok(if !$boolean { + r + } else { + bx.zext(r, bx.type_bool()) + }) } _ => return_error!( "unsupported {} from `{}` with element `{}` to `{}`", diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index 8a206c0368fcb..b3143bab85159 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -15,7 +15,7 @@ hash_raw_entry )] #![allow(broken_intra_doc_links)] -#![recursion_limit="256"] +#![recursion_limit = "256"] #![warn(rust_2018_idioms)] #![warn(unused_lifetimes)] @@ -58,24 +58,29 @@ mod type_of; use std::any::Any; use std::sync::{Arc, Mutex}; -use gccjit::{Context, OptimizationLevel, CType}; +use gccjit::{CType, Context, OptimizationLevel}; use rustc_ast::expand::allocator::AllocatorKind; -use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen}; -use rustc_codegen_ssa::base::codegen_crate; -use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn}; use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; +use rustc_codegen_ssa::back::write::{ + CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn, +}; +use rustc_codegen_ssa::base::codegen_crate; use rustc_codegen_ssa::target_features::supported_target_features; -use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; +use rustc_codegen_ssa::traits::{ + CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, + WriteBackendMethods, +}; +use rustc_codegen_ssa::{CodegenResults, CompiledModule, 
ModuleCodegen}; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{ErrorGuaranteed, Handler}; use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; -use rustc_middle::ty::TyCtxt; use rustc_middle::ty::query::Providers; +use rustc_middle::ty::TyCtxt; use rustc_session::config::{Lto, OptLevel, OutputFilenames}; use rustc_session::Session; -use rustc_span::Symbol; use rustc_span::fatal_error::FatalError; +use rustc_span::Symbol; use tempfile::TempDir; pub struct PrintOnPanic<F: Fn() -> String>(pub F); @@ -105,8 +110,12 @@ impl CodegenBackend for GccCodegenBackend { check_context.set_print_errors_to_stderr(false); let _int128_ty = check_context.new_c_type(CType::UInt128t); // NOTE: we cannot just call compile() as this would require other files than libgccjit.so. - check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str")); - *self.supports_128bit_integers.lock().expect("lock") = check_context.get_last_error() == Ok(None); + check_context.compile_to_file( + gccjit::OutputKind::Assembler, + temp_file.to_str().expect("path to str"), + ); + *self.supports_128bit_integers.lock().expect("lock") = + check_context.get_last_error() == Ok(None); } fn provide(&self, providers: &mut Providers) { @@ -114,14 +123,30 @@ impl CodegenBackend for GccCodegenBackend { providers.global_backend_features = |_tcx, ()| vec![]; } - fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> { + fn codegen_crate<'tcx>( + &self, + tcx: TyCtxt<'tcx>, + metadata: EncodedMetadata, + need_metadata_module: bool, + ) -> Box<dyn Any> { let target_cpu = target_cpu(tcx.sess); - let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module); + let res = codegen_crate( + self.clone(), + tcx, + target_cpu.to_string(), + metadata, + need_metadata_module, + ); Box::new(res) } - fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> { + fn join_codegen( + &self, + ongoing_codegen: Box<dyn Any>, + sess: &Session, + _outputs: &OutputFilenames, + ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> { let (codegen_results, work_products) = ongoing_codegen .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>() .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>") @@ -130,7 +155,12 @@ impl CodegenBackend for GccCodegenBackend { Ok((codegen_results, work_products)) } - fn link(&self, sess: &Session, codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorGuaranteed> { + fn link( + &self, + sess: &Session, + codegen_results: CodegenResults, + outputs: &OutputFilenames, + ) -> Result<(), ErrorGuaranteed> { use rustc_codegen_ssa::back::link::link_binary; link_binary( @@ -147,23 +177,42 @@ impl CodegenBackend for GccCodegenBackend { } impl ExtraBackendMethods for GccCodegenBackend { - fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) -> Self::Module { + fn codegen_allocator<'tcx>( + &self, + tcx: TyCtxt<'tcx>, + module_name: &str, + kind: AllocatorKind, + has_alloc_error_handler: bool, + ) -> Self::Module { let mut mods = GccContext { context: Context::default(), }; - unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, has_alloc_error_handler); } + unsafe { + allocator::codegen(tcx, &mut mods, module_name, kind, has_alloc_error_handler); + } mods } - fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol)
-> (ModuleCodegen<Self::Module>, u64) { - base::compile_codegen_unit(tcx, cgu_name, *self.supports_128bit_integers.lock().expect("lock")) + fn compile_codegen_unit<'tcx>( + &self, + tcx: TyCtxt<'tcx>, + cgu_name: Symbol, + ) -> (ModuleCodegen<Self::Module>, u64) { + base::compile_codegen_unit( + tcx, + cgu_name, + *self.supports_128bit_integers.lock().expect("lock"), + ) } - fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel, _features: &[String]) -> TargetMachineFactoryFn<Self> { + fn target_machine_factory( + &self, + _sess: &Session, + _opt_level: OptLevel, + _features: &[String], + ) -> TargetMachineFactoryFn<Self> { // TODO(antoyo): set opt level. - Arc::new(|_| { - Ok(()) - }) + Arc::new(|_| Ok(())) } fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str { @@ -208,21 +257,31 @@ impl WriteBackendMethods for GccCodegenBackend { type ThinData = (); type ThinBuffer = ThinBuffer; - fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> { + fn run_fat_lto( + _cgcx: &CodegenContext<Self>, + mut modules: Vec<FatLTOInput<Self>>, + _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, + ) -> Result<LtoModuleCodegen<Self>, FatalError> { // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins. // NOTE: implemented elsewhere. // TODO(antoyo): what is implemented elsewhere ^ ? - let module = - match modules.remove(0) { - FatLTOInput::InMemory(module) => module, - FatLTOInput::Serialized { .. } => { - unimplemented!(); - } - }; - Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: vec![] }) + let module = match modules.remove(0) { + FatLTOInput::InMemory(module) => module, + FatLTOInput::Serialized { .. } => { + unimplemented!(); + } + }; + Ok(LtoModuleCodegen::Fat { + module, + _serialized_bitcode: vec![], + }) } - fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> { + fn run_thin_lto( + _cgcx: &CodegenContext<Self>, + _modules: Vec<(String, Self::ThinBuffer)>, + _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, + ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> { unimplemented!(); } @@ -230,21 +289,40 @@ impl WriteBackendMethods for GccCodegenBackend { unimplemented!(); } - unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> { - module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level)); + unsafe fn optimize( + _cgcx: &CodegenContext<Self>, + _diag_handler: &Handler, + module: &ModuleCodegen<Self::Module>, + config: &ModuleConfig, + ) -> Result<(), FatalError> { + module + .module_llvm + .context + .set_optimization_level(to_gcc_opt_level(config.opt_level)); Ok(()) } - fn optimize_fat(_cgcx: &CodegenContext<Self>, _module: &mut ModuleCodegen<Self::Module>) -> Result<(), FatalError> { + fn optimize_fat( + _cgcx: &CodegenContext<Self>, + _module: &mut ModuleCodegen<Self::Module>, + ) -> Result<(), FatalError> { // TODO(antoyo) Ok(()) } - unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> { + unsafe fn optimize_thin( + _cgcx: &CodegenContext<Self>, + _thin: ThinModule<Self>, + ) -> Result<ModuleCodegen<Self::Module>, FatalError> { unimplemented!(); } - unsafe fn codegen(cgcx: &CodegenContext<Self>, diag_handler: &Handler, module: ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> { + unsafe fn codegen( + cgcx: &CodegenContext<Self>, + diag_handler: &Handler, + module: ModuleCodegen<Self::Module>, + config: &ModuleConfig, + ) -> Result<CompiledModule, FatalError> { back::write::codegen(cgcx, diag_handler, module, config) }
@@ -256,7 +334,11 @@ impl WriteBackendMethods for GccCodegenBackend { unimplemented!(); } - fn run_link(cgcx: &CodegenContext<Self>, diag_handler: &Handler, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> { + fn run_link( + cgcx: &CodegenContext<Self>, + diag_handler: &Handler, + modules: Vec<ModuleCodegen<Self::Module>>, + ) -> Result<ModuleCodegen<Self::Module>, FatalError> { back::write::link(cgcx, diag_handler, modules) } } @@ -272,14 +354,12 @@ pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> { fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel { match optlevel { None => OptimizationLevel::None, - Some(level) => { - match level { - OptLevel::No => OptimizationLevel::None, - OptLevel::Less => OptimizationLevel::Limited, - OptLevel::Default => OptimizationLevel::Standard, - OptLevel::Aggressive => OptimizationLevel::Aggressive, - OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited, - } + Some(level) => match level { + OptLevel::No => OptimizationLevel::None, + OptLevel::Less => OptimizationLevel::Limited, + OptLevel::Default => OptimizationLevel::Standard, + OptLevel::Aggressive => OptimizationLevel::Aggressive, + OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited, + }, } } @@ -302,28 +382,30 @@ pub fn target_cpu(sess: &Session) -> &str { pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> { supported_target_features(sess) .iter() - .filter_map( - |&(feature, gate)| { - if sess.is_nightly_build() || allow_unstable || gate.is_none() { Some(feature) } else { None } - }, - ) + .filter_map(|&(feature, gate)| { + if sess.is_nightly_build() || allow_unstable || gate.is_none() { + Some(feature) + } else { + None + } + }) .filter(|_feature| { // TODO(antoyo): implement a way to get enabled feature in libgccjit. // Probably using the equivalent of __builtin_cpu_supports. - #[cfg(feature="master")] + #[cfg(feature = "master")] { _feature.contains("sse") || _feature.contains("avx") } - #[cfg(not(feature="master"))] + #[cfg(not(feature = "master"))] { false } /* - adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512gfni, - avx512ifma, avx512pf, avx512vaes, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpclmulqdq, - avx512vpopcntdq, bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm, - sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, xsave, xsavec, xsaveopt, xsaves - */ + adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512gfni, + avx512ifma, avx512pf, avx512vaes, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpclmulqdq, + avx512vpopcntdq, bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm, + sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, xsave, xsavec, xsaveopt, xsaves + */ //false }) .map(|feature| Symbol::intern(feature)) diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs index 9468a1ef4bbbb..ee6848e4c81ac 100644 --- a/compiler/rustc_codegen_gcc/src/mono_item.rs +++ b/compiler/rustc_codegen_gcc/src/mono_item.rs @@ -1,8 +1,8 @@ use rustc_codegen_ssa::traits::PreDefineMethods; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::mono::{Linkage, Visibility}; -use rustc_middle::ty::{self, Instance, TypeVisitable}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf}; +use rustc_middle::ty::{self, Instance, TypeVisitable}; use rustc_span::def_id::DefId; use crate::base; @@ -10,7 +10,13 @@
use crate::context::CodegenCx; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { - fn predefine_static(&self, def_id: DefId, _linkage: Linkage, _visibility: Visibility, symbol_name: &str) { + fn predefine_static( + &self, + def_id: DefId, + _linkage: Linkage, + _visibility: Visibility, + symbol_name: &str, + ) { let attrs = self.tcx.codegen_fn_attrs(def_id); let instance = Instance::mono(self.tcx, def_id); let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); @@ -23,7 +29,13 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.instances.borrow_mut().insert(instance, global); } - fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) { + fn predefine_fn( + &self, + instance: Instance<'tcx>, + linkage: Linkage, + _visibility: Visibility, + symbol_name: &str, + ) { assert!(!instance.substs.needs_infer()); let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs index 68bdb8d4e55f4..95bf474444002 100644 --- a/compiler/rustc_codegen_gcc/src/type_.rs +++ b/compiler/rustc_codegen_gcc/src/type_.rs @@ -1,10 +1,10 @@ use std::convert::TryInto; use gccjit::{RValue, Struct, Type}; -use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods}; use rustc_codegen_ssa::common::TypeKind; -use rustc_middle::{bug, ty}; +use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods}; use rustc_middle::ty::layout::TyAndLayout; +use rustc_middle::{bug, ty}; use rustc_target::abi::{AddressSpace, Align, Integer, Size}; use crate::common::TypeReflection; @@ -112,7 +112,8 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> { - self.context.new_function_pointer_type(None, return_type, params, false) + self.context + .new_function_pointer_type(None, return_type, params, false) } fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> { @@ -120,12 +121,20 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { if let Some(typ) = self.struct_types.borrow().get(fields) { return typ.clone(); } - let fields: Vec<_> = fields.iter().enumerate() - .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index))) + let fields: Vec<_> = fields + .iter() + .enumerate() + .map(|(index, field)| { + self.context + .new_field(None, *field, &format!("field{}_TODO", index)) + }) .collect(); - let typ = self.context.new_struct_type(None, "struct", &fields).as_type(); + let typ = self + .context + .new_struct_type(None, "struct", &fields) + .as_type(); if packed { - #[cfg(feature="master")] + #[cfg(feature = "master")] typ.set_packed(); } self.struct_types.borrow_mut().insert(types, typ); @@ -135,17 +144,13 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn type_kind(&self, typ: Type<'gcc>) -> TypeKind { if self.is_int_type_or_bool(typ) { TypeKind::Integer - } - else if typ.is_compatible_with(self.float_type) { + } else if typ.is_compatible_with(self.float_type) { TypeKind::Float - } - else if typ.is_compatible_with(self.double_type) { + } else if typ.is_compatible_with(self.double_type) { TypeKind::Double - } - else if typ.is_vector() { + } else if typ.is_vector() { TypeKind::Vector - } - else { + } else { // TODO(antoyo): support other types. 
TypeKind::Void } } @@ -163,14 +168,11 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> { if let Some(typ) = ty.dyncast_array() { typ - } - else if let Some(vector_type) = ty.dyncast_vector() { + } else if let Some(vector_type) = ty.dyncast_vector() { vector_type.get_element_type() - } - else if let Some(typ) = ty.get_pointee() { + } else if let Some(typ) = ty.get_pointee() { typ - } - else { + } else { unreachable!() } } @@ -184,11 +186,9 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { let f64 = self.context.new_type::<f64>(); if typ.is_compatible_with(f32) { 32 - } - else if typ.is_compatible_with(f64) { + } else if typ.is_compatible_with(f64) { 64 - } - else { + } else { panic!("Cannot get width of float type {:?}", typ); } // TODO(antoyo): support other sizes. @@ -213,12 +213,17 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], packed: bool) { - let fields: Vec<_> = fields.iter().enumerate() - .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index))) + let fields: Vec<_> = fields + .iter() + .enumerate() + .map(|(index, field)| { + self.context + .new_field(None, *field, &format!("field_{}", index)) + }) .collect(); typ.set_fields(None, &fields); if packed { - #[cfg(feature="master")] + #[cfg(feature = "master")] typ.as_type().set_packed(); } } @@ -253,7 +258,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } -pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) { +pub fn struct_fields<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + layout: TyAndLayout<'tcx>, +) -> (Vec<Type<'gcc>>, bool) { let field_count = layout.fields.count(); let mut packed = false; @@ -263,8 +271,11 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout for i in layout.fields.index_by_increasing_offset() { let target_offset = layout.fields.offset(i as usize); let field = layout.field(cx, i); - let effective_field_align = - layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset); + let effective_field_align = layout + .align + .abi + .min(field.align.abi) + .restrict_for_offset(target_offset); packed |= effective_field_align < field.align.abi; assert!(target_offset >= offset); @@ -279,7 +290,12 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout } if !layout.is_unsized() && field_count > 0 { if offset > layout.size { - bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset); + bug!( + "layout: {:#?} stride: {:?} offset: {:?}", + layout, + layout.size, + offset + ); } let padding = layout.size - offset; let padding_align = prev_effective_align; diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index 524d10fb5e24d..27b748f8b0028 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -1,13 +1,16 @@ use std::fmt::Write; -use gccjit::{Struct, Type}; use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods}; +use gccjit::{Struct, Type}; use rustc_middle::bug; -use rustc_middle::ty::{self, Ty, TypeVisitable}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; use rustc_middle::ty::print::with_no_trimmed_paths; -use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants}; +use
rustc_middle::ty::{self, Ty, TypeVisitable}; use rustc_target::abi::call::{CastTarget, FnAbi, Reg}; +use rustc_target::abi::{ + self, Abi, FieldsShape, Int, Integer, PointeeInfo, Pointer, Size, TyAbiInterface, Variants, + F32, F64, +}; use crate::abi::{FnAbiGccExt, GccType}; use crate::context::CodegenCx; @@ -25,7 +28,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } - #[cfg(feature="master")] + #[cfg(feature = "master")] pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> { match t { ty::IntTy::Isize => self.type_isize(), @@ -37,7 +40,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } - #[cfg(feature="master")] + #[cfg(feature = "master")] pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> { match t { ty::UintTy::Usize => self.type_isize(), @@ -50,13 +53,17 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } -pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> { +pub fn uncached_gcc_type<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + layout: TyAndLayout<'tcx>, + defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>, +) -> Type<'gcc> { match layout.abi { Abi::Scalar(_) => bug!("handled elsewhere"), Abi::Vector { ref element, count } => { let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO); return cx.context.new_vector_type(element, count); - }, + } Abi::ScalarPair(..) => { return cx.type_struct( &[ @@ -111,22 +118,23 @@ pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLa let gcc_type = cx.type_named_struct(name); cx.set_struct_body(gcc_type, &[fill], packed); gcc_type.as_type() - }, + } } } - FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count), - FieldsShape::Arbitrary { .. } => - match name { - None => { - let (gcc_fields, packed) = struct_fields(cx, layout); - cx.type_struct(&gcc_fields, packed) - }, - Some(ref name) => { - let gcc_type = cx.type_named_struct(name); - *defer = Some((gcc_type, layout)); - gcc_type.as_type() - }, - }, + FieldsShape::Array { count, .. } => { + cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count) + } + FieldsShape::Arbitrary { .. 
} => match name { + None => { + let (gcc_fields, packed) = struct_fields(cx, layout); + cx.type_struct(&gcc_fields, packed) + } + Some(ref name) => { + let gcc_type = cx.type_named_struct(name); + *defer = Some((gcc_type, layout)); + gcc_type.as_type() + } + }, } } @@ -135,10 +143,24 @@ pub trait LayoutGccExt<'tcx> { fn is_gcc_scalar_pair(&self) -> bool; fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>; fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; - fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>; - fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>; + fn scalar_gcc_type_at<'gcc>( + &self, + cx: &CodegenCx<'gcc, 'tcx>, + scalar: &abi::Scalar, + offset: Size, + ) -> Type<'gcc>; + fn scalar_pair_element_gcc_type<'gcc>( + &self, + cx: &CodegenCx<'gcc, 'tcx>, + index: usize, + immediate: bool, + ) -> Type<'gcc>; fn gcc_field_index(&self, index: usize) -> u64; - fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>; + fn pointee_info_at<'gcc>( + &self, + cx: &CodegenCx<'gcc, 'tcx>, + offset: Size, + ) -> Option<PointeeInfo>; } impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { @@ -176,27 +198,27 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) { return ty; } - let ty = - match *self.ty.kind() { - ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields)) - } - ty::Adt(def, _) if def.is_box() => { - cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true)) - } - ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())), - _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO), - }; + let ty = match *self.ty.kind() { + ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { + cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields)) + } + ty::Adt(def, _) if def.is_box() => { + cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true)) + } + ty::FnPtr(sig) => { + cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())) + } + _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO), + }; cx.scalar_types.borrow_mut().insert(self.ty, ty); return ty; } // Check the cache. - let variant_index = - match self.variants { - Variants::Single { index } => Some(index), - _ => None, - }; + let variant_index = match self.variants { + Variants::Single { index } => Some(index), + _ => None, + }; let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned(); if let Some(ty) = cached_type { let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty); @@ -209,24 +231,26 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { return ty; } - assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty); + assert!( + !self.ty.has_escaping_bound_vars(), + "{:?} has escaping bound vars", + self.ty + ); // Make sure lifetimes are erased, to avoid generating distinct LLVM // types for Rust types that only differ in the choice of lifetimes.
let normal_ty = cx.tcx.erase_regions(self.ty); let mut defer = None; - let ty = - if self.ty != normal_ty { - let mut layout = cx.layout_of(normal_ty); - if let Some(v) = variant_index { - layout = layout.for_variant(cx, v); - } - layout.gcc_type(cx, true) + let ty = if self.ty != normal_ty { + let mut layout = cx.layout_of(normal_ty); + if let Some(v) = variant_index { + layout = layout.for_variant(cx, v); } - else { - uncached_gcc_type(cx, *self, &mut defer) - }; + layout.gcc_type(cx, true) + } else { + uncached_gcc_type(cx, *self, &mut defer) + }; cx.types.borrow_mut().insert((self.ty, variant_index), ty); @@ -247,7 +271,12 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { self.gcc_type(cx, true) } - fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> { + fn scalar_gcc_type_at<'gcc>( + &self, + cx: &CodegenCx<'gcc, 'tcx>, + scalar: &abi::Scalar, + offset: Size, + ) -> Type<'gcc> { match scalar.primitive() { Int(i, true) => cx.type_from_integer(i), Int(i, false) => cx.type_from_unsigned_integer(i), @@ -255,19 +284,22 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { F64 => cx.type_f64(), Pointer => { // If we know the alignment, pick something better than i8. - let pointee = - if let Some(pointee) = self.pointee_info_at(cx, offset) { - cx.type_pointee_for_align(pointee.align) - } - else { - cx.type_i8() - }; + let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { + cx.type_pointee_for_align(pointee.align) + } else { + cx.type_i8() + }; cx.type_ptr_to(pointee) } } } - fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> { + fn scalar_pair_element_gcc_type<'gcc>( + &self, + cx: &CodegenCx<'gcc, 'tcx>, + index: usize, + immediate: bool, + ) -> Type<'gcc> { // TODO(antoyo): remove llvm hack: // HACK(eddyb) special-case fat pointers until LLVM removes // pointee types, to avoid bitcasting every `OperandRef::deref`. 
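(The hunks below reflow the offset computation for the second element of a scalar pair: `b` starts at `a`'s size rounded up to `b`'s ABI alignment. A small plain-Rust illustration of that rounding rule, with names invented for the example; ABI alignments are powers of two, which the bit trick relies on:

    fn align_to(size: u64, align: u64) -> u64 {
        assert!(align.is_power_of_two());
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        // A pair like (u8, u32): the second element sits at offset 4, not 1.
        assert_eq!(align_to(1, 4), 4);
        // A pair like (u32, u64) with 8-byte u64 alignment: offset 8.
        assert_eq!(align_to(4, 8), 8);
        // Already-aligned sizes are left unchanged.
        assert_eq!(align_to(8, 8), 8);
    }

This is the same computation `a.size(cx).align_to(b.align(cx).abi)` performs on `Size` and `Align` values in the hunk that follows.)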
@@ -279,14 +311,19 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // thin pointer boxes with scalar allocators are handled by the general logic below ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => { let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty()); - return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate); + return cx + .layout_of(ptr_ty) + .scalar_pair_element_gcc_type(cx, index, immediate); } _ => {} } let (a, b) = match self.abi { Abi::ScalarPair(ref a, ref b) => (a, b), - _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self), + _ => bug!( + "TyAndLayout::scalar_pair_element_llty({:?}): not applicable", + self + ), }; let scalar = [a, b][index]; @@ -301,13 +338,11 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { return cx.type_i1(); } - let offset = - if index == 0 { - Size::ZERO - } - else { - a.size(cx).align_to(b.align(cx).abi) - }; + let offset = if index == 0 { + Size::ZERO + } else { + a.size(cx).align_to(b.align(cx).abi) + }; self.scalar_gcc_type_at(cx, scalar, offset) } @@ -336,7 +371,9 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset); - cx.pointee_infos.borrow_mut().insert((self.ty, offset), result); + cx.pointee_infos + .borrow_mut() + .insert((self.ty, offset), result); result } } @@ -362,7 +399,12 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { layout.gcc_field_index(index) } - fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> { + fn scalar_pair_element_backend_type( + &self, + layout: TyAndLayout<'tcx>, + index: usize, + immediate: bool, + ) -> Type<'gcc> { layout.scalar_pair_element_gcc_type(self, index, immediate) } diff --git a/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs index 8e378177e2457..52cbf23504f93 100644 --- a/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs +++ b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs @@ -25,15 +25,21 @@ pub fn main_inner(profile: Profile) { env::set_var("LD_LIBRARY_PATH", gcc_path); LangTester::new() .test_dir("tests/run") - .test_file_filter(|path| path.extension().expect("extension").to_str().expect("to_str") == "rs") + .test_file_filter(|path| { + path.extension() + .expect("extension") + .to_str() + .expect("to_str") + == "rs" + }) .test_extract(|source| { - let lines = - source.lines() - .skip_while(|l| !l.starts_with("//")) - .take_while(|l| l.starts_with("//")) - .map(|l| &l[2..]) - .collect::<Vec<_>>() - .join("\n"); + let lines = source + .lines() + .skip_while(|l| !l.starts_with("//")) + .take_while(|l| l.starts_with("//")) + .map(|l| &l[2..]) + .collect::<Vec<_>>() + .join("\n"); Some(lines) }) .test_cmds(move |path| { @@ -43,21 +49,25 @@ pub fn main_inner(profile: Profile) { exe.push(path.file_stem().expect("file_stem")); let mut compiler = Command::new("rustc"); compiler.args(&[ - &format!("-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", current_dir), - "--sysroot", &format!("{}/build_sysroot/sysroot/", current_dir), + &format!( + "-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", + current_dir + ), + "--sysroot", + &format!("{}/build_sysroot/sysroot/", current_dir), "-Zno-parallel-llvm", - "-C", "panic=abort", - "-C", "link-arg=-lc", - "-o", exe.to_str().expect("to_str"), + "-C", + "panic=abort", + "-C", + "link-arg=-lc", + "-o", + exe.to_str().expect("to_str"),
path.to_str().expect("to_str"), ]); match profile { Profile::Debug => {} Profile::Release => { - compiler.args(&[ - "-C", "opt-level=3", - "-C", "lto=no", - ]); + compiler.args(&["-C", "opt-level=3", "-C", "lto=no"]); } } // Test command 2: run `tempdir/x`. From 08a3f492ee2d6d6b0227241f79b7c0917701e537 Mon Sep 17 00:00:00 2001 From: Ellis Hoag Date: Sat, 27 Aug 2022 16:44:47 -0700 Subject: [PATCH 3/3] ignore format commit --- .git-blame-ignore-revs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 307e22b0df1fd..c79b1cb360aab 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -4,3 +4,5 @@ a06baa56b95674fc626b3c3fd680d6a65357fe60 95e00bfed801e264e9c4ac817004153ca0f19eb6 # reformat with new rustfmt 971c549ca334b7b7406e61e958efcca9c4152822 +# Format rustc_codegen_gcc +47bf17d9633ab6d901668c6d561432be1da864cc
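(Note on the final patch: `git blame` does not consult `.git-blame-ignore-revs` automatically. For the listed formatting commit to be skipped, each clone has to opt in once with `git config blame.ignoreRevsFile .git-blame-ignore-revs`, or pass `--ignore-revs-file .git-blame-ignore-revs` on individual `git blame` invocations; both are standard git features available since git 2.23.)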