diff --git a/README.md b/README.md index c605993a..92a259b5 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,16 @@ -# rustc_codegen_clr +# rustc_codegen_clr > [!WARNING] > This project is still early in its developement. Bugs, crashes and miscompilations are expected. DO NOT USE IT FOR ANYTHING SERIOUS. -`rustc_codegen_clr` is an experimental Rust to .NET compiler backend. It allows the Rust compiler to turn Rust code into .NET assemblies. This translation is very high-level, and preserves things like types, -field/varaible names. +`rustc_codegen_clr` is an experimental Rust compiler backend (plugin), which allows you to transpile Rust into .NET assemblies, or C source files. + +The end goal of the project is allowing Rust to be used in places where it could not be used before. + +## .NET Interop layer The project aims to provide a way to easily use Rust libraries in .NET. It comes with a Rust/.NET interop layer, which allows you to easily interact with .NET code from Rust: + ``` use mychorizza::*; fn main(){ @@ -18,7 +22,10 @@ fn main(){ mstring.AppendChar('.'); } ``` -The project will also include support for defining .NET classes from Rust. This is currently heavily WIP, and any feedback is appreciated. +This should allow you to integrate Rust code with existing .NET codebases, and should allow you to use .NET-specific libraries or APIs from Rust. + +The project will also include support for defining .NET classes from Rust, allowing .NET code to easily call Rust. +This is currently heavily WIP, and any feedback is appreciated. ``` // Early WIP syntax, subject to change. dotnet_typedef! { @@ -29,23 +36,47 @@ dotnet_typedef! { } } ``` + +With this approach, the classes and APIs exposed to .NET can be easily used from other .NET languages, like F# or C#. The safety of this glue layer can be checked by the Rust compiler, which should make interop issues much less likely. 
+## C support + +While .NET is the main focus of my work, this project can also be used to compile Rust to C, by setting the `C_MODE` environment flag to `1`. + +This may seem like a strange and unrelated feature, but the project was written in such a way that this is not only possible, but relatively easy. + +My representation of .NET's IR maps nicely to C, which means that I was able to add support for compiling Rust to C in 2-3K LOC. Almost all of the codebase is reused, with the C and .NET specific code only +present in the very last stage of compilation. + +This means that, instead of having to maintain 2 separate projects, I can maintain one project. Bug fixes to the .NET side of things also fix C bugs. +Because of that, the support for C in the project is almost as good as support for .NET. + ## Current state of the project -The project currently supports most Rust features (except async and proc macros), but it is not bug-free. It can compile a mostly working version of Rust std, but there are many minor bugs make such `std` not 100% functional. +The project currently supports most Rust features (except proc macros), but it is not bug-free. It can compile a mostly working version of Rust std, but there are many minor bugs that make such `std` not 100% functional. -Most compoenets of `std` are about 95% working. +Most components of `std` are about 95% working in .NET, and 80% working in C. + +Currently, the GCC and clang C compilers are supported, with plans to add support +for `tcc`, and maybe even `sdcc`. So, you *can* compile a lot of existing Rust code, but it may not necessarily *work*. ### core, std, and alloc uint tests. 
+.NET | Name | Pass | Faliure | Crash \ Timeout| OK precentage |--------------------|--------|-------|-------|------| -| Core tests | 1635 | 38 | 41 | 95.39% | +| Core tests | 1662 | 39 | 12 | 97.02% | | Alloc tests | 616 |8 | 40 | 92.77% | | Alloc benches | 464 | 0 | 0 | 100.00% | | Test Harness tests | 57 | 0 | 100.00% | -| std tests | 955| 33 | 17 | 95.02% | -| Core benches | 490 | 2| | 98.39% | +| std tests | 931 | 43 | 64 | 89.69% | +| Core benches | 491 | 1| | 98.99% | + +C + +| Name | Pass | Faliure | OK precentage +|--------------------|--------|-------|------| +| Core tests | 1419 | 294 | 82.83% | ## FAQ ### Q: What is it? diff --git a/cilly/src/bin/linker/main.rs b/cilly/src/bin/linker/main.rs index 09136c84..13c95a47 100644 --- a/cilly/src/bin/linker/main.rs +++ b/cilly/src/bin/linker/main.rs @@ -383,6 +383,7 @@ fn main() { cilly::v2::builtins::int128::i128_mul_ovf_check(&mut final_assembly, &mut overrides); cilly::v2::builtins::f16::generate_f16_ops(&mut final_assembly, &mut overrides, *C_MODE); cilly::v2::builtins::atomics::generate_all_atomics(&mut final_assembly, &mut overrides); + cilly::v2::builtins::stack_addr(&mut final_assembly, &mut overrides); if *C_MODE { cilly::v2::builtins::insert_exeception_stub(&mut final_assembly, &mut overrides); externs.insert("__dso_handle", LIBC.clone()); diff --git a/cilly/src/cil_node.rs b/cilly/src/cil_node.rs index 03a10448..f2a42416 100644 --- a/cilly/src/cil_node.rs +++ b/cilly/src/cil_node.rs @@ -224,9 +224,17 @@ pub enum CILNode { } impl CILNode { - pub fn stack_addr(val: Self, tpe: TypeIdx) -> Self { + pub fn stack_addr(val: Self, tpe_idx: TypeIdx, asm: &mut Assembly) -> Self { + /*let main_module = *asm.main_module(); + let tpe = asm[tpe_idx]; + let sig = asm.sig([tpe], Type::Ptr(tpe_idx)); + let mref = asm.new_methodref(main_module, "stack_addr", sig, MethodKind::Static, vec![]); + CILNode::Call(Box::new(CallOpArgs { + args: Box::new([val]), + site: mref, + }))*/ CILNode::TemporaryLocal(Box::new(( - tpe, + 
tpe_idx, [CILRoot::SetTMPLocal { value: val }].into(), CILNode::LoadAddresOfTMPLocal, ))) @@ -403,13 +411,9 @@ impl CILNode { ))) } pub fn transmute_on_stack(self, src: Type, target: Type, asm: &mut Assembly) -> Self { - let tmp_loc = Self::TemporaryLocal(Box::new(( - asm.alloc_type(src), - Box::new([CILRoot::SetTMPLocal { value: self }]), - CILNode::LoadAddresOfTMPLocal, - ))); + let stack_addr = Self::stack_addr(self, asm.alloc_type(src), asm); Self::LdObj { - ptr: Box::new(CILNode::MRefToRawPtr(Box::new(tmp_loc)).cast_ptr(asm.nptr(target))), + ptr: Box::new(stack_addr.cast_ptr(asm.nptr(target))), obj: Box::new(target), } } diff --git a/cilly/src/v2/builtins/mod.rs b/cilly/src/v2/builtins/mod.rs index aae74e65..44d74ba1 100644 --- a/cilly/src/v2/builtins/mod.rs +++ b/cilly/src/v2/builtins/mod.rs @@ -527,6 +527,20 @@ fn insert_catch_unwind(asm: &mut Assembly, patcher: &mut MissingMethodPatcher) { } const ALLOC_CAP: u64 = u32::MAX as u64; pub(crate) const UNMANAGED_THREAD_START: &str = "UnmanagedThreadStart"; +/// THIS BUILTIN MUST ALWAYS BE INLINED! 
+pub fn stack_addr(asm: &mut Assembly, patcher: &mut MissingMethodPatcher) { + let name = asm.alloc_string("stack_addr"); + let generator = move |_, asm: &mut Assembly| { + let addr = asm.alloc_node(CILNode::LdArgA(0)); + let ptr = asm.alloc_node(CILNode::RefToPtr(addr)); + let ret = asm.alloc_root(CILRoot::Ret(ptr)); + MethodImpl::MethodBody { + blocks: vec![BasicBlock::new(vec![ret], 0, None)], + locals: vec![], + } + }; + patcher.insert(name, Box::new(generator)); +} pub fn argc_argv_init(asm: &mut Assembly, patcher: &mut MissingMethodPatcher) { let name = asm.alloc_string("argc_argv_init"); let generator = move |_, asm: &mut Assembly| { diff --git a/cilly/src/v2/c_exporter/mod.rs b/cilly/src/v2/c_exporter/mod.rs index 64400ca5..6cc7d4c4 100644 --- a/cilly/src/v2/c_exporter/mod.rs +++ b/cilly/src/v2/c_exporter/mod.rs @@ -1147,7 +1147,7 @@ impl Exporter for CExporter { .arg("-o") .arg(exe_out) .arg("-g") - .args(["-fsanitize=undefined","-fno-sanitize-recover"]) + .args(["-fsanitize=undefined,address,alignment","-fno-sanitize=leak","-fno-sanitize-recover"]) .arg("-Ofast") // .arg("-FOLD") saves up on space, consider enabling. ; diff --git a/cilly/src/v2/cilnode.rs b/cilly/src/v2/cilnode.rs index 5f74f3ee..021d1f39 100644 --- a/cilly/src/v2/cilnode.rs +++ b/cilly/src/v2/cilnode.rs @@ -817,7 +817,10 @@ impl CILNode { V1Node::LDFtn(method_ref) => Self::LdFtn(*method_ref), V1Node::Volatile(inner) => { let mut tmp = Self::from_v1(inner, asm); - if let Self::LdInd { volatile: volitale, .. } = &mut tmp { + if let Self::LdInd { + volatile: volitale, .. 
+ } = &mut tmp + { *volitale = true; } else { panic!() diff --git a/cilly/src/v2/class.rs b/cilly/src/v2/class.rs index 170ab37a..8cebb97e 100644 --- a/cilly/src/v2/class.rs +++ b/cilly/src/v2/class.rs @@ -315,7 +315,7 @@ impl ClassRef { asm.alloc_class_ref(ClassRef::new(name, asm_name, true, [].into())) } #[must_use] - pub fn fixed_array(element: Type, length: usize, asm: &mut Assembly) -> ClassRefIdx { + pub fn fixed_array(element: Type, length: u64, asm: &mut Assembly) -> ClassRefIdx { let name = format!("{element}_{length}", element = element.mangle(asm)); let name = asm.alloc_string(name); let cref = ClassRef::new(name, None, true, [].into()); diff --git a/cilly/src/v2/il_exporter/mod.rs b/cilly/src/v2/il_exporter/mod.rs index 95e35fbd..d6348fb1 100644 --- a/cilly/src/v2/il_exporter/mod.rs +++ b/cilly/src/v2/il_exporter/mod.rs @@ -185,6 +185,7 @@ impl ILExporter { name: &str, sig: SigIdx, ) -> std::io::Result<()> { + //assert_ne!(name,"stack_addr", "The builtin 'stack_addr' cilly function must always be inlined, and can't be exported otherwise."); match mimpl{ MethodImpl::MethodBody { blocks, locals } => { let locals_string:String = locals.iter().map(|(name,tpe)|match name { diff --git a/src/aggregate.rs b/src/aggregate.rs index 36777092..e41b4489 100644 --- a/src/aggregate.rs +++ b/src/aggregate.rs @@ -69,7 +69,7 @@ pub fn handle_aggregate<'tcx>( } let element = ctx.monomorphize(*element); let element = ctx.type_from_cache(element); - let array_type = ClassRef::fixed_array(element, value_index.len(), ctx); + let array_type = ClassRef::fixed_array(element, value_index.len() as u64, ctx); let array_getter = super::place::place_adress(target_location, ctx); let sig = FnSig::new( Box::new([ctx.nref(array_type), Type::Int(Int::USize), element]), diff --git a/src/assembly.rs b/src/assembly.rs index a8ade9f1..df158aaa 100644 --- a/src/assembly.rs +++ b/src/assembly.rs @@ -745,6 +745,17 @@ pub fn add_allocation(alloc_id: u64, asm: &mut cilly::v2::Assembly, tcx: 
TyCtxt< CILNode::LDStaticField(Box::new(field_desc)) } +/* +pub fn alloc_buff_tpe(asm: &mut cilly::v2::Assembly, len: u64) -> Option { + match len { + 0 => None, + 1 => Some(Type::Int(Int::U8)), + 2 => Some(Type::Int(Int::U16)), + 4 => Some(Type::Int(Int::U32)), + 8 => Some(Type::Int(Int::U64)), + _ => array_type(), + } +}*/ pub fn add_const_value(asm: &mut cilly::v2::Assembly, bytes: u128) -> StaticFieldDesc { let uint8_ptr = Type::Int(Int::U128); let main_module_id = asm.main_module(); diff --git a/src/constant.rs b/src/constant.rs index 2ccbeba0..107cac86 100644 --- a/src/constant.rs +++ b/src/constant.rs @@ -66,41 +66,14 @@ pub(crate) fn load_const_value<'tcx>( CILNode::uninit_val(tpe, ctx) } ConstValue::Slice { data, meta } => { - let slice_type = get_type(const_ty, ctx); + let slice_type = ctx.type_from_cache(const_ty); let slice_dotnet = slice_type.as_class_ref().expect("Slice type invalid!"); - let metadata_field = FieldDesc::new( - slice_dotnet, - ctx.alloc_string(crate::METADATA), - cilly::v2::Type::Int(Int::USize), - ); - let ptr_field = FieldDesc::new( - slice_dotnet, - ctx.alloc_string(crate::DATA_PTR), - ctx.nptr(cilly::v2::Type::Void), - ); - // TODO: find a better way to get an alloc_id. This is likely to be incoreect. 
+ let alloc_id = ctx.tcx().reserve_and_set_memory_alloc(data); let alloc_id: u64 = crate::utilis::alloc_id_to_u64(alloc_id); - let slice_type = ctx.type_from_cache(const_ty); - CILNode::TemporaryLocal(Box::new(( - ctx.alloc_type(slice_type), - [ - CILRoot::SetField { - addr: Box::new(CILNode::LoadAddresOfTMPLocal), - value: Box::new(CILNode::V2(ctx.alloc_node(Const::USize(meta)))), - desc: ctx.alloc_field(metadata_field), - }, - CILRoot::SetField { - addr: Box::new(CILNode::LoadAddresOfTMPLocal), - value: Box::new( - CILNode::LoadGlobalAllocPtr { alloc_id }.cast_ptr(ctx.nptr(Type::Void)), - ), - desc: ctx.alloc_field(ptr_field), - }, - ] - .into(), - CILNode::LoadTMPLocal, - ))) + let meta = CILNode::V2(ctx.alloc_node(Const::USize(meta))); + let ptr = CILNode::LoadGlobalAllocPtr { alloc_id }.cast_ptr(ctx.nptr(Type::Void)); + CILNode::create_slice(slice_dotnet, ctx, meta, ptr) } ConstValue::Indirect { alloc_id, offset } => { create_const_from_data(const_ty, alloc_id, offset.bytes(), ctx) @@ -152,6 +125,7 @@ fn load_scalar_ptr( site: ctx.alloc_methodref(mref), })), ctx.alloc_type(u8_ptr_ptr), + ctx, ); } let attrs = ctx.tcx().codegen_fn_attrs(def_id); @@ -160,7 +134,7 @@ fn load_scalar_ptr( // TODO: this could cause issues if the pointer to the static is not imediatly dereferenced. 
let site = get_fn_from_static_name(&name, ctx); let ptr_sig = Type::FnPtr(ctx[site].sig()); - return CILNode::stack_addr(CILNode::LDFtn(site), ctx.alloc_type(ptr_sig)); + return CILNode::stack_addr(CILNode::LDFtn(site), ctx.alloc_type(ptr_sig), ctx); } if let Some(section) = attrs.link_section { panic!("static {name} requires special linkage in section {section:?}"); diff --git a/src/operand.rs b/src/operand.rs index 9e5989aa..45138d99 100644 --- a/src/operand.rs +++ b/src/operand.rs @@ -29,7 +29,7 @@ pub(crate) fn operand_address<'tcx>( Operand::Constant(const_val) => { let local_type = ctx.type_from_cache(operand.ty(ctx.body(), ctx.tcx())); let constant = crate::constant::handle_constant(const_val, ctx); - let ptr = CILNode::stack_addr(constant, ctx.alloc_type(local_type)); + let ptr = CILNode::stack_addr(constant, ctx.alloc_type(local_type), ctx); crate::place::deref_op( crate::place::PlaceTy::Ty(operand.ty(ctx.body(), ctx.tcx())), ctx, diff --git a/src/place/body.rs b/src/place/body.rs index 3dc03c09..cc227e74 100644 --- a/src/place/body.rs +++ b/src/place/body.rs @@ -106,7 +106,7 @@ fn body_field<'a>( CILNode::stack_addr(CILNode::transmute_on_stack(CILNode::LdObj { ptr: Box::new(parrent_node), obj: Box::new(curr_type), - }, curr_type, field_type, ctx),ctx.alloc_type(field_type)) + }, curr_type, field_type, ctx),ctx.alloc_type(field_type), ctx) ) } } diff --git a/src/rvalue.rs b/src/rvalue.rs index 33563cfb..e9108391 100644 --- a/src/rvalue.rs +++ b/src/rvalue.rs @@ -66,7 +66,7 @@ pub fn handle_rvalue<'tcx>( dst, ) => (vec![], ptr_to_ptr(ctx, operand, *dst)), Rvalue::Cast(CastKind::PointerCoercion(PointerCoercion::Unsize, _), operand, target) => { - (vec![], crate::unsize::unsize2(ctx, operand, *target)) + crate::unsize::unsize2(ctx, operand, *target, *target_location) } Rvalue::BinaryOp(binop, operands) => ( vec![], diff --git a/src/type/mod.rs b/src/type/mod.rs index df0d1530..7daf744d 100644 --- a/src/type/mod.rs +++ b/src/type/mod.rs @@ -14,7 +14,7 @@ use 
cilly::{ cilnode::MethodKind, Access, BasicBlock, CILNode, CILRoot, ClassDef, ClassDefIdx, ClassRef, ClassRefIdx, Float, Int, MethodDef, MethodImpl, StringIdx, Type, }, - IntoAsmIndex, + Assembly, IntoAsmIndex, }; pub use r#type::*; use rustc_middle::ty::{AdtDef, AdtKind, FloatTy, IntTy, List, ParamEnv, Ty, TyKind, UintTy}; @@ -118,7 +118,7 @@ pub fn get_type<'tcx>(ty: Ty<'tcx>, ctx: &mut MethodCompileCtx<'tcx, '_>) -> Typ // Allocate a class reference to the closure let cref = ctx.alloc_class_ref(ClassRef::new(name, None, true, [].into())); // If there is no defition of this closure present, create the closure. - if ctx.asm().class_ref_to_def(cref).is_none() { + if ctx.class_ref_to_def(cref).is_none() { let type_def = closure_typedef(&fields, layout.layout, ctx, name); ctx.class_def(type_def); } @@ -127,7 +127,7 @@ pub fn get_type<'tcx>(ty: Ty<'tcx>, ctx: &mut MethodCompileCtx<'tcx, '_>) -> Typ TyKind::Dynamic(_list, _, _) => { let name = ctx.alloc_string("Dyn"); let cref = ctx.alloc_class_ref(ClassRef::new(name, None, true, [].into())); - if ctx.asm().class_ref_to_def(cref).is_none() { + if ctx.class_ref_to_def(cref).is_none() { ctx.class_def(ClassDef::new( name, true, @@ -191,7 +191,7 @@ pub fn get_type<'tcx>(ty: Ty<'tcx>, ctx: &mut MethodCompileCtx<'tcx, '_>) -> Typ let name = ctx.alloc_string(name); let cref = ClassRef::new(name, None, true, [].into()); // This only checks if a refernce to this class has already been allocated. 
In theory, allocating a class reference beforhand could break this, and make it not add the type definition - if !ctx.asm().contains_ref(&cref) { + if !ctx.contains_ref(&cref) { let layout = ctx.layout_of(ty); let _ = tuple_typedef(&types, layout.layout, ctx, name); } @@ -276,115 +276,8 @@ pub fn get_type<'tcx>(ty: Ty<'tcx>, ctx: &mut MethodCompileCtx<'tcx, '_>) -> Typ // Get the layout and size of this array let layout = ctx.layout_of(ty); let arr_size = layout.layout.size().bytes(); - - // Get the reference to the array class - let cref = ClassRef::fixed_array(element, length, ctx); - - // If the array definition not already present, add it. - if ctx.asm().class_ref_to_def(cref).is_none() { - let fields = vec![(element, ctx.alloc_string("f0"), Some(0))]; - let class_ref = ctx.asm().class_ref(cref).clone(); - let arr_align = layout.layout.align().pref.bytes(); - let size = if let Ok(size) = std::convert::TryInto::::try_into(arr_size) { - size - } else if *crate::config::ABORT_ON_ERROR { - panic!("Array {ty:?} size {arr_size} >= 2^32. Unsuported.") - } else { - eprintln!("WARNING: Array {ty:?} excceeds max size of 2^32. Clamping the size, this can cause UB."); - u32::MAX - }; - let arr = ctx.class_def(ClassDef::new( - class_ref.name(), - true, - 0, - None, - fields, - vec![], - Access::Public, - Some(NonZeroU32::new(size).unwrap()), - NonZeroU32::new(arr_align.try_into().unwrap()), - )); - // Common nodes - let ldarg_2 = ld_arg!(2).into_idx(ctx); - let elem_tpe_idx = ctx.alloc_type(element); - let elem_addr = add!( - ptr_cast!(ld_arg!(0), *elem_tpe_idx), - mul!(ld_arg!(1), cilly::size_of!(elem_tpe_idx)) - ) - .into_idx(ctx); - // Defintion of the set_Item method. 
- let set_item = ctx.alloc_string("set_Item"); - let this_ref = ctx.nref(Type::ClassRef(cref)); - let set_sig = ctx.sig([this_ref, Type::Int(Int::USize), element], Type::Void); - let arg_names = vec![ - Some(ctx.alloc_string("this")), - Some(ctx.alloc_string("idx")), - Some(ctx.alloc_string("elem")), - ]; - let set_root = ctx.alloc_root(CILRoot::StInd(Box::new(( - elem_addr, ldarg_2, element, false, - )))); - let void_ret = ctx.alloc_root(CILRoot::VoidRet); - ctx.new_method(MethodDef::new( - Access::Public, - arr, - set_item, - set_sig, - MethodKind::Instance, - MethodImpl::MethodBody { - blocks: vec![BasicBlock::new(vec![set_root, void_ret], 0, None)], - locals: vec![], - }, - arg_names, - )); - // Implementation of the get_Item method - let get_item = ctx.alloc_string("get_Item"); - let get_sig = ctx.sig([this_ref, Type::Int(Int::USize)], element); - let arg_names = vec![ - Some(ctx.alloc_string("this")), - Some(ctx.alloc_string("idx")), - ]; - let elem_val = ctx.alloc_node(CILNode::LdInd { - addr: elem_addr, - tpe: elem_tpe_idx, - volatile: false, - }); - let elem_ret = ctx.alloc_root(CILRoot::Ret(elem_val)); - ctx.new_method(MethodDef::new( - Access::Public, - arr, - get_item, - get_sig, - MethodKind::Instance, - MethodImpl::MethodBody { - blocks: vec![BasicBlock::new(vec![elem_ret], 0, None)], - locals: vec![], - }, - arg_names, - )); - // Implementation of the get_Address method - let get_address = ctx.alloc_string("get_Address"); - let elem_ref_tpe = ctx.nptr(element); - let addr_sig = ctx.sig([this_ref, Type::Int(Int::USize)], elem_ref_tpe); - let arg_names = vec![ - Some(ctx.alloc_string("this")), - Some(ctx.alloc_string("idx")), - ]; - - let elem_ret = ctx.alloc_root(CILRoot::Ret(elem_addr)); - ctx.new_method(MethodDef::new( - Access::Public, - arr, - get_address, - addr_sig, - MethodKind::Instance, - MethodImpl::MethodBody { - blocks: vec![BasicBlock::new(vec![elem_ret], 0, None)], - locals: vec![], - }, - arg_names, - )); - } + let arr_align = 
layout.layout.align().pref.bytes(); + let cref = fixed_array(ctx, element, length as u64, arr_size, arr_align); Type::ClassRef(cref) } TyKind::Alias(_, _) => panic!("Attempted to get the .NET type of an unmorphized type"), @@ -405,7 +298,7 @@ pub fn get_type<'tcx>(ty: Ty<'tcx>, ctx: &mut MethodCompileCtx<'tcx, '_>) -> Typ // Allocate a class reference to the coroutine let cref = ctx.alloc_class_ref(ClassRef::new(name, None, true, [].into())); // If there is no defition of this coroutine present, create the coroutine. - if ctx.asm().class_ref_to_def(cref).is_none() { + if ctx.class_ref_to_def(cref).is_none() { let mut type_def = closure_typedef(&fields, layout.layout, ctx, name); handle_tag(&layout.layout, ctx, ty, type_def.fields_mut()); ctx.class_def(type_def); @@ -416,15 +309,133 @@ pub fn get_type<'tcx>(ty: Ty<'tcx>, ctx: &mut MethodCompileCtx<'tcx, '_>) -> Typ _ => todo!("Can't yet get type {ty:?} from type cache."), } } +// +fn fixed_array( + asm: &mut Assembly, + element: Type, + length: u64, + arr_size: u64, + align: u64, +) -> ClassRefIdx { + // Get the reference to the array class + let cref = ClassRef::fixed_array(element, length, asm); + + // If the array definition not already present, add it. + if asm.class_ref_to_def(cref).is_none() { + let fields = vec![(element, asm.alloc_string("f0"), Some(0))]; + let class_ref = asm.class_ref(cref).clone(); + + let size = if let Ok(size) = std::convert::TryInto::::try_into(arr_size) { + size + } else { + panic!( + "Array of {element:?} with size {arr_size} >= 2^32. 
Unsuported.", + element = element.mangle(asm) + ) + }; + let arr = asm.class_def(ClassDef::new( + class_ref.name(), + true, + 0, + None, + fields, + vec![], + Access::Public, + Some(NonZeroU32::new(size).unwrap()), + NonZeroU32::new(align.try_into().unwrap()), + )); + // Common nodes + let ldarg_2 = ld_arg!(2).into_idx(asm); + let elem_tpe_idx = asm.alloc_type(element); + let elem_addr = add!( + ptr_cast!(ld_arg!(0), *elem_tpe_idx), + mul!(ld_arg!(1), cilly::size_of!(elem_tpe_idx)) + ) + .into_idx(asm); + // Defintion of the set_Item method. + let set_item = asm.alloc_string("set_Item"); + let this_ref = asm.nref(Type::ClassRef(cref)); + let set_sig = asm.sig([this_ref, Type::Int(Int::USize), element], Type::Void); + let arg_names = vec![ + Some(asm.alloc_string("this")), + Some(asm.alloc_string("idx")), + Some(asm.alloc_string("elem")), + ]; + let set_root = asm.alloc_root(CILRoot::StInd(Box::new(( + elem_addr, ldarg_2, element, false, + )))); + let void_ret = asm.alloc_root(CILRoot::VoidRet); + asm.new_method(MethodDef::new( + Access::Public, + arr, + set_item, + set_sig, + MethodKind::Instance, + MethodImpl::MethodBody { + blocks: vec![BasicBlock::new(vec![set_root, void_ret], 0, None)], + locals: vec![], + }, + arg_names, + )); + // Implementation of the get_Item method + let get_item = asm.alloc_string("get_Item"); + let get_sig = asm.sig([this_ref, Type::Int(Int::USize)], element); + let arg_names = vec![ + Some(asm.alloc_string("this")), + Some(asm.alloc_string("idx")), + ]; + let elem_val = asm.alloc_node(CILNode::LdInd { + addr: elem_addr, + tpe: elem_tpe_idx, + volatile: false, + }); + let elem_ret = asm.alloc_root(CILRoot::Ret(elem_val)); + asm.new_method(MethodDef::new( + Access::Public, + arr, + get_item, + get_sig, + MethodKind::Instance, + MethodImpl::MethodBody { + blocks: vec![BasicBlock::new(vec![elem_ret], 0, None)], + locals: vec![], + }, + arg_names, + )); + // Implementation of the get_Address method + let get_address = 
asm.alloc_string("get_Address"); + let elem_ref_tpe = asm.nptr(element); + let addr_sig = asm.sig([this_ref, Type::Int(Int::USize)], elem_ref_tpe); + let arg_names = vec![ + Some(asm.alloc_string("this")), + Some(asm.alloc_string("idx")), + ]; + + let elem_ret = asm.alloc_root(CILRoot::Ret(elem_addr)); + asm.new_method(MethodDef::new( + Access::Public, + arr, + get_address, + addr_sig, + MethodKind::Instance, + MethodImpl::MethodBody { + blocks: vec![BasicBlock::new(vec![elem_ret], 0, None)], + locals: vec![], + }, + arg_names, + )); + } + cref +} /// Returns a fat pointer to an inner type. pub fn fat_ptr_to<'tcx>(mut inner: Ty<'tcx>, ctx: &mut MethodCompileCtx<'tcx, '_>) -> ClassRefIdx { inner = ctx.monomorphize(inner); let inner_tpe = get_type(inner, ctx); - let name = format!("FatPtr{elem}", elem = inner_tpe.mangle(ctx.asm())); + let name = format!("FatPtr{elem}", elem = inner_tpe.mangle(ctx)); let name = ctx.alloc_string(name); let cref = ctx.alloc_class_ref(ClassRef::new(name, None, true, [].into())); - if ctx.asm().class_ref_to_def(cref).is_none() { + if ctx.class_ref_to_def(cref).is_none() { let def = ClassDef::new( name, true, @@ -458,7 +469,7 @@ pub fn closure_name( _sig: cilly::v2::SigIdx, ctx: &mut MethodCompileCtx<'_, '_>, ) -> String { - let mangled_fields: String = fields.iter().map(|tpe| tpe.mangle(ctx.asm())).collect(); + let mangled_fields: String = fields.iter().map(|tpe| tpe.mangle(ctx)).collect(); format!( "Closure{field_count}{mangled_fields}", field_count = fields.len() @@ -470,7 +481,7 @@ pub fn coroutine_name( fields: &[Type], ctx: &mut MethodCompileCtx<'_, '_>, ) -> String { - let mangled_fields: String = fields.iter().map(|tpe| tpe.mangle(ctx.asm())).collect(); + let mangled_fields: String = fields.iter().map(|tpe| tpe.mangle(ctx)).collect(); format!( "Coroutine{def_id:?}{field_count}{mangled_fields}", field_count = fields.len() diff --git a/src/unsize.rs b/src/unsize.rs index 332c5516..6492974e 100644 --- a/src/unsize.rs +++ 
b/src/unsize.rs @@ -1,5 +1,6 @@ use crate::assembly::MethodCompileCtx; use crate::operand::{handle_operand, operand_address}; +use crate::place::{place_address_raw, place_adress}; use crate::r#type::fat_ptr_to; use cilly::cil_node::CILNode; use cilly::cil_root::CILRoot; @@ -8,7 +9,7 @@ use cilly::v2::{FieldDesc, Int}; use cilly::{conv_u32, conv_usize, IntoAsmIndex}; use cilly::{Const, Type}; use rustc_middle::{ - mir::Operand, + mir::{Operand, Place}, ty::{layout::TyAndLayout, ParamEnv, PolyExistentialTraitRef, Ty, TyKind, UintTy}, }; use rustc_target::abi::FIRST_VARIANT; @@ -18,7 +19,8 @@ pub fn unsize2<'tcx>( ctx: &mut MethodCompileCtx<'tcx, '_>, operand: &Operand<'tcx>, target: Ty<'tcx>, -) -> CILNode { + destination: Place<'tcx>, +) -> (Vec, CILNode) { // Get the monomorphized source and target type let target = ctx.monomorphize(target); let source = ctx.monomorphize(operand.ty(ctx.body(), ctx.tcx())); @@ -47,8 +49,8 @@ pub fn unsize2<'tcx>( ctx.alloc_string(crate::DATA_PTR), ctx.nptr(cilly::v2::Type::Void), ); - - let target_ptr = CILNode::LoadAddresOfTMPLocal; + let dst = place_address_raw(&destination, ctx); + let target_ptr = dst.clone(); let init_metadata = CILRoot::set_field( target_ptr.clone().cast_ptr(ctx.nptr(fat_ptr_type)), @@ -96,7 +98,7 @@ pub fn unsize2<'tcx>( Box::new(addr), Box::new(CILNode::V2(ctx.alloc_node(8_isize))), ); - let dst_addr = CILNode::MRefToRawPtr(Box::new(CILNode::LoadAddresOfTMPLocal)); + let dst_addr = CILNode::MRefToRawPtr(Box::new(dst.clone())); let const_16 = CILNode::V2(ctx.alloc_node(16_isize)); let dst_addr = CILNode::Add(Box::new(dst_addr), Box::new(const_16)); eprintln!("WARNING:Can't propely unsize types with sized fields yet. 
unsize assumes that layout of Wrapper<&T> == layout of Wrapper>!"); @@ -108,17 +110,13 @@ pub fn unsize2<'tcx>( } else { CILRoot::Nop }; - CILNode::LdObj { - ptr: Box::new( - CILNode::TemporaryLocal(Box::new(( - ctx.alloc_type(Type::ClassRef(fat_ptr_type)), - [copy_val, init_metadata, init_ptr].into(), - CILNode::LoadAddresOfTMPLocal, - ))) - .cast_ptr(ctx.nptr(target_type)), - ), - obj: Box::new(target_type), - } + ( + [copy_val, init_metadata, init_ptr].into(), + CILNode::LdObj { + ptr: Box::new(dst.cast_ptr(ctx.nptr(target_type))), + obj: Box::new(target_type), + }, + ) } /// Adopted from fn unsized_info<'tcx>( diff --git a/test/bench/iter.rs b/test/bench/iter.rs index 5c151c7c..cf159842 100644 --- a/test/bench/iter.rs +++ b/test/bench/iter.rs @@ -1,4 +1,4 @@ -const BIG: i64 = 100_000_000; +const BIG: i64 = 10_000_000; fn bench_for_each_chain_fold() -> i64 { let mut acc = 0; let iter = (0i64..BIG).chain(0..BIG).map(std::hint::black_box);