diff --git a/lib/compiler-llvm/src/translator/code.rs b/lib/compiler-llvm/src/translator/code.rs
index 742ec81a8f4..f74fe59a4a7 100644
--- a/lib/compiler-llvm/src/translator/code.rs
+++ b/lib/compiler-llvm/src/translator/code.rs
@@ -9388,6 +9388,88 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
                 size.add_attribute(AttributeLoc::Function, self.intrinsics.readonly);
                 self.state.push1(size.try_as_basic_value().left().unwrap());
             }
+            Operator::MemoryInit { segment, mem } => {
+                let (dest, src, len) = self.state.pop3()?;
+                let mem = self
+                    .intrinsics
+                    .i32_ty
+                    .const_int(mem.into(), false)
+                    .as_basic_value_enum();
+                let segment = self
+                    .intrinsics
+                    .i32_ty
+                    .const_int(segment.into(), false)
+                    .as_basic_value_enum();
+                self.builder.build_call(
+                    self.intrinsics.memory_init,
+                    &[vmctx.as_basic_value_enum(), mem, segment, dest, src, len],
+                    "",
+                );
+            }
+            Operator::DataDrop { segment } => {
+                let segment = self
+                    .intrinsics
+                    .i32_ty
+                    .const_int(segment.into(), false)
+                    .as_basic_value_enum();
+                self.builder.build_call(
+                    self.intrinsics.data_drop,
+                    &[vmctx.as_basic_value_enum(), segment],
+                    "",
+                );
+            }
+            Operator::MemoryCopy { src, dst } => {
+                // ignored until we support multiple memories
+                let _dst = dst;
+                let (memory_copy, src) = if let Some(local_memory_index) = self
+                    .wasm_module
+                    .local_memory_index(MemoryIndex::from_u32(src))
+                {
+                    (self.intrinsics.memory_copy, local_memory_index.as_u32())
+                } else {
+                    (self.intrinsics.imported_memory_copy, src)
+                };
+
+                let (dest_pos, src_pos, len) = self.state.pop3()?;
+                let src_index = self
+                    .intrinsics
+                    .i32_ty
+                    .const_int(src.into(), false)
+                    .as_basic_value_enum();
+                self.builder.build_call(
+                    memory_copy,
+                    &[
+                        vmctx.as_basic_value_enum(),
+                        src_index,
+                        dest_pos,
+                        src_pos,
+                        len,
+                    ],
+                    "",
+                );
+            }
+            Operator::MemoryFill { mem } => {
+                let (memory_fill, mem) = if let Some(local_memory_index) = self
+                    .wasm_module
+                    .local_memory_index(MemoryIndex::from_u32(mem))
+                {
+                    (self.intrinsics.memory_fill, local_memory_index.as_u32())
+                } else {
+                    (self.intrinsics.imported_memory_fill, mem)
+                };
+
+                let (dst, val, len) = self.state.pop3()?;
+                let mem_index = self
+                    .intrinsics
+                    .i32_ty
+                    .const_int(mem.into(), false)
+                    .as_basic_value_enum();
+                self.builder.build_call(
+                    memory_fill,
+                    &[vmctx.as_basic_value_enum(), mem_index, dst, val, len],
+                    "",
+                );
+            }
             /***************************
              * Reference types.
              * https://github.com/WebAssembly/reference-types/blob/master/proposals/reference-types/Overview.md
diff --git a/lib/compiler-llvm/src/translator/intrinsics.rs b/lib/compiler-llvm/src/translator/intrinsics.rs
index a2274cac5a7..ecc4d059e09 100644
--- a/lib/compiler-llvm/src/translator/intrinsics.rs
+++ b/lib/compiler-llvm/src/translator/intrinsics.rs
@@ -181,8 +181,15 @@ pub struct Intrinsics<'ctx> {
     pub imported_table_set: FunctionValue<'ctx>,
     pub table_grow: FunctionValue<'ctx>,
     pub imported_table_grow: FunctionValue<'ctx>,
+    pub memory_init: FunctionValue<'ctx>,
+    pub data_drop: FunctionValue<'ctx>,
     pub func_ref: FunctionValue<'ctx>,
     pub elem_drop: FunctionValue<'ctx>,
+    pub memory_copy: FunctionValue<'ctx>,
+    pub imported_memory_copy: FunctionValue<'ctx>,
+    pub memory_fill: FunctionValue<'ctx>,
+    pub imported_memory_fill: FunctionValue<'ctx>,
+
     pub throw_trap: FunctionValue<'ctx>,
 
     // VM builtins.
@@ -594,6 +601,82 @@ impl<'ctx> Intrinsics<'ctx> {
                 ),
                 None,
             ),
+            memory_init: module.add_function(
+                "wasmer_vm_memory32_init",
+                void_ty.fn_type(
+                    &[
+                        ctx_ptr_ty.as_basic_type_enum(),
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                    ],
+                    false,
+                ),
+                None,
+            ),
+            memory_copy: module.add_function(
+                "wasmer_vm_memory32_copy",
+                void_ty.fn_type(
+                    &[
+                        ctx_ptr_ty.as_basic_type_enum(),
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                    ],
+                    false,
+                ),
+                None,
+            ),
+            imported_memory_copy: module.add_function(
+                "wasmer_vm_imported_memory32_copy",
+                void_ty.fn_type(
+                    &[
+                        ctx_ptr_ty.as_basic_type_enum(),
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                    ],
+                    false,
+                ),
+                None,
+            ),
+            memory_fill: module.add_function(
+                "wasmer_vm_memory32_fill",
+                void_ty.fn_type(
+                    &[
+                        ctx_ptr_ty.as_basic_type_enum(),
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                    ],
+                    false,
+                ),
+                None,
+            ),
+            imported_memory_fill: module.add_function(
+                "wasmer_vm_imported_memory32_fill",
+                void_ty.fn_type(
+                    &[
+                        ctx_ptr_ty.as_basic_type_enum(),
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                        i32_ty_basic,
+                    ],
+                    false,
+                ),
+                None,
+            ),
+            data_drop: module.add_function(
+                "wasmer_vm_data_drop",
+                void_ty.fn_type(&[ctx_ptr_ty.as_basic_type_enum(), i32_ty_basic], false),
+                None,
+            ),
             func_ref: module.add_function(
                 "wasmer_vm_func_ref",
                 funcref_ty.fn_type(&[ctx_ptr_ty.as_basic_type_enum(), i32_ty_basic], false),
diff --git a/lib/compiler-singlepass/src/codegen_x64.rs b/lib/compiler-singlepass/src/codegen_x64.rs
index 2e3b25422d5..6a7670fcdaf 100644
--- a/lib/compiler-singlepass/src/codegen_x64.rs
+++ b/lib/compiler-singlepass/src/codegen_x64.rs
@@ -5679,6 +5679,160 @@ impl<'a> FuncGen<'a> {
                 self.assembler
                     .emit_mov(Size::S64, Location::GPR(GPR::RAX), ret);
             }
+            Operator::MemoryInit { segment, mem } => {
+                let len = self.value_stack.pop().unwrap();
+                let src = self.value_stack.pop().unwrap();
+                let dst = self.value_stack.pop().unwrap();
+                self.machine.release_locations_only_regs(&[len, src, dst]);
+
+                self.assembler.emit_mov(
+                    Size::S64,
+                    Location::Memory(
+                        Machine::get_vmctx_reg(),
+                        self.vmoffsets
+                            .vmctx_builtin_function(VMBuiltinFunctionIndex::get_memory_init_index())
+                            as i32,
+                    ),
+                    Location::GPR(GPR::RAX),
+                );
+
+                // TODO: should this be 3?
+                self.machine.release_locations_only_osr_state(1);
+
+                self.emit_call_sysv(
+                    |this| {
+                        this.assembler.emit_call_register(GPR::RAX);
+                    },
+                    // [vmctx, memory_index, segment_index, dst, src, len]
+                    [
+                        Location::Imm32(mem),
+                        Location::Imm32(segment),
+                        dst,
+                        src,
+                        len,
+                    ]
+                    .iter()
+                    .cloned(),
+                )?;
+                self.machine
+                    .release_locations_only_stack(&mut self.assembler, &[dst, src, len]);
+            }
+            Operator::DataDrop { segment } => {
+                self.assembler.emit_mov(
+                    Size::S64,
+                    Location::Memory(
+                        Machine::get_vmctx_reg(),
+                        self.vmoffsets
+                            .vmctx_builtin_function(VMBuiltinFunctionIndex::get_data_drop_index())
+                            as i32,
+                    ),
+                    Location::GPR(GPR::RAX),
+                );
+
+                self.emit_call_sysv(
+                    |this| {
+                        this.assembler.emit_call_register(GPR::RAX);
+                    },
+                    // [vmctx, segment_index]
+                    iter::once(Location::Imm32(segment)),
+                )?;
+            }
+            Operator::MemoryCopy { src, dst } => {
+                // ignore until we support multiple memories
+                let _dst = dst;
+                let len = self.value_stack.pop().unwrap();
+                let src_pos = self.value_stack.pop().unwrap();
+                let dst_pos = self.value_stack.pop().unwrap();
+                self.machine
+                    .release_locations_only_regs(&[len, src_pos, dst_pos]);
+
+                let memory_index = MemoryIndex::new(src as usize);
+                let (memory_copy_index, memory_index) =
+                    if self.module.local_memory_index(memory_index).is_some() {
+                        (
+                            VMBuiltinFunctionIndex::get_memory_copy_index(),
+                            memory_index,
+                        )
+                    } else {
+                        (
+                            VMBuiltinFunctionIndex::get_imported_memory_copy_index(),
+                            memory_index,
+                        )
+                    };
+
+                self.assembler.emit_mov(
+                    Size::S64,
+                    Location::Memory(
+                        Machine::get_vmctx_reg(),
+                        self.vmoffsets.vmctx_builtin_function(memory_copy_index) as i32,
+                    ),
+                    Location::GPR(GPR::RAX),
+                );
+
+                // TODO: should this be 3?
+                self.machine.release_locations_only_osr_state(1);
+
+                self.emit_call_sysv(
+                    |this| {
+                        this.assembler.emit_call_register(GPR::RAX);
+                    },
+                    // [vmctx, memory_index, dst, src, len]
+                    [
+                        Location::Imm32(memory_index.index() as u32),
+                        dst_pos,
+                        src_pos,
+                        len,
+                    ]
+                    .iter()
+                    .cloned(),
+                )?;
+                self.machine
+                    .release_locations_only_stack(&mut self.assembler, &[dst_pos, src_pos, len]);
+            }
+            Operator::MemoryFill { mem } => {
+                let len = self.value_stack.pop().unwrap();
+                let val = self.value_stack.pop().unwrap();
+                let dst = self.value_stack.pop().unwrap();
+                self.machine.release_locations_only_regs(&[len, val, dst]);
+
+                let memory_index = MemoryIndex::new(mem as usize);
+                let (memory_fill_index, memory_index) =
+                    if self.module.local_memory_index(memory_index).is_some() {
+                        (
+                            VMBuiltinFunctionIndex::get_memory_fill_index(),
+                            memory_index,
+                        )
+                    } else {
+                        (
+                            VMBuiltinFunctionIndex::get_imported_memory_fill_index(),
+                            memory_index,
+                        )
+                    };
+
+                self.assembler.emit_mov(
+                    Size::S64,
+                    Location::Memory(
+                        Machine::get_vmctx_reg(),
+                        self.vmoffsets.vmctx_builtin_function(memory_fill_index) as i32,
+                    ),
+                    Location::GPR(GPR::RAX),
+                );
+
+                // TODO: should this be 3?
+                self.machine.release_locations_only_osr_state(1);
+
+                self.emit_call_sysv(
+                    |this| {
+                        this.assembler.emit_call_register(GPR::RAX);
+                    },
+                    // [vmctx, memory_index, dst, val, len]
+                    [Location::Imm32(memory_index.index() as u32), dst, val, len]
+                        .iter()
+                        .cloned(),
+                )?;
+                self.machine
+                    .release_locations_only_stack(&mut self.assembler, &[dst, val, len]);
+            }
             Operator::MemoryGrow { mem, mem_byte: _ } => {
                 let memory_index = MemoryIndex::new(mem as usize);
                 let param_pages = self.value_stack.pop().unwrap();
diff --git a/tests/ignores.txt b/tests/ignores.txt
index add4976cb9d..b02f7963984 100644
--- a/tests/ignores.txt
+++ b/tests/ignores.txt
@@ -134,11 +134,6 @@ cranelift::spec::simd::simd_store32_lane
 cranelift::spec::simd::simd_store64_lane
 cranelift::spec::simd::simd_store8_lane
 
-# bulk memory
-llvm::spec::bulk
-llvm::spec::memory_copy
-llvm::spec::memory_fill
-llvm::spec::memory_init
 # new simd
 llvm::spec::simd::simd_align
 llvm::spec::simd::simd_conversions
@@ -174,8 +169,3 @@ llvm::spec::simd::simd_store32_lane
 llvm::spec::simd::simd_store64_lane
 llvm::spec::simd::simd_store8_lane
 
-# bulk memory
-singlepass::spec::bulk
-singlepass::spec::memory_copy
-singlepass::spec::memory_fill
-singlepass::spec::memory_init
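
Not part of the patch above, but useful when reviewing it: a minimal end-to-end sketch that drives each newly lowered operator (memory.init, data.drop, memory.copy, memory.fill) through whichever backend the `wasmer` crate is built with. The embedder calls follow the wasmer 1.x/2.x public API (Store::default, Module::new, Instance::new, exports.get_function); the WAT module and the exported function name "run" are illustrative assumptions, not code introduced by this change.

use wasmer::{imports, Instance, Module, Store};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The default store uses whichever compiler the crate features select
    // (singlepass, cranelift or llvm).
    let store = Store::default();

    // Exercises memory.init, data.drop, memory.copy and memory.fill,
    // i.e. every operator added by this diff.
    let wat = r#"
        (module
          (memory 1)
          (data $seg "wasmer")  ;; passive data segment
          (func (export "run")
            ;; memory.init: dst, src offset within the segment, len
            (memory.init $seg (i32.const 0) (i32.const 0) (i32.const 6))
            (data.drop $seg)
            ;; memory.copy: dst, src, len
            (memory.copy (i32.const 16) (i32.const 0) (i32.const 6))
            ;; memory.fill: dst, value, len
            (memory.fill (i32.const 32) (i32.const 0xff) (i32.const 8))))
    "#;

    let module = Module::new(&store, wat)?;
    let instance = Instance::new(&module, &imports! {})?;
    instance.exports.get_function("run")?.call(&[])?;
    Ok(())
}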