Merge #1777
1777: Wasmparser update r=syrusakbary a=syrusakbary


# Description

Upgrade to the latest wasmparser (`0.65.x`). This PR fixes #1740.
The update was done and tested incrementally (version by version, starting from `0.59.x`); each step is separated into its own commit.

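
For context, the most invasive API change absorbed here is in `br_table` decoding: wasmparser `0.65` drops `BrTable::read_table()` in favor of a fallible `targets()` iterator whose last entry is the default target, which is why the LLVM and singlepass backends below now collect the targets and `pop()` the default. A minimal sketch of that pattern, with the item and error types inferred from this diff rather than taken from the crate docs:

```rust
use wasmparser::{BinaryReaderError, BrTable};

/// Sketch only: split a wasmparser 0.65 `BrTable` into its branch depths and
/// the default depth, mirroring the pattern used by the backends below.
fn split_br_table(table: &BrTable<'_>) -> Result<(Vec<u32>, u32), BinaryReaderError> {
    // `targets()` yields fallible entries, so collect them into a single Result first.
    let mut targets = table.targets().collect::<Result<Vec<_>, _>>()?;
    // In 0.65 the default target is the last entry yielded by the iterator.
    let default_depth = targets.pop().expect("br_table always has a default").0;
    // The backends only need the relative depth of each remaining entry.
    let depths = targets.into_iter().map(|(depth, _)| depth).collect();
    Ok((depths, default_depth))
}
```

The remaining changes are largely mechanical: `MemoryImmediate::align` replaces the hand-decoded `flags & 3`, `MemoryGrow`/`MemorySize` carry `mem`/`mem_byte` fields instead of `reserved`, and the SIMD operators take their current spec names (`I8x16AddSatS`, `I8x16Swizzle`, `V128Load8x8S`, and so on).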


Co-authored-by: Syrus <[email protected]>
bors[bot] and syrusakbary authored Oct 30, 2020
2 parents 72d5e88 + c60eab2 commit 3b23e75
Showing 14 changed files with 418 additions and 263 deletions.
4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion lib/compiler-cranelift/src/sink.rs
@@ -3,7 +3,7 @@
use crate::translator::{irlibcall_to_libcall, irreloc_to_relocationkind};
use cranelift_codegen::binemit;
use cranelift_codegen::ir::{self, ExternalName};
use wasmer_compiler::{JumpTable, Relocation, RelocationTarget, SourceLoc, TrapInformation};
use wasmer_compiler::{JumpTable, Relocation, RelocationTarget, TrapInformation};
use wasmer_types::entity::EntityRef;
use wasmer_types::{FunctionIndex, LocalFunctionIndex};
use wasmer_vm::{ModuleInfo, TrapCode};
186 changes: 104 additions & 82 deletions lib/compiler-cranelift/src/translator/code_translator.rs

Large diffs are not rendered by default.

72 changes: 34 additions & 38 deletions lib/compiler-llvm/src/translator/code.rs
@@ -1110,21 +1110,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
}

fn trap_if_misaligned(&self, memarg: &MemoryImmediate, ptr: PointerValue<'ctx>) {
let align = match memarg.flags & 3 {
0 => {
return; /* No alignment to check. */
}
1 => 2,
2 => 4,
3 => 8,
_ => unreachable!("this match is fully covered"),
};
let align = memarg.align;
let value = self
.builder
.build_ptr_to_int(ptr, self.intrinsics.i64_ty, "");
let and = self.builder.build_and(
value,
self.intrinsics.i64_ty.const_int(align - 1, false),
self.intrinsics.i64_ty.const_int((align - 1).into(), false),
"misaligncheck",
);
let aligned =
@@ -1541,7 +1533,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
.get_insert_block()
.ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;

let (label_depths, default_depth) = table.read_table().map_err(to_wasm_error)?;
let mut label_depths = table
.targets()
.collect::<Result<Vec<_>, _>>()
.map_err(to_wasm_error)?;
let default_depth = label_depths.pop().unwrap().0;

let index = self.state.pop1()?;

@@ -1561,7 +1557,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let cases: Vec<_> = label_depths
.iter()
.enumerate()
.map(|(case_index, &depth)| {
.map(|(case_index, &(depth, _))| {
let frame_result: Result<&ControlFrame, CompileError> =
self.state.frame_at_depth(depth);
let frame = match frame_result {
@@ -2495,7 +2491,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I8x16AddSaturateS => {
Operator::I8x16AddSatS => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let (v1, _) = self.v128_into_i8x16(v1, i1);
let (v2, _) = self.v128_into_i8x16(v2, i2);
@@ -2512,7 +2508,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I16x8AddSaturateS => {
Operator::I16x8AddSatS => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let (v1, _) = self.v128_into_i16x8(v1, i1);
let (v2, _) = self.v128_into_i16x8(v2, i2);
@@ -2529,7 +2525,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I8x16AddSaturateU => {
Operator::I8x16AddSatU => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let (v1, _) = self.v128_into_i8x16(v1, i1);
let (v2, _) = self.v128_into_i8x16(v2, i2);
@@ -2546,7 +2542,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I16x8AddSaturateU => {
Operator::I16x8AddSatU => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let (v1, _) = self.v128_into_i16x8(v1, i1);
let (v2, _) = self.v128_into_i16x8(v2, i2);
@@ -2603,7 +2599,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I8x16SubSaturateS => {
Operator::I8x16SubSatS => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let (v1, _) = self.v128_into_i8x16(v1, i1);
let (v2, _) = self.v128_into_i8x16(v2, i2);
@@ -2620,7 +2616,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I16x8SubSaturateS => {
Operator::I16x8SubSatS => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let (v1, _) = self.v128_into_i16x8(v1, i1);
let (v2, _) = self.v128_into_i16x8(v2, i2);
@@ -2637,7 +2633,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I8x16SubSaturateU => {
Operator::I8x16SubSatU => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let (v1, _) = self.v128_into_i8x16(v1, i1);
let (v2, _) = self.v128_into_i8x16(v2, i2);
@@ -2654,7 +2650,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I16x8SubSaturateU => {
Operator::I16x8SubSatU => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let (v1, _) = self.v128_into_i16x8(v1, i1);
let (v2, _) = self.v128_into_i16x8(v2, i2);
@@ -6975,7 +6971,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
};
self.state.push1_extra(res, info);
}
Operator::V8x16Swizzle => {
Operator::I8x16Swizzle => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let v1 = self.apply_pending_canonicalization(v1, i1);
let v1 = self
@@ -7043,7 +7039,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::V8x16Shuffle { lanes } => {
Operator::I8x16Shuffle { lanes } => {
let ((v1, i1), (v2, i2)) = self.state.pop2_extra()?;
let v1 = self.apply_pending_canonicalization(v1, i1);
let v1 = self
@@ -7066,7 +7062,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I16x8Load8x8S { ref memarg } => {
Operator::V128Load8x8S { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -7087,7 +7083,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I16x8Load8x8U { ref memarg } => {
Operator::V128Load8x8U { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -7108,7 +7104,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I32x4Load16x4S { ref memarg } => {
Operator::V128Load16x4S { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -7129,7 +7125,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I32x4Load16x4U { ref memarg } => {
Operator::V128Load16x4U { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -7150,7 +7146,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I64x2Load32x2S { ref memarg } => {
Operator::V128Load32x2S { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -7171,7 +7167,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::I64x2Load32x2U { ref memarg } => {
Operator::V128Load32x2U { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -7192,7 +7188,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::V8x16LoadSplat { ref memarg } => {
Operator::V128Load8Splat { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -7213,7 +7209,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::V16x8LoadSplat { ref memarg } => {
Operator::V128Load16Splat { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -7234,7 +7230,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::V32x4LoadSplat { ref memarg } => {
Operator::V128Load32Splat { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -7255,7 +7251,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let res = self.builder.build_bitcast(res, self.intrinsics.i128_ty, "");
self.state.push1(res);
}
Operator::V64x2LoadSplat { ref memarg } => {
Operator::V128Load64Splat { ref memarg } => {
let offset = self.state.pop1()?.into_int_value();
let memory_index = MemoryIndex::from_u32(0);
let effective_address = self.resolve_memory_ptr(
@@ -9246,8 +9242,8 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
self.state.push1(old);
}

Operator::MemoryGrow { reserved } => {
let memory_index = MemoryIndex::from_u32(reserved);
Operator::MemoryGrow { mem, mem_byte: _ } => {
let memory_index = MemoryIndex::from_u32(mem);
let delta = self.state.pop1()?;
let grow_fn_ptr = self.ctx.memory_grow(memory_index, self.intrinsics);
let grow = self.builder.build_call(
@@ -9257,23 +9253,23 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
delta,
self.intrinsics
.i32_ty
.const_int(reserved.into(), false)
.const_int(mem.into(), false)
.as_basic_value_enum(),
],
"",
);
self.state.push1(grow.try_as_basic_value().left().unwrap());
}
Operator::MemorySize { reserved } => {
let memory_index = MemoryIndex::from_u32(reserved);
Operator::MemorySize { mem, mem_byte: _ } => {
let memory_index = MemoryIndex::from_u32(mem);
let size_fn_ptr = self.ctx.memory_size(memory_index, self.intrinsics);
let size = self.builder.build_call(
size_fn_ptr,
&[
vmctx.as_basic_value_enum(),
self.intrinsics
.i32_ty
.const_int(reserved.into(), false)
.const_int(mem.into(), false)
.as_basic_value_enum(),
],
"",
34 changes: 14 additions & 20 deletions lib/compiler-singlepass/src/codegen_x64.rs
@@ -1316,17 +1316,7 @@ impl<'a> FuncGen<'a> {
self.machine.release_temp_gpr(tmp_bound);
self.machine.release_temp_gpr(tmp_base);

let align = match memarg.flags & 3 {
0 => 1,
1 => 2,
2 => 4,
3 => 8,
_ => {
return Err(CodegenError {
message: "emit_memory_op align: unreachable value".to_string(),
})
}
};
let align = memarg.align;
if check_alignment && align != 1 {
let tmp_aligncheck = self.machine.acquire_temp_gpr().unwrap();
self.assembler.emit_mov(
@@ -1336,7 +1326,7 @@
);
self.assembler.emit_and(
Size::S64,
Location::Imm32(align - 1),
Location::Imm32((align - 1).into()),
Location::GPR(tmp_aligncheck),
);
self.assembler
@@ -5615,8 +5605,8 @@ impl<'a> FuncGen<'a> {
// TODO: Re-enable interrupt signal check without branching
}
Operator::Nop => {}
Operator::MemorySize { reserved } => {
let memory_index = MemoryIndex::new(reserved as usize);
Operator::MemorySize { mem, mem_byte: _ } => {
let memory_index = MemoryIndex::new(mem as usize);
self.assembler.emit_mov(
Size::S64,
Location::Memory(
@@ -5653,8 +5643,8 @@ impl<'a> FuncGen<'a> {
self.assembler
.emit_mov(Size::S64, Location::GPR(GPR::RAX), ret);
}
Operator::MemoryGrow { reserved } => {
let memory_index = MemoryIndex::new(reserved as usize);
Operator::MemoryGrow { mem, mem_byte: _ } => {
let memory_index = MemoryIndex::new(mem as usize);
let param_pages = self.value_stack.pop().unwrap();

self.machine.release_locations_only_regs(&[param_pages]);
@@ -6304,9 +6294,13 @@ impl<'a> FuncGen<'a> {
self.assembler.emit_label(after);
}
Operator::BrTable { ref table } => {
let (targets, default_target) = table.read_table().map_err(|e| CodegenError {
message: format!("BrTable read_table: {:?}", e),
})?;
let mut targets = table
.targets()
.collect::<Result<Vec<_>, _>>()
.map_err(|e| CodegenError {
message: format!("BrTable read_table: {:?}", e),
})?;
let default_target = targets.pop().unwrap().0;
let cond = self.pop_value_released();
let table_label = self.assembler.get_label();
let mut table: Vec<DynamicLabel> = vec![];
@@ -6334,7 +6328,7 @@ impl<'a> FuncGen<'a> {
);
self.assembler.emit_jmp_location(Location::GPR(GPR::RDX));

for target in targets.iter() {
for (target, _) in targets.iter() {
let label = self.assembler.get_label();
self.assembler.emit_label(label);
table.push(label);
2 changes: 1 addition & 1 deletion lib/compiler/Cargo.toml
@@ -13,7 +13,7 @@ edition = "2018"
[dependencies]
wasmer-vm = { path = "../vm", version = "1.0.0-alpha4" }
wasmer-types = { path = "../wasmer-types", version = "1.0.0-alpha4", default-features = false }
wasmparser = { version = "0.57", optional = true, default-features = false }
wasmparser = { version = "0.65", optional = true, default-features = false }
target-lexicon = { version = "0.10", default-features = false }
enumset = "1.0"
hashbrown = { version = "0.8", optional = true }