diff --git a/.gitignore b/.gitignore index c28c8df7f777..cb23ff6204fb 100644 --- a/.gitignore +++ b/.gitignore @@ -23,4 +23,6 @@ vendor examples/build examples/.cache *.coredump +*.smt2 +cranelift/isle/veri/veri_engine/test_output crates/explorer/node_modules diff --git a/Cargo.lock b/Cargo.lock index 8c0a4c24b23a..14ad64691085 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -507,7 +507,7 @@ version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.60", @@ -1055,6 +1055,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +[[package]] +name = "easy-smt" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cc74633db03a8b18de7c933fbd72402d45dfaf2d1736c1fd8ff9bbe461b4572" +dependencies = [ + "log", + "unicode-segmentation", +] + [[package]] name = "egg" version = "0.6.0" @@ -1463,6 +1473,12 @@ dependencies = [ "serde", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "heck" version = "0.5.0" @@ -2220,7 +2236,7 @@ dependencies = [ [[package]] name = "pulley-interpreter" -version = "0.2.0" +version = "26.0.0" dependencies = [ "arbitrary", "cranelift-bitset", @@ -2738,6 +2754,25 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 1.0.92", +] + [[package]] name = "subtle" version = "2.5.0" @@ -2869,7 +2904,7 @@ name = "test-programs-artifacts" version = "0.0.0" dependencies = [ "cargo_metadata", - "heck", + "heck 0.5.0", "wasmtime", "wit-component", ] @@ -3120,6 +3155,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" + [[package]] name = "unicode-width" version = "0.1.9" @@ -3199,6 +3240,28 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "veri_engine" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "cranelift-codegen", + "cranelift-codegen-meta", + "cranelift-isle", + "easy-smt", + "env_logger 0.11.5", + "itertools 0.12.1", + "log", + "strum", + "strum_macros", + "veri_ir", +] + +[[package]] +name = "veri_ir" +version = "0.1.0" + [[package]] name = "verify-component-adapter" version = "26.0.0" @@ -4072,7 +4135,7 @@ name = "wasmtime-wit-bindgen" version = "26.0.0" dependencies = [ "anyhow", - "heck", + "heck 0.5.0", "indexmap 2.2.6", "wit-parser", ] @@ -4175,7 +4238,7 @@ name = 
"wiggle-generate" version = "26.0.0" dependencies = [ "anyhow", - "heck", + "heck 0.5.0", "proc-macro2", "quote", "shellexpand", @@ -4240,7 +4303,7 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "winch-codegen" -version = "0.24.0" +version = "26.0.0" dependencies = [ "anyhow", "cranelift-codegen", @@ -4484,7 +4547,7 @@ version = "0.33.0" source = "git+https://github.com/wasmfx/wit-bindgenfx?tag=v0.33.0#08fb6541e847b36c7d914d37d4caaa4c3a33b6eb" dependencies = [ "anyhow", - "heck", + "heck 0.5.0", "wit-parser", ] @@ -4502,7 +4565,7 @@ version = "0.33.0" source = "git+https://github.com/wasmfx/wit-bindgenfx?tag=v0.33.0#08fb6541e847b36c7d914d37d4caaa4c3a33b6eb" dependencies = [ "anyhow", - "heck", + "heck 0.5.0", "indexmap 2.2.6", "prettyplease", "syn 2.0.60", diff --git a/Cargo.toml b/Cargo.toml index 2b4cf7defcfe..041bde77ef62 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -139,6 +139,8 @@ members = [ "cranelift", "cranelift/isle/fuzz", "cranelift/isle/islec", + "cranelift/isle/veri/veri_engine", + "cranelift/isle/veri/veri_ir", "cranelift/serde", "crates/bench-api", "crates/c-api/artifact", @@ -227,7 +229,7 @@ wasmtime-jit-icache-coherence = { path = "crates/jit-icache-coherence", version wasmtime-wit-bindgen = { path = "crates/wit-bindgen", version = "=26.0.0" } test-programs-artifacts = { path = 'crates/test-programs/artifacts' } -pulley-interpreter = { path = 'pulley', version = "=0.2.0" } +pulley-interpreter = { path = 'pulley', version = "=26.0.0" } pulley-interpreter-fuzz = { path = 'pulley/fuzz' } cranelift-codegen = { path = "cranelift/codegen", version = "0.113.0", default-features = false, features = ["std", "unwind"] } @@ -246,7 +248,7 @@ cranelift-bitset = { path = "cranelift/bitset", version = "0.113.0" } cranelift-control = { path = "cranelift/control", version = "0.113.0" } cranelift = { path = "cranelift/umbrella", version = "0.113.0" } -winch-codegen = { path = "winch/codegen", version = "=0.24.0" } +winch-codegen = { path = "winch/codegen", version = "=26.0.0" } wasi-preview1-component-adapter = { path = "crates/wasi-preview1-component-adapter" } byte-array-literals = { path = "crates/wasi-preview1-component-adapter/byte-array-literals" } diff --git a/ci/run-tests.sh b/ci/run-tests.sh index 5b80e91f941b..ab92437f99c9 100755 --- a/ci/run-tests.sh +++ b/ci/run-tests.sh @@ -12,6 +12,8 @@ # # - wasm-spec-interpreter: brings in OCaml which is a pain to configure for all # targets, tested as part of the wastime-fuzzing CI job. 
+# +# - veri_engine: requires an SMT solver (z3) cargo test \ --workspace \ @@ -21,6 +23,7 @@ cargo test \ --exclude wasmtime-fuzzing \ --exclude wasm-spec-interpreter \ --exclude wasmtime-winch \ + --exclude veri_engine \ $@ # NOTE(dhil): Several WasmFX features are conflicting, so we do not @@ -32,4 +35,5 @@ cargo test \ # --exclude wasmtime-wasi-nn \ # --exclude wasmtime-fuzzing \ # --exclude wasm-spec-interpreter \ +# --exclude veri_engine \ # $@ diff --git a/cranelift/codegen/meta/src/error.rs b/cranelift/codegen/meta/src/error.rs index 3490508145a4..e898dea4e296 100644 --- a/cranelift/codegen/meta/src/error.rs +++ b/cranelift/codegen/meta/src/error.rs @@ -18,6 +18,8 @@ impl Error { } } +impl std::error::Error for Error {} + impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.inner) diff --git a/cranelift/codegen/meta/src/isle.rs b/cranelift/codegen/meta/src/isle.rs index 3744c88075f0..da32cda673f6 100644 --- a/cranelift/codegen/meta/src/isle.rs +++ b/cranelift/codegen/meta/src/isle.rs @@ -11,13 +11,47 @@ pub struct IsleCompilations { pub items: Vec<IsleCompilation>, } +impl IsleCompilations { + pub fn lookup(&self, name: &str) -> Option<&IsleCompilation> { + for compilation in &self.items { + if compilation.name == name { + return Some(compilation); + } + } + None + } +} + #[derive(Clone, Debug)] pub struct IsleCompilation { + pub name: String, pub output: std::path::PathBuf, pub inputs: Vec<std::path::PathBuf>, pub untracked_inputs: Vec<std::path::PathBuf>, } +impl IsleCompilation { + pub fn inputs(&self) -> Vec<std::path::PathBuf> { + self.inputs + .iter() + .chain(self.untracked_inputs.iter()) + .cloned() + .collect() + } +} + +pub fn shared_isle_lower_paths(codegen_crate_dir: &std::path::Path) -> Vec<std::path::PathBuf> { + let inst_specs_isle = codegen_crate_dir.join("src").join("inst_specs.isle"); + let prelude_isle = codegen_crate_dir.join("src").join("prelude.isle"); + let prelude_lower_isle = codegen_crate_dir.join("src").join("prelude_lower.isle"); + // The shared instruction selector logic. + vec![ + inst_specs_isle.clone(), + prelude_isle.clone(), + prelude_lower_isle.clone(), + ] +} + /// Construct the list of compilations (transformations from ISLE /// source to generated Rust source) that exist in the repository. pub fn get_isle_compilations( @@ -61,6 +95,7 @@ pub fn get_isle_compilations( items: vec![ // The mid-end optimization rules. IsleCompilation { + name: "opt".to_string(), output: gen_dir.join("isle_opt.rs"), inputs: vec![ prelude_isle.clone(), @@ -81,6 +116,7 @@ }, // The x86-64 instruction selector. IsleCompilation { + name: "x64".to_string(), output: gen_dir.join("isle_x64.rs"), inputs: vec![ prelude_isle.clone(), @@ -92,6 +128,7 @@ }, // The aarch64 instruction selector. IsleCompilation { + name: "aarch64".to_string(), output: gen_dir.join("isle_aarch64.rs"), inputs: vec![ prelude_isle.clone(), @@ -105,6 +142,7 @@ }, // The s390x instruction selector. IsleCompilation { + name: "s390x".to_string(), output: gen_dir.join("isle_s390x.rs"), inputs: vec![ prelude_isle.clone(), @@ -116,6 +154,7 @@ }, // The risc-v instruction selector. IsleCompilation { + name: "riscv64".to_string(), output: gen_dir.join("isle_riscv64.rs"), inputs: vec![ prelude_isle.clone(), @@ -128,6 +167,7 @@ }, // The Pulley instruction selector. 
IsleCompilation { + name: "pulley".to_string(), output: gen_dir.join("isle_pulley_shared.rs"), inputs: vec![ prelude_isle.clone(), diff --git a/cranelift/codegen/src/inst_specs.isle b/cranelift/codegen/src/inst_specs.isle new file mode 100644 index 000000000000..83f703e4510c --- /dev/null +++ b/cranelift/codegen/src/inst_specs.isle @@ -0,0 +1,242 @@ +(model Imm64 (type (bv 64))) + +(model IntCC (enum + (Equal #x00) + (NotEqual #x01) + (SignedGreaterThan #x02) + (SignedGreaterThanOrEqual #x03) + (SignedLessThan #x04) + (SignedLessThanOrEqual #x05) + (UnsignedGreaterThan #x06) + (UnsignedGreaterThanOrEqual #x07) + (UnsignedLessThan #x08) + (UnsignedLessThanOrEqual #x09))) + +(spec (smin x y) + (provide (= result (if (bvsle x y) x y)))) +(instantiate smin bv_binary_8_to_64) + +(spec (umin x y) + (provide (= result (if (bvule x y) x y)))) +(instantiate umin bv_binary_8_to_64) + +(spec (smax x y) + (provide (= result (if (bvsge x y) x y)))) +(instantiate smax bv_binary_8_to_64) + +(spec (umax x y) + (provide (= result (if (bvuge x y) x y)))) +(instantiate umax bv_binary_8_to_64) + +(spec (iconst arg) + (provide (= arg (zero_ext 64 result)))) +(instantiate iconst + ((args (bv 64)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 64)) (ret (bv 16)) (canon (bv 16))) + ((args (bv 64)) (ret (bv 32)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 64)) (canon (bv 64))) +) + +(spec (bitselect c x y) + (provide (= result (bvor (bvand c x) (bvand (bvnot c) y))))) +(instantiate bitselect bv_ternary_8_to_64) + +(spec (icmp c x y) + (provide + (= result + (switch c + ((IntCC.Equal) (if (= x y) #x01 #x00)) + ((IntCC.NotEqual) (if (not (= x y)) #x01 #x00)) + ((IntCC.SignedGreaterThan) (if (bvsgt x y) #x01 #x00)) + ((IntCC.SignedGreaterThanOrEqual) (if (bvsge x y) #x01 #x00)) + ((IntCC.SignedLessThan) (if (bvslt x y) #x01 #x00)) + ((IntCC.SignedLessThanOrEqual) (if (bvsle x y) #x01 #x00)) + ((IntCC.UnsignedGreaterThan) (if (bvugt x y) #x01 #x00)) + ((IntCC.UnsignedGreaterThanOrEqual) (if (bvuge x y) #x01 #x00)) + ((IntCC.UnsignedLessThan) (if (bvult x y) #x01 #x00)) + ((IntCC.UnsignedLessThanOrEqual) (if (bvule x y) #x01 #x00))))) + (require + ;; AVH TODO: if we understand enums semantically, we can generate this + (or + (= c (IntCC.Equal)) + (= c (IntCC.NotEqual)) + (= c (IntCC.UnsignedGreaterThanOrEqual)) + (= c (IntCC.UnsignedGreaterThan)) + (= c (IntCC.UnsignedLessThanOrEqual)) + (= c (IntCC.UnsignedLessThan)) + (= c (IntCC.SignedGreaterThanOrEqual)) + (= c (IntCC.SignedGreaterThan)) + (= c (IntCC.SignedLessThanOrEqual)) + (= c (IntCC.SignedLessThan))))) +(instantiate icmp + ((args (bv 8) (bv 8) (bv 8)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 8) (bv 16) (bv 16)) (ret (bv 8)) (canon (bv 16))) + ((args (bv 8) (bv 32) (bv 32)) (ret (bv 8)) (canon (bv 32))) + ((args (bv 8) (bv 64) (bv 64)) (ret (bv 8)) (canon (bv 64))) +) + +(spec (iadd x y) + (provide (= result (bvadd x y)))) +(instantiate iadd bv_binary_8_to_64) + +(spec (isub x y) + (provide (= result (bvsub x y)))) +(instantiate isub bv_binary_8_to_64) + +(spec (ineg x) + (provide (= result (bvneg x)))) +(instantiate ineg bv_unary_8_to_64) + +(spec (iabs x) + (provide (= result + (if (bvsge x (conv_to (widthof x) #x0000000000000000)) + x + (bvneg x))))) +(instantiate iabs bv_unary_8_to_64) + +(spec (imul x y) + (provide (= result (bvmul x y)))) +(instantiate imul bv_binary_8_to_64) + +(spec (udiv x y) + (provide (= result (bvudiv x y))) + (require (not (= y (zero_ext (widthof y) #b0))))) +(instantiate udiv bv_binary_8_to_64) + +(spec (sdiv x y) + (provide (= 
result (bvsdiv x y))) + (require (not (= y (zero_ext (widthof y) #b0))))) +(instantiate sdiv bv_binary_8_to_64) + +(spec (urem x y) + (provide (= result (bvurem x y))) + (require (not (= y (zero_ext (widthof y) #b0))))) +(instantiate urem bv_binary_8_to_64) + +(spec (srem x y) + (provide (= result (bvsrem x y))) + (require (not (= y (zero_ext (widthof y) #b0))))) +(instantiate srem bv_binary_8_to_64) + +(spec (imul_imm x y) + (provide (= result (bvmul (sign_ext 64 x) y)))) + +(spec (band x y) + (provide (= result (bvand x y)))) +(instantiate band bv_binary_8_to_64) + +(spec (bor x y) + (provide (= result (bvor x y)))) +(instantiate bor bv_binary_8_to_64) + +(spec (bxor x y) + (provide (= result (bvxor x y)))) +(instantiate bxor bv_binary_8_to_64) + +(spec (bnot x) + (provide (= result (bvnot x))) + (require (or (= (widthof x) 8) (= (widthof x) 16) (= (widthof x) 32) (= (widthof x) 64)))) +(instantiate bnot bv_unary_8_to_64) + +(spec (band_not x y) + (provide (= result (bvand x (bvnot y))))) +(instantiate band_not bv_binary_8_to_64) + +(spec (rotl x y) + (provide (= result (rotl x y)))) +(instantiate rotl bv_binary_8_to_64) + +(spec (rotr x y) + (provide (= result (rotr x y)))) +(instantiate rotr bv_binary_8_to_64) + +;; fn shift_mask(&mut self, ty: Type) -> ImmLogic { +;; let mask = (ty.lane_bits() - 1) as u64; +;; ImmLogic::maybe_from_u64(mask, I32).unwrap() +;; } +(spec (ishl x y) + (provide + (= result + (bvshl x + (bvand (conv_to (widthof y) (bvsub (int2bv 64 (widthof y)) + #x0000000000000001)) + y))))) +(instantiate ishl bv_binary_8_to_64) + +(spec (ushr x y) + (provide + (= result + (bvlshr x + (bvand (conv_to (widthof y) (bvsub (int2bv 64 (widthof y)) + #x0000000000000001)) + y))))) +(instantiate ushr bv_binary_8_to_64) + +(spec (sshr x y) + (provide + (= result + (bvashr x + (bvand (conv_to (widthof y) (bvsub (int2bv 64 (widthof y)) + #x0000000000000001)) + y))))) +(instantiate sshr bv_binary_8_to_64) + +(spec (clz x) + (provide (= result (clz x)))) +(instantiate clz bv_unary_8_to_64) + +(spec (cls x) + (provide (= result (cls x)))) +(instantiate cls bv_unary_8_to_64) + +(spec (ctz x) + (provide (= result (clz (rev x))))) +(instantiate ctz bv_unary_8_to_64) + +(spec (popcnt x) + (provide (= result (popcnt x)))) +(instantiate popcnt bv_unary_8_to_64) + +(form extend + ((args (bv 8)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 8)) (ret (bv 16)) (canon (bv 8))) + ((args (bv 8)) (ret (bv 32)) (canon (bv 8))) + ((args (bv 8)) (ret (bv 64)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 16)) (canon (bv 16))) + ((args (bv 16)) (ret (bv 32)) (canon (bv 16))) + ((args (bv 16)) (ret (bv 64)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 32)) (canon (bv 32))) + ((args (bv 32)) (ret (bv 64)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 64)) (canon (bv 64))) +) + +(spec (uextend x) + (provide (= result (zero_ext (widthof result) x)))) +(instantiate uextend extend) + +(spec (sextend x) + (provide (= result (sign_ext (widthof result) x)))) +(instantiate sextend extend) + + +(form load + ((args (bv 16) (bv 64) (bv 32)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 16) (bv 64) (bv 32)) (ret (bv 16)) (canon (bv 16))) + ((args (bv 16) (bv 64) (bv 32)) (ret (bv 32)) (canon (bv 32))) + ((args (bv 16) (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 64))) +) +(spec (load flags val offset) + (provide + (= result (load_effect flags (widthof result) (bvadd val (sign_ext 64 offset)))))) +(instantiate load load) + +(form store + ((args (bv 16) (bv 8) (bv 64) (bv 32)) (ret Unit) (canon (bv 8))) + ((args (bv 16) (bv 16) (bv 64) 
(bv 32)) (ret Unit) (canon (bv 16))) + ((args (bv 16) (bv 32) (bv 64) (bv 32)) (ret Unit) (canon (bv 32))) + ((args (bv 16) (bv 64) (bv 64) (bv 32)) (ret Unit) (canon (bv 64))) +) +(spec (store flags val_to_store addr offset) + (provide + (= result (store_effect flags (widthof val_to_store) val_to_store (bvadd (zero_ext 64 addr) (sign_ext 64 offset)))))) +(instantiate store store) diff --git a/cranelift/codegen/src/isa/aarch64/inst.isle b/cranelift/codegen/src/isa/aarch64/inst.isle index 0b17c14a6181..e5cd7d58e753 100644 --- a/cranelift/codegen/src/isa/aarch64/inst.isle +++ b/cranelift/codegen/src/isa/aarch64/inst.isle @@ -995,6 +995,23 @@ (end Reg) (step Imm12)))) +(model ALUOp (enum + (Add #x00) ;; 0 + (Sub #x01) + (Orr #x02) + (OrrNot #x03) + (And #x04) + (AndNot #x05) + (Eor #x06) + (EorNot #x07) + (SubS #x08) + (SDiv #x09) + (UDiv #x0a) + (RotR #x0b) + (Lsr #x0c) + (Asr #x0d) + (Lsl #x0e))) + ;; An ALU operation. This can be paired with several instruction formats ;; below (see `Inst`) in any combination. (type ALUOp @@ -1054,10 +1071,15 @@ )) (type UImm5 (primitive UImm5)) +(model Imm12 (type (bv 24))) (type Imm12 (primitive Imm12)) +(model ImmLogic (type (bv 64))) (type ImmLogic (primitive ImmLogic)) +(model ImmShift (type (bv 6))) (type ImmShift (primitive ImmShift)) +(model ShiftOpAndAmt (type (bv 16))) (type ShiftOpAndAmt (primitive ShiftOpAndAmt)) +(model MoveWideConst (type (bv 16))) (type MoveWideConst (primitive MoveWideConst)) (type NZCV (primitive NZCV)) (type ASIMDFPModImm (primitive ASIMDFPModImm)) @@ -1074,6 +1096,17 @@ (type CodeOffset (primitive CodeOffset)) (type VecMachLabel extern (enum)) +(model ExtendOp (enum + (UXTB #b000) + (UXTH #b001) + (UXTW #b010) + (UXTX #b011) + (SXTB #b100) + (SXTH #b101) + (SXTW #b110) + (SXTX #b111) +)) + (type ExtendOp extern (enum (UXTB) @@ -1223,6 +1256,10 @@ (type FPUOpRI extern (enum)) (type FPUOpRIMod extern (enum)) +(model OperandSize + (enum (Size32 32) + (Size64 64))) + (type OperandSize extern (enum Size32 Size64)) @@ -1230,9 +1267,27 @@ (type TestBitAndBranchKind (enum (Z) (NZ))) ;; Helper for calculating the `OperandSize` corresponding to a type +(spec (operand_size ty) + (provide + (= result (if (<= ty 32) 32 64))) + (require + (or (= ty 8) (= ty 16) (= ty 32) (= ty 64)))) +(instantiate operand_size + ((args Int) (ret Int) (canon (bv 8))) + ((args Int) (ret Int) (canon (bv 16))) + ((args Int) (ret Int) (canon (bv 32))) + ((args Int) (ret Int) (canon (bv 64))) +) (decl operand_size (Type) OperandSize) -(rule 1 (operand_size (fits_in_32 _ty)) (OperandSize.Size32)) -(rule (operand_size (fits_in_64 _ty)) (OperandSize.Size64)) +(rule operand_size_32 1 (operand_size (fits_in_32 _ty)) (OperandSize.Size32)) +(rule operand_size_64 (operand_size (fits_in_64 _ty)) (OperandSize.Size64)) + +(model ScalarSize + (enum (Size8 8) + (Size16 16) + (Size32 32) + (Size64 64) + (Size128 128))) (type ScalarSize extern (enum Size8 @@ -1274,6 +1329,12 @@ (rule (vector_lane_size (VectorSize.Size32x2)) (ScalarSize.Size32)) (rule (vector_lane_size (VectorSize.Size64x2)) (ScalarSize.Size64)) +(model Cond + (enum (Lo #x03) + (Hi #x08) + (Lt #x0b) + (Gt #x0c))) + (type Cond extern (enum (Eq) @@ -1294,6 +1355,16 @@ (Nv) )) +(model VectorSize + (enum + (Size8x8 #x00) + (Size8x16 #x01) + (Size16x4 #x02) + (Size16x8 #x03) + (Size32x2 #x04) + (Size32x4 #x05) + (Size64x2 #x06))) + (type VectorSize extern (enum (Size8x8) @@ -1740,21 +1811,40 @@ (decl pure partial imm_logic_from_imm64 (Type Imm64) ImmLogic) (extern constructor imm_logic_from_imm64 
imm_logic_from_imm64) +(spec (imm_shift_from_imm64 ty x) + (provide (= result (extract 5 0 (bvand x (bvsub (int2bv 64 ty) #x0000000000000001))))) + (require (bvult (bvand x (bvsub (int2bv 64 ty) #x0000000000000001)) #x0000000000000040))) + (decl pure partial imm_shift_from_imm64 (Type Imm64) ImmShift) (extern constructor imm_shift_from_imm64 imm_shift_from_imm64) (decl imm_shift_from_u8 (u8) ImmShift) (extern constructor imm_shift_from_u8 imm_shift_from_u8) +(spec (imm12_from_u64 imm12) + (provide (= result (zero_ext 64 imm12))) + (require + ; REVIEW(mbm): correct formulation of imm12? + (or + (= imm12 (bvand imm12 #x000fff)) + (= imm12 (bvand imm12 #xfff000)) + ) + ) +) (decl imm12_from_u64 (Imm12) u64) (extern extractor imm12_from_u64 imm12_from_u64) (decl u8_into_uimm5 (u8) UImm5) (extern constructor u8_into_uimm5 u8_into_uimm5) +(spec (u8_into_imm12 arg) + (provide (= result (zero_ext 24 arg)))) (decl u8_into_imm12 (u8) Imm12) (extern constructor u8_into_imm12 u8_into_imm12) +(spec (u64_into_imm_logic ty a) + (provide (= result a)) + (require (or (= ty 32) (= ty 64)))) (decl u64_into_imm_logic (Type u64) ImmLogic) (extern constructor u64_into_imm_logic u64_into_imm_logic) @@ -1794,6 +1884,9 @@ (decl pure partial lshr_from_u64 (Type u64) ShiftOpAndAmt) (extern constructor lshr_from_u64 lshr_from_u64) +(spec (lshl_from_imm64 ty a) + (provide (= result (concat #x0e (extract 7 0 a)))) + (require (= (extract 63 8 a) #b00000000000000000000000000000000000000000000000000000000))) (decl pure partial lshl_from_imm64 (Type Imm64) ShiftOpAndAmt) (extern constructor lshl_from_imm64 lshl_from_imm64) @@ -1816,21 +1909,71 @@ (extern constructor is_zero_uimm12 is_zero_uimm12) ;; Helper to go directly from a `Value`, when it's an `iconst`, to an `Imm12`. +; REVIEW(mbm): is imm12_from_value spec correct? +; NOTE(mbm): compare with https://github.com/avanhatt/wasmtime/blob/94ccb9d4d55a479893cb04bc796ec620ed24cee2/cranelift/codegen/src/isa/aarch64/inst.isle#L1867-L1874 +(spec (imm12_from_value imm12) + (provide + ; REVIEW(mbm): zero_ext vs conv_to? + (= result (conv_to (widthof result) (zero_ext 64 imm12))) + (= imm12 (conv_to (widthof imm12) (zero_ext 64 result))) + ) + (require + ; REVIEW(mbm): correct formulation of imm12? + (or + (= imm12 (bvand imm12 #x000fff)) + (= imm12 (bvand imm12 #xfff000)) + ) + ) +) (decl imm12_from_value (Imm12) Value) (extractor (imm12_from_value n) (iconst (u64_from_imm64 (imm12_from_u64 n)))) - ;; Conceptually the same as `imm12_from_value`, but tries negating the constant ;; value (first sign-extending to handle narrow widths). +(spec (imm12_from_negated_value arg) + (provide + (= (bvneg (sign_ext 64 arg)) (zero_ext 64 result)) + ) + (require + ; REVIEW(mbm): correct formulation of imm12? + (or + (= result (bvand result #x000fff)) + (= result (bvand result #xfff000)) + ) + ) +) + +(instantiate imm12_from_negated_value + ((args (bv 8)) (ret (bv 24)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 24)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 24)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 24)) (canon (bv 64))) +) (decl pure partial imm12_from_negated_value (Value) Imm12) -(rule +(rule imm12_from_negated_value (imm12_from_negated_value (has_type ty (iconst n))) (if-let (imm12_from_u64 imm) (i64_as_u64 (i64_neg (i64_sextend_imm64 ty n)))) imm) ;; Helper type to represent a value and an extend operation fused together. 
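+;; Example (illustrative, inferred from the extracts used in the spec below):
+;; the 67-bit model packs a 3-bit `ExtendOp` tag above a 64-bit value, so
+;;   (extract 66 64 x) ;; the extend operation, e.g. (ExtendOp.UXTB) = #b000
+;;   (extract 63 0 x)  ;; the underlying 64-bit value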
+(model ExtendedValue (type (bv 67))) (type ExtendedValue extern (enum)) +;; Only including the i8 to i32 opcodes, based on the impl of extended_value_from_value +(spec (extended_value_from_value x) + (provide + (switch (extract 66 64 x) + ((ExtendOp.UXTB) (= (extract 63 0 x) (zero_ext 64 (extract 7 0 (zero_ext 64 result))))) + ((ExtendOp.UXTH) (= (extract 63 0 x) (zero_ext 64 (extract 15 0 (zero_ext 64 result))))) + ((ExtendOp.UXTW) (= (extract 63 0 x) (zero_ext 64 (extract 31 0 (zero_ext 64 result))))) + ((ExtendOp.SXTB) (= (extract 63 0 x) (sign_ext 64 (extract 7 0 (zero_ext 64 result))))) + ((ExtendOp.SXTH) (= (extract 63 0 x) (sign_ext 64 (extract 15 0 (zero_ext 64 result))))) + ((ExtendOp.SXTW) (= (extract 63 0 x) (sign_ext 64 (extract 31 0 (zero_ext 64 result))))))) + (require + (bvult (extract 66 64 x) #b110) + (not (= (extract 66 64 x) #b011)) + (= result (conv_to (widthof result) x)) + (or (= 8 (widthof result)) (= 16 (widthof result)) (= 32 (widthof result))))) (decl extended_value_from_value (ExtendedValue) Value) (extern extractor extended_value_from_value extended_value_from_value) @@ -1855,6 +1998,7 @@ ;; Instruction creation helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Helper for creating the zero register. +(spec (zero_reg) (provide (= result #x0000000000000000))) (decl zero_reg () Reg) (extern constructor zero_reg zero_reg) @@ -1906,6 +2050,24 @@ dst)) ;; Helper for emitting `MInst.AluRRImmShift` instructions. +(spec (alu_rr_imm_shift op t a b) + (provide + (= result (switch op + ((ALUOp.Lsr) + (if (<= t 32) + (conv_to 64 (bvlshr (extract 31 0 a) (bvand (bvsub (int2bv 32 32) #x00000001) (extract 31 0 (zero_ext 64 b))))) + (bvlshr a (bvand (bvsub (int2bv 64 64) #x0000000000000001) (zero_ext 64 b))))) + ((ALUOp.Asr) + (if (<= t 32) + (conv_to 64 (bvashr (extract 31 0 a) (bvand (bvsub (int2bv 32 32) #x00000001) (extract 31 0 (zero_ext 64 b))))) + (bvashr a (bvand (bvsub (int2bv 64 64) #x0000000000000001) (zero_ext 64 b))))) + ((ALUOp.Lsl) + (if (<= t 32) + (conv_to 64 (bvshl (extract 31 0 a) (bvand (bvsub (int2bv 32 32) #x00000001) (extract 31 0 (zero_ext 64 b))))) + (bvshl a (bvand (bvsub (int2bv 64 64) #x0000000000000001) (zero_ext 64 b)))))))) + (require + (or (= op (ALUOp.Lsr)) (= op (ALUOp.Asr)) (= op (ALUOp.Lsl))) + (or (= t 8) (= t 16) (= t 32) (= t 64)))) (decl alu_rr_imm_shift (ALUOp Type Reg ImmShift) Reg) (rule (alu_rr_imm_shift op ty src imm) (let ((dst WritableReg (temp_writable_reg $I64)) @@ -1913,6 +2075,24 @@ dst)) ;; Helper for emitting `MInst.AluRRR` instructions. 
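+;; Example (illustrative): the specs below mask the shift amount to the
+;; operand width, matching AArch64 register-shift semantics. For a 32-bit
+;; (ALUOp.Lsr) with a = #x0000000080000000 and b = 33:
+;;   (bvand (bvsub (int2bv 32 32) #x00000001) #x00000021) = #x00000001
+;;   (bvlshr #x80000000 #x00000001)                       = #x40000000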
+(spec (alu_rrr op t a b) + (provide + (= result (switch op + ((ALUOp.Lsr) + (if (<= t 32) + (conv_to 64 (bvlshr (extract 31 0 a) (bvand (bvsub (int2bv 32 32) #x00000001) (extract 31 0 b)))) + (bvlshr a (bvand (bvsub (int2bv 64 64) #x0000000000000001) b)))) + ((ALUOp.Asr) + (if (<= t 32) + (conv_to 64 (bvashr (extract 31 0 a) (bvand (bvsub (int2bv 32 32) #x00000001) (extract 31 0 b)))) + (bvashr a (bvand (bvsub (int2bv 64 64) #x0000000000000001) b)))) + ((ALUOp.Lsl) + (if (<= t 32) + (conv_to 64 (bvshl (extract 31 0 a) (bvand (bvsub (int2bv 32 32) #x00000001) (extract 31 0 b)))) + (bvshl a (bvand (bvsub (int2bv 64 64) #x0000000000000001) b))))))) + (require + (or (= op (ALUOp.Lsr)) (= op (ALUOp.Asr)) (= op (ALUOp.Lsl))) + (or (= t 8) (= t 16) (= t 32) (= t 64)))) (decl alu_rrr (ALUOp Type Reg Reg) Reg) (rule (alu_rrr op ty src1 src2) (let ((dst WritableReg (temp_writable_reg $I64)) @@ -2132,12 +2312,19 @@ (MInst.AluRRImm12 (ALUOp.AddS) size (writable_zero_reg) src1 src2))) +(spec (cmp ty x y) + (provide (= result (subs ty x y))) + (require + (or (= ty 32) (= ty 64)))) (decl cmp (OperandSize Reg Reg) ProducesFlags) (rule (cmp size src1 src2) (ProducesFlags.ProducesFlagsSideEffect (MInst.AluRRR (ALUOp.SubS) size (writable_zero_reg) src1 src2))) +(spec (cmp_imm ty x y) + (provide (= result (subs ty x (zero_ext 64 y)))) + (require (or (= ty 32) (= ty 64)))) (decl cmp_imm (OperandSize Reg Imm12) ProducesFlags) (rule (cmp_imm size src1 src2) (ProducesFlags.ProducesFlagsSideEffect @@ -2148,6 +2335,20 @@ (rule (cmp64_imm src1 src2) (cmp_imm (OperandSize.Size64) src1 src2)) +(spec (cmp_extend ty x y extend) + (provide + (= result + (subs ty x + (switch extend + ((ExtendOp.UXTB) (zero_ext 64 (extract 7 0 y))) + ((ExtendOp.UXTH) (zero_ext 64 (extract 15 0 y))) + ((ExtendOp.UXTW) (zero_ext 64 (extract 31 0 y))) + ((ExtendOp.UXTX) (zero_ext 64 (extract 63 0 y))) + ((ExtendOp.SXTB) (sign_ext 64 (extract 7 0 y))) + ((ExtendOp.SXTH) (sign_ext 64 (extract 15 0 y))) + ((ExtendOp.SXTW) (sign_ext 64 (extract 31 0 y))) + ((ExtendOp.SXTX) (sign_ext 64 (extract 63 0 y))))))) + (require (or (= ty 32) (= ty 64)))) (decl cmp_extend (OperandSize Reg Reg ExtendOp) ProducesFlags) (rule (cmp_extend size src1 src2 extend) (ProducesFlags.ProducesFlagsSideEffect @@ -2295,6 +2496,8 @@ dst)) ;; Helper for emitting `MInst.MovToFpu` instructions. +(spec (mov_to_fpu x s) + (provide (= result (zero_ext 64 (conv_to s x))))) (decl mov_to_fpu (Reg ScalarSize) Reg) (rule (mov_to_fpu x size) (let ((dst WritableReg (temp_writable_reg $I8X16)) @@ -2326,6 +2529,30 @@ dst)) ;; Helper for emitting `MInst.MovFromVec` instructions. +(spec (mov_from_vec x i s) + (provide + (= result + (switch s + (8 + (switch i + (#x00 (zero_ext 64 (extract 7 0 x))) + (#x01 (zero_ext 64 (extract 15 8 x))) + (#x02 (zero_ext 64 (extract 23 16 x))) + (#x03 (zero_ext 64 (extract 31 24 x))) + (#x04 (zero_ext 64 (extract 39 32 x))) + (#x05 (zero_ext 64 (extract 47 40 x))) + (#x06 (zero_ext 64 (extract 55 48 x))) + (#x07 (zero_ext 64 (extract 63 56 x))))) + (16 + (switch i + (#x00 (zero_ext 64 (extract 15 0 x))) + (#x01 (zero_ext 64 (extract 31 16 x))) + (#x02 (zero_ext 64 (extract 47 32 x))) + (#x03 (zero_ext 64 (extract 63 48 x))))) + (32 + (switch i + (#x00 (zero_ext 64 (extract 31 0 x))) + (#x01 (zero_ext 64 (extract 63 32 x))))))))) (decl mov_from_vec (Reg u8 ScalarSize) Reg) (rule (mov_from_vec rn idx size) (let ((dst WritableReg (temp_writable_reg $I64)) @@ -2346,6 +2573,11 @@ dst)) ;; Helper for emitting `MInst.Extend` instructions. 
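+;; Example (illustrative): in the spec below, `b` selects signed extension,
+;; `c` is the source width and `d` the destination width, so extending the
+;; 8-bit value #xff signed from 8 to 64 bits gives #xffffffffffffffff, while
+;; the unsigned variant gives #x00000000000000ff.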
+(spec (extend a b c d) + (provide + (if b + (= result (sign_ext (bv2int d) (conv_to (bv2int c) a))) + (= result (zero_ext (bv2int d) (conv_to (bv2int c) a)))))) (decl extend (Reg bool u8 u8) Reg) (rule (extend rn signed from_bits to_bits) (let ((dst WritableReg (temp_writable_reg $I64)) @@ -2458,19 +2690,71 @@ (value_reg dst)))) ;; Helpers for generating `add` instructions. - +(spec (add ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 a) (extract 31 0 b))) + (bvadd a b))))) (decl add (Type Reg Reg) Reg) (rule (add ty x y) (alu_rrr (ALUOp.Add) ty x y)) +(spec (add_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 a) (zero_ext 32 b))) + (bvadd a (zero_ext 64 b))))) + (require + (or + (= b (bvand b #x000fff)) + (= b (bvand b #xfff000))))) (decl add_imm (Type Reg Imm12) Reg) (rule (add_imm ty x y) (alu_rr_imm12 (ALUOp.Add) ty x y)) +(spec (add_extend ty x y) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 x) + (switch (extract 66 64 y) + ((ExtendOp.UXTB) (zero_ext 32 (extract 7 0 y))) + ((ExtendOp.UXTH) (zero_ext 32 (extract 15 0 y))) + ((ExtendOp.UXTW) (zero_ext 32 (extract 31 0 y))) + ((ExtendOp.UXTX) (zero_ext 32 (extract 31 0 y))) + ((ExtendOp.SXTB) (sign_ext 32 (extract 7 0 y))) + ((ExtendOp.SXTH) (sign_ext 32 (extract 15 0 y))) + ((ExtendOp.SXTW) (sign_ext 32 (extract 31 0 y))) + ((ExtendOp.SXTX) (sign_ext 32 (extract 31 0 y)))))) + (bvadd x + (switch (extract 66 64 y) + ((ExtendOp.UXTB) (zero_ext 64 (extract 7 0 y))) + ((ExtendOp.UXTH) (zero_ext 64 (extract 15 0 y))) + ((ExtendOp.UXTW) (zero_ext 64 (extract 31 0 y))) + ((ExtendOp.UXTX) (zero_ext 64 (extract 63 0 y))) + ((ExtendOp.SXTB) (sign_ext 64 (extract 7 0 y))) + ((ExtendOp.SXTH) (sign_ext 64 (extract 15 0 y))) + ((ExtendOp.SXTW) (sign_ext 64 (extract 31 0 y))) + ((ExtendOp.SXTX) (sign_ext 64 (extract 63 0 y))))))))) (decl add_extend (Type Reg ExtendedValue) Reg) (rule (add_extend ty x y) (alu_rr_extend_reg (ALUOp.Add) ty x y)) (decl add_extend_op (Type Reg Reg ExtendOp) Reg) (rule (add_extend_op ty x y extend) (alu_rrr_extend (ALUOp.Add) ty x y extend)) +(spec (add_shift ty a b shift) + (provide + (= result (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 a) + (switch (extract 15 8 shift) + ((ALUOp.Lsl) (bvshl (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))) + ((ALUOp.Lsr) (bvlshr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))) + ((ALUOp.Asr) (bvashr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))))) + (bvadd a + (switch (extract 15 8 shift) + ((ALUOp.Lsl) (bvshl b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))) + ((ALUOp.Lsr) (bvlshr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))) + ((ALUOp.Asr) (bvashr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))))))))) (decl add_shift (Type Reg Reg ShiftOpAndAmt) Reg) (rule (add_shift ty x y z) (alu_rrr_shift (ALUOp.Add) ty x y z)) @@ -2478,16 +2762,66 @@ (rule (add_vec x y size) (vec_rrr (VecALUOp.Add) x y size)) ;; Helpers for generating `sub` instructions. 
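+;; Example (illustrative): as in `add` above, the 32-bit case operates on the
+;; low halves and widens back with `conv_to`; e.g. for ty = 32,
+;;   (bvsub (extract 31 0 #x0000000000000000) (extract 31 0 #x0000000000000001))
+;; wraps to #xffffffff.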
- +(spec (sub ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (extract 31 0 b))) + (bvsub a b))))) (decl sub (Type Reg Reg) Reg) (rule (sub ty x y) (alu_rrr (ALUOp.Sub) ty x y)) +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require + (or + (= b (bvand b #x000fff)) + (= b (bvand b #xfff000))))) (decl sub_imm (Type Reg Imm12) Reg) (rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) +(spec (sub_extend ty x y) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 x) + (switch (extract 66 64 y) + ((ExtendOp.UXTB) (zero_ext 32 (extract 7 0 y))) + ((ExtendOp.UXTH) (zero_ext 32 (extract 15 0 y))) + ((ExtendOp.UXTW) (zero_ext 32 (extract 31 0 y))) + ((ExtendOp.UXTX) (zero_ext 32 (extract 31 0 y))) + ((ExtendOp.SXTB) (sign_ext 32 (extract 7 0 y))) + ((ExtendOp.SXTH) (sign_ext 32 (extract 15 0 y))) + ((ExtendOp.SXTW) (sign_ext 32 (extract 31 0 y))) + ((ExtendOp.SXTX) (sign_ext 32 (extract 31 0 y)))))) + (bvsub x + (switch (extract 66 64 y) + ((ExtendOp.UXTB) (zero_ext 64 (extract 7 0 y))) + ((ExtendOp.UXTH) (zero_ext 64 (extract 15 0 y))) + ((ExtendOp.UXTW) (zero_ext 64 (extract 31 0 y))) + ((ExtendOp.UXTX) (zero_ext 64 (extract 63 0 y))) + ((ExtendOp.SXTB) (sign_ext 64 (extract 7 0 y))) + ((ExtendOp.SXTH) (sign_ext 64 (extract 15 0 y))) + ((ExtendOp.SXTW) (sign_ext 64 (extract 31 0 y))) + ((ExtendOp.SXTX) (sign_ext 64 (extract 63 0 y))))))))) (decl sub_extend (Type Reg ExtendedValue) Reg) (rule (sub_extend ty x y) (alu_rr_extend_reg (ALUOp.Sub) ty x y)) +(spec (sub_shift ty a b shift) + (provide + (= result (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (switch (extract 15 8 shift) + ((ALUOp.Lsl) (bvshl (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))) + ((ALUOp.Lsr) (bvlshr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))) + ((ALUOp.Asr) (bvashr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))))) + (bvsub a (switch (extract 15 8 shift) + ((ALUOp.Lsl) (bvshl b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))) + ((ALUOp.Lsr) (bvlshr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))) + ((ALUOp.Asr) (bvashr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))))))))) (decl sub_shift (Type Reg Reg ShiftOpAndAmt) Reg) (rule (sub_shift ty x y z) (alu_rrr_shift (ALUOp.Sub) ty x y z)) @@ -2513,12 +2847,22 @@ (sbc_paired $I64 x_hi y_hi)))) ;; Helpers for generating `madd` instructions. - +(spec (madd ty a b c) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 c) (bvmul (extract 31 0 a) (extract 31 0 b)))) + (bvadd c (bvmul a b)))))) (decl madd (Type Reg Reg Reg) Reg) (rule (madd ty x y z) (alu_rrrr (ALUOp3.MAdd) ty x y z)) ;; Helpers for generating `msub` instructions. - +(spec (msub ty a b c) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 c) (bvmul (extract 31 0 a) (extract 31 0 b)))) + (bvsub c (bvmul a b)))))) (decl msub (Type Reg Reg Reg) Reg) (rule (msub ty x y z) (alu_rrrr (ALUOp3.MSub) ty x y z)) @@ -2622,6 +2966,28 @@ (SideEffectNoResult.Inst (MInst.Brk))) ;; Helper for generating `addp` instructions. 
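+;; Example (illustrative): `addp` sums adjacent lane pairs. In the 32x2 case
+;; (s = #x02), the spec below places the pairwise sum of x's lanes in the
+;; high half of the result and y's in the low half:
+;;   (concat (bvadd (extract 31 0 x) (extract 63 32 x))
+;;           (bvadd (extract 31 0 y) (extract 63 32 y)))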
+(spec (addp x y s) + (provide + (= result + (switch s + (#x00 (concat + (bvadd (extract 55 48 x) (extract 63 56 x)) + (bvadd (extract 39 32 x) (extract 47 40 x)) + (bvadd (extract 23 16 x) (extract 31 24 x)) + (bvadd (extract 7 0 x) (extract 15 8 x)) + (bvadd (extract 55 48 y) (extract 63 56 y)) + (bvadd (extract 39 32 y) (extract 47 40 y)) + (bvadd (extract 23 16 y) (extract 31 24 y)) + (bvadd (extract 7 0 y) (extract 15 8 y)))) + (#x01 (concat + (bvadd (extract 47 32 x) (extract 63 48 x)) + (bvadd (extract 15 0 x) (extract 31 16 x)) + (bvadd (extract 47 32 y) (extract 63 48 y)) + (bvadd (extract 15 0 y) (extract 31 16 y)))) + (#x02 (concat + (bvadd (extract 31 0 x) (extract 63 32 x)) + (bvadd (extract 31 0 y) (extract 63 32 y))))))) + (require (or (= s #x00) (= s #x01) (= s #x02)))) (decl addp (Reg Reg VectorSize) Reg) (rule (addp x y size) (vec_rrr (VecALUOp.Addp) x y size)) @@ -2635,12 +3001,44 @@ ;; Helper for generating instruction sequences to calculate a scalar absolute ;; value. +(spec (abs s x) + (provide + (= result + (if (= s 32) + (conv_to 64 + (if (bvsge (extract 31 0 x) #x00000000) + (extract 31 0 x) + (bvneg (extract 31 0 x)))) + (if (bvsge x #x0000000000000000) x (bvneg x))))) + (require (or (= s 32) (= s 64)))) (decl abs (OperandSize Reg) Reg) (rule (abs size x) (value_regs_get (with_flags (cmp_imm size x (u8_into_imm12 0)) (csneg (Cond.Gt) x x)) 0)) ;; Helper for generating `addv` instructions. +(spec (addv x s) + (provide + (= result + (switch s + (#x00 (zero_ext 64 + (bvadd (extract 7 0 x) + (bvadd (extract 15 8 x) + (bvadd (extract 23 16 x) + (bvadd (extract 31 24 x) + (bvadd (extract 39 32 x) + (bvadd (extract 47 40 x) + (bvadd (extract 55 48 x) + (extract 63 56 x)))))))))) + (#x01 (zero_ext 64 + (bvadd (extract 15 0 x) + (bvadd (extract 31 16 x) + (bvadd (extract 47 32 x) + (extract 63 48 x)))))) + (#x02 (zero_ext 64 + (bvadd (extract 31 0 x) + (extract 63 32 x))))))) + (require (or (= s #x00) (or (= s #x01) (= s #x02))))) (decl addv (Reg VectorSize) Reg) (rule (addv x size) (vec_lanes (VecLanesOp.Addv) x size)) @@ -2698,24 +3096,60 @@ (rule (asr_imm ty x imm) (alu_rr_imm_shift (ALUOp.Asr) ty x imm)) ;; Helper for generating `lsr` instructions. +(spec (lsr ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvlshr (extract 31 0 a) (extract 31 0 b)))) + (64 (bvlshr a b)))))) (decl lsr (Type Reg Reg) Reg) (rule (lsr ty x y) (alu_rrr (ALUOp.Lsr) ty x y)) +(spec (lsr_imm ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvlshr (extract 31 0 a) (zero_ext 32 b)))) + (64 (bvlshr a (zero_ext 64 b))))))) (decl lsr_imm (Type Reg ImmShift) Reg) (rule (lsr_imm ty x imm) (alu_rr_imm_shift (ALUOp.Lsr) ty x imm)) ;; Helper for generating `lsl` instructions. +(spec (lsl ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvshl (extract 31 0 a) (extract 31 0 b)))) + (64 (bvshl a b)))))) (decl lsl (Type Reg Reg) Reg) (rule (lsl ty x y) (alu_rrr (ALUOp.Lsl) ty x y)) +(spec (lsl_imm ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvshl (extract 31 0 a) (zero_ext 32 b)))) + (64 (bvshl a (zero_ext 64 b))))))) (decl lsl_imm (Type Reg ImmShift) Reg) (rule (lsl_imm ty x imm) (alu_rr_imm_shift (ALUOp.Lsl) ty x imm)) ;; Helper for generating `udiv` instructions. +(spec (a64_udiv ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvudiv (extract 31 0 a) (extract 31 0 b))) + (bvudiv a b))))) (decl a64_udiv (Type Reg Reg) Reg) (rule (a64_udiv ty x y) (alu_rrr (ALUOp.UDiv) ty x y)) ;; Helper for generating `sdiv` instructions. 
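+;; Example (illustrative): `bvsdiv` truncates toward zero, matching AArch64
+;; `sdiv`; in the 32-bit case below, (bvsdiv #xfffffff9 #x00000002), i.e.
+;; -7 sdiv 2, yields #xfffffffd (-3).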
+(spec (a64_sdiv ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsdiv (extract 31 0 a) (extract 31 0 b))) + (bvsdiv a b))))) (decl a64_sdiv (Type Reg Reg) Reg) (rule (a64_sdiv ty x y) (alu_rrr (ALUOp.SDiv) ty x y)) @@ -2724,18 +3158,44 @@ (rule (not x size) (vec_misc (VecMisc2.Not) x size)) ;; Helpers for generating `orr_not` instructions. - +(spec (orr_not ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvor (extract 31 0 a) (bvnot (extract 31 0 b)))) + (bvor a (bvnot b)))))) (decl orr_not (Type Reg Reg) Reg) (rule (orr_not ty x y) (alu_rrr (ALUOp.OrrNot) ty x y)) +(spec (orr_not_shift ty a b shift) + (provide + (= result (if (<= ty 32) + (conv_to 64 (bvor a (bvnot (bvshl b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))))) + (bvor a (bvnot (bvshl b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))))))) (decl orr_not_shift (Type Reg Reg ShiftOpAndAmt) Reg) (rule (orr_not_shift ty x y shift) (alu_rrr_shift (ALUOp.OrrNot) ty x y shift)) ;; Helpers for generating `orr` instructions. - +(spec (orr ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvor (extract 31 0 a) (extract 31 0 b))) + (bvor a b)))) + (require (or (= ty 8) (= ty 16) (= ty 32) (= ty 64)))) (decl orr (Type Reg Reg) Reg) (rule (orr ty x y) (alu_rrr (ALUOp.Orr) ty x y)) +(spec (orr_imm ty x y) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvor (extract 31 0 x) (extract 31 0 y)))) + (64 (bvor x (zero_ext 64 y)))))) + (require + (or + (= y (bvand y #x0000000000000fff)) + (= y (bvand y #x0000000000fff000))))) (decl orr_imm (Type Reg ImmLogic) Reg) (rule (orr_imm ty x y) (alu_rr_imm_logic (ALUOp.Orr) ty x y)) @@ -2746,10 +3206,26 @@ (rule (orr_vec x y size) (vec_rrr (VecALUOp.Orr) x y size)) ;; Helpers for generating `and` instructions. - +(spec (and_reg ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvand (extract 31 0 a) (extract 31 0 b))) + (bvand a b)))) + (require (or (= ty 8) (= ty 16) (= ty 32) (= ty 64)))) (decl and_reg (Type Reg Reg) Reg) (rule (and_reg ty x y) (alu_rrr (ALUOp.And) ty x y)) +(spec (and_imm ty x y) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvand (extract 31 0 x) (extract 31 0 y)))) + (64 (bvand x (zero_ext 64 y)))))) + (require + (or + (= y (bvand y #x0000000000000fff)) + (= y (bvand y #x0000000000fff000))))) (decl and_imm (Type Reg ImmLogic) Reg) (rule (and_imm ty x y) (alu_rr_imm_logic (ALUOp.And) ty x y)) @@ -2764,7 +3240,15 @@ (rule (eor_vec x y size) (vec_rrr (VecALUOp.Eor) x y size)) ;; Helpers for generating `bic` instructions. - +(spec (bic ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvand (extract 31 0 a) (bvnot (extract 31 0 b)))) + (bvand a (bvnot b)) + ) + )) + (require (or (= ty 8) (= ty 16) (= ty 32) (= ty 64)))) (decl bic (Type Reg Reg) Reg) (rule (bic ty x y) (alu_rrr (ALUOp.AndNot) ty x y)) @@ -2792,25 +3276,56 @@ (rule (sshr_vec_imm x amt size) (vec_shift_imm (VecShiftImmOp.Sshr) amt x size)) ;; Helpers for generating `rotr` instructions. 
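+;; Example (illustrative): bits rotated out on the right re-enter on the
+;; left, so in the 32-bit case below (rotr #x00000001 #x00000001) yields
+;; #x80000000.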
- +(spec (a64_rotr ty x y) + (provide + (= result + (if (= ty 32) + (zero_ext 64 (rotr (extract 31 0 x) (extract 31 0 y))) + (rotr x y)))) + (require (or (= ty 32) (= ty 64)))) (decl a64_rotr (Type Reg Reg) Reg) (rule (a64_rotr ty x y) (alu_rrr (ALUOp.RotR) ty x y)) +(spec (a64_rotr_imm ty x y) + (provide + (= result + (if (= ty 32) + (zero_ext 64 (rotr (extract 31 0 x) (zero_ext 32 y))) + (rotr x (zero_ext 64 y))))) + (require (or (= ty 32) (= ty 64)))) (decl a64_rotr_imm (Type Reg ImmShift) Reg) (rule (a64_rotr_imm ty x y) (alu_rr_imm_shift (ALUOp.RotR) ty x y)) ;; Helpers for generating `rbit` instructions. - +(spec (rbit ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (rev (extract 31 0 a))) + (rev a)))) + (require (or (= ty 32) (= ty 64)))) (decl rbit (Type Reg) Reg) (rule (rbit ty x) (bit_rr (BitOp.RBit) ty x)) ;; Helpers for generating `clz` instructions. - +(spec (a64_clz ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (clz (extract 31 0 a))) + (clz a)))) + (require (or (= ty 32) (= ty 64)))) (decl a64_clz (Type Reg) Reg) (rule (a64_clz ty x) (bit_rr (BitOp.Clz) ty x)) ;; Helpers for generating `cls` instructions. - +(spec (a64_cls ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (cls (extract 31 0 a))) + (cls a)))) + (require (or (= ty 32) (= ty 64)))) (decl a64_cls (Type Reg) Reg) (rule (a64_cls ty x) (bit_rr (BitOp.Cls) ty x)) @@ -2831,7 +3346,24 @@ (rule (eon ty x y) (alu_rrr (ALUOp.EorNot) ty x y)) ;; Helpers for generating `cnt` instructions. - +(spec (vec_cnt x s) + (provide + (= result + (switch s + ((VectorSize.Size8x8) + (concat + (popcnt (extract 63 56 x)) + (popcnt (extract 55 48 x)) + (popcnt (extract 47 40 x)) + (popcnt (extract 39 32 x)) + (popcnt (extract 31 24 x)) + (popcnt (extract 23 16 x)) + (popcnt (extract 15 8 x)) + (popcnt (extract 7 0 x)))) + ((VectorSize.Size16x4) result) + ((VectorSize.Size32x2) result)))) + (require + (or (= s (VectorSize.Size8x8)) (= s (VectorSize.Size16x4)) (= s (VectorSize.Size32x2))))) (decl vec_cnt (Reg VectorSize) Reg) (rule (vec_cnt x size) (vec_misc (VecMisc2.Cnt) x size)) @@ -2850,6 +3382,9 @@ ;; Helpers for generating various load instructions, with varying ;; widths and sign/zero-extending properties. 
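+;; Note (illustrative): these specs model memory through `load_effect`, which
+;; as used in this patch takes the memory flags, an access width in bits and
+;; a 64-bit address, and denotes the loaded bits; the `u`/`s` variants then
+;; zero- or sign-extend that value.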
(decl aarch64_uload8 (AMode MemFlags) Reg) +(spec (aarch64_uload8 amode flags) + (provide (= result (zero_ext 32 (load_effect flags 8 amode)))) + (require (= 32 (widthof result)))) (rule (aarch64_uload8 amode flags) (let ((dst WritableReg (temp_writable_reg $I64)) (_ Unit (emit (MInst.ULoad8 dst amode flags)))) @@ -2860,6 +3395,9 @@ (_ Unit (emit (MInst.SLoad8 dst amode flags)))) dst)) (decl aarch64_uload16 (AMode MemFlags) Reg) +(spec (aarch64_uload16 amode flags) + (provide (= result (zero_ext 32 (load_effect flags 16 amode)))) + (require (= 32 (widthof result)))) (rule (aarch64_uload16 amode flags) (let ((dst WritableReg (temp_writable_reg $I64)) (_ Unit (emit (MInst.ULoad16 dst amode flags)))) @@ -2870,6 +3408,9 @@ (_ Unit (emit (MInst.SLoad16 dst amode flags)))) dst)) (decl aarch64_uload32 (AMode MemFlags) Reg) +(spec (aarch64_uload32 amode flags) + (provide (= result (load_effect flags 32 amode))) + (require (= 32 (widthof result)))) (rule (aarch64_uload32 amode flags) (let ((dst WritableReg (temp_writable_reg $I64)) (_ Unit (emit (MInst.ULoad32 dst amode flags)))) @@ -2880,6 +3421,9 @@ (_ Unit (emit (MInst.SLoad32 dst amode flags)))) dst)) (decl aarch64_uload64 (AMode MemFlags) Reg) +(spec (aarch64_uload64 amode flags) + (provide (= result (load_effect flags 64 amode))) + (require (= 64 (widthof result)))) (rule (aarch64_uload64 amode flags) (let ((dst WritableReg (temp_writable_reg $I64)) (_ Unit (emit (MInst.ULoad64 dst amode flags)))) @@ -2914,15 +3458,23 @@ ;; Helpers for generating various store instructions with varying ;; widths. (decl aarch64_store8 (AMode MemFlags Reg) SideEffectNoResult) +(spec (aarch64_store8 amode flags val) + (provide (= result (store_effect flags 8 (extract 7 0 val) amode)))) (rule (aarch64_store8 amode flags val) (SideEffectNoResult.Inst (MInst.Store8 val amode flags))) (decl aarch64_store16 (AMode MemFlags Reg) SideEffectNoResult) +(spec (aarch64_store16 amode flags val) + (provide (= result (store_effect flags 16 (extract 15 0 val) amode)))) (rule (aarch64_store16 amode flags val) (SideEffectNoResult.Inst (MInst.Store16 val amode flags))) (decl aarch64_store32 (AMode MemFlags Reg) SideEffectNoResult) +(spec (aarch64_store32 amode flags val) + (provide (= result (store_effect flags 32 (extract 31 0 val) amode)))) (rule (aarch64_store32 amode flags val) (SideEffectNoResult.Inst (MInst.Store32 val amode flags))) (decl aarch64_store64 (AMode MemFlags Reg) SideEffectNoResult) +(spec (aarch64_store64 amode flags val) + (provide (= result (store_effect flags 64 val amode)))) (rule (aarch64_store64 amode flags val) (SideEffectNoResult.Inst (MInst.Store64 val amode flags))) (decl aarch64_fpustore16 (AMode MemFlags Reg) SideEffectNoResult) @@ -2982,6 +3534,10 @@ ;; Immediate value helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Type of extension performed by an immediate helper +(model ImmExtend + (enum + (Sign #b0) + (Zero #b1))) (type ImmExtend (enum (Sign) @@ -2997,6 +3553,21 @@ ;; Note that, unlike the convention in the AArch64 backend, this helper leaves ;; all bits in the destination register in a defined state, i.e. smaller types ;; such as `I8` are either sign- or zero-extended. 
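+;; Example (illustrative): with the `ImmExtend` model above (Sign = #b0,
+;; Zero = #b1), (imm $I8 (ImmExtend.Zero) #x00000000000000ff) produces
+;; #x00000000000000ff, while (ImmExtend.Sign) produces #xffffffffffffffff.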
+(spec (imm ty ext x) + (provide + (= result + (switch ty + (8 (if (= ext #b1) (zero_ext 64 (extract 7 0 x)) (sign_ext 64 (extract 7 0 x)))) + (16 (if (= ext #b1) (zero_ext 64 (extract 15 0 x)) (sign_ext 64 (extract 15 0 x)))) + (32 (if (= ext #b1) (zero_ext 64 (extract 31 0 x)) (sign_ext 64 (extract 31 0 x)))) + (64 x)))) + (require (or (= ty 8) (= ty 16) (= ty 32) (= ty 64)))) +(instantiate imm + ((args Int (bv 64)) (ret (bv 64)) (canon (bv 8))) + ((args Int (bv 64)) (ret (bv 64)) (canon (bv 16))) + ((args Int (bv 64)) (ret (bv 64)) (canon (bv 32))) + ((args Int (bv 64)) (ret (bv 64)) (canon (bv 64))) +) (decl imm (Type ImmExtend u64) Reg) ;; Move wide immediate instructions; to simplify, we only match when we @@ -3031,6 +3602,12 @@ ;; Sign extension helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Place a `Value` into a register, sign extending it to 32-bits +(spec (put_in_reg_sext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (sign_ext 32 arg)) + (conv_to 64 arg))))) (decl put_in_reg_sext32 (Value) Reg) (rule -1 (put_in_reg_sext32 val @ (value_type (fits_in_32 ty))) (extend val $true (ty_bits ty) 32)) @@ -3040,6 +3617,12 @@ (rule (put_in_reg_sext32 val @ (value_type $I64)) val) ;; Place a `Value` into a register, zero extending it to 32-bits +(spec (put_in_reg_zext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (zero_ext 32 arg)) + (conv_to 64 arg))))) (decl put_in_reg_zext32 (Value) Reg) (rule -1 (put_in_reg_zext32 val @ (value_type (fits_in_32 ty))) (extend val $false (ty_bits ty) 32)) @@ -3049,6 +3632,8 @@ (rule (put_in_reg_zext32 val @ (value_type $I64)) val) ;; Place a `Value` into a register, sign extending it to 64-bits +(spec (put_in_reg_sext64 x) + (provide (= (sign_ext 64 x) result))) (decl put_in_reg_sext64 (Value) Reg) (rule 1 (put_in_reg_sext64 val @ (value_type (fits_in_32 ty))) (extend val $true (ty_bits ty) 64)) @@ -3057,6 +3642,8 @@ (rule (put_in_reg_sext64 val @ (value_type $I64)) val) ;; Place a `Value` into a register, zero extending it to 64-bits +(spec (put_in_reg_zext64 x) + (provide (= result (zero_ext 64 x)))) (decl put_in_reg_zext64 (Value) Reg) (rule 1 (put_in_reg_zext64 val @ (value_type (fits_in_32 ty))) (extend val $false (ty_bits ty) 64)) @@ -3078,6 +3665,13 @@ ;; Check for signed overflow. The only case is min_value / -1. ;; The following checks must be done in 32-bit or 64-bit, depending ;; on the input type. +(spec (trap_if_div_overflow ty x y) + (provide (= x result) + (if (= ty 32) + (not (and (= #xffffffff (extract 31 0 y)) + (= #x80000000 (extract 31 0 x)))) + (not (and (= #xffffffffffffffff y) + (= #x8000000000000000 x)))))) (decl trap_if_div_overflow (Type Reg Reg) Reg) (rule (trap_if_div_overflow ty x y) (let ( @@ -3112,6 +3706,14 @@ ;; Helper for generating either an `AluRRR`, `AluRRRShift`, or `AluRRImmLogic` ;; instruction depending on the input. Note that this requires that the `ALUOp` ;; specified is commutative. +(spec (alu_rs_imm_logic_commutative op t a b) + (provide + (= result + (conv_to 64 + (switch op + ((ALUOp.Orr) (bvor a b)) + ((ALUOp.And) (bvand a b)) + ((ALUOp.Eor) (bvxor a b))))))) (decl alu_rs_imm_logic_commutative (ALUOp Type Value Value) Reg) ;; Base case of operating on registers. @@ -3136,6 +3738,14 @@ ;; Same as `alu_rs_imm_logic_commutative` above, except that it doesn't require ;; that the operation is commutative. 
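+;; Example (illustrative): these ops are order-sensitive because the
+;; bitwise-not lands on the second operand only: for a = #b01 and b = #b11,
+;; (bvand a (bvnot b)) = #b00 but (bvand b (bvnot a)) = #b10.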
+(spec (alu_rs_imm_logic op t a b) + (provide + (= result + (conv_to 64 + (switch op + ((ALUOp.OrrNot) (bvor a (bvnot b))) + ((ALUOp.EorNot) (bvxor a (bvnot b))) + ((ALUOp.AndNot) (bvand a (bvnot b)))))))) (decl alu_rs_imm_logic (ALUOp Type Value Value) Reg) (rule -1 (alu_rs_imm_logic op ty x y) (alu_rrr op ty x y)) @@ -3193,6 +3803,10 @@ ;; mid-end optimizations that fold constants into load/store immediate offsets ;; instead, but for now each backend needs to do this. (decl amode (Type Value i32) AMode) +(spec (amode ty val offset) + (provide (= result (bvadd val (sign_ext 64 offset)))) + (require (= 64 (widthof val)))) + (rule 0 (amode ty val offset) (amode_no_more_iconst ty val offset)) (rule 1 (amode ty (iadd x (i32_from_iconst y)) offset) @@ -3436,6 +4050,7 @@ (extern constructor fp_cond_code fp_cond_code) ;; Lower an integer cond code. +(spec (cond_code a) (provide (= a result))) (decl cond_code (IntCC) Cond) ;; TODO: Port lower_condcode() to ISLE. (extern constructor cond_code cond_code) @@ -3897,10 +4512,51 @@ (type FlagsAndCC (enum (FlagsAndCC (flags ProducesFlags) (cc IntCC)))) +(spec (flags_and_cc flags cc) + (provide + (= result (concat (extract 67 64 flags) cc))) + (require + (or + (= cc (IntCC.Equal)) + (= cc (IntCC.NotEqual)) + (= cc (IntCC.UnsignedGreaterThanOrEqual)) + (= cc (IntCC.UnsignedGreaterThan)) + (= cc (IntCC.UnsignedLessThanOrEqual)) + (= cc (IntCC.UnsignedLessThan)) + (= cc (IntCC.SignedGreaterThanOrEqual)) + (= cc (IntCC.SignedGreaterThan)) + (= cc (IntCC.SignedLessThanOrEqual)) + (= cc (IntCC.SignedLessThan))))) ;; Helper constructor for `FlagsAndCC`. (decl flags_and_cc (ProducesFlags IntCC) FlagsAndCC) (rule (flags_and_cc flags cc) (FlagsAndCC.FlagsAndCC flags cc)) +(spec (flags_and_cc_to_bool a) + (provide + (= result + (switch (extract 7 0 a) + ((IntCC.Equal) (if (= (extract 10 10 a) #b1) #x01 #x00)) + ((IntCC.NotEqual) (if (= (extract 10 10 a) #b0) #x01 #x00)) + ((IntCC.SignedGreaterThan) (if (and (= (extract 10 10 a) #b0) (= (extract 11 11 a) (extract 8 8 a))) #x01 #x00)) + ((IntCC.SignedGreaterThanOrEqual) (if (= (extract 11 11 a) (extract 8 8 a)) #x01 #x00)) + ((IntCC.SignedLessThan) (if (not (= (extract 11 11 a) (extract 8 8 a))) #x01 #x00)) + ((IntCC.SignedLessThanOrEqual) (if (or (= (extract 10 10 a) #b1) (not (= (extract 11 11 a) (extract 8 8 a)))) #x01 #x00)) + ((IntCC.UnsignedGreaterThan) (if (and (= (extract 9 9 a) #b1) (= (extract 10 10 a) #b0)) #x01 #x00)) + ((IntCC.UnsignedGreaterThanOrEqual) (if (= (extract 9 9 a) #b1) #x01 #x00)) + ((IntCC.UnsignedLessThan) (if (= (extract 9 9 a) #b0) #x01 #x00)) + ((IntCC.UnsignedLessThanOrEqual) (if (or (= (extract 9 9 a) #b0) (= (extract 10 10 a) #b1)) #x01 #x00))))) + (require + (or + (= (extract 7 0 a) (IntCC.Equal)) + (= (extract 7 0 a) (IntCC.NotEqual)) + (= (extract 7 0 a) (IntCC.UnsignedGreaterThanOrEqual)) + (= (extract 7 0 a) (IntCC.UnsignedGreaterThan)) + (= (extract 7 0 a) (IntCC.UnsignedLessThanOrEqual)) + (= (extract 7 0 a) (IntCC.UnsignedLessThan)) + (= (extract 7 0 a) (IntCC.SignedGreaterThanOrEqual)) + (= (extract 7 0 a) (IntCC.SignedGreaterThan)) + (= (extract 7 0 a) (IntCC.SignedLessThanOrEqual)) + (= (extract 7 0 a) (IntCC.SignedLessThan))))) ;; Materialize a `FlagsAndCC` into a boolean `ValueRegs`. (decl flags_and_cc_to_bool (FlagsAndCC) ValueRegs) (rule (flags_and_cc_to_bool (FlagsAndCC.FlagsAndCC flags cc)) @@ -3917,9 +4573,125 @@ ;; Helpers for lowering `icmp` sequences. 
;; `lower_icmp` contains shared functionality for lowering `icmp` ;; sequences, which `lower_icmp_into_{reg,flags}` extend from. +(spec (lower_icmp c x y in_ty) + (provide + (= result + (concat + (extract 67 64 + (if (or (= c (IntCC.SignedGreaterThanOrEqual)) + (= c (IntCC.SignedGreaterThan)) + (= c (IntCC.SignedLessThanOrEqual)) + (= c (IntCC.SignedLessThan))) + (if (<= in_ty 32) + (subs 32 (sign_ext 64 x) (sign_ext 64 y)) + (subs 64 (sign_ext 64 x) (sign_ext 64 y))) + (if (<= in_ty 32) + (subs 32 (zero_ext 64 x) (zero_ext 64 y)) + (subs 64 (zero_ext 64 x) (zero_ext 64 y))))) + c))) + (require + (or + (= c (IntCC.Equal)) + (= c (IntCC.NotEqual)) + (= c (IntCC.UnsignedGreaterThanOrEqual)) + (= c (IntCC.UnsignedGreaterThan)) + (= c (IntCC.UnsignedLessThanOrEqual)) + (= c (IntCC.UnsignedLessThan)) + (= c (IntCC.SignedGreaterThanOrEqual)) + (= c (IntCC.SignedGreaterThan)) + (= c (IntCC.SignedLessThanOrEqual)) + (= c (IntCC.SignedLessThan))) + (or (= in_ty 8) + (= in_ty 16) + (= in_ty 32) + (= in_ty 64)) + (= in_ty (widthof x)) + (= in_ty (widthof y)))) +(instantiate lower_icmp + ((args (bv 8) (bv 8) (bv 8) Int) (ret (bv 12)) (canon (bv 8))) + ((args (bv 8) (bv 16) (bv 16) Int) (ret (bv 12)) (canon (bv 16))) + ((args (bv 8) (bv 32) (bv 32) Int) (ret (bv 12)) (canon (bv 32))) + ((args (bv 8) (bv 64) (bv 64) Int) (ret (bv 12)) (canon (bv 64))) +) (decl lower_icmp (IntCC Value Value Type) FlagsAndCC) + +(spec (lower_icmp_into_reg c x y in_ty out_ty) + (provide + (= result + (switch c + ((IntCC.Equal) (if (= x y) #x01 #x00)) + ((IntCC.NotEqual) (if (not (= x y)) #x01 #x00)) + ((IntCC.SignedGreaterThan) (if (bvsgt x y) #x01 #x00)) + ((IntCC.SignedGreaterThanOrEqual) (if (bvsge x y) #x01 #x00)) + ((IntCC.SignedLessThan) (if (bvslt x y) #x01 #x00)) + ((IntCC.SignedLessThanOrEqual) (if (bvsle x y) #x01 #x00)) + ((IntCC.UnsignedGreaterThan) (if (bvugt x y) #x01 #x00)) + ((IntCC.UnsignedGreaterThanOrEqual) (if (bvuge x y) #x01 #x00)) + ((IntCC.UnsignedLessThan) (if (bvult x y) #x01 #x00)) + ((IntCC.UnsignedLessThanOrEqual) (if (bvule x y) #x01 #x00))))) + (require + (or + (= c (IntCC.Equal)) + (= c (IntCC.NotEqual)) + (= c (IntCC.UnsignedGreaterThanOrEqual)) + (= c (IntCC.UnsignedGreaterThan)) + (= c (IntCC.UnsignedLessThanOrEqual)) + (= c (IntCC.UnsignedLessThan)) + (= c (IntCC.SignedGreaterThanOrEqual)) + (= c (IntCC.SignedGreaterThan)) + (= c (IntCC.SignedLessThanOrEqual)) + (= c (IntCC.SignedLessThan))) + (or (= in_ty 8) + (= in_ty 16) + (= in_ty 32) + (= in_ty 64)) + (= in_ty (widthof x)) + (= in_ty (widthof y)) + (= out_ty 8))) +(instantiate lower_icmp_into_reg + ((args (bv 8) (bv 8) (bv 8) Int Int) (ret (bv 8)) (canon (bv 8))) + ((args (bv 8) (bv 16) (bv 16) Int Int) (ret (bv 8)) (canon (bv 16))) + ((args (bv 8) (bv 32) (bv 32) Int Int) (ret (bv 8)) (canon (bv 32))) + ((args (bv 8) (bv 64) (bv 64) Int Int) (ret (bv 8)) (canon (bv 64))) +) (decl lower_icmp_into_reg (IntCC Value Value Type Type) ValueRegs) (decl lower_icmp_into_flags (IntCC Value Value Type) FlagsAndCC) + +(spec (lower_icmp_const c x y in_ty) + (provide + (= result + (concat (extract 67 64 + (if (or (= c (IntCC.SignedGreaterThanOrEqual)) + (= c (IntCC.SignedGreaterThan)) + (= c (IntCC.SignedLessThanOrEqual)) + (= c (IntCC.SignedLessThan))) + (if (<= in_ty 32) + (subs 32 (sign_ext 64 x) y) + (subs 64 (sign_ext 64 x) y)) + (if (<= in_ty 32) + (subs 32 (zero_ext 64 x) y) + (subs 64 (zero_ext 64 x) y)))) + c))) + (require + (or + (= c (IntCC.Equal)) + (= c (IntCC.NotEqual)) + (= c (IntCC.UnsignedGreaterThanOrEqual)) + (= c 
(IntCC.UnsignedGreaterThan)) + (= c (IntCC.UnsignedLessThanOrEqual)) + (= c (IntCC.UnsignedLessThan)) + (= c (IntCC.SignedGreaterThanOrEqual)) + (= c (IntCC.SignedGreaterThan)) + (= c (IntCC.SignedLessThanOrEqual)) + (= c (IntCC.SignedLessThan))) + (or (= in_ty 32) (= in_ty 64)) + (= in_ty (widthof x)))) +(instantiate lower_icmp_const + ((args (bv 8) (bv 8) (bv 64) Int) (ret (bv 12)) (canon (bv 8))) + ((args (bv 8) (bv 16) (bv 64) Int) (ret (bv 12)) (canon (bv 16))) + ((args (bv 8) (bv 32) (bv 64) Int) (ret (bv 12)) (canon (bv 32))) + ((args (bv 8) (bv 64) (bv 64) Int) (ret (bv 12)) (canon (bv 64))) +) (decl lower_icmp_const (IntCC Value u64 Type) FlagsAndCC) ;; For most cases, `lower_icmp_into_flags` is the same as `lower_icmp`, ;; except for some I128 cases (see below). @@ -3934,6 +4706,15 @@ (vec_cmp rn rm in_ty cond))) ;; Determines the appropriate extend op given the value type and the given ArgumentExtension. +(spec (lower_extend_op ty b) + (provide + (= result + (switch ty + (8 (switch b ((ArgumentExtension.Sext) (ExtendOp.SXTB)) + ((ArgumentExtension.Uext) (ExtendOp.UXTB)))) + (16 (switch b ((ArgumentExtension.Sext) (ExtendOp.SXTH)) + ((ArgumentExtension.Uext) (ExtendOp.UXTH))))))) + (require (or (= ty 8) (= ty 16) (= ty 32) (= ty 64)))) (decl lower_extend_op (Type ArgumentExtension) ExtendOp) (rule (lower_extend_op $I8 (ArgumentExtension.Sext)) (ExtendOp.SXTB)) (rule (lower_extend_op $I16 (ArgumentExtension.Sext)) (ExtendOp.SXTH)) @@ -3941,25 +4722,25 @@ (rule (lower_extend_op $I16 (ArgumentExtension.Uext)) (ExtendOp.UXTH)) ;; Integers <= 64-bits. -(rule -2 (lower_icmp_into_reg cond rn rm in_ty out_ty) +(rule lower_icmp_into_reg_8_16_32_64 -2 (lower_icmp_into_reg cond rn rm in_ty out_ty) (if (ty_int_ref_scalar_64 in_ty)) (let ((cc Cond (cond_code cond))) (flags_and_cc_to_bool (lower_icmp cond rn rm in_ty)))) -(rule 1 (lower_icmp cond rn rm (fits_in_16 ty)) +(rule lower_icmp_8_16_signed 1 (lower_icmp cond rn rm (fits_in_16 ty)) (if (signed_cond_code cond)) (let ((rn Reg (put_in_reg_sext32 rn))) (flags_and_cc (cmp_extend (operand_size ty) rn rm (lower_extend_op ty (ArgumentExtension.Sext))) cond))) -(rule -1 (lower_icmp cond rn (imm12_from_value rm) (fits_in_16 ty)) +(rule lower_icmp_8_16_unsigned_imm -1 (lower_icmp cond rn (imm12_from_value rm) (fits_in_16 ty)) (let ((rn Reg (put_in_reg_zext32 rn))) (flags_and_cc (cmp_imm (operand_size ty) rn rm) cond))) -(rule -2 (lower_icmp cond rn rm (fits_in_16 ty)) +(rule lower_icmp_8_16_unsigned -2 (lower_icmp cond rn rm (fits_in_16 ty)) (let ((rn Reg (put_in_reg_zext32 rn))) (flags_and_cc (cmp_extend (operand_size ty) rn rm (lower_extend_op ty (ArgumentExtension.Uext))) cond))) -(rule -3 (lower_icmp cond rn (u64_from_iconst c) ty) +(rule lower_icmp_32_64_const -3 (lower_icmp cond rn (u64_from_iconst c) ty) (if (ty_int_ref_scalar_64 ty)) (lower_icmp_const cond rn c ty)) -(rule -4 (lower_icmp cond rn rm ty) +(rule lower_icmp_32_64 -4 (lower_icmp cond rn rm ty) (if (ty_int_ref_scalar_64 ty)) (flags_and_cc (cmp (operand_size ty) rn rm) cond)) @@ -3969,21 +4750,22 @@ ;; A >= B + 1 ;; ==> A - 1 >= B ;; ==> A > B -(rule (lower_icmp_const (IntCC.UnsignedGreaterThanOrEqual) a b ty) +(rule lower_icmp_const_32_64_ugte (lower_icmp_const (IntCC.UnsignedGreaterThanOrEqual) a b ty) (if (ty_int_ref_scalar_64 ty)) (if-let $true (u64_is_odd b)) (if-let (imm12_from_u64 imm) (u64_sub b 1)) (flags_and_cc (cmp_imm (operand_size ty) a imm) (IntCC.UnsignedGreaterThan))) -(rule (lower_icmp_const (IntCC.SignedGreaterThanOrEqual) a b ty) + +(rule 
lower_icmp_const_32_64_sgte (lower_icmp_const (IntCC.SignedGreaterThanOrEqual) a b ty) (if (ty_int_ref_scalar_64 ty)) (if-let $true (u64_is_odd b)) (if-let (imm12_from_u64 imm) (u64_sub b 1)) (flags_and_cc (cmp_imm (operand_size ty) a imm) (IntCC.SignedGreaterThan))) -(rule -1 (lower_icmp_const cond rn (imm12_from_u64 c) ty) +(rule lower_icmp_const_32_64_imm -1 (lower_icmp_const cond rn (imm12_from_u64 c) ty) (if (ty_int_ref_scalar_64 ty)) (flags_and_cc (cmp_imm (operand_size ty) rn c) cond)) -(rule -2 (lower_icmp_const cond rn c ty) +(rule lower_icmp_const_32_64 -2 (lower_icmp_const cond rn c ty) (if (ty_int_ref_scalar_64 ty)) (flags_and_cc (cmp (operand_size ty) rn (imm ty (ImmExtend.Zero) c)) cond)) diff --git a/cranelift/codegen/src/isa/aarch64/lower.isle b/cranelift/codegen/src/isa/aarch64/lower.isle index df01ad0e9b4c..7dc1b4e9fe2d 100644 --- a/cranelift/codegen/src/isa/aarch64/lower.isle +++ b/cranelift/codegen/src/isa/aarch64/lower.isle @@ -2,6 +2,8 @@ ;; The main lowering constructor term: takes a clif `Inst` and returns the ;; register(s) within which the lowered instruction's result values live. +(spec (lower arg) + (provide (= result arg))) (decl partial lower (Inst) InstOutput) ;; Variant of the main lowering constructor term, which receives an @@ -16,7 +18,7 @@ ;;;; Rules for `iconst` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower (has_type ty (iconst (u64_from_imm64 n)))) +(rule iconst (lower (has_type ty (iconst (u64_from_imm64 n)))) (imm ty (ImmExtend.Zero) n)) ;;;; Rules for `f16const` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -49,55 +51,55 @@ ;; `i64` and smaller ;; Base case, simply adding things in registers. -(rule -1 (lower (has_type (fits_in_64 ty) (iadd x y))) +(rule iadd_base_case -1 (lower (has_type (fits_in_64 ty) (iadd x y))) (add ty x y)) ;; Special cases for when one operand is an immediate that fits in 12 bits. -(rule 4 (lower (has_type (fits_in_64 ty) (iadd x (imm12_from_value y)))) +(rule iadd_imm12_right 4 (lower (has_type (fits_in_64 ty) (iadd x (imm12_from_value y)))) (add_imm ty x y)) -(rule 5 (lower (has_type (fits_in_64 ty) (iadd (imm12_from_value x) y))) +(rule iadd_imm12_left 5 (lower (has_type (fits_in_64 ty) (iadd (imm12_from_value x) y))) (add_imm ty y x)) ;; Same as the previous special cases, except we can switch the addition to a ;; subtraction if the negated immediate fits in 12 bits. -(rule 2 (lower (has_type (fits_in_64 ty) (iadd x y))) +(rule iadd_imm12_neg_right 2 (lower (has_type (fits_in_64 ty) (iadd x y))) (if-let imm12_neg (imm12_from_negated_value y)) (sub_imm ty x imm12_neg)) -(rule 3 (lower (has_type (fits_in_64 ty) (iadd x y))) +(rule iadd_imm12_neg_left 3 (lower (has_type (fits_in_64 ty) (iadd x y))) (if-let imm12_neg (imm12_from_negated_value x)) (sub_imm ty y imm12_neg)) ;; Special cases for when we're adding an extended register where the extending ;; operation can get folded into the add itself. -(rule 0 (lower (has_type (fits_in_64 ty) (iadd x (extended_value_from_value y)))) +(rule iadd_extend_right 0 (lower (has_type (fits_in_64 ty) (iadd x (extended_value_from_value y)))) (add_extend ty x y)) -(rule 1 (lower (has_type (fits_in_64 ty) (iadd (extended_value_from_value x) y))) +(rule iadd_extend_left 1 (lower (has_type (fits_in_64 ty) (iadd (extended_value_from_value x) y))) (add_extend ty y x)) ;; Special cases for when we're adding the shift of a different ;; register by a constant amount and the shift can get folded into the add. 
-(rule 7 (lower (has_type (fits_in_64 ty) +(rule iadd_ishl_right 7 (lower (has_type (fits_in_64 ty) (iadd x (ishl y (iconst k))))) (if-let amt (lshl_from_imm64 ty k)) (add_shift ty x y amt)) -(rule 6 (lower (has_type (fits_in_64 ty) +(rule iadd_ishl_left 6 (lower (has_type (fits_in_64 ty) (iadd (ishl x (iconst k)) y))) (if-let amt (lshl_from_imm64 ty k)) (add_shift ty y x amt)) ;; Fold an `iadd` and `imul` combination into a `madd` instruction. -(rule 7 (lower (has_type (fits_in_64 ty) (iadd x (imul y z)))) +(rule iadd_imul_right 7 (lower (has_type (fits_in_64 ty) (iadd x (imul y z)))) (madd ty y z x)) -(rule 6 (lower (has_type (fits_in_64 ty) (iadd (imul x y) z))) +(rule iadd_imul_left 6 (lower (has_type (fits_in_64 ty) (iadd (imul x y) z))) (madd ty x y z)) ;; Fold an `isub` and `imul` combination into a `msub` instruction. -(rule (lower (has_type (fits_in_64 ty) (isub x (imul y z)))) +(rule isub_imul (lower (has_type (fits_in_64 ty) (isub x (imul y z)))) (msub ty y z x)) ;; vectors @@ -362,10 +364,10 @@ (rule -1 (lower (has_type ty @ (multi_lane _ _) (iabs x))) (vec_abs x (vector_size ty))) -(rule 2 (lower (has_type $I64 (iabs x))) +(rule iabs_64 2 (lower (has_type $I64 (iabs x))) (abs (OperandSize.Size64) x)) -(rule 1 (lower (has_type (fits_in_32 ty) (iabs x))) +(rule iabs_8_16_32 1 (lower (has_type (fits_in_32 ty) (iabs x))) (abs (OperandSize.Size32) (put_in_reg_sext32 x))) ; `rustc` implementation. @@ -734,27 +736,27 @@ ;; `i64` and smaller ;; Base case, simply subtracting things in registers. -(rule -4 (lower (has_type (fits_in_64 ty) (isub x y))) +(rule isub_base_case -4 (lower (has_type (fits_in_64 ty) (isub x y))) (sub ty x y)) ;; Special case for when one operand is an immediate that fits in 12 bits. -(rule 0 (lower (has_type (fits_in_64 ty) (isub x (imm12_from_value y)))) +(rule isub_imm12 0 (lower (has_type (fits_in_64 ty) (isub x (imm12_from_value y)))) (sub_imm ty x y)) ;; Same as the previous special case, except we can switch the subtraction to an ;; addition if the negated immediate fits in 12 bits. -(rule 2 (lower (has_type (fits_in_64 ty) (isub x y))) +(rule isub_imm12_neg 2 (lower (has_type (fits_in_64 ty) (isub x y))) (if-let imm12_neg (imm12_from_negated_value y)) (add_imm ty x imm12_neg)) ;; Special cases for when we're subtracting an extended register where the ;; extending operation can get folded into the sub itself. -(rule 1 (lower (has_type (fits_in_64 ty) (isub x (extended_value_from_value y)))) +(rule isub_extend 1 (lower (has_type (fits_in_64 ty) (isub x (extended_value_from_value y)))) (sub_extend ty x y)) ;; Finally a special case for when we're subtracting the shift of a different ;; register by a constant amount and the shift can get folded into the sub. -(rule -3 (lower (has_type (fits_in_64 ty) +(rule isub_ishl -3 (lower (has_type (fits_in_64 ty) (isub x (ishl y (iconst k))))) (if-let amt (lshl_from_imm64 ty k)) (sub_shift ty x y amt)) @@ -790,7 +792,7 @@ ;;;; Rules for `ineg` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; `i64` and smaller. -(rule 1 (lower (has_type (fits_in_64 ty) (ineg x))) +(rule ineg_base_case 1 (lower (has_type (fits_in_64 ty) (ineg x))) (sub ty (zero_reg) x)) ;; `i128` @@ -804,7 +806,7 @@ ;;;; Rules for `imul` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; `i64` and smaller. -(rule -3 (lower (has_type (fits_in_64 ty) (imul x y))) +(rule imul_base_case -3 (lower (has_type (fits_in_64 ty) (imul x y))) (madd ty x y (zero_reg))) ;; `i128`. 
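;; Illustrative aside, not part of this patch: each `spec` plus a named rule
;; above yields an SMT query for the verifier to discharge. The
;; modular-arithmetic fact behind `iadd_imm12_neg_right`/`iadd_imm12_neg_left`
;; (adding `y` is the same as subtracting `-y`) can be hand-checked in plain
;; SMT-LIB at a fixed 64-bit width:
(declare-const x (_ BitVec 64))
(declare-const y (_ BitVec 64))
(assert (not (= (bvadd x y) (bvsub x (bvneg y)))))
(check-sat) ; unsat: no counterexample exists, so the add-to-sub rewrite is sound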
@@ -1031,10 +1033,13 @@ ;; ;; Note that aarch64's `udiv` doesn't trap so to respect the semantics of ;; CLIF's `udiv` the check for zero needs to be manually performed. -(rule (lower (has_type (fits_in_64 ty) (udiv x y))) +(rule udiv (lower (has_type (fits_in_64 ty) (udiv x y))) (a64_udiv $I64 (put_in_reg_zext64 x) (put_nonzero_in_reg_zext64 y))) ;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero. +(spec (put_nonzero_in_reg_zext64 x) + (provide (= result (zero_ext 64 x))) + (require (not (= result #x0000000000000000)))) (decl put_nonzero_in_reg_zext64 (Value) Reg) (rule -1 (put_nonzero_in_reg_zext64 val) (trap_if_zero_divisor (put_in_reg_zext64 val))) @@ -1064,7 +1069,7 @@ ;; ;; TODO: if `y` is -1 then a check that `x` is not INT_MIN is all that's ;; necessary, but right now `y` is checked to not be -1 as well. -(rule (lower (has_type (fits_in_64 ty) (sdiv x y))) +(rule sdiv_base_case (lower (has_type (fits_in_64 ty) (sdiv x y))) (let ((x64 Reg (put_in_reg_sext64 x)) (y64 Reg (put_nonzero_in_reg_sext64 y)) (valid_x64 Reg (trap_if_div_overflow ty x64 y64)) @@ -1073,11 +1078,14 @@ ;; Special case for `sdiv` where no checks are needed due to division by a ;; constant meaning the checks are always passed. -(rule 1 (lower (has_type (fits_in_64 ty) (sdiv x (iconst imm)))) +(rule sdiv_safe_divisor 1 (lower (has_type (fits_in_64 ty) (sdiv x (iconst imm)))) (if-let y (safe_divisor_from_imm64 ty imm)) (a64_sdiv $I64 (put_in_reg_sext64 x) (imm ty (ImmExtend.Sign) y))) ;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero. + (spec (put_nonzero_in_reg_sext64 x) + (provide (= (sign_ext 64 x) result)) + (require (not (= #x0000000000000000 result)))) (decl put_nonzero_in_reg_sext64 (Value) Reg) (rule -1 (put_nonzero_in_reg_sext64 val) (trap_if_zero_divisor (put_in_reg_sext64 val))) @@ -1102,14 +1110,14 @@ ;; div rd, x, y ; rd = x / y ;; msub rd, rd, y, x ; rd = x - rd * y -(rule (lower (has_type (fits_in_64 ty) (urem x y))) +(rule urem (lower (has_type (fits_in_64 ty) (urem x y))) (let ((x64 Reg (put_in_reg_zext64 x)) (y64 Reg (put_nonzero_in_reg_zext64 y)) (div Reg (a64_udiv $I64 x64 y64)) (result Reg (msub $I64 div y64 x64))) result)) -(rule (lower (has_type (fits_in_64 ty) (srem x y))) +(rule srem (lower (has_type (fits_in_64 ty) (srem x y))) (let ((x64 Reg (put_in_reg_sext64 x)) (y64 Reg (put_nonzero_in_reg_sext64 y)) (div Reg (a64_sdiv $I64 x64 y64)) @@ -1122,6 +1130,25 @@ ;; cmp $x, $y ;; csel .., $x, $y, $cc + (spec (cmp_and_choose ty cc signed x y) + (provide + (= result + (switch cc + (#x03 (if (bvule x y) x y)) + (#x08 (if (bvuge x y) x y)) + (#x0b (if (bvsle x y) x y)) + (#x0c (if (bvsge x y) x y))))) + (require + (or (= ty 8) + (= ty 16) + (= ty 32) + (= ty 64)) + (or (= cc #x03) + (= cc #x08) + (= cc #x0b) + (= cc #x0c)) + (if signed (or (= cc #x0b) (= cc #x0c)) + (or (= cc #x03) (= cc #x08))))) (decl cmp_and_choose (Type Cond bool Value Value) ValueRegs) (rule (cmp_and_choose (fits_in_64 ty) cc _ x y) (let ((x Reg (put_in_reg x)) @@ -1137,13 +1164,13 @@ (with_flags_reg (cmp (operand_size ty) x y) (csel cc x y)))) -(rule 2 (lower (has_type (and (fits_in_64 ty) (ty_int _)) (umin x y))) +(rule umin 2 (lower (has_type (and (fits_in_64 ty) (ty_int _)) (umin x y))) (cmp_and_choose ty (Cond.Lo) $false x y)) -(rule 2 (lower (has_type (and (fits_in_64 ty) (ty_int _)) (smin x y))) +(rule smin 2 (lower (has_type (and (fits_in_64 ty) (ty_int _)) (smin x y))) (cmp_and_choose ty (Cond.Lt) $true x y)) -(rule 2 (lower (has_type (and (fits_in_64 ty) (ty_int 
_)) (umax x y))) +(rule umax 2 (lower (has_type (and (fits_in_64 ty) (ty_int _)) (umax x y))) (cmp_and_choose ty (Cond.Hi) $false x y)) -(rule 2 (lower (has_type (and (fits_in_64 ty) (ty_int _)) (smax x y))) +(rule smax 2 (lower (has_type (and (fits_in_64 ty) (ty_int _)) (smax x y))) (cmp_and_choose ty (Cond.Gt) $true x y)) ;; Vector types. @@ -1176,7 +1203,7 @@ ;; General rule for extending input to an output which fits in a single ;; register. -(rule -2 (lower (has_type (fits_in_64 out) (uextend x @ (value_type in)))) +(rule uextend -2 (lower (has_type (fits_in_64 out) (uextend x @ (value_type in)))) (extend x $false (ty_bits in) (ty_bits out))) ;; Extraction of a vector lane automatically extends as necessary, so we can @@ -1220,7 +1247,7 @@ ;; General rule for extending input to an output which fits in a single ;; register. -(rule -4 (lower (has_type (fits_in_64 out) (sextend x @ (value_type in)))) +(rule sextend -4 (lower (has_type (fits_in_64 out) (sextend x @ (value_type in)))) (extend x $true (ty_bits in) (ty_bits out))) ;; Extraction of a vector lane automatically extends as necessary, so we can @@ -1282,12 +1309,12 @@ ;; Note that bitwise negation is implemented here as ;; ;; NOT rd, rm ==> ORR_NOT rd, zero, rm -(rule -1 (lower (has_type (fits_in_64 ty) (bnot x))) +(rule bnot_base_case -1 (lower (has_type (fits_in_64 ty) (bnot x))) (orr_not ty (zero_reg) x)) ;; Special case to use `orr_not_shift` if it's a `bnot` of a const-left-shifted ;; value. -(rule 1 (lower (has_type (fits_in_64 ty) +(rule bnot_ishl 1 (lower (has_type (fits_in_64 ty) (bnot (ishl x (iconst k))))) (if-let amt (lshl_from_imm64 ty k)) (orr_not_shift ty (zero_reg) x amt)) @@ -1312,7 +1339,7 @@ ;;;; Rules for `band` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule -1 (lower (has_type (fits_in_64 ty) (band x y))) +(rule band_fits_in_64 -1 (lower (has_type (fits_in_64 ty) (band x y))) (alu_rs_imm_logic_commutative (ALUOp.And) ty x y)) (rule (lower (has_type $I128 (band x y))) (i128_alu_bitop (ALUOp.And) $I64 x y)) @@ -1323,10 +1350,9 @@ ;; Specialized lowerings for `(band x (bnot y))` which is additionally produced ;; by Cranelift's `band_not` instruction that is legalized into the simpler ;; forms early on. - -(rule 1 (lower (has_type (fits_in_64 ty) (band x (bnot y)))) +(rule band_not_right 1 (lower (has_type (fits_in_64 ty) (band x (bnot y)))) (alu_rs_imm_logic (ALUOp.AndNot) ty x y)) -(rule 2 (lower (has_type (fits_in_64 ty) (band (bnot y) x))) +(rule band_not_left 2 (lower (has_type (fits_in_64 ty) (band (bnot y) x))) (alu_rs_imm_logic (ALUOp.AndNot) ty x y)) (rule 3 (lower (has_type $I128 (band x (bnot y)))) (i128_alu_bitop (ALUOp.AndNot) $I64 x y)) @@ -1339,7 +1365,7 @@ ;;;; Rules for `bor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule -1 (lower (has_type (fits_in_64 ty) (bor x y))) +(rule bor_fits_in_64 -1 (lower (has_type (fits_in_64 ty) (bor x y))) (alu_rs_imm_logic_commutative (ALUOp.Orr) ty x y)) (rule (lower (has_type $I128 (bor x y))) (i128_alu_bitop (ALUOp.Orr) $I64 x y)) @@ -1350,10 +1376,9 @@ ;; Specialized lowerings for `(bor x (bnot y))` which is additionally produced ;; by Cranelift's `bor_not` instruction that is legalized into the simpler ;; forms early on. 
- -(rule 1 (lower (has_type (fits_in_64 ty) (bor x (bnot y)))) +(rule bor_not_right 1 (lower (has_type (fits_in_64 ty) (bor x (bnot y)))) (alu_rs_imm_logic (ALUOp.OrrNot) ty x y)) -(rule 2 (lower (has_type (fits_in_64 ty) (bor (bnot y) x))) +(rule bor_not_left 2 (lower (has_type (fits_in_64 ty) (bor (bnot y) x))) (alu_rs_imm_logic (ALUOp.OrrNot) ty x y)) (rule 3 (lower (has_type $I128 (bor x (bnot y)))) (i128_alu_bitop (ALUOp.OrrNot) $I64 x y)) @@ -1361,7 +1386,7 @@ ;;;; Rules for `bxor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule -1 (lower (has_type (fits_in_64 ty) (bxor x y))) +(rule bxor_fits_in_64 -1 (lower (has_type (fits_in_64 ty) (bxor x y))) (alu_rs_imm_logic_commutative (ALUOp.Eor) ty x y)) (rule (lower (has_type $I128 (bxor x y))) (i128_alu_bitop (ALUOp.Eor) $I64 x y)) @@ -1373,9 +1398,9 @@ ;; by Cranelift's `bxor_not` instruction that is legalized into the simpler ;; forms early on. -(rule 1 (lower (has_type (fits_in_64 ty) (bxor x (bnot y)))) +(rule bxor_not_right 1 (lower (has_type (fits_in_64 ty) (bxor x (bnot y)))) (alu_rs_imm_logic (ALUOp.EorNot) ty x y)) -(rule 2 (lower (has_type (fits_in_64 ty) (bxor (bnot y) x))) +(rule bxor_not_left 2 (lower (has_type (fits_in_64 ty) (bxor (bnot y) x))) (alu_rs_imm_logic (ALUOp.EorNot) ty x y)) (rule 3 (lower (has_type $I128 (bxor x (bnot y)))) (i128_alu_bitop (ALUOp.EorNot) $I64 x y)) @@ -1384,11 +1409,11 @@ ;;;; Rules for `ishl` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Shift for i8/i16/i32. -(rule -1 (lower (has_type (fits_in_32 ty) (ishl x y))) +(rule ishl_fits_in_32 -1 (lower (has_type (fits_in_32 ty) (ishl x y))) (do_shift (ALUOp.Lsl) ty x y)) ;; Shift for i64. -(rule (lower (has_type $I64 (ishl x y))) +(rule ishl_64 (lower (has_type $I64 (ishl x y))) (do_shift (ALUOp.Lsl) $I64 x y)) ;; Shift for i128. @@ -1439,6 +1464,44 @@ ;; ;; Note that this automatically handles the clif semantics of masking the ;; shift amount where necessary. + (spec (do_shift op t a b) + (provide + (= result + (switch op + ((ALUOp.Lsr) (conv_to 64 + (bvlshr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Asr) (conv_to 64 + (bvashr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Lsl) (conv_to 64 + (bvshl (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b))))))))) + (require + (or (= op (ALUOp.Lsr)) (= op (ALUOp.Asr)) (= op (ALUOp.Lsl))) + (= t (widthof b)) + (or (= t 8) (= t 16) (= t 32) (= t 64)) + (switch op + ((ALUOp.Lsr) (switch t + (8 (= (extract 31 0 a) (zero_ext 32 (extract 7 0 a)))) + (16 (= (extract 31 0 a) (zero_ext 32 (extract 15 0 a)))) + (32 $true) + (64 $true))) + ((ALUOp.Asr) (switch t + (8 (= (extract 31 0 a) (sign_ext 32 (extract 7 0 a)))) + (16 (= (extract 31 0 a) (sign_ext 32 (extract 15 0 a)))) + (32 $true) + (64 $true))) + ((ALUOp.Lsl) $true)))) +(instantiate do_shift + ((args (bv 8) Int (bv 64) (bv 8)) (ret (bv 64)) (canon (bv 8))) + ((args (bv 8) Int (bv 64) (bv 16)) (ret (bv 64)) (canon (bv 16))) + ((args (bv 8) Int (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 32))) + ((args (bv 8) Int (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))) +) (decl do_shift (ALUOp Type Reg Value) Reg) ;; 8/16-bit shift base case. 
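;; For illustration only (not part of the patch): the `bvand` term in the
;; `do_shift` spec above ands the shift amount with `width - 1`, i.e. reduces
;; it modulo the type width, matching CLIF's masking semantics. A concrete
;; SMT-LIB instance for an i8 shifted by 9, which must act like a shift by 1:
(assert (not (= (bvand #x09 (bvsub #x08 #x01)) #x01)))
(check-sat) ; unsat: 9 & (8 - 1) = 1, exactly what the spec's mask requires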
@@ -1450,17 +1513,19 @@ ;; On i32 and i64 types this matches what the aarch64 spec does, but on smaller ;; types (i16, i8) we need to do this manually, so we wrap the shift amount ;; with an AND instruction -(rule -1 (do_shift op (fits_in_16 ty) x y) +(rule do_shift_fits_in_16 -1 (do_shift op (fits_in_16 ty) x y) (let ((shift_amt Reg (value_regs_get y 0)) (masked_shift_amt Reg (and_imm $I32 shift_amt (shift_mask ty)))) (alu_rrr op $I32 x masked_shift_amt))) + (spec (shift_mask t) + (provide (= (bvsub (int2bv 64 t) #x0000000000000001) result))) (decl shift_mask (Type) ImmLogic) (extern constructor shift_mask shift_mask) ;; 32/64-bit shift base cases. -(rule (do_shift op $I32 x y) (alu_rrr op $I32 x (value_regs_get y 0))) -(rule (do_shift op $I64 x y) (alu_rrr op $I64 x (value_regs_get y 0))) +(rule do_shift_32_base_case (do_shift op $I32 x y) (alu_rrr op $I32 x (value_regs_get y 0))) +(rule do_shift_64_base_case (do_shift op $I64 x y) (alu_rrr op $I64 x (value_regs_get y 0))) ;; Special case for shifting by a constant value where the value can fit into an ;; `ImmShift`. @@ -1468,18 +1533,18 @@ ;; Note that this rule explicitly has a higher priority than the others ;; to ensure it's attempted first, otherwise the type-based filters on the ;; previous rules seem to take priority over this rule. -(rule 1 (do_shift op ty x (iconst k)) +(rule do_shift_imm 1 (do_shift op ty x (iconst k)) (if-let shift (imm_shift_from_imm64 ty k)) (alu_rr_imm_shift op ty x shift)) ;;;; Rules for `ushr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Shift for i8/i16/i32. -(rule -1 (lower (has_type (fits_in_32 ty) (ushr x y))) +(rule ushr_fits_in_32 -1 (lower (has_type (fits_in_32 ty) (ushr x y))) (do_shift (ALUOp.Lsr) ty (put_in_reg_zext32 x) y)) ;; Shift for i64. -(rule (lower (has_type $I64 (ushr x y))) +(rule ushr_64 (lower (has_type $I64 (ushr x y))) (do_shift (ALUOp.Lsr) $I64 (put_in_reg_zext64 x) y)) ;; Shift for i128. @@ -1532,11 +1597,11 @@ ;;;; Rules for `sshr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Shift for i8/i16/i32. -(rule -4 (lower (has_type (fits_in_32 ty) (sshr x y))) +(rule sshr_fits_in_32 -4 (lower (has_type (fits_in_32 ty) (sshr x y))) (do_shift (ALUOp.Asr) ty (put_in_reg_sext32 x) y)) ;; Shift for i64. -(rule (lower (has_type $I64 (sshr x y))) +(rule sshr_64 (lower (has_type $I64 (sshr x y))) (do_shift (ALUOp.Asr) $I64 (put_in_reg_sext64 x) y)) ;; Shift for i128. @@ -1592,13 +1657,13 @@ ;;;; Rules for `rotl` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; General 8/16-bit case. -(rule -2 (lower (has_type (fits_in_16 ty) (rotl x y))) +(rule rotl_fits_in_16 -2 (lower (has_type (fits_in_16 ty) (rotl x y))) (let ((amt Reg (value_regs_get y 0)) (neg_shift Reg (sub $I32 (zero_reg) amt))) (small_rotr ty (put_in_reg_zext32 x) neg_shift))) ;; Specialization for the 8/16-bit case when the rotation amount is an immediate. -(rule -1 (lower (has_type (fits_in_16 ty) (rotl x (iconst k)))) +(rule rotl_fits_in_16_imm -1 (lower (has_type (fits_in_16 ty) (rotl x (iconst k)))) (if-let n (imm_shift_from_imm64 ty k)) (small_rotr_imm ty (put_in_reg_zext32 x) (negate_imm_shift ty n))) @@ -1611,27 +1676,36 @@ ;; amount. ;; General 32-bit case. -(rule (lower (has_type $I32 (rotl x y))) +(rule rotl_32_base_case (lower (has_type $I32 (rotl x y))) (let ((amt Reg (value_regs_get y 0)) (neg_shift Reg (sub $I32 (zero_reg) amt))) (a64_rotr $I32 x neg_shift))) ;; General 64-bit case. 
-(rule (lower (has_type $I64 (rotl x y))) +(rule rotl_64_base_case (lower (has_type $I64 (rotl x y))) (let ((amt Reg (value_regs_get y 0)) (neg_shift Reg (sub $I64 (zero_reg) amt))) (a64_rotr $I64 x neg_shift))) ;; Specialization for the 32-bit case when the rotation amount is an immediate. -(rule 1 (lower (has_type $I32 (rotl x (iconst k)))) +(rule rotl_32_imm 1 (lower (has_type $I32 (rotl x (iconst k)))) (if-let n (imm_shift_from_imm64 $I32 k)) (a64_rotr_imm $I32 x (negate_imm_shift $I32 n))) ;; Specialization for the 64-bit case when the rotation amount is an immediate. -(rule 1 (lower (has_type $I64 (rotl x (iconst k)))) +(rule rotl_64_imm 1 (lower (has_type $I64 (rotl x (iconst k)))) (if-let n (imm_shift_from_imm64 $I64 k)) (a64_rotr_imm $I64 x (negate_imm_shift $I64 n))) +;; fn negate_imm_shift(&mut self, ty: Type, mut imm: ImmShift) -> ImmShift { +;; let size = u8::try_from(ty.bits()).unwrap(); +;; imm.imm = size.wrapping_sub(imm.value()); +;; imm.imm &= size - 1; +;; imm +;; } + (spec (negate_imm_shift ty x) + (provide + (= result (bvand (bvsub (int2bv 6 ty) x) (bvsub (int2bv 6 ty) #b000001))))) (decl negate_imm_shift (Type ImmShift) ImmShift) (extern constructor negate_imm_shift negate_imm_shift) @@ -1651,29 +1725,29 @@ ;;;; Rules for `rotr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; General 8/16-bit case. -(rule -3 (lower (has_type (fits_in_16 ty) (rotr x y))) +(rule rotr_fits_in_16 -3 (lower (has_type (fits_in_16 ty) (rotr x y))) (small_rotr ty (put_in_reg_zext32 x) (value_regs_get y 0))) ;; General 32-bit case. -(rule -1 (lower (has_type $I32 (rotr x y))) +(rule rotr_32_base_case -1 (lower (has_type $I32 (rotr x y))) (a64_rotr $I32 x (value_regs_get y 0))) ;; General 64-bit case. -(rule -1 (lower (has_type $I64 (rotr x y))) +(rule rotr_64_base_case -1 (lower (has_type $I64 (rotr x y))) (a64_rotr $I64 x (value_regs_get y 0))) ;; Specialization for the 8/16-bit case when the rotation amount is an immediate. -(rule -2 (lower (has_type (fits_in_16 ty) (rotr x (iconst k)))) +(rule rotr_fits_in_16_imm -2 (lower (has_type (fits_in_16 ty) (rotr x (iconst k)))) (if-let n (imm_shift_from_imm64 ty k)) (small_rotr_imm ty (put_in_reg_zext32 x) n)) ;; Specialization for the 32-bit case when the rotation amount is an immediate. -(rule (lower (has_type $I32 (rotr x (iconst k)))) +(rule rotr_32_imm (lower (has_type $I32 (rotr x (iconst k)))) (if-let n (imm_shift_from_imm64 $I32 k)) (a64_rotr_imm $I32 x n)) ;; Specialization for the 64-bit case when the rotation amount is an immediate. 
-(rule (lower (has_type $I64 (rotr x (iconst k)))) +(rule rotr_64_imm (lower (has_type $I64 (rotr x (iconst k)))) (if-let n (imm_shift_from_imm64 $I64 k)) (a64_rotr_imm $I64 x n)) @@ -1689,8 +1763,21 @@ ;; lsr val_rshift, val, masked_amt ;; lsl val_lshift, val, neg_amt ;; orr rd, val_lshift val_rshift + (spec (small_rotr t x y) + (provide + (= result + (switch t + (8 (conv_to 64 (rotr (extract 7 0 x) (extract 7 0 y)))) + (16 (conv_to 64 (rotr (extract 15 0 x) (extract 15 0 y))))))) + (require + (or (= t 8) (= t 16)) + (switch t + (8 (= (extract 31 8 x) #x000000)) + (16 (= (extract 31 16 x) #x0000))))) +(instantiate small_rotr + ((args Int (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64)))) (decl small_rotr (Type Reg Reg) Reg) -(rule (small_rotr ty val amt) +(rule small_rotr (small_rotr ty val amt) (let ((masked_amt Reg (and_imm $I32 amt (rotr_mask ty))) (tmp_sub Reg (sub_imm $I32 masked_amt (u8_into_imm12 (ty_bits ty)))) (neg_amt Reg (sub $I32 (zero_reg) tmp_sub)) @@ -1698,6 +1785,7 @@ (val_lshift Reg (lsl $I32 val neg_amt))) (orr $I32 val_lshift val_rshift))) +(spec (rotr_mask x) (provide (= (bvsub (int2bv 64 x) #x0000000000000001) result))) (decl rotr_mask (Type) ImmLogic) (extern constructor rotr_mask rotr_mask) @@ -1710,12 +1798,30 @@ ;; lsr val_rshift, val, # ;; lsl val_lshift, val, ;; orr rd, val_lshift, val_rshift + +(spec (small_rotr_imm t x y) + (provide + (= result + (switch t + (8 (conv_to 64 (rotr (extract 7 0 x) (zero_ext 8 y)))) + (16 (conv_to 64 (rotr (extract 15 0 x) (zero_ext 16 y))))))) + (require + (or (= t 8) (= t 16)) + (switch t + (8 (= (extract 31 8 x) #x000000)) + (16 (= (extract 31 16 x) #x0000))) + (bvult y (int2bv 6 t)))) +(instantiate small_rotr_imm + ((args Int (bv 64) (bv 6)) (ret (bv 64)) (canon (bv 64)))) (decl small_rotr_imm (Type Reg ImmShift) Reg) -(rule (small_rotr_imm ty val amt) +(rule small_rotr_imm (small_rotr_imm ty val amt) (let ((val_rshift Reg (lsr_imm $I32 val amt)) (val_lshift Reg (lsl_imm $I32 val (rotr_opposite_amount ty amt)))) (orr $I32 val_lshift val_rshift))) +(spec (rotr_opposite_amount ty x) + (provide + (= (bvsub (int2bv 6 ty) (bvand x (bvsub (int2bv 6 ty) #b000001))) result))) (decl rotr_opposite_amount (Type ImmShift) ImmShift) (extern constructor rotr_opposite_amount rotr_opposite_amount) @@ -1758,16 +1864,16 @@ ;;;; Rules for `clz` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower (has_type $I8 (clz x))) +(rule clz_8 (lower (has_type $I8 (clz x))) (sub_imm $I32 (a64_clz $I32 (put_in_reg_zext32 x)) (u8_into_imm12 24))) -(rule (lower (has_type $I16 (clz x))) +(rule clz_16 (lower (has_type $I16 (clz x))) (sub_imm $I32 (a64_clz $I32 (put_in_reg_zext32 x)) (u8_into_imm12 16))) (rule (lower (has_type $I128 (clz x))) (lower_clz128 x)) -(rule -1 (lower (has_type ty (clz x))) +(rule clz_32_64 -1 (lower (has_type ty (clz x))) (a64_clz ty x)) ;; clz hi_clz, hi @@ -1788,10 +1894,10 @@ ;; then using a `clz` instruction since the tail zeros are the same as the ;; leading zeros of the reversed value. 
-(rule (lower (has_type $I8 (ctz x))) +(rule ctz_8 (lower (has_type $I8 (ctz x))) (a64_clz $I32 (orr_imm $I32 (rbit $I32 x) (u64_into_imm_logic $I32 0x800000)))) -(rule (lower (has_type $I16 (ctz x))) +(rule ctz_16 (lower (has_type $I16 (ctz x))) (a64_clz $I32 (orr_imm $I32 (rbit $I32 x) (u64_into_imm_logic $I32 0x8000)))) (rule (lower (has_type $I128 (ctz x))) @@ -1800,15 +1906,15 @@ (hi Reg (rbit $I64 (value_regs_get val 1)))) (lower_clz128 (value_regs hi lo)))) -(rule -1 (lower (has_type ty (ctz x))) +(rule ctz_32_64 -1 (lower (has_type ty (ctz x))) (a64_clz ty (rbit ty x))) ;;;; Rules for `cls` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower (has_type $I8 (cls x))) +(rule cls_8 (lower (has_type $I8 (cls x))) (sub_imm $I32 (a64_cls $I32 (put_in_reg_sext32 x)) (u8_into_imm12 24))) -(rule (lower (has_type $I16 (cls x))) +(rule cls_16 (lower (has_type $I16 (cls x))) (sub_imm $I32 (a64_cls $I32 (put_in_reg_sext32 x)) (u8_into_imm12 16))) ;; cls lo_cls, lo @@ -1834,7 +1940,7 @@ (csel (Cond.Eq) lo_sign_bits (zero_reg))))) (value_regs (add $I64 maybe_lo hi_cls) (imm $I64 (ImmExtend.Zero) 0)))) -(rule -1 (lower (has_type ty (cls x))) +(rule cls_32_64 -1 (lower (has_type ty (cls x))) (a64_cls ty x)) ;;;; Rules for `bswap` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -1878,25 +1984,25 @@ ;; if ty == i128: ;; mov out_hi, 0 -(rule (lower (has_type $I8 (popcnt x))) +(rule popcnt_8 (lower (has_type $I8 (popcnt x))) (let ((tmp Reg (mov_to_fpu x (ScalarSize.Size32))) (nbits Reg (vec_cnt tmp (VectorSize.Size8x8)))) (mov_from_vec nbits 0 (ScalarSize.Size8)))) ;; Note that this uses `addp` instead of `addv` as it's usually cheaper. -(rule (lower (has_type $I16 (popcnt x))) +(rule popcnt_16 (lower (has_type $I16 (popcnt x))) (let ((tmp Reg (mov_to_fpu x (ScalarSize.Size32))) (nbits Reg (vec_cnt tmp (VectorSize.Size8x8))) (added Reg (addp nbits nbits (VectorSize.Size8x8)))) (mov_from_vec added 0 (ScalarSize.Size8)))) -(rule (lower (has_type $I32 (popcnt x))) +(rule popcnt_32 (lower (has_type $I32 (popcnt x))) (let ((tmp Reg (mov_to_fpu x (ScalarSize.Size32))) (nbits Reg (vec_cnt tmp (VectorSize.Size8x8))) (added Reg (addv nbits (VectorSize.Size8x8)))) (mov_from_vec added 0 (ScalarSize.Size8)))) -(rule (lower (has_type $I64 (popcnt x))) +(rule popcnt_64 (lower (has_type $I64 (popcnt x))) (let ((tmp Reg (mov_to_fpu x (ScalarSize.Size64))) (nbits Reg (vec_cnt tmp (VectorSize.Size8x8))) (added Reg (addv nbits (VectorSize.Size8x8)))) @@ -1915,7 +2021,7 @@ ;;;; Rules for `bitselect` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower (has_type ty (bitselect c x y))) +(rule bitselect (lower (has_type ty (bitselect c x y))) (if (ty_int_ref_scalar_64 ty)) (let ((tmp1 Reg (and_reg ty x c)) (tmp2 Reg (bic ty y c))) @@ -1994,7 +2100,7 @@ (vec_size VectorSize (vector_size ty))) (value_reg (int_cmp_zero_swap cond rn vec_size)))) -(rule -1 (lower (icmp cond x @ (value_type in_ty) y)) +(rule icmp_8_16_32_64 -1 (lower (icmp cond x @ (value_type in_ty) y)) (lower_icmp_into_reg cond x y in_ty $I8)) ;;;; Rules for `trap` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -2359,16 +2465,16 @@ ;;;; Rules for loads ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower +(rule load_i8_aarch64_uload8 (lower (has_type $I8 (load flags address offset))) (aarch64_uload8 (amode $I8 address offset) flags)) -(rule (lower +(rule load_i16_aarch64_uload16 (lower (has_type $I16 (load flags address offset))) (aarch64_uload16 (amode $I16 address offset) 
flags)) -(rule (lower +(rule load_i32_aarch64_uload32 (lower (has_type $I32 (load flags address offset))) (aarch64_uload32 (amode $I32 address offset) flags)) -(rule (lower +(rule load_i64_aarch64_uload64 (lower (has_type $I64 (load flags address offset))) (aarch64_uload64 (amode $I64 address offset) flags)) (rule (lower @@ -2461,19 +2567,19 @@ ;;;; Rules for stores ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower +(rule store_i8_aarch64_store8 (lower (store flags value @ (value_type $I8) address offset)) (side_effect (aarch64_store8 (amode $I8 address offset) flags value))) -(rule (lower +(rule store_i16_aarch64_store16 (lower (store flags value @ (value_type $I16) address offset)) (side_effect (aarch64_store16 (amode $I16 address offset) flags value))) -(rule (lower +(rule store_i32_aarch64_store32 (lower (store flags value @ (value_type $I32) address offset)) (side_effect (aarch64_store32 (amode $I32 address offset) flags value))) -(rule (lower +(rule store_i64_aarch64_store64 (lower (store flags value @ (value_type $I64) address offset)) (side_effect (aarch64_store64 (amode $I64 address offset) flags value))) diff --git a/cranelift/codegen/src/isa/x64/inst.isle b/cranelift/codegen/src/isa/x64/inst.isle index 5a958bd0af60..0e144560798b 100644 --- a/cranelift/codegen/src/isa/x64/inst.isle +++ b/cranelift/codegen/src/isa/x64/inst.isle @@ -1051,6 +1051,7 @@ (decl synthetic_amode_to_reg_mem (SyntheticAmode) RegMem) (extern constructor synthetic_amode_to_reg_mem synthetic_amode_to_reg_mem) +(spec (amode_to_synthetic_amode amode) (provide (= result amode))) (decl amode_to_synthetic_amode (Amode) SyntheticAmode) (extern constructor amode_to_synthetic_amode amode_to_synthetic_amode) @@ -1075,6 +1076,32 @@ ;; the given MachLabel. (RipRelative (target MachLabel)))) +;; Model an Amode as a combination of flags and the calculated 64-bit address. +;; 16 bits 64 bits +;; [ flags | address ] +(model Amode (type (bv 80))) + +(spec (Amode.ImmReg simm base flags) + (provide (= result (concat flags (bvadd base (sign_ext 64 simm))))) + (require + (= (widthof simm) 32) + (= (widthof base) 64) + (= (widthof flags) 16))) + +(spec (Amode.ImmRegRegShift simm base index shift flags) + (provide + (= result + (concat flags + (bvadd + (bvadd base (sign_ext 64 simm)) + (bvshl index (zero_ext 64 shift)))))) + (require + (= (widthof simm) 32) + (= (widthof base) 64) + (= (widthof index) 64) + (= (widthof shift) 8) + (= (widthof flags) 16))) + ;; A helper to both check that the `Imm64` and `Offset32` values sum to less ;; than 32-bits AND return this summed `u32` value. Also, the `Imm64` will be ;; zero-extended from `Type` up to 64 bits. This is useful for `to_amode`. @@ -1115,6 +1142,10 @@ ;; In the future if mid-end optimizations fold constants into `Offset32` then ;; this in theory can "simply" delegate to the `amode_imm_reg` helper, and ;; below can delegate to `amode_imm_reg_reg_shift`, or something like that. +(spec (to_amode flags val offset) + (provide (= result (concat flags (bvadd val (sign_ext 64 offset))))) + (require + (= (widthof val) 64))) (decl to_amode (MemFlags Value Offset32) Amode) (rule 0 (to_amode flags base offset) (amode_imm_reg flags base offset)) @@ -1132,45 +1163,62 @@ ;; ;; In other words this function's job is to find constants and then defer to ;; `amode_imm_reg*`. 
+;; +(spec (to_amode_add flags x y offset) + (provide (= result (concat flags (bvadd (bvadd (sign_ext 64 x) (sign_ext 64 y)) (sign_ext 64 offset)))))) +(instantiate to_amode_add + ((args (bv 16) (bv 64) (bv 64) (bv 32)) (ret (bv 80)) (canon (bv 64)))) (decl to_amode_add (MemFlags Value Value Offset32) Amode) -(rule 0 (to_amode_add flags x y offset) +(rule to_amode_add_base_case 0 (to_amode_add flags x y offset) (amode_imm_reg_reg_shift flags x y offset)) -(rule 1 (to_amode_add flags x (i32_from_iconst c) offset) +(rule to_amode_add_const_rhs 1 (to_amode_add flags x (i32_from_iconst c) offset) (if-let sum (s32_add_fallible offset c)) (amode_imm_reg flags x sum)) -(rule 2 (to_amode_add flags (i32_from_iconst c) x offset) +(rule to_amode_add_const_lhs 2 (to_amode_add flags (i32_from_iconst c) x offset) (if-let sum (s32_add_fallible offset c)) (amode_imm_reg flags x sum)) -(rule 3 (to_amode_add flags (iadd x (i32_from_iconst c)) y offset) +(rule to_amode_add_const_fold_iadd_lhs_rhs 3 (to_amode_add flags (iadd x (i32_from_iconst c)) y offset) (if-let sum (s32_add_fallible offset c)) (amode_imm_reg_reg_shift flags x y sum)) -(rule 4 (to_amode_add flags (iadd (i32_from_iconst c) x) y offset) +(rule to_amode_add_const_fold_iadd_lhs_lhs 4 (to_amode_add flags (iadd (i32_from_iconst c) x) y offset) (if-let sum (s32_add_fallible offset c)) (amode_imm_reg_reg_shift flags x y sum)) -(rule 5 (to_amode_add flags x (iadd y (i32_from_iconst c)) offset) +(rule to_amode_add_const_fold_iadd_rhs_rhs 5 (to_amode_add flags x (iadd y (i32_from_iconst c)) offset) (if-let sum (s32_add_fallible offset c)) (amode_imm_reg_reg_shift flags x y sum)) -(rule 6 (to_amode_add flags x (iadd (i32_from_iconst c) y) offset) +(rule to_amode_add_const_fold_iadd_rhs_lhs 6 (to_amode_add flags x (iadd (i32_from_iconst c) y) offset) (if-let sum (s32_add_fallible offset c)) (amode_imm_reg_reg_shift flags x y sum)) ;; Final cases of amode lowering. Does not hunt for constants and only attempts ;; to pattern match add-of-shifts to generate fancier `ImmRegRegShift` modes, ;; otherwise falls back on `ImmReg`. 
+(spec (amode_imm_reg flags x offset) + (provide (= result (concat flags (bvadd (sign_ext 64 x) (sign_ext 64 offset)))))) +(instantiate amode_imm_reg + ((args (bv 16) (bv 64) (bv 32)) (ret (bv 80)) (canon (bv 64)))) (decl amode_imm_reg (MemFlags Value Offset32) Amode) -(rule 0 (amode_imm_reg flags base offset) +(rule amode_imm_reg_base 0 (amode_imm_reg flags base offset) (Amode.ImmReg offset base flags)) -(rule 1 (amode_imm_reg flags (iadd x y) offset) +(rule amode_imm_reg_iadd 1 (amode_imm_reg flags (iadd x y) offset) (amode_imm_reg_reg_shift flags x y offset)) +(spec (amode_imm_reg_reg_shift flags x y offset) + (provide (= result (concat flags (bvadd (sign_ext 64 (bvadd x y)) (sign_ext 64 offset))))) + (require + (= (widthof flags) 16) + (= (widthof x) (widthof y)) + (= (widthof offset) 32))) +(instantiate amode_imm_reg_reg_shift + ((args (bv 16) (bv 64) (bv 64) (bv 32)) (ret (bv 80)) (canon (bv 64)))) (decl amode_imm_reg_reg_shift (MemFlags Value Value Offset32) Amode) -(rule 0 (amode_imm_reg_reg_shift flags x y offset) +(rule amode_imm_reg_reg_shift_no_shift 0 (amode_imm_reg_reg_shift flags x y offset) (Amode.ImmRegRegShift offset x y 0 flags)) ;; 0 == y<<0 == "no shift" -(rule 1 (amode_imm_reg_reg_shift flags x (ishl y (iconst (uimm8 shift))) offset) +(rule amode_imm_reg_reg_shift_shl_rhs 1 (amode_imm_reg_reg_shift flags x (ishl y (iconst (uimm8 shift))) offset) (if (u32_lteq (u8_as_u32 shift) 3)) (Amode.ImmRegRegShift offset x y shift flags)) -(rule 2 (amode_imm_reg_reg_shift flags (ishl y (iconst (uimm8 shift))) x offset) +(rule amode_imm_reg_reg_shift_shl_lhs 2 (amode_imm_reg_reg_shift flags (ishl y (iconst (uimm8 shift))) x offset) (if (u32_lteq (u8_as_u32 shift) 3)) (Amode.ImmRegRegShift offset x y shift flags)) @@ -1180,6 +1228,7 @@ (extern constructor amode_offset amode_offset) ;; Return a zero offset as an `Offset32`. +(spec (zero_offset) (provide (= result #x00000000))) (decl zero_offset () Offset32) (extern constructor zero_offset zero_offset) @@ -1653,6 +1702,7 @@ ;; ;; Moves the value into a GPR if it is a type that would naturally go into an ;; XMM register. +(spec (put_in_gpr arg) (provide (= result (conv_to 64 arg)))) (decl put_in_gpr (Value) Gpr) ;; Case for when the value naturally lives in a GPR. @@ -1702,6 +1752,8 @@ (extern constructor put_in_xmm_mem_imm put_in_xmm_mem_imm) ;; Construct an `InstOutput` out of a single GPR register. +(spec (output_gpr x) + (provide (= result (conv_to (widthof result) x)))) (decl output_gpr (Gpr) InstOutput) (rule (output_gpr x) (output_reg (gpr_to_reg x))) @@ -1752,6 +1804,9 @@ (decl is_xmm_type (Type) Type) (extractor (is_xmm_type ty) (and (type_register_class (RegisterClass.Xmm)) ty)) +(spec (is_gpr_type arg) + (provide (= result arg)) + (require (<= arg 64))) (decl is_gpr_type (Type) Type) (extractor (is_gpr_type ty) (and (type_register_class (RegisterClass.Gpr _)) ty)) @@ -1846,6 +1901,8 @@ ;; instructions which load exactly the type width necessary use ;; `sinkable_load_exact`. (decl sinkable_load (SinkableLoad) Value) +(spec (sinkable_load inst) + (provide (= result inst))) (extern extractor sinkable_load sinkable_load) ;; Same as `sinkable_load` except that all type widths of loads are supported. 
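;; Sanity-check sketch, not in the patch: the specs above pack an `Amode` into
;; 80 bits as `(concat flags address)`, with the 16 `MemFlags` bits in 79..64
;; and the computed 64-bit address in 63..0, which is why the load/store
;; helpers use `(extract 79 64 ...)` and `(extract 63 0 ...)`. The round-trip
;; is easy to confirm in SMT-LIB:
(declare-const flags (_ BitVec 16))
(declare-const addr (_ BitVec 64))
(assert (not (and (= flags ((_ extract 79 64) (concat flags addr)))
                  (= addr ((_ extract 63 0) (concat flags addr))))))
(check-sat) ; unsat: both fields are always recoverable from the packed value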
@@ -1878,6 +1935,8 @@ (rule (sink_load_to_gpr_mem load) (RegMem.Mem load)) (decl sink_load_to_reg_mem_imm (SinkableLoad) RegMemImm) +(spec (sink_load_to_reg_mem_imm load) + (provide (= result load))) (rule (sink_load_to_reg_mem_imm load) (RegMemImm.Mem load)) ;;;; Helpers for constructing and emitting an `MInst` ;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -2193,9 +2252,33 @@ (type ExtendKind (enum Sign Zero)) +(model ExtMode (enum + (BL #b000) + (BQ #b001) + (WL #b010) + (WQ #b011) + (LQ #b100) +)) (type ExtMode extern (enum BL BQ WL WQ LQ)) ;; `ExtMode::new` + +(spec (ext_mode x y) + (provide (= result (switch x + (#x0008 (switch y + (#x0020 (ExtMode.BL)) + (#x0040 (ExtMode.BQ)) + )) + (#x0010 (switch y + (#x0020 (ExtMode.WL)) + (#x0040 (ExtMode.WQ)) + )) + (#x0020 (switch y + (#x0040 (ExtMode.LQ)) + )) + )) + ) +) (decl ext_mode (u16 u16) ExtMode) (extern constructor ext_mode ext_mode) @@ -2352,10 +2435,33 @@ (x64_movdqu_load addr)) (decl x64_mov (Amode) Reg) +(spec (x64_mov addr) + (provide (= result (conv_to 64 (load_effect (extract 79 64 addr) 64 (extract 63 0 addr)))))) (rule (x64_mov addr) (mov64_mr addr)) (decl x64_movzx (ExtMode GprMem) Gpr) +(spec (x64_movzx mode src) + (provide + (= result + (conv_to + 64 + (zero_ext + 32 + (load_effect + (extract 79 64 src) + (switch mode + ((ExtMode.BL) 8) + ((ExtMode.BQ) 8) + ((ExtMode.WL) 16) + ((ExtMode.WQ) 16) + ((ExtMode.LQ) 32)) + (extract 63 0 src)))))) + (require (or (= mode (ExtMode.BL)) + (= mode (ExtMode.BQ)) + (= mode (ExtMode.WL)) + (= mode (ExtMode.WQ)) + (= mode (ExtMode.LQ))))) (rule (x64_movzx mode src) (let ((dst WritableGpr (temp_writable_gpr)) (_ Unit (emit (MInst.MovzxRmR mode src dst)))) @@ -2512,6 +2618,8 @@ (xmm_unary_rm_r_vex (AvxOpcode.Vpmovzxdq) from)) (decl x64_movrm (Type SyntheticAmode Gpr) SideEffectNoResult) +(spec (x64_movrm ty addr data) + (provide (= result (store_effect (extract 79 64 addr) ty (conv_to ty data) (extract 63 0 addr))))) (rule (x64_movrm ty addr data) (let ((size OperandSize (raw_operand_size_of_type ty))) (SideEffectNoResult.Inst (MInst.MovRM size data addr)))) @@ -4261,6 +4369,9 @@ (inst MInst (MInst.Neg size src dst))) (ProducesFlags.ProducesFlagsReturnsResultWithConsumer inst dst))) +(spec (x64_lea ty amode) + (provide (= result amode)) + (require (or (= ty 32) (= ty 64)))) (decl x64_lea (Type SyntheticAmode) Gpr) (rule (x64_lea ty addr) (let ((dst WritableGpr (temp_writable_gpr)) @@ -4728,6 +4839,16 @@ (SideEffectNoResult.Inst (MInst.AluRM size opcode src1_dst src2)))) (decl x64_add_mem (Type Amode Gpr) SideEffectNoResult) +(spec (x64_add_mem ty addr val) + (provide (= result (store_effect + (extract 79 64 addr) + ty + (conv_to ty (bvadd (load_effect (extract 79 64 addr) ty (extract 63 0 addr)) (conv_to ty val))) + (extract 63 0 addr)) + ) + ) + (require (or (= ty 32) (= ty 64))) +) (rule (x64_add_mem ty addr val) (alu_rm ty (AluRmiROpcode.Add) addr val)) @@ -5370,6 +5491,9 @@ (value_reg w_xmm)) (decl synthetic_amode_to_gpr_mem (SyntheticAmode) GprMem) + +(spec (amode_to_gpr_mem amode) + (provide (= result amode))) (decl amode_to_gpr_mem (Amode) GprMem) (rule (amode_to_gpr_mem amode) (amode_to_synthetic_amode amode)) diff --git a/cranelift/codegen/src/isa/x64/lower.isle b/cranelift/codegen/src/isa/x64/lower.isle index e70e52d063aa..4d2267acb952 100644 --- a/cranelift/codegen/src/isa/x64/lower.isle +++ b/cranelift/codegen/src/isa/x64/lower.isle @@ -2,6 +2,8 @@ ;; The main lowering constructor term: takes a clif `Inst` and returns the ;; register(s) within which the lowered instruction's result values 
live. +(spec (lower arg) + (provide (= result arg))) (decl partial lower (Inst) InstOutput) ;; A variant of the main lowering constructor term, used for branches. @@ -61,7 +63,7 @@ ;; but the actual instruction emitted might be an `add` if it's equivalent. ;; For more details on this see the `emit.rs` logic to emit ;; `LoadEffectiveAddress`. -(rule -5 (lower (has_type (ty_32_or_64 ty) (iadd x y))) +(rule iadd_base_case_32_or_64_lea -5 (lower (has_type (ty_32_or_64 ty) (iadd x y))) (x64_lea ty (to_amode_add (mem_flags_trusted) x y (zero_offset)))) ;; Higher-priority cases than the previous two where a load can be sunk into @@ -2961,11 +2963,11 @@ ;; 8-bit loads. ;; ;; By default, we zero-extend all sub-64-bit loads to a GPR. -(rule -4 (lower (has_type (and (fits_in_32 ty) (is_gpr_type _)) (load flags address offset))) +(rule load_sub64_x64_movzx -4 (lower (has_type (and (fits_in_32 ty) (is_gpr_type _)) (load flags address offset))) (x64_movzx (ext_mode (ty_bits_u16 ty) 64) (to_amode flags address offset))) ;; But if we know that both the `from` and `to` are 64 bits, we simply load with ;; no extension. -(rule -1 (lower (has_type (ty_int_ref_64 ty) (load flags address offset))) +(rule load_64_x64_movzx -1 (lower (has_type (ty_int_ref_64 ty) (load flags address offset))) (x64_mov (to_amode flags address offset))) ;; Also, certain scalar loads have a specific `from` width and extension kind ;; (signed -> `sx`, zeroed -> `zx`). We overwrite the high bits of the 64-bit @@ -3048,7 +3050,7 @@ ;; Rules for `store*` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; 8-, 16-, 32- and 64-bit GPR stores. -(rule -2 (lower (store flags +(rule store_x64_movrm -2 (lower (store flags value @ (value_type (is_gpr_type ty)) address offset)) @@ -3200,7 +3202,7 @@ ;; Rules for `load*` + ALU op + `store*` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Add mem, reg -(rule 3 (lower +(rule store_x64_add_mem 3 (lower (store flags (has_type (ty_32_or_64 ty) (iadd (and diff --git a/cranelift/codegen/src/prelude.isle b/cranelift/codegen/src/prelude.isle index 3f4c8d12bf83..b4320b3c1627 100644 --- a/cranelift/codegen/src/prelude.isle +++ b/cranelift/codegen/src/prelude.isle @@ -8,30 +8,50 @@ ;; `()` (type Unit (primitive Unit)) +(model Unit (type Unit)) (decl pure unit () Unit) (extern constructor unit unit) +(model bool (type Bool)) (type bool (primitive bool)) (extern const $true bool) (extern const $false bool) +(model u8 (type (bv 8))) (type u8 (primitive u8)) + +(model u16 (type (bv 16))) (type u16 (primitive u16)) + +(model u32 (type (bv 32))) (type u32 (primitive u32)) + +(model u64 (type (bv 64))) (type u64 (primitive u64)) (type u128 (primitive u128)) + +(model usize (type (bv))) (type usize (primitive usize)) +(model i8 (type (bv 8))) (type i8 (primitive i8)) + +(model i16 (type (bv 16))) (type i16 (primitive i16)) + +(model i32 (type (bv 32))) (type i32 (primitive i32)) + +(model i64 (type (bv 64))) (type i64 (primitive i64)) (type i128 (primitive i128)) (type isize (primitive isize)) ;; `cranelift-entity`-based identifiers. +(model Type (type Int)) (type Type (primitive Type)) +(model Value (type (bv))) (type Value (primitive Value)) (type ValueList (primitive ValueList)) (type BlockCall (primitive BlockCall)) @@ -40,6 +60,7 @@ (type ValueSlice (primitive ValueSlice)) ;; Extract the type of a `Value`. 
+(spec (value_type arg) (provide (= arg (widthof result)))) (decl value_type (Type) Value) (extern extractor infallible value_type value_type) @@ -54,6 +75,11 @@ ;; Pure/fallible constructor that tests if one u32 is less than or ;; equal to another. +(spec (u32_lteq a b) + (provide (= result ())) + (require (<= a b) + (= (widthof a) 32) + (= (widthof b) 32))) (decl pure partial u32_lteq (u32 u32) Unit) (extern constructor u32_lteq u32_lteq) @@ -72,6 +98,11 @@ (decl pure u8_as_i8 (u8) i8) (extern constructor u8_as_i8 u8_as_i8) +(spec (u8_as_u32 arg) + (provide (= result (zero_ext 32 arg))) + (require + (= (widthof arg) 8) + (= (widthof result) 32))) (decl pure u8_as_u32 (u8) u32) (extern constructor u8_as_u32 u8_as_u32) (convert u8 u32 u8_as_u32) @@ -123,9 +154,11 @@ (extern constructor i32_as_i64 i32_as_i64) (convert i32 i64 i32_as_i64) +(spec (i64_as_u64 arg) (provide (= arg result))) (decl pure i64_as_u64 (i64) u64) (extern constructor i64_as_u64 i64_as_u64) +(spec (i64_neg x) (provide (= result (bvneg x)))) (decl pure i64_neg (i64) i64) (extern constructor i64_neg i64_neg) @@ -161,6 +194,7 @@ (decl pure u32_add (u32 u32) u32) (extern constructor u32_add u32_add) +(spec (u32_sub a b) (provide (= result (bvsub a b)))) (decl pure u32_sub (u32 u32) u32) (extern constructor u32_sub u32_sub) @@ -172,12 +206,17 @@ ;; Pure/fallible constructor that tries to add two `u32`s, interpreted ;; as signed values, and fails to match on overflow. +(spec (s32_add_fallible x y) + (provide (= result (bvadd x y))) + (require (not (bvsaddo x y)))) (decl pure partial s32_add_fallible (i32 i32) i32) (extern constructor s32_add_fallible s32_add_fallible) (decl pure u64_add (u64 u64) u64) (extern constructor u64_add u64_add) +(spec (u64_sub a b) + (provide (= result (bvsub a b)))) (decl pure u64_sub (u64 u64) u64) (extern constructor u64_sub u64_sub) @@ -233,6 +272,7 @@ (decl pure i64_sextend_u64 (Type u64) i64) (extern constructor i64_sextend_u64 i64_sextend_u64) +(spec (i64_sextend_imm64 ty a) (provide (= result (sign_ext 64 (conv_to ty a))))) (decl pure i64_sextend_imm64 (Type Imm64) i64) (extern constructor i64_sextend_imm64 i64_sextend_imm64) @@ -257,6 +297,8 @@ (decl i64_nonzero (i64) i64) (extractor (i64_nonzero x) (and (i64_is_zero $false) x)) +;; x & 1 == 1 +(spec (u64_is_odd arg) (provide (= result (= #b1 (extract 0 0 arg))))) (decl pure u64_is_odd (u64) bool) (extern constructor u64_is_odd u64_is_odd) @@ -395,10 +437,13 @@ (extern constructor ty_smax ty_smax) ;; Get the bit width of a given type. +(spec (ty_bits x) (provide (= result (int2bv 8 x)))) (decl pure ty_bits (Type) u8) (extern constructor ty_bits ty_bits) ;; Get the bit width of a given type. +(spec (ty_bits_u16 x) + (provide (= result (int2bv 16 x)))) (decl pure ty_bits_u16 (Type) u16) (extern constructor ty_bits_u16 ty_bits_u16) @@ -444,7 +489,12 @@ ;;;; `cranelift_codegen::ir::MemFlags ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Provide model for the MemFlags type (declared in generated clif_lower.isle). +(model MemFlags (type (bv 16))) + ;; `MemFlags::trusted` +(spec (mem_flags_trusted) + (provide (= result #x0003))) (decl pure mem_flags_trusted () MemFlags) (extern constructor mem_flags_trusted mem_flags_trusted) @@ -509,10 +559,16 @@ (extractor (sge ty x y) (icmp ty (IntCC.SignedGreaterThanOrEqual) x y)) ;; An extractor that only matches types that can fit in 16 bits. 
+(spec (fits_in_16 arg) + (provide (= result arg)) + (require (<= arg 16))) (decl fits_in_16 (Type) Type) (extern extractor fits_in_16 fits_in_16) ;; An extractor that only matches types that can fit in 32 bits. +(spec (fits_in_32 arg) + (provide (= result arg)) + (require (<= arg 32))) (decl fits_in_32 (Type) Type) (extern extractor fits_in_32 fits_in_32) @@ -521,6 +577,9 @@ (extern extractor lane_fits_in_32 lane_fits_in_32) ;; An extractor that only matches types that can fit in 64 bits. +(spec (fits_in_64 arg) + (provide (= result arg)) + (require (<= arg 64))) (decl fits_in_64 (Type) Type) (extern extractor fits_in_64 fits_in_64) @@ -534,11 +593,17 @@ ;; A pure constructor/extractor that only matches scalar integers, and ;; references that can fit in 64 bits. +(spec (ty_int_ref_scalar_64 arg) + (provide (= result arg)) + (require (<= arg 64))) (decl pure partial ty_int_ref_scalar_64 (Type) Type) (extern constructor ty_int_ref_scalar_64 ty_int_ref_scalar_64) (extern extractor ty_int_ref_scalar_64 ty_int_ref_scalar_64_extract) ;; An extractor that matches 32- and 64-bit types only. +(spec (ty_32_or_64 arg) + (provide (= result arg)) + (require (or (= arg 32) (= arg 64)))) (decl ty_32_or_64 (Type) Type) (extern extractor ty_32_or_64 ty_32_or_64) @@ -563,6 +628,7 @@ (extern extractor ty_int_ref_16_to_64 ty_int_ref_16_to_64) ;; An extractor that only matches integers. +(spec (ty_int a) (provide (= result a))) (decl ty_int (Type) Type) (extern extractor ty_int ty_int) @@ -637,6 +703,7 @@ (extern extractor infallible u64_from_bool u64_from_bool) ;; Extract a `u64` from an `Imm64`. +(spec (u64_from_imm64 arg) (provide (= arg result))) (decl u64_from_imm64 (u64) Imm64) (extern extractor infallible u64_from_imm64 u64_from_imm64) @@ -689,10 +756,12 @@ (extern extractor ty_dyn128_int ty_dyn128_int) ;; Convert an `Offset32` to a primitive number. +(spec (offset32_to_i32 offset) (provide (= result offset))) (decl pure offset32_to_i32 (Offset32) i32) (extern constructor offset32_to_i32 offset32_to_i32) ;; Convert a number to an `Offset32` +(spec (i32_to_offset32 x) (provide (= result x))) (decl pure i32_to_offset32 (i32) Offset32) (extern constructor i32_to_offset32 i32_to_offset32) @@ -703,6 +772,9 @@ (extern constructor intcc_unsigned intcc_unsigned) ;; Pure constructor that only matches signed integer cond codes. 
+(spec (signed_cond_code c) + (provide (= result c)) + (require (and (bvuge c #x02) (bvule c #x05)))) (decl pure partial signed_cond_code (IntCC) IntCC) (extern constructor signed_cond_code signed_cond_code) @@ -749,3 +821,29 @@ (convert Offset32 i32 offset32_to_i32) (convert i32 Offset32 i32_to_offset32) + +;;;; Common Term Signatures ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(form + bv_unary_8_to_64 + ((args (bv 8)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 16)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 32)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 64)) (canon (bv 64))) +) + +(form + bv_binary_8_to_64 + ((args (bv 8) (bv 8)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 16) (bv 16)) (ret (bv 16)) (canon (bv 16))) + ((args (bv 32) (bv 32)) (ret (bv 32)) (canon (bv 32))) + ((args (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))) +) + +(form + bv_ternary_8_to_64 + ((args (bv 8) (bv 8) (bv 8)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 16) (bv 16) (bv 16)) (ret (bv 16)) (canon (bv 16))) + ((args (bv 32) (bv 32) (bv 32)) (ret (bv 32)) (canon (bv 32))) + ((args (bv 64) (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))) +) diff --git a/cranelift/codegen/src/prelude_lower.isle b/cranelift/codegen/src/prelude_lower.isle index 5d5a9a9644c7..bba1ac81eda5 100644 --- a/cranelift/codegen/src/prelude_lower.isle +++ b/cranelift/codegen/src/prelude_lower.isle @@ -29,6 +29,7 @@ ;;;; Registers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +(model Reg (type (bv))) (type Reg (primitive Reg)) (type WritableReg (primitive WritableReg)) (type OptionWritableReg (primitive OptionWritableReg)) @@ -37,6 +38,8 @@ (type PReg (primitive PReg)) ;; Construct a `ValueRegs` of one register. +(spec (value_reg arg) + (provide (= result arg))) (decl value_reg (Reg) ValueRegs) (extern constructor value_reg value_reg) @@ -61,6 +64,8 @@ (extern constructor output_none output_none) ;; Construct a single-element `InstOutput`. +(spec (output arg) + (provide (= arg (conv_to (widthof arg) result)))) (decl output (ValueRegs) InstOutput) (extern constructor output output) @@ -69,8 +74,16 @@ (extern constructor output_pair output_pair) ;; Construct a single-element `InstOutput` from a single register. +(spec (output_reg arg) + (provide (= result (conv_to (widthof result) arg)))) +(instantiate output_reg + ((args (bv 64)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 64)) (ret (bv 16)) (canon (bv 16))) + ((args (bv 64)) (ret (bv 32)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 64)) (canon (bv 64))) +) (decl output_reg (Reg) InstOutput) -(rule (output_reg reg) (output (value_reg reg))) +(rule output_reg (output_reg reg) (output (value_reg reg))) ;; Construct a single-element `InstOutput` from a value. (decl output_value (Value) InstOutput) @@ -119,12 +132,15 @@ ;; multiple registers for its representation (like `i128` on x64 for example). ;; ;; As a side effect, this marks the value as used. +(spec (put_in_reg arg) + (provide (= result (conv_to 64 arg)))) (decl put_in_reg (Value) Reg) (extern constructor put_in_reg put_in_reg) ;; Put the given value into one or more registers. ;; ;; As a side effect, this marks the value as used. +(spec (put_in_regs arg) (provide (= (conv_to 64 arg) result))) (decl put_in_regs (Value) ValueRegs) (extern constructor put_in_regs put_in_regs) @@ -134,6 +150,8 @@ (extern constructor ensure_in_vreg ensure_in_vreg) ;; Get the `n`th register inside a `ValueRegs`. 
+(spec (value_regs_get arg i) + (provide (= arg result) (= (widthof i) 1))) (decl value_regs_get (ValueRegs usize) Reg) (extern constructor value_regs_get value_regs_get) @@ -275,22 +293,30 @@ ;; Extract the type of the instruction's first result and pass along the ;; instruction as well. +(spec (has_type ty arg) + (provide (= result arg)) + (require (= ty (widthof arg)))) (decl has_type (Type Inst) Inst) (extractor (has_type ty inst) (and (result_type ty) inst)) ;; Match the instruction that defines the given value, if any. +(spec (def_inst arg) (provide (= result arg))) (decl def_inst (Inst) Value) (extern extractor def_inst def_inst) ;; Extract a constant `u64` from a value defined by an `iconst`. +(spec (u64_from_iconst arg) (provide (= arg (zero_ext 64 result)))) (decl u64_from_iconst (u64) Value) (extractor (u64_from_iconst x) (def_inst (iconst (u64_from_imm64 x)))) ;; Extract a constant `i32` from a value defined by an `iconst`. ;; The value is sign extended to 32 bits. +(spec (i32_from_iconst arg) + (provide (= arg (extract 31 0 (sign_ext 64 result)))) + (require (= result (sign_ext (widthof result) arg)))) (decl i32_from_iconst (i32) Value) (extern extractor i32_from_iconst i32_from_iconst) @@ -313,6 +339,10 @@ (extern extractor maybe_uextend maybe_uextend) ;; Get an unsigned 8-bit immediate in a u8 from an Imm64, if possible. +(spec (uimm8 arg) + (provide (= result (zero_ext 64 arg))) + (require (bvslt result #x0000000000000100) + (= (widthof arg) 8))) (decl uimm8 (u8) Imm64) (extern extractor uimm8 uimm8) @@ -364,6 +394,8 @@ (inst2 MInst) (inst3 MInst)))) +(model SideEffectNoResult (type Unit)) + ;; Emit given side-effectful instruction. (decl emit_side_effect (SideEffectNoResult) Unit) (rule (emit_side_effect (SideEffectNoResult.Inst inst)) @@ -379,6 +411,8 @@ ;; Create an empty `InstOutput`, but do emit the given side-effectful ;; instruction. (decl side_effect (SideEffectNoResult) InstOutput) +(spec (side_effect v) + (provide (= result v))) (rule (side_effect inst) (let ((_ Unit (emit_side_effect inst))) (output_none))) @@ -974,6 +1008,11 @@ (type RealReg (primitive RealReg)) ;; Instruction on whether and how to extend an argument value. +(model ArgumentExtension + (enum + (None) + (Uext) + (Sext))) (type ArgumentExtension extern (enum (None) @@ -1063,6 +1102,10 @@ (extern constructor gen_return_call_indirect gen_return_call_indirect) ;; Helper for extracting an immediate that's not 0 and not -1 from an imm64. 
+(spec (safe_divisor_from_imm64 t x)
+      (provide (= (sign_ext 64 x) result))
+      (require (not (= #x0000000000000000 result))
+               (not (= #xffffffffffffffff result))))
 (decl pure partial safe_divisor_from_imm64 (Type Imm64) u64)
 (extern constructor safe_divisor_from_imm64 safe_divisor_from_imm64)
diff --git a/cranelift/isle/isle/isle_examples/pass/veri_spec.isle b/cranelift/isle/isle/isle_examples/pass/veri_spec.isle
new file mode 100644
index 000000000000..191b390f0fa4
--- /dev/null
+++ b/cranelift/isle/isle/isle_examples/pass/veri_spec.isle
@@ -0,0 +1,24 @@
+(type u8 (primitive u8))
+
+(form
+  bv_unary_8_to_64
+  ((args (bv 8)) (ret (bv 8)) (canon (bv 8)))
+  ((args (bv 16)) (ret (bv 16)) (canon (bv 16)))
+  ((args (bv 32)) (ret (bv 32)) (canon (bv 32)))
+  ((args (bv 64)) (ret (bv 64)) (canon (bv 64)))
+)
+
+(spec (A i j) (provide (= (if $true (= i j) (= i (bvneg j))) (=> $false $true))))
+(instantiate A ((args (bv 8)) (ret (bv 8)) (canon (bv 8))))
+(decl A (u8 u8) u8)
+
+(spec (B i)
+  (provide (= (bvadd i #xff) #b00000000))
+  (require (= (= 1 2) false)))
+(instantiate B bv_unary_8_to_64)
+(decl B (u8) u8)
+
+(rule first 1 (A x x) x)
+(rule second 0 (A x _) 0)
+
+(rule third 1 (B x) x)
diff --git a/cranelift/isle/isle/src/ast.rs b/cranelift/isle/isle/src/ast.rs
index 6276645cb720..acc4bebc486f 100644
--- a/cranelift/isle/isle/src/ast.rs
+++ b/cranelift/isle/isle/src/ast.rs
@@ -13,6 +13,10 @@ pub enum Def {
     Rule(Rule),
     Extractor(Extractor),
     Decl(Decl),
+    Spec(Spec),
+    Model(Model),
+    Form(Form),
+    Instantiation(Instantiation),
     Extern(Extern),
     Converter(Converter),
 }
@@ -79,6 +83,195 @@ pub struct Decl {
     pub pos: Pos,
 }
 
+/// An expression used to specify term semantics, similar to SMT-LIB syntax.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum SpecExpr {
+    /// An operator that matches a constant integer value.
+    ConstInt {
+        val: i128,
+        pos: Pos,
+    },
+    /// An operator that matches a constant bitvector value.
+    ConstBitVec {
+        val: i128,
+        width: i8,
+        pos: Pos,
+    },
+    /// An operator that matches a constant boolean value.
+    ConstBool {
+        val: i8,
+        pos: Pos,
+    },
+    /// The Unit constant value.
+    ConstUnit {
+        pos: Pos,
+    },
+    /// A variable.
+    Var {
+        var: Ident,
+        pos: Pos,
+    },
+    /// An application of a type variant or term.
+    Op {
+        op: SpecOp,
+        args: Vec<SpecExpr>,
+        pos: Pos,
+    },
+    /// Pairs, currently used for switch statements.
+    Pair {
+        l: Box<SpecExpr>,
+        r: Box<SpecExpr>,
+    },
+    /// Enum variant values (enums defined by `model`).
+    Enum {
+        name: Ident,
+    },
+}
+
+/// An operation used to specify term semantics, similar to SMT-LIB syntax.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum SpecOp {
+    // Boolean operations
+    Eq,
+    And,
+    Or,
+    Not,
+    Imp,
+
+    // Integer comparisons
+    Lt,
+    Lte,
+    Gt,
+    Gte,
+
+    // Bitwise bitvector operations (directly SMT-LIB)
+    BVNot,
+    BVAnd,
+    BVOr,
+    BVXor,
+
+    // Bitvector arithmetic operations (directly SMT-LIB)
+    BVNeg,
+    BVAdd,
+    BVSub,
+    BVMul,
+    BVUdiv,
+    BVUrem,
+    BVSdiv,
+    BVSrem,
+    BVShl,
+    BVLshr,
+    BVAshr,
+
+    // Bitvector comparison operations (directly SMT-LIB)
+    BVUle,
+    BVUlt,
+    BVUgt,
+    BVUge,
+    BVSlt,
+    BVSle,
+    BVSgt,
+    BVSge,
+
+    // Bitvector overflow checks (SMT-LIB pending standardization)
+    BVSaddo,
+
+    // Desugared bitvector arithmetic operations
+    Rotr,
+    Rotl,
+    Extract,
+    ZeroExt,
+    SignExt,
+    Concat,
+
+    // Custom encodings
+    Subs,
+    Popcnt,
+    Clz,
+    Cls,
+    Rev,
+
+    // Conversion operations
+    ConvTo,
+    Int2BV,
+    BV2Int,
+    WidthOf,
+
+    // Control operations
+    If,
+    Switch,
+
+    LoadEffect,
+    StoreEffect,
+}
+
+/// A specification of the semantics of a term.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Spec {
+    /// The term name (must match a (decl ...))
+    pub term: Ident,
+    /// Argument names
+    pub args: Vec<Ident>,
+    /// Provide statements, which give the semantics of the produced value
+    pub provides: Vec<SpecExpr>,
+    /// Require statements, which express preconditions on the term
+    pub requires: Vec<SpecExpr>,
+}
+
+/// A model of an SMT-LIB type.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum ModelType {
+    /// SMT-LIB Int
+    Int,
+    /// SMT-LIB Bool
+    Bool,
+    /// SMT-LIB bitvector, but with a potentially-polymorphic width
+    BitVec(Option<usize>),
+    /// Unit (removed before conversion to SMT-LIB)
+    Unit,
+}
+
+/// A construct's value in SMT-LIB
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum ModelValue {
+    /// Correspond to ISLE types
+    TypeValue(ModelType),
+    /// Correspond to ISLE enums, identifier is the enum variant name
+    EnumValues(Vec<(Ident, SpecExpr)>),
+}
+
+/// A model of a construct into SMT-LIB (currently, types or enums)
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Model {
+    /// The name of the type or enum
+    pub name: Ident,
+    /// The value of the type or enum (potentially multiple values)
+    pub val: ModelValue,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Signature {
+    pub args: Vec<ModelType>,
+    pub ret: ModelType,
+    pub canonical: ModelType,
+    pub pos: Pos,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Form {
+    pub name: Ident,
+    pub signatures: Vec<Signature>,
+    pub pos: Pos,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Instantiation {
+    pub term: Ident,
+    pub form: Option<Ident>,
+    pub signatures: Vec<Signature>,
+    pub pos: Pos,
+}
+
 #[derive(Clone, PartialEq, Eq, Debug)]
 pub struct Rule {
     pub pattern: Pattern,
@@ -86,6 +279,7 @@
     pub expr: Expr,
     pub pos: Pos,
     pub prio: Option<i64>,
+    pub name: Option<Ident>,
 }
 
 #[derive(Clone, PartialEq, Eq, Debug)]
diff --git a/cranelift/isle/isle/src/compile.rs b/cranelift/isle/isle/src/compile.rs
index 6f5ca53f63d9..db3497fa9424 100644
--- a/cranelift/isle/isle/src/compile.rs
+++ b/cranelift/isle/isle/src/compile.rs
@@ -3,6 +3,7 @@
 use std::path::Path;
 use std::sync::Arc;
 
+use crate::ast::Def;
 use crate::error::Errors;
 use crate::files::Files;
 use crate::{ast, codegen, overlap, sema};
@@ -17,7 +18,7 @@ pub fn compile(
         Ok(type_env) => type_env,
         Err(errs) => return Err(Errors::new(errs, files)),
     };
-    let term_env = match sema::TermEnv::from_ast(&mut type_env, defs) {
+    let term_env = match sema::TermEnv::from_ast(&mut type_env, defs, true) {
         Ok(term_env) => term_env,
         Err(errs) => return Err(Errors::new(errs, files)),
     };
@@ -63,3 +64,43 @@ pub fn from_files<P: AsRef<Path>>(
     compile(files, &defs, options)
 }
+
+/// Construct the ISLE type and term environments for further analysis
+/// (i.e., verification), without going all the way through codegen.
+pub fn create_envs(
+    inputs: Vec<std::path::PathBuf>,
+) -> Result<(sema::TypeEnv, sema::TermEnv, Vec<Def>), Errors> {
+    let files = match Files::from_paths(inputs) {
+        Ok(files) => files,
+        Err((path, err)) => {
+            return Err(Errors::from_io(
+                err,
+                format!("cannot read file {}", path.display()),
+            ))
+        }
+    };
+    let files = Arc::new(files);
+    let mut defs = Vec::new();
+    for (file, src) in files.file_texts.iter().enumerate() {
+        let lexer = match crate::lexer::Lexer::new(file, src) {
+            Ok(lexer) => lexer,
+            Err(err) => return Err(Errors::new(vec![err], files)),
+        };
+
+        match crate::parser::parse(lexer) {
+            Ok(mut ds) => defs.append(&mut ds),
+            Err(err) => return Err(Errors::new(vec![err], files)),
+        }
+    }
+    let mut type_env = match sema::TypeEnv::from_ast(&defs) {
+        Ok(type_env) => type_env,
+        Err(errs) => return Err(Errors::new(errs, files)),
+    };
+    // We want to allow annotations on terms with internal extractors,
+    // so we avoid expanding them within the sema rules.
+    let term_env = match sema::TermEnv::from_ast(&mut type_env, &defs, false) {
+        Ok(term_env) => term_env,
+        Err(errs) => return Err(Errors::new(errs, files)),
+    };
+    Ok((type_env, term_env, defs))
+}
diff --git a/cranelift/isle/isle/src/lexer.rs b/cranelift/isle/isle/src/lexer.rs
index ec73f8ecdb42..bf1cd6cab360 100644
--- a/cranelift/isle/isle/src/lexer.rs
+++ b/cranelift/isle/isle/src/lexer.rs
@@ -91,14 +91,14 @@ impl<'src> Lexer<'src> {
     fn next_token(&mut self) -> Result<Option<(Pos, Token)>> {
         fn is_sym_first_char(c: u8) -> bool {
             match c {
-                b'-' | b'0'..=b'9' | b'(' | b')' | b';' | b'<' | b'>' => false,
+                b'-' | b'0'..=b'9' | b'(' | b')' | b';' => false,
                 c if c.is_ascii_whitespace() => false,
                 _ => true,
             }
         }
         fn is_sym_other_char(c: u8) -> bool {
             match c {
-                b'(' | b')' | b';' | b'@' | b'<' => false,
+                b'(' | b')' | b';' | b'@' => false,
                 c if c.is_ascii_whitespace() => false,
                 _ => true,
             }
         }
diff --git a/cranelift/isle/isle/src/parser.rs b/cranelift/isle/isle/src/parser.rs
index 08d6c882c78c..583f502f3f37 100644
--- a/cranelift/isle/isle/src/parser.rs
+++ b/cranelift/isle/isle/src/parser.rs
@@ -100,6 +100,20 @@ impl<'a> Parser<'a> {
         })
     }
 
+    fn is_spec_bit_vector(&self) -> bool {
+        self.is(|tok| match tok {
+            Token::Symbol(tok_s) if tok_s.starts_with("#x") || tok_s.starts_with("#b") => true,
+            _ => false,
+        })
+    }
+
+    fn is_spec_bool(&self) -> bool {
+        self.is(|tok| match tok {
+            Token::Symbol(tok_s) if tok_s == "$true" || tok_s == "$false" => true,
+            _ => false,
+        })
+    }
+
     fn expect_lparen(&mut self) -> Result<()> {
         self.expect(|tok| *tok == Token::LParen).map(|_| ())
     }
@@ -147,6 +161,10 @@ impl<'a> Parser<'a> {
             "pragma" => Def::Pragma(self.parse_pragma()?),
             "type" => Def::Type(self.parse_type()?),
             "decl" => Def::Decl(self.parse_decl()?),
+            "spec" => Def::Spec(self.parse_spec()?),
+            "model" => Def::Model(self.parse_model()?),
+            "form" => Def::Form(self.parse_form()?),
+            "instantiate" => Def::Instantiation(self.parse_instantiation()?),
             "rule" => Def::Rule(self.parse_rule()?),
             "extractor" => Def::Extractor(self.parse_etor()?),
             "extern" => Def::Extern(self.parse_extern()?),
@@ -319,6 +337,382 @@ impl<'a> Parser<'a> {
         })
     }
 
+    fn parse_spec(&mut self) -> Result<Spec> {
+        let pos = self.pos();
+        self.expect_lparen()?; // term with args: (spec (<term> <args>) (provide ...) ...)
+        let term = self.parse_ident()?;
+        let mut args = vec![];
+        while !self.is_rparen() {
+            args.push(self.parse_ident()?);
+        }
+        self.expect_rparen()?; // end term with args
+
+        self.expect_lparen()?; // provide
+        if !self.eat_sym_str("provide")? {
+            return Err(self.error(
+                pos,
+                "Invalid spec: expected (spec (<term> <args>) (provide ...) ...)".to_string(),
+            ));
+        };
+        let mut provides = vec![];
+        while !self.is_rparen() {
+            provides.push(self.parse_spec_expr()?);
+        }
+        self.expect_rparen()?; // end provide
+
+        let requires = if self.is_lparen() {
+            self.expect_lparen()?;
+            if !self.eat_sym_str("require")? {
+                return Err(self.error(
+                    pos,
+                    "Invalid spec: expected (spec (<term> <args>) (provide ...) (require ...))"
+                        .to_string(),
+                ));
+            }
+            let mut require = vec![];
+            while !self.is_rparen() {
+                require.push(self.parse_spec_expr()?);
+            }
+            self.expect_rparen()?; // end require
+            require
+        } else {
+            vec![]
+        };
+
+        Ok(Spec {
+            term,
+            args,
+            provides,
+            requires,
+        })
+    }
+
+    fn parse_spec_expr(&mut self) -> Result<SpecExpr> {
+        let pos = self.pos();
+        if self.is_spec_bit_vector() {
+            let (val, width) = self.parse_spec_bit_vector()?;
+            return Ok(SpecExpr::ConstBitVec { val, width, pos });
+        } else if self.is_int() {
+            return Ok(SpecExpr::ConstInt {
+                val: self.expect_int()?,
+                pos,
+            });
+        } else if self.is_spec_bool() {
+            let val = self.parse_spec_bool()?;
+            return Ok(SpecExpr::ConstBool { val, pos });
+        } else if self.is_sym() {
+            let var = self.parse_ident()?;
+            return Ok(SpecExpr::Var { var, pos });
+        } else if self.is_lparen() {
+            self.expect_lparen()?;
+            if self.eat_sym_str("switch")? {
+                let mut args = vec![];
+                args.push(self.parse_spec_expr()?);
+                while !self.is_rparen() {
+                    self.expect_lparen()?;
+                    let l = Box::new(self.parse_spec_expr()?);
+                    let r = Box::new(self.parse_spec_expr()?);
+                    self.expect_rparen()?;
+                    args.push(SpecExpr::Pair { l, r });
+                }
+                self.expect_rparen()?;
+                return Ok(SpecExpr::Op {
+                    op: SpecOp::Switch,
+                    args,
+                    pos,
+                });
+            }
+            if self.is_sym() && !self.is_spec_bit_vector() {
+                let sym = self.expect_symbol()?;
+                if let Ok(op) = self.parse_spec_op(sym.as_str()) {
+                    let mut args: Vec<SpecExpr> = vec![];
+                    while !self.is_rparen() {
+                        args.push(self.parse_spec_expr()?);
+                    }
+                    self.expect_rparen()?;
+                    return Ok(SpecExpr::Op { op, args, pos });
+                };
+                let ident = self.str_to_ident(pos, &sym)?;
+                if self.is_rparen() {
+                    self.expect_rparen()?;
+                    return Ok(SpecExpr::Enum { name: ident });
+                };
+            }
+            // Unit
+            if self.is_rparen() {
+                self.expect_rparen()?;
+                return Ok(SpecExpr::ConstUnit { pos });
+            }
+        }
+        Err(self.error(pos, "Unexpected spec expression".into()))
+    }
+
+    fn parse_spec_op(&mut self, s: &str) -> Result<SpecOp> {
+        let pos = self.pos();
+        match s {
+            "=" => Ok(SpecOp::Eq),
+            "and" => Ok(SpecOp::And),
+            "not" => Ok(SpecOp::Not),
+            "=>" => Ok(SpecOp::Imp),
+            "or" => Ok(SpecOp::Or),
+            "<=" => Ok(SpecOp::Lte),
+            "<" => Ok(SpecOp::Lt),
+            ">=" => Ok(SpecOp::Gte),
+            ">" => Ok(SpecOp::Gt),
+            "bvnot" => Ok(SpecOp::BVNot),
+            "bvand" => Ok(SpecOp::BVAnd),
+            "bvor" => Ok(SpecOp::BVOr),
+            "bvxor" => Ok(SpecOp::BVXor),
+            "bvneg" => Ok(SpecOp::BVNeg),
+            "bvadd" => Ok(SpecOp::BVAdd),
+            "bvsub" => Ok(SpecOp::BVSub),
+            "bvmul" => Ok(SpecOp::BVMul),
+            "bvudiv" => Ok(SpecOp::BVUdiv),
+            "bvurem" => Ok(SpecOp::BVUrem),
+            "bvsdiv" => Ok(SpecOp::BVSdiv),
+            "bvsrem" => Ok(SpecOp::BVSrem),
+            "bvshl" => Ok(SpecOp::BVShl),
+            "bvlshr" => Ok(SpecOp::BVLshr),
+            "bvashr" => Ok(SpecOp::BVAshr),
+            "bvsaddo" => Ok(SpecOp::BVSaddo),
+            "bvule" => Ok(SpecOp::BVUle),
+            "bvult" => Ok(SpecOp::BVUlt),
+            "bvugt" => Ok(SpecOp::BVUgt),
+ "bvuge" => Ok(SpecOp::BVUge), + "bvslt" => Ok(SpecOp::BVSlt), + "bvsle" => Ok(SpecOp::BVSle), + "bvsgt" => Ok(SpecOp::BVSgt), + "bvsge" => Ok(SpecOp::BVSge), + "rotr" => Ok(SpecOp::Rotr), + "rotl" => Ok(SpecOp::Rotl), + "extract" => Ok(SpecOp::Extract), + "zero_ext" => Ok(SpecOp::ZeroExt), + "sign_ext" => Ok(SpecOp::SignExt), + "concat" => Ok(SpecOp::Concat), + "conv_to" => Ok(SpecOp::ConvTo), + "int2bv" => Ok(SpecOp::Int2BV), + "bv2int" => Ok(SpecOp::BV2Int), + "widthof" => Ok(SpecOp::WidthOf), + "if" => Ok(SpecOp::If), + "switch" => Ok(SpecOp::Switch), + "subs" => Ok(SpecOp::Subs), + "popcnt" => Ok(SpecOp::Popcnt), + "rev" => Ok(SpecOp::Rev), + "cls" => Ok(SpecOp::Cls), + "clz" => Ok(SpecOp::Clz), + "load_effect" => Ok(SpecOp::LoadEffect), + "store_effect" => Ok(SpecOp::StoreEffect), + x => Err(self.error(pos, format!("Not a valid spec operator: {x}"))), + } + } + + fn parse_spec_bit_vector(&mut self) -> Result<(i128, i8)> { + let pos = self.pos(); + let s = self.expect_symbol()?; + if let Some(s) = s.strip_prefix("#b") { + match i128::from_str_radix(s, 2) { + Ok(i) => Ok((i, s.len() as i8)), + Err(_) => Err(self.error(pos, "Not a constant binary bit vector".to_string())), + } + } else if let Some(s) = s.strip_prefix("#x") { + match i128::from_str_radix(s, 16) { + Ok(i) => Ok((i, (s.len() as i8) * 4)), + Err(_) => Err(self.error(pos, "Not a constant hex bit vector".to_string())), + } + } else { + Err(self.error( + pos, + "Not a constant bit vector; must start with `#x` (hex) or `#b` (binary)" + .to_string(), + )) + } + } + + fn parse_spec_bool(&mut self) -> Result { + let pos = self.pos(); + let s = self.expect_symbol()?; + match s.as_str() { + "$true" => Ok(1), + "$false" => Ok(0), + x => Err(self.error(pos, format!("Not a valid spec boolean: {x}"))), + } + } + + fn parse_model(&mut self) -> Result { + let pos = self.pos(); + let name = self.parse_ident()?; + self.expect_lparen()?; // body + let val = if self.eat_sym_str("type")? { + let ty = self.parse_model_type(); + ModelValue::TypeValue(ty?) + } else if self.eat_sym_str("enum")? { + let mut variants = vec![]; + let mut has_explicit_value = false; + let mut implicit_idx = None; + + while !self.is_rparen() { + self.expect_lparen()?; // enum value + let name = self.parse_ident()?; + let val = if self.is_rparen() { + // has implicit enum value + if has_explicit_value { + return Err(self.error( + pos, + format!( + "Spec enum has unexpected implicit value after implicit value." + ), + )); + } + implicit_idx = Some(if let Some(idx) = implicit_idx { + idx + 1 + } else { + 0 + }); + SpecExpr::ConstInt { + val: implicit_idx.unwrap(), + pos, + } + } else { + if implicit_idx.is_some() { + return Err(self.error( + pos, + format!( + "Spec enum has unexpected explicit value after implicit value." + ), + )); + } + has_explicit_value = true; + self.parse_spec_expr()? + }; + self.expect_rparen()?; + variants.push((name, val)); + } + ModelValue::EnumValues(variants) + } else { + return Err(self.error(pos, "Model must be a type or enum".to_string())); + }; + + self.expect_rparen()?; // end body + Ok(Model { name, val }) + } + + fn parse_model_type(&mut self) -> Result { + let pos = self.pos(); + if self.eat_sym_str("Bool")? { + Ok(ModelType::Bool) + } else if self.eat_sym_str("Int")? { + Ok(ModelType::Int) + } else if self.eat_sym_str("Unit")? { + Ok(ModelType::Unit) + } else if self.is_lparen() { + self.expect_lparen()?; + let width = if self.eat_sym_str("bv")? 
+                if self.is_rparen() {
+                    None
+                } else if self.is_int() {
+                    Some(usize::try_from(self.expect_int()?).map_err(|err| {
+                        self.error(pos, format!("Invalid BitVector width: {err}"))
+                    })?)
+                } else {
+                    return Err(self.error(pos, "Badly formed BitVector (bv ...)".to_string()));
+                }
+            } else {
+                return Err(self.error(pos, "Badly formed BitVector (bv ...)".to_string()));
+            };
+            self.expect_rparen()?;
+            Ok(ModelType::BitVec(width))
+        } else {
+            Err(self.error(
+                pos,
+                "Model type must be a Bool, Int, or BitVector (bv ...)".to_string(),
+            ))
+        }
+    }
+
+    fn parse_form(&mut self) -> Result<Form> {
+        let pos = self.pos();
+        let name = self.parse_ident()?;
+        let signatures = self.parse_signatures()?;
+        Ok(Form {
+            name,
+            signatures,
+            pos,
+        })
+    }
+
+    fn parse_signatures(&mut self) -> Result<Vec<Signature>> {
+        let mut signatures = vec![];
+        while !self.is_rparen() {
+            signatures.push(self.parse_signature()?);
+        }
+        Ok(signatures)
+    }
+
+    fn parse_signature(&mut self) -> Result<Signature> {
+        self.expect_lparen()?;
+        let pos = self.pos();
+        let args = self.parse_tagged_types("args")?;
+        let ret = self.parse_tagged_type("ret")?;
+        let canonical = self.parse_tagged_type("canon")?;
+        self.expect_rparen()?;
+        Ok(Signature {
+            args,
+            ret,
+            canonical,
+            pos,
+        })
+    }
+
+    fn parse_tagged_types(&mut self, tag: &str) -> Result<Vec<ModelType>> {
+        self.expect_lparen()?;
+        let pos = self.pos();
+        if !self.eat_sym_str(tag)? {
+            return Err(self.error(pos, format!("Invalid {tag}: expected ({tag} ...)")));
+        };
+        let mut params = vec![];
+        while !self.is_rparen() {
+            params.push(self.parse_model_type()?);
+        }
+        self.expect_rparen()?;
+        Ok(params)
+    }
+
+    fn parse_tagged_type(&mut self, tag: &str) -> Result<ModelType> {
+        self.expect_lparen()?;
+        let pos = self.pos();
+        if !self.eat_sym_str(tag)? {
+            return Err(self.error(pos, format!("Invalid {tag}: expected ({tag} <type>)")));
+        };
+        let ty = self.parse_model_type()?;
+        self.expect_rparen()?;
+        Ok(ty)
+    }
+
+    fn parse_instantiation(&mut self) -> Result<Instantiation> {
+        let pos = self.pos();
+        let term = self.parse_ident()?;
+        // An instantiation either has an explicit signatures list, which
+        // opens with a left paren, or an identifier referencing a
+        // predefined set of signatures (a form).
+        if self.is_lparen() {
+            let signatures = self.parse_signatures()?;
+            Ok(Instantiation {
+                term,
+                form: None,
+                signatures,
+                pos,
+            })
+        } else {
+            let form = self.parse_ident()?;
+            Ok(Instantiation {
+                term,
+                form: Some(form),
+                signatures: vec![],
+                pos,
+            })
+        }
+    }
+
     fn parse_extern(&mut self) -> Result<Extern> {
         let pos = self.pos();
         if self.eat_sym_str("constructor")? {
@@ -371,6 +765,14 @@ impl<'a> Parser<'a> {
 
     fn parse_rule(&mut self) -> Result<Rule> {
         let pos = self.pos();
+        let name = if self.is_sym() {
+            Some(
+                self.parse_ident()
+                    .map_err(|err| self.error(pos, format!("Invalid rule name: {err:?}")))?,
+            )
+        } else {
+            None
+        };
         let prio = if self.is_int() {
             Some(
                 i64::try_from(self.expect_int()?)
@@ -393,6 +795,7 @@
             expr,
             pos,
             prio,
+            name,
         });
     }
 }
diff --git a/cranelift/isle/isle/src/sema.rs b/cranelift/isle/isle/src/sema.rs
index 41533b58901a..4668cb254872 100644
--- a/cranelift/isle/isle/src/sema.rs
+++ b/cranelift/isle/isle/src/sema.rs
@@ -189,6 +189,10 @@ pub struct TermEnv {
     /// defined implicit type-converter terms we can try to use to fit
     /// types together.
     pub converters: StableMap<(TypeId, TypeId), TermId>,
+
+    /// Flag for whether to expand internal extractors in the
+    /// translation from the AST to sema.
+    pub expand_internal_extractors: bool,
 }
 
 /// A term.
@@ -473,6 +477,8 @@ pub struct Rule {
     pub prio: i64,
     /// The source position where this rule is defined.
     pub pos: Pos,
+    /// The optional name for this rule.
+    pub name: Option<Sym>,
 }
 
 /// A name bound in a pattern or let-expression.
@@ -1098,7 +1104,8 @@ impl TypeEnv {
         self.sym_map.get(&ident.0).copied()
     }
 
-    fn get_type_by_name(&self, sym: &ast::Ident) -> Option<TypeId> {
+    /// Lookup type by name.
+    pub fn get_type_by_name(&self, sym: &ast::Ident) -> Option<TypeId> {
         self.intern(sym)
             .and_then(|sym| self.type_map.get(&sym))
             .copied()
@@ -1152,12 +1159,17 @@ impl Bindings {
 
 impl TermEnv {
     /// Construct the term environment from the AST and the type environment.
-    pub fn from_ast(tyenv: &mut TypeEnv, defs: &[ast::Def]) -> Result<TermEnv, Vec<Error>> {
+    pub fn from_ast(
+        tyenv: &mut TypeEnv,
+        defs: &[ast::Def],
+        expand_internal_extractors: bool,
+    ) -> Result<TermEnv, Vec<Error>> {
         let mut env = TermEnv {
             terms: vec![],
             term_map: StableMap::new(),
             rules: vec![],
             converters: StableMap::new(),
+            expand_internal_extractors,
         };
 
         env.collect_pragmas(defs);
@@ -1757,6 +1769,7 @@
                     vars: bindings.seen,
                     prio,
                     pos,
+                    name: rule.name.as_ref().map(|i| tyenv.intern_mut(i)),
                 });
             }
             _ => {}
@@ -2011,13 +2024,15 @@
                 extractor_kind: Some(ExtractorKind::InternalExtractor { ref template }),
                 ..
             } => {
-                // Expand the extractor macro! We create a map
-                // from macro args to AST pattern trees and
-                // then evaluate the template with these
-                // substitutions.
-                log!("internal extractor macro args = {:?}", args);
-                let pat = template.subst_macro_args(&args)?;
-                return self.translate_pattern(tyenv, &pat, expected_ty, bindings);
+                if self.expand_internal_extractors {
+                    // Expand the extractor macro! We create a map
+                    // from macro args to AST pattern trees and
+                    // then evaluate the template with these
+                    // substitutions.
+                    log!("internal extractor macro args = {:?}", args);
+                    let pat = template.subst_macro_args(&args)?;
+                    return self.translate_pattern(tyenv, &pat, expected_ty, bindings);
+                }
             }
             TermKind::Decl {
                 extractor_kind: None,
@@ -2372,7 +2387,8 @@
         Some(IfLet { lhs, rhs })
     }
 
-    fn get_term_by_name(&self, tyenv: &TypeEnv, sym: &ast::Ident) -> Option<TermId> {
+    /// Lookup term by name.
+    pub fn get_term_by_name(&self, tyenv: &TypeEnv, sym: &ast::Ident) -> Option<TermId> {
         tyenv
             .intern(sym)
             .and_then(|sym| self.term_map.get(&sym))
diff --git a/cranelift/isle/veri/README.md b/cranelift/isle/veri/README.md
new file mode 100644
index 000000000000..55074defc5b8
--- /dev/null
+++ b/cranelift/isle/veri/README.md
@@ -0,0 +1,287 @@
+# Crocus: An SMT-based ISLE verification tool
+
+This directory contains Crocus, a tool for verifying instruction lowering and transformation rules written in ISLE. Crocus uses an underlying SMT solver to model values in ISLE rules as logical bitvectors, searching over all possible inputs to find potential soundness counterexamples. The motivation and project context are described in detail in our ASPLOS 2024 paper: [Lightweight, Modular Verification for WebAssembly-to-Native Instruction Selection](https://dl.acm.org/doi/10.1145/3617232.3624862).
+
+Currently[^1], Crocus requires every ISLE term used within a rule to have a user-provided specification, or `spec`, that provides the logical preconditions and effects of the term (`require` and `provide` blocks).
+The syntax for these specs is embedded as an optional extension to ISLE itself: specs are written in the ISLE source files.
+
+[^1]: We have work in progress to lower this annotation burden.
+
+## Running on an individual rule
+
+The easiest way to run Crocus on an individual ISLE rule is to give that rule a name.
+
+For example, to verify the following `aarch64` rule:
+
+```
+(rule -1 (lower (has_type (fits_in_64 ty) (band x y)))
+      (alu_rs_imm_logic_commutative (ALUOp.And) ty x y))
+```
+
+We can add a name (before the priority):
+```
+(rule band_fits_in_64 -1 (lower (has_type (fits_in_64 ty) (band x y)))
+      (alu_rs_imm_logic_commutative (ALUOp.And) ty x y))
+```
+
+We also require that the relevant (outermost) CLIF term on the left-hand side has a "type instantiation" specifying the types (e.g., bitwidths) we are interested in verifying. In this case, this is provided with:
+
+```
+(form
+  bv_binary_8_to_64
+  ((args (bv 8) (bv 8)) (ret (bv 8)) (canon (bv 8)))
+  ((args (bv 16) (bv 16)) (ret (bv 16)) (canon (bv 16)))
+  ((args (bv 32) (bv 32)) (ret (bv 32)) (canon (bv 32)))
+  ((args (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64)))
+)
+
+(instantiate band bv_binary_8_to_64)
+```
+
+We can then verify the rule with the following command, using `-t` or `--term` to specify the relevant CLIF instruction and `--names` to specify the name of the rule:
+
+```
+cargo run -- --codegen ../../../codegen --aarch64 -t band --names band_fits_in_64
+```
+
+With the expected output:
+
+```
+Writing generated file: /Users/avh/research/wasmtime/cranelift/isle/veri/veri_engine/output/clif_opt.isle
+Writing generated file: /Users/avh/research/wasmtime/cranelift/isle/veri/veri_engine/output/clif_lower.isle
+Verification succeeded for band_fits_in_64, width 8
+Verification succeeded for band_fits_in_64, width 16
+Verification succeeded for band_fits_in_64, width 32
+Verification succeeded for band_fits_in_64, width 64
+```
+
+If the rule is unsound, Crocus reports counterexamples. For instance, if we change the rule to the following:
+
+```
+(rule band_fits_in_64 -1 (lower (has_type (fits_in_64 ty) (band x y)))
+      (alu_rs_imm_logic_commutative (ALUOp.Or) ty x y))
+```
+
+Then the output would include counterexamples, like so:
+
+```
+Verification failed for band_fits_in_64, width 8
+Counterexample summary
+(lower (has_type (fits_in_64 [ty|8]) (band [x|#x01|0b00000001] [y|#x00|0b00000000])))
+=>
+(output_reg (alu_rs_imm_logic_commutative (ALUOp.Orr) [ty|8] [x|#x01|0b00000001] [y|#x00|0b00000000]))
+
+#x00|0b00000000 =>
+#x01|0b00000001
+
+Failed condition:
+(= ((_ extract 7 0) lower__13) ((_ extract 7 0) output_reg__16))
+```
+
+## The annotation language
+
+The annotation language maps closely to the [SMT-LIB](https://smt-lib.org) theories of bitvectors and booleans, with several added conveniences.
+
+### Top-level constructs
+
+We extend the ISLE parser with the following top-level constructs:
+
+- `model` specifies how an ISLE type maps to an SMT type. For example, the following ISLE type definitions along with their models specify how booleans and `u8`s are modeled:
+```
+(model u8 (type (bv 8)))
+(type u8 (primitive u8))
+(model bool (type Bool))
+(type bool (primitive bool))
+```
+
+Models can be `Bool`, `Int`, or `(bv)` with or without a specific bitwidth. If the bitwidth is not provided, Crocus type inference will verify the rule with all possible inferred widths.
+
+- As in the example above, `instantiate` and `form` specify which type instantiations should be considered for a verification.
+
+- `spec` terms provide specifications for ISLE declarations, which can correspond to ISLE instructions, ISA instructions, external constructors/extractors defined in Rust, or transient, ISLE-only terms. Specs take the form `(spec (term arg1 ... argN) (provide p1 ... pM) (require r1 ... rO))`, providing the `term` name (which must be a defined ISLE decl), fresh variables `arg1 ... argN` to refer to the arguments, and zero or more provide and require expressions `p1, ..., pM, r1, ..., rO` that take the form of expressions with operations as described below. `spec` terms use the keyword `result` to constrain the return value of the term, as in the example below.
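+
+For instance, the spec for the prelude's `fits_in_64` extractor (added elsewhere in this patch) provides that the result is equal to the argument, and requires that the type's width be at most 64:
+
+```
+(spec (fits_in_64 arg)
+    (provide (= result arg))
+    (require (<= arg 64)))
+(decl fits_in_64 (Type) Type)
+```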
+
+### General SMT-LIB operations
+
+The following terms exactly match their general SMT-LIB meaning:
+
+- `=`: equality
+- `and`: boolean and
+- `or`: boolean or
+- `not`: boolean negation
+- `=>`: boolean implication
+
+We additionally support variadic uses of the `and` and `or` operations (these desugar to the binary SMT-LIB versions as expected).
+
+### Integer operations
+
+The following terms exactly match the [SMT-LIB theory `Ints`](https://smt-lib.org/theories-Ints.shtml).
+
+- `<`
+- `<=`
+- `>`
+- `>=`
+
+In specs, integer operations are primarily used for comparing the number of bits in an ISLE type.
+
+### Bitvector operations
+
+The following terms exactly match the [SMT-LIB theory `FixedSizeBitVectors`](https://smt-lib.org/theories-FixedSizeBitVectors.shtml).
+
+These operations are typically used in specs for operations on ISLE `Value`s.
+
+- `bvnot`
+- `bvand`
+- `bvor`
+- `bvxor`
+- `bvneg`
+- `bvadd`
+- `bvsub`
+- `bvmul`
+- `bvudiv`
+- `bvurem`
+- `bvsdiv`
+- `bvsrem`
+- `bvshl`
+- `bvlshr`
+- `bvashr`
+- `bvsaddo`
+- `bvule`
+- `bvult`
+- `bvugt`
+- `bvuge`
+- `bvslt`
+- `bvsle`
+- `bvsgt`
+- `bvsge`
+
+### Custom bitvector operations
+
+- `int2bv`: equivalent to SMT-LIB `nat2bv`.
+- `bv2int`: equivalent to SMT-LIB `bv2nat`.
+- `extract`: `(extract h l e)` where `h` and `l` are integer literals and `e` is a bitvector is equivalent to SMT-LIB `((_ extract h l) e)`.
+- `zero_ext`: `(zero_ext w e)` where `w : Int` and `e : (bv N)` is equivalent to SMT-LIB `((_ zero_extend M) e)` where `M = w - N`.
+- `sign_ext`: `(sign_ext w e)` where `w : Int` and `e : (bv N)` is equivalent to SMT-LIB `((_ sign_extend M) e)` where `M = w - N`.
+- `rotr`: `(rotr e1 e2)` where `e1, e2 : (bv N)` resolves to `(bvor (bvlshr e1 e3) (bvshl e1 (bvsub (nat2bv N N) e3)))`, where `e3 = (bvurem e2 (nat2bv N N))`. Bitvector rotate right.
+- `rotl`: `(rotl e1 e2)` where `e1, e2 : (bv N)` resolves to `(bvor (bvshl e1 e3) (bvlshr e1 (bvsub (nat2bv N N) e3)))`, where `e3 = (bvurem e2 (nat2bv N N))`. Bitvector rotate left.
+- `concat`: `(concat e_1 ... e_N)` resolves to `(concat e_1 (concat e_2 (concat ... e_N)))`. That is, this is a variadic version of the SMT-LIB `concat` operation.
+- `widthof`: `(widthof e)` where `e : (bv N)` resolves to `N`. That is, returns the bitwidth of a supplied bitvector as an integer.
+- `subs`: `(subs e1 e2)` returns the results of a subtraction with flags.
+- `popcnt`: `(popcnt e)` where `e : (bv N)` returns the count of non-zero bits in `e`.
+- `rev`: `(rev e)` where `e : (bv N)` reverses the order of bits in `e`.
+- `cls`: `(cls e)` where `e : (bv N)` returns the count of leading sign bits in `e`.
+- `clz`: `(clz e)` where `e : (bv N)` returns the count of leading zero bits in `e`.
+- `conv_to`: `(conv_to w e)` where `w : Int` and `e : (bv N)` converts the bitvector `e` to the width `w`, leaving the upper bits unspecified in the case of an extension (a usage example follows this list). That is, there are 3 cases:
+    1. `w = N`: resolves to `e`.
+    2. `w < N`: resolves to `((_ extract M 0) e)` where `M = w - 1`.
+    3. `w > N`: resolves to `(concat e2 e)` where `e2` is a fresh bitvector with `w - N` unspecified bits.
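+
+As one illustration of several of these operations together, the examples in this patch spec a helper that places a `Value` into a register and zero-extends it to 32 bits, using `widthof`, `zero_ext`, and `conv_to`:
+
+```
+(spec (put_in_reg_zext32 arg)
+    (provide
+      (= result
+         (if (<= (widthof arg) 32)
+             (conv_to 64 (zero_ext 32 arg))
+             (conv_to 64 arg)))))
+(decl put_in_reg_zext32 (Value) Reg)
+```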
+
+### Custom memory operations
+
+- `load_effect`: `(load_effect flags size address)` where `flags : (bv 16)`, `size : Int`, and `address : (bv 64)` models a load of `size` bits from address `address` with flags `flags`. Only one `load_effect` may be used per left-hand or right-hand side of a rule.
+- `store_effect`: `(store_effect flags size val address)` where `flags : (bv 16)`, `size : Int`, `val : (bv size)`, and `address : (bv 64)` models a store of `val` (with `size` bits) to address `address` with flags `flags`. Only one `store_effect` may be used per left-hand or right-hand side of a rule.
+
+### Custom control operations
+
+- `if`: equivalent to SMT-LIB `ite`.
+- `switch`: `(switch c (m1 e1) ... (mN eN))` resolves to a series of nested `ite` expressions,
+`(ite (= c m1) e1 (ite (= c m2) e2 (ite ... eN)))`. It additionally adds a verification condition that some case must match, that is, `(or (= c m1) (or (= c m2) ... (= c mN)))`.
+
+## Example
+
+Continuing the `band_fits_in_64` example from before, the full required specifications are placed in the relevant ISLE files.
+
+```
+(rule band_fits_in_64 -1 (lower (has_type (fits_in_64 ty) (band x y)))
+      (alu_rs_imm_logic_commutative (ALUOp.And) ty x y))
+```
+
+In `inst_specs.isle`:
+
+```
+;; The band spec uses the bitvector `bvand` on its arguments.
+(spec (band x y)
+    (provide (= result (bvand x y))))
+(instantiate band bv_binary_8_to_64)
+```
+
+In `prelude_lower.isle`:
+
+```
+;; has_type checks that the integer modeling the type matches the Inst bitwidth.
+(spec (has_type ty arg)
+    (provide (= result arg))
+    (require (= ty (widthof arg))))
+(decl has_type (Type Inst) Inst)
+
+;; fits_in_64 checks that the integer modeling the width is less than or equal to 64.
+(spec (fits_in_64 arg)
+    (provide (= result arg))
+    (require (<= arg 64)))
+(decl fits_in_64 (Type) Type)
+```
+
+In `aarch64/lower.isle`:
+
+```
+;; lower is just modeled as an identity function.
+(spec (lower arg) (provide (= result arg)))
+(decl partial lower (Inst) InstOutput)
+```
+
+In `aarch64/inst.isle`:
+
+```
+;; The model maps each ALUOp enum variant to an 8-bit bitvector value.
+(model ALUOp (enum
+  (Add #x00) ;; 0
+  (Sub #x01)
+  (Orr #x02)
+  (OrrNot #x03)
+  (And #x04)
+  (AndNot #x05)
+  (Eor #x06)
+  (EorNot #x07)
+  (SubS #x08)
+  (SDiv #x09)
+  (UDiv #x0a)
+  (RotR #x0b)
+  (Lsr #x0c)
+  (Asr #x0d)
+  (Lsl #x0e)))
+
+;; alu_rs_imm_logic_commutative uses a conv_to and switch.
+(spec (alu_rs_imm_logic_commutative op t a b)
+  (provide
+    (= result
+       (conv_to 64
+         (switch op
+           ((ALUOp.Orr) (bvor a b))
+           ((ALUOp.And) (bvand a b))
+           ((ALUOp.Eor) (bvxor a b)))))))
+(decl alu_rs_imm_logic_commutative (ALUOp Type Value Value) Reg)
+```
+
+## Testing
+
+To see all of our current output, run tests without capturing standard out:
+```bash
+cargo test -- --nocapture
+```
+
+To run a specific test, you can provide the test name (most rules are tested in `cranelift/isle/veri/veri_engine/tests/veri.rs`). Set `RUST_LOG=DEBUG` to see more detailed output on test cases that expect success.
+ +```bash +RUST_LOG=DEBUG cargo test test_named_band_fits_in_64 -- --nocapture +``` + +To see the x86-64 CVE repro, run: + +```bash +RUST_LOG=debug cargo run -- --codegen ../../../codegen --noprelude -t amode_add -i examples/x86/amode_add_uextend_shl.isle +``` + +To see the x86-64 CVE variant with a 32-bit address, run: +```bash +RUST_LOG=debug cargo run -- --codegen ../../../codegen --noprelude -t amode_add -i examples/x86/amode_add_shl.isle +``` \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/Cargo.toml b/cranelift/isle/veri/veri_engine/Cargo.toml new file mode 100644 index 000000000000..8460f1c1a2dd --- /dev/null +++ b/cranelift/isle/veri/veri_engine/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "veri_engine" +license = "Apache-2.0 WITH LLVM-exception" +authors = ["Alexa VanHattum", "Monica Pardeshi", "Michael McLoughlin", "Wellesley Programming Systems Lab"] +version = "0.1.0" +edition = "2021" +publish = false + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[lib] +name = "veri_engine_lib" +path = "src/lib.rs" + +[[bin]] +name = "crocus" +path = "src/main.rs" + +[dependencies] +cranelift-isle = { path = "../../isle" } +cranelift-codegen = { path = "../../../codegen", features = ["all-arch"] } +cranelift-codegen-meta = { path = "../../../codegen/meta" } +veri_ir = { path = "../veri_ir" } +easy-smt = "0.2.2" +clap = { workspace = true } +itertools = "0.12.0" +log = { workspace = true } +env_logger = { workspace = true } +anyhow = { workspace = true } + +[dev-dependencies] +strum = "0.24.0" +strum_macros = "0.24.0" diff --git a/cranelift/isle/veri/veri_engine/examples/broken/broken_32_general_rotl_to_rotr.isle b/cranelift/isle/veri/veri_engine/examples/broken/broken_32_general_rotl_to_rotr.isle new file mode 100644 index 000000000000..b0ed53a641c2 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/broken_32_general_rotl_to_rotr.isle @@ -0,0 +1,34 @@ + +(spec (lower arg) (provide (= result arg))) +(decl lower (Inst) InstOutput) + +;; Instruction formats. +(type MInst + (enum +)) + +(spec (sub ty a b) + (provide (= (bvsub a b) result))) +(decl sub (Type Reg Reg) Reg) +(extern constructor sub sub) + +(spec (zero_reg) + (provide (= (zero_ext 64 #x0000000000000000) result))) +(decl zero_reg () Reg) +(extern constructor zero_reg zero_reg) + +(spec (a64_rotr ty x y) + (provide (= result + (if (= ty 32) + (zero_ext 64 (rotr (extract 31 0 x) (extract 31 0 y))) + (rotr x y)))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_rotr (Type Reg Reg) Reg) +(extern constructor a64_rotr a64_rotr) + +;; BROKEN: order of arguments to sub flipped +;; General 32-bit case. 
+(rule (lower (has_type $I32 (rotl x y))) + (let ((amt Reg (value_regs_get y 0)) + (neg_shift Reg (sub $I32 amt (zero_reg)))) + (a64_rotr $I32 x neg_shift))) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_16_rotl_to_rotr.isle b/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_16_rotl_to_rotr.isle new file mode 100644 index 000000000000..101a5fb8b910 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_16_rotl_to_rotr.isle @@ -0,0 +1,125 @@ +(type ImmLogic (primitive ImmLogic)) +(type Imm12 (primitive Imm12)) + +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(spec (sub ty a b) (provide (= (bvsub a b) result))) +(decl sub (Type Reg Reg) Reg) +(extern constructor sub sub) + +(spec (zero_reg)(provide (= (zero_ext 64 #x0000000000000000) result))) +(decl zero_reg () Reg) +(extern constructor zero_reg zero_reg) + +(spec (extend a b c d) + (provide + (if b + (= result (sign_ext (bv2int d) (conv_to (bv2int c) a))) + (= result (zero_ext (bv2int d) (conv_to (bv2int c) a)))))) +(decl extend (Reg bool u8 u8) Reg) +(extern constructor extend extend) + +(spec (and_imm ty x y) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvand (extract 31 0 x) (extract 31 0 y)))) + (64 (bvand x (zero_ext 64 y)))))) + (require + (or + (= y (bvand y #x000fff)) + (= y (bvand y #xfff000))))) +(decl and_imm (Type Reg ImmLogic) Reg) +(extern constructor and_imm and_imm) + +;; Place a `Value` into a register, zero extending it to 32-bits +(spec (put_in_reg_zext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (zero_ext 32 arg)) + (conv_to 64 arg))))) +(decl put_in_reg_zext32 (Value) Reg) +(extern constructor put_in_reg_zext32 put_in_reg_zext32) + +;; Corresponding rust: +;; fn rotr_mask(&mut self, ty: Type) -> ImmLogic { +;; ImmLogic::maybe_from_u64((ty.bits() - 1) as u64, I32).unwrap() +;; } +;; +(spec (rotr_mask x) (provide (= (bvsub (int2bv 64 x) #x0000000000000001) result))) +(decl rotr_mask (Type) ImmLogic) +(extern constructor rotr_mask rotr_mask) + +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require + (or + (= b (bvand b #x000fff)) + (= b (bvand b #xfff000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(extern constructor sub_imm sub_imm) + +(spec (u8_into_imm12 arg) (provide (= result (zero_ext 24 arg)))) +(decl u8_into_imm12 (u8) Imm12) +(extern constructor u8_into_imm12 u8_into_imm12) + +(spec (lsr ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvlshr (extract 31 0 a) (extract 31 0 b)))) + (64 (bvlshr a b)))))) +(decl lsr (Type Reg Reg) Reg) +(extern constructor lsr lsr) + +(spec (lsl ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvshl (extract 31 0 a) (extract 31 0 b)))) + (64 (bvshl a b)))))) +(decl lsl (Type Reg Reg) Reg) +(extern constructor lsl lsl) + +(spec (orr ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvor (extract 31 0 a) (extract 31 0 b)))) + (64 (bvor a b)))))) +(decl orr (Type Reg Reg) Reg) +(extern constructor orr orr) + +;; Instruction formats. +(type MInst + (enum +)) + +;; General 8/16-bit case. 
+;; BROKEN: no negation +(rule -2 (lower (has_type (fits_in_16 ty) (rotl x y))) + (let ((amt Reg (value_regs_get y 0))) + (small_rotr ty (put_in_reg_zext32 x) amt))) + + +(spec (small_rotr t x y) + (provide + (= result + (switch t + (8 (conv_to 64 (rotr (extract 7 0 x) (extract 7 0 y)))) + (16 (conv_to 64 (rotr (extract 15 0 x) (extract 15 0 y))))))) + (require + (or (= t 8) (= t 16)) + (switch t + (8 (= (extract 31 8 x) #x000000)) + (16 (= (extract 31 16 x) #x0000))))) +(decl small_rotr (Type Reg Reg) Reg) +(extern constructor small_rotr small_rotr) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_16_with_imm_rotl_to_rotr.isle b/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_16_with_imm_rotl_to_rotr.isle new file mode 100644 index 000000000000..834665a54f81 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_16_with_imm_rotl_to_rotr.isle @@ -0,0 +1,190 @@ +(type ImmLogic (primitive ImmLogic)) +(type ImmShift (primitive ImmShift)) +(type Imm12 (primitive Imm12)) + +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(spec (sub ty a b) (provide (= (bvsub a b) result))) +(decl sub (Type Reg Reg) Reg) +(extern constructor sub sub) + +(spec (zero_reg) + (provide (= (zero_ext 64 #x0000000000000000) result))) +(decl zero_reg () Reg) +(extern constructor zero_reg zero_reg) + +(spec (extend a b c d) + (provide + (if b + (= result (sign_ext (bv2int d) (conv_to (bv2int c) a))) + (= result (zero_ext (bv2int d) (conv_to (bv2int c) a)))))) +(decl extend (Reg bool u8 u8) Reg) +(extern constructor extend extend) + +(spec (and_imm ty x y) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvand (extract 31 0 x) (extract 31 0 y)))) + (64 (bvand x (zero_ext 64 y)))))) + (require + (or + (= y (bvand y #x000fff)) + (= y (bvand y #xfff000))))) +(decl and_imm (Type Reg ImmLogic) Reg) +(extern constructor and_imm and_imm) + +;; Place a `Value` into a register, zero extending it to 32-bits +(spec (put_in_reg_zext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (zero_ext 32 arg)) + (conv_to 64 arg))))) +(decl put_in_reg_zext32 (Value) Reg) +(extern constructor put_in_reg_zext32 put_in_reg_zext32) + +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require + (or + (= b (bvand b #x000fff)) + (= b (bvand b #xfff000))))) +(decl sub_imm (Type Reg Imm12) Reg) +(extern constructor sub_imm sub_imm) + +(spec (u8_into_imm12 arg) (provide (= result (zero_ext 24 arg)))) +(decl u8_into_imm12 (u8) Imm12) +(extern constructor u8_into_imm12 u8_into_imm12) + +(spec (lsr ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvlshr (extract 31 0 a) (extract 31 0 b)))) + (64 (bvlshr a b)))))) +(decl lsr (Type Reg Reg) Reg) +(extern constructor lsr lsr) + +(spec (lsl ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvshl (extract 31 0 a) (extract 31 0 b)))) + (64 (bvshl a b)))))) +(decl lsl (Type Reg Reg) Reg) +(extern constructor lsl lsl) + +(spec (orr ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvor (extract 31 0 a) (extract 31 0 b)))) + (64 (bvor a b)))))) +(decl orr (Type Reg Reg) Reg) +(extern constructor orr orr) + + +;; fn imm_shift_from_imm64(&mut self, ty: Type, val: Imm64) -> Option { +;; let imm_value = (val.bits() as u64) & ((ty.bits() - 1) as u64); +;; ImmShift::maybe_from_u64(imm_value) +;; } + +;; Add an assertion that 
the value fits, using the constant with 56 1's then 6 0's (~63)
+(spec (imm_shift_from_imm64 ty x)
+    (provide (= result (extract 5 0 (bvand x (bvsub (int2bv 64 ty) #x0000000000000001)))))
+    (require (bvult (bvand x (bvsub (int2bv 64 ty) #x0000000000000001)) #x0000000000000040)))
+(decl pure imm_shift_from_imm64 (Type Imm64) ImmShift)
+(extern constructor imm_shift_from_imm64 imm_shift_from_imm64)
+
+;; fn negate_imm_shift(&mut self, ty: Type, mut imm: ImmShift) -> ImmShift {
+;;     let size = u8::try_from(ty.bits()).unwrap();
+;;     imm.imm = size.wrapping_sub(imm.value());
+;;     imm.imm &= size - 1;
+;;     imm
+;; }
+
+(spec (negate_imm_shift ty x)
+    (provide
+      (= result (bvand (bvsub (int2bv 6 ty) x) (bvsub (int2bv 6 ty) #b000001)))))
+(decl negate_imm_shift (Type ImmShift) ImmShift)
+(extern constructor negate_imm_shift negate_imm_shift)
+
+;; Helper for generating `lsr` instructions.
+(spec (lsr_imm ty a b)
+    (provide
+      (= result
+         (switch ty
+           (32 (conv_to 64 (bvlshr (extract 31 0 a) (zero_ext 32 b))))
+           (64 (bvlshr a (zero_ext 64 b)))))))
+(decl lsr_imm (Type Reg ImmShift) Reg)
+(extern constructor lsr_imm lsr_imm)
+(extern extractor lsr_imm lsr_imm)
+
+(spec (lsl_imm ty a b)
+    (provide
+      (= result
+         (switch ty
+           (32 (conv_to 64 (bvshl (extract 31 0 a) (zero_ext 32 b))))
+           (64 (bvshl a (zero_ext 64 b)))))))
+(decl lsl_imm (Type Reg ImmShift) Reg)
+(extern constructor lsl_imm lsl_imm)
+(extern extractor lsl_imm lsl_imm)
+
+;; fn rotr_opposite_amount(&mut self, ty: Type, val: ImmShift) -> ImmShift {
+;;     let amount = val.value() & u8::try_from(ty.bits() - 1).unwrap();
+;;     ImmShift::maybe_from_u64(u64::from(ty.bits()) - u64::from(amount)).unwrap()
+;; }
+
+(spec (rotr_opposite_amount ty x)
+    (provide
+      (= (bvsub (int2bv 6 ty) (bvand x (bvsub (int2bv 6 ty) #b000001))) result)))
+(decl rotr_opposite_amount (Type ImmShift) ImmShift)
+(extern constructor rotr_opposite_amount rotr_opposite_amount)
+
+;; Instruction formats.
+(type MInst
+  (enum
+))
+
+;; Specialization for the 8/16-bit case when the rotation amount is an immediate.
+;; BROKEN: n is not negated in RHS
+(rule -1 (lower (has_type (fits_in_16 ty) (rotl x (iconst k))))
+      (if-let n (imm_shift_from_imm64 ty k))
+      (small_rotr_imm ty (put_in_reg_zext32 x) n))
+
+(spec (small_rotr_imm t x y)
+    (provide
+      (= result
+         (switch t
+           (8 (conv_to 64 (rotr (extract 7 0 x) (zero_ext 8 y))))
+           (16 (conv_to 64 (rotr (extract 15 0 x) (zero_ext 16 y)))))))
+    (require
+      (or (= t 8) (= t 16))
+      (switch t
+        (8 (= (extract 31 8 x) #x000000))
+        (16 (= (extract 31 16 x) #x0000)))
+      (bvult y (int2bv 6 t))))
+(instantiate small_rotr_imm
+    ((args Int (bv 64) (bv 6)) (ret (bv 64)) (canon (bv 64))))
+(decl small_rotr_imm (Type Reg ImmShift) Reg)
+
+;; For a constant amount, we can instead do:
+;;
+;;    rotr rd, val, #amt
+;;
+;; =>
+;;
+;;    lsr val_rshift, val, #<amt>
+;;    lsl val_lshift, val, #<bitwidth - amt>
+;;    orr rd, val_lshift, val_rshift
+(rule (small_rotr_imm ty val amt)
+      (let ((val_rshift Reg (lsr_imm $I32 val amt))
+            (val_lshift Reg (lsl_imm $I32 val (rotr_opposite_amount ty amt))))
+        (orr $I32 val_lshift val_rshift)))
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_32_band.isle b/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_32_band.isle
new file mode 100644
index 000000000000..82f74dd76358
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_32_band.isle
@@ -0,0 +1,80 @@
+
+(spec (lower arg)
+    (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+;; Instruction formats.
+(type MInst
+  (enum
+))
+
+(model ALUOp (enum
+  (Add #x00) ;; 0
+  (Sub #x01)
+  (Orr #x02)
+  (OrrNot #x03)
+  (And #x04)
+  (AndNot #x05)
+  (Eor #x06)
+  (EorNot #x07)
+  (SubS #x08)
+  (SDiv #x09)
+  (UDiv #x0a)
+  (RotR #x0b)
+  (Lsr #x0c)
+  (Asr #x0d)
+  (Lsl #x0e)))
+
+;; An ALU operation. This can be paired with several instruction formats
+;; below (see `Inst`) in any combination.
+(type ALUOp
+  (enum
+    (Add)
+    (Sub)
+    (Orr)
+    (OrrNot)
+    (And)
+    (AndS)
+    (AndNot)
+    ;; XOR (AArch64 calls this "EOR")
+    (Eor)
+    ;; XNOR (AArch64 calls this "EOR-NOT")
+    (EorNot)
+    ;; Add, setting flags
+    (AddS)
+    ;; Sub, setting flags
+    (SubS)
+    ;; Signed multiply, high-word result
+    (SMulH)
+    ;; Unsigned multiply, high-word result
+    (UMulH)
+    (SDiv)
+    (UDiv)
+    (RotR)
+    (Lsr)
+    (Asr)
+    (Lsl)
+    ;; Add with carry
+    (Adc)
+    ;; Add with carry, setting flags
+    (AdcS)
+    ;; Subtract with carry
+    (Sbc)
+    ;; Subtract with carry, setting flags
+    (SbcS)
+))
+
+;; BROKEN: swapped AND and OR
+(spec (alu_rs_imm_logic_commutative op t a b)
+  (provide
+    (= result
+       (conv_to 64
+         (switch op
+           ((ALUOp.And) (bvor a b))
+           ((ALUOp.Orr) (bvand a b))
+           ((ALUOp.Eor) (bvxor a b)))))))
+(decl alu_rs_imm_logic_commutative (ALUOp Type Value Value) Reg)
+(extern constructor alu_rs_imm_logic_commutative alu_rs_imm_logic_commutative)
+
+(rule -1 (lower (has_type (fits_in_32 ty) (band x y)))
+      (alu_rs_imm_logic_commutative (ALUOp.And) ty x y))
\ No newline at end of file
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_32_bor.isle b/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_32_bor.isle
new file mode 100644
index 000000000000..d3113c90509e
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/broken_fits_in_32_bor.isle
@@ -0,0 +1,80 @@
+
+(spec (lower arg)
+    (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+;; Instruction formats.
+(type MInst
+  (enum
+))
+
+(model ALUOp (enum
+  (Add #x00) ;; 0
+  (Sub #x01)
+  (Orr #x02)
+  (OrrNot #x03)
+  (And #x04)
+  (AndNot #x05)
+  (Eor #x06)
+  (EorNot #x07)
+  (SubS #x08)
+  (SDiv #x09)
+  (UDiv #x0a)
+  (RotR #x0b)
+  (Lsr #x0c)
+  (Asr #x0d)
+  (Lsl #x0e)))
+
+;; An ALU operation. This can be paired with several instruction formats
+;; below (see `Inst`) in any combination.
+(type ALUOp
+  (enum
+    (Add)
+    (Sub)
+    (Orr)
+    (OrrNot)
+    (And)
+    (AndS)
+    (AndNot)
+    ;; XOR (AArch64 calls this "EOR")
+    (Eor)
+    ;; XNOR (AArch64 calls this "EOR-NOT")
+    (EorNot)
+    ;; Add, setting flags
+    (AddS)
+    ;; Sub, setting flags
+    (SubS)
+    ;; Signed multiply, high-word result
+    (SMulH)
+    ;; Unsigned multiply, high-word result
+    (UMulH)
+    (SDiv)
+    (UDiv)
+    (RotR)
+    (Lsr)
+    (Asr)
+    (Lsl)
+    ;; Add with carry
+    (Adc)
+    ;; Add with carry, setting flags
+    (AdcS)
+    ;; Subtract with carry
+    (Sbc)
+    ;; Subtract with carry, setting flags
+    (SbcS)
+))
+
+;; BROKEN: swapped AND and OR
+(spec (alu_rs_imm_logic_commutative op t a b)
+  (provide
+    (= result
+       (conv_to 64
+         (switch op
+           ((ALUOp.And) (bvor a b))
+           ((ALUOp.Orr) (bvand a b))
+           ((ALUOp.Eor) (bvxor a b)))))))
+(decl alu_rs_imm_logic_commutative (ALUOp Type Value Value) Reg)
+(extern constructor alu_rs_imm_logic_commutative alu_rs_imm_logic_commutative)
+
+(rule -1 (lower (has_type (fits_in_32 ty) (bor x y)))
+      (alu_rs_imm_logic_commutative (ALUOp.Orr) ty x y))
\ No newline at end of file
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/broken_imul.isle b/cranelift/isle/veri/veri_engine/examples/broken/broken_imul.isle
new file mode 100644
index 000000000000..afb301f8ef9c
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/broken_imul.isle
@@ -0,0 +1,59 @@
+(spec (lower arg)
+    (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+;; Instruction formats.
+(type MInst
+  (enum
+    ;; An ALU operation with two register sources and a register destination.
+    (AluRRR
+      (alu_op ALUOp)
+      (size OperandSize)
+)))
+
+;; An ALU operation. This can be paired with several instruction formats
+;; below (see `Inst`) in any combination.
+(type ALUOp
+  (enum
+    (Add)
+))
+
+(type ALUOp3
+  (enum
+    ;; Multiply-add
+    (MAdd)
+))
+
+(type OperandSize extern
+  (enum Size32
+        Size64))
+
+(decl alu_rrr (ALUOp Type Reg Reg) Reg)
+(extern constructor alu_rrr alu_rrr)
+
+(decl alu_rrrr (ALUOp3 Type Reg Reg Reg) Reg)
+(extern constructor alu_rrrr alu_rrrr)
+
+(spec (zero_reg)
+    (provide (= (zero_ext 64 #x0000000000000000) result)))
+(decl zero_reg () Reg)
+(extern constructor zero_reg zero_reg)
+
+;; Helper for calculating the `OperandSize` corresponding to a type
+(decl operand_size (Type) OperandSize)
+(rule (operand_size (fits_in_32 _ty)) (OperandSize.Size32))
+(rule (operand_size (fits_in_64 _ty)) (OperandSize.Size64))
+
+;; Helpers for generating `madd` instructions.
+(spec (madd ty a b c)
+  (provide
+    (= result
+       (if (<= ty 32)
+           (conv_to 64 (bvadd (extract 31 0 c) (bvmul (extract 31 0 a) (extract 31 0 b))))
+           (bvadd c (bvmul a b))))))
+(decl madd (Type Reg Reg Reg) Reg)
+(rule (madd ty x y z) (alu_rrrr (ALUOp3.MAdd) ty x y z))
+
+;; `i64` and smaller.
+(rule (lower (has_type (fits_in_64 ty) (imul x y))) + (madd ty y y (zero_reg))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/broken_mask_small_rotr.isle b/cranelift/isle/veri/veri_engine/examples/broken/broken_mask_small_rotr.isle new file mode 100644 index 000000000000..7a7e132d9aa7 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/broken_mask_small_rotr.isle @@ -0,0 +1,137 @@ +(type ImmLogic (primitive ImmLogic)) +(type Imm12 (primitive Imm12)) + +(spec (sub ty a b) (provide (= (bvsub a b) result))) +(decl sub (Type Reg Reg) Reg) +(extern constructor sub sub) + +(spec (zero_reg) + (provide (= (zero_ext 64 #x0000000000000000) result))) +(decl zero_reg () Reg) +(extern constructor zero_reg zero_reg) + +(spec (extend a b c d) + (provide + (if b + (= result (sign_ext (bv2int d) (conv_to (bv2int c) a))) + (= result (zero_ext (bv2int d) (conv_to (bv2int c) a)))))) +(decl extend (Reg bool u8 u8) Reg) +(extern constructor extend extend) + +(spec (and_imm ty x y) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvand (extract 31 0 x) (extract 31 0 y)))) + (64 (bvand x (zero_ext 64 y)))))) + (require + (or + (= y (bvand y #x0000000000000fff)) + (= y (bvand y #x0000000000fff000))))) +(decl and_imm (Type Reg ImmLogic) Reg) +(extern constructor and_imm and_imm) + +;; Place a `Value` into a register, zero extending it to 32-bits +(spec (put_in_reg_zext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (zero_ext 32 arg)) + (conv_to 64 arg))))) +(decl put_in_reg_zext32 (Value) Reg) +(extern constructor put_in_reg_zext32 put_in_reg_zext32) + +;; BROKEN: subtracts 2 instead of 1 +;; Corresponding rust: +;; fn rotr_mask(&mut self, ty: Type) -> ImmLogic { +;; ImmLogic::maybe_from_u64((ty.bits() - 1) as u64, I32).unwrap() +;; } +;; +(spec (rotr_mask x) (provide (= (bvsub (int2bv 64 x) #x0000000000000002) result))) +(decl rotr_mask (Type) ImmLogic) +(extern constructor rotr_mask rotr_mask) + +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require (or (bvult (zero_ext 64 b) #x0000000000000fff) + (and (bvult (zero_ext 64 b) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 b)) #b000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(extern constructor sub_imm sub_imm) + +(spec (u8_into_imm12 arg) (provide (= result (zero_ext 24 arg)))) +(decl u8_into_imm12 (u8) Imm12) +(extern constructor u8_into_imm12 u8_into_imm12) + +(spec (lsr ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvlshr (extract 31 0 a) (extract 31 0 b)))) + (64 (bvlshr a b)))))) +(decl lsr (Type Reg Reg) Reg) +(extern constructor lsr lsr) + +(spec (lsl ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvshl (extract 31 0 a) (extract 31 0 b)))) + (64 (bvshl a b)))))) +(decl lsl (Type Reg Reg) Reg) +(extern constructor lsl lsl) + +(spec (orr ty a b) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvor (extract 31 0 a) (extract 31 0 b)))) + (64 (bvor a b)))))) +(decl orr (Type Reg Reg) Reg) +(extern constructor orr orr) + +;; Instruction formats. 
+(type MInst
+  (enum
+))
+
+
+(spec (small_rotr t x y)
+  (provide
+    (= result
+       (switch t
+         (8 (conv_to 64 (rotr (extract 7 0 x) (extract 7 0 y))))
+         (16 (conv_to 64 (rotr (extract 15 0 x) (extract 15 0 y)))))))
+  (require
+    (or (= t 8) (= t 16))
+    (switch t
+      (8 (= (extract 31 8 x) #x000000))
+      (16 (= (extract 31 16 x) #x0000)))))
+(instantiate small_rotr
+  ((args Int (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))))
+(decl small_rotr (Type Reg Reg) Reg)
+
+;; For a < 32-bit rotate-right, we synthesize this as:
+;;
+;;    rotr rd, val, amt
+;;
+;;    =>
+;;
+;;    and masked_amt, amt, <bitwidth - 1>
+;;    sub tmp_sub, masked_amt, <bitwidth>
+;;    sub neg_amt, zero, tmp_sub ; neg
+;;    lsr val_rshift, val, masked_amt
+;;    lsl val_lshift, val, neg_amt
+;;    orr rd, val_lshift, val_rshift
+(rule (small_rotr ty val amt)
+  (let ((masked_amt Reg (and_imm $I32 amt (rotr_mask ty)))
+        (tmp_sub Reg (sub_imm $I32 masked_amt (u8_into_imm12 (ty_bits ty))))
+        (neg_amt Reg (sub $I32 (zero_reg) tmp_sub))
+        (val_rshift Reg (lsr $I32 val masked_amt))
+        (val_lshift Reg (lsl $I32 val neg_amt)))
+    (orr $I32 val_lshift val_rshift)))
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/broken_rule_or_small_rotr.isle b/cranelift/isle/veri/veri_engine/examples/broken/broken_rule_or_small_rotr.isle
new file mode 100644
index 000000000000..3ee6714f3c22
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/broken_rule_or_small_rotr.isle
@@ -0,0 +1,149 @@
+(type ImmLogic (primitive ImmLogic))
+(type Imm12 (primitive Imm12))
+
+(spec (sub ty a b) (provide (= (bvsub a b) result)))
+(decl sub (Type Reg Reg) Reg)
+(extern constructor sub sub)
+
+(spec (zero_reg)
+  (provide (= (zero_ext 64 #x0000000000000000) result)))
+(decl zero_reg () Reg)
+(extern constructor zero_reg zero_reg)
+
+(spec (extend a b c d)
+  (provide
+    (if b
+        (= result (sign_ext (bv2int d) (conv_to (bv2int c) a)))
+        (= result (zero_ext (bv2int d) (conv_to (bv2int c) a))))))
+(decl extend (Reg bool u8 u8) Reg)
+(extern constructor extend extend)
+
+(spec (and_imm ty x y)
+  (provide
+    (= result
+       (switch ty
+         (32 (conv_to 64 (bvand (extract 31 0 x) (extract 31 0 y))))
+         (64 (bvand x (zero_ext 64 y))))))
+  (require
+    (or (<= (bv2int y) 4094)
+        (and (<= (bv2int y) 16773119)
+             (= (extract 2 0 y) #b000)))))
+(decl and_imm (Type Reg ImmLogic) Reg)
+(extern constructor and_imm and_imm)
+
+;; Place a `Value` into a register, zero extending it to 32-bits
+(spec (put_in_reg_zext32 arg)
+  (provide
+    (= result
+       (if (<= (widthof arg) 32)
+           (conv_to 64 (zero_ext 32 arg))
+           (conv_to 64 arg)))))
+(decl put_in_reg_zext32 (Value) Reg)
+(extern constructor put_in_reg_zext32 put_in_reg_zext32)
+
+;; Corresponding rust:
+;; fn rotr_mask(&mut self, ty: Type) -> ImmLogic {
+;;     ImmLogic::maybe_from_u64((ty.bits() - 1) as u64, I32).unwrap()
+;; }
+;;
+(spec (rotr_mask x) (provide (= (bvsub (int2bv 64 x) #x0000000000000001) result)))
+(decl rotr_mask (Type) ImmLogic)
+(extern constructor rotr_mask rotr_mask)
+
+;; Note that 4094 = 0xffe and 16773119 = 0xffefff
+(spec (sub_imm ty a b)
+  (provide
+    (= result
+       (if (<= ty 32)
+           (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b)))
+           (bvsub a (zero_ext 64 b)))))
+  (require (or (bvult (zero_ext 64 b) #x0000000000000fff)
+               (and (bvult (zero_ext 64 b) #x0000000000fff000)
+                    (= (extract 2 0 (zero_ext 64 b)) #b000)))
+           (= (widthof b) 24)))
+(decl sub_imm (Type Reg Imm12) Reg)
+(extern constructor sub_imm sub_imm)
+
+(spec (u8_into_imm12 arg) (provide (= result (zero_ext 24 arg))))
+(decl u8_into_imm12 (u8) Imm12)
+(extern constructor u8_into_imm12 u8_into_imm12)
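+
+;; A rough Rust sketch (ours, mirroring `Imm12::maybe_from_u64` in the AArch64
+;; backend) of the range check that the `require` clauses on `and_imm` and
+;; `sub_imm` above approximate: a 12-bit immediate, optionally shifted left 12.
+;; fn is_valid_imm12(v: u64) -> bool {
+;;     (v & !0xfff) == 0 || (v & !(0xfff << 12)) == 0
+;; }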
+
+(spec (lsr ty a b)
+  (provide
+    (= result
+       (switch ty
+         (32 (conv_to 64 (bvlshr (extract 31 0 a) (extract 31 0 b))))
+         (64 (bvlshr a b))))))
+(decl lsr (Type Reg Reg) Reg)
+(extern constructor lsr lsr)
+
+(spec (lsl ty a b)
+  (provide
+    (= result
+       (switch ty
+         (32 (conv_to 64 (bvshl (extract 31 0 a) (extract 31 0 b))))
+         (64 (bvshl a b))))))
+(decl lsl (Type Reg Reg) Reg)
+(extern constructor lsl lsl)
+
+(spec (orr ty a b)
+  (provide
+    (= result
+       (switch ty
+         (32 (conv_to 64 (bvor (extract 31 0 a) (extract 31 0 b))))
+         (64 (bvor a b))))))
+(decl orr (Type Reg Reg) Reg)
+(extern constructor orr orr)
+
+;; Instruction formats.
+(type MInst
+  (enum
+))
+
+
+(spec (small_rotr t x y)
+  (provide
+    (= result
+       (switch t
+         (8 (conv_to 64 (rotr (extract 7 0 x) (extract 7 0 y))))
+         (16 (conv_to 64 (rotr (extract 15 0 x) (extract 15 0 y)))))))
+  (require
+    (or (= t 8) (= t 16))
+    (switch t
+      (8 (= (extract 31 8 x) #x000000))
+      (16 (= (extract 31 16 x) #x0000)))))
+(instantiate small_rotr
+  ((args Int (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))))
+(decl small_rotr (Type Reg Reg) Reg)
+
+(spec (and_reg ty a b)
+  (provide
+    (= result
+       (if (<= ty 32)
+           (conv_to 64 (bvand (extract 31 0 a) (extract 31 0 b)))
+           (bvand a b))))
+  (require (or (= ty 8) (= ty 16) (= ty 32) (= ty 64))))
+(decl and_reg (Type Reg Reg) Reg)
+(extern constructor and_reg and_reg)
+
+;; BROKEN: uses AND instead of OR
+
+;; For a < 32-bit rotate-right, we synthesize this as:
+;;
+;;    rotr rd, val, amt
+;;
+;;    =>
+;;
+;;    and masked_amt, amt, <bitwidth - 1>
+;;    sub tmp_sub, masked_amt, <bitwidth>
+;;    sub neg_amt, zero, tmp_sub ; neg
+;;    lsr val_rshift, val, masked_amt
+;;    lsl val_lshift, val, neg_amt
+;;    orr rd, val_lshift, val_rshift
+(rule (small_rotr ty val amt)
+  (let ((masked_amt Reg (and_imm $I32 amt (rotr_mask ty)))
+        (tmp_sub Reg (sub_imm $I32 masked_amt (u8_into_imm12 (ty_bits ty))))
+        (neg_amt Reg (sub $I32 (zero_reg) tmp_sub))
+        (val_rshift Reg (lsr $I32 val masked_amt))
+        (val_lshift Reg (lsl $I32 val neg_amt)))
+    (and_reg $I32 val_lshift val_rshift)))
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/broken_uextend.isle b/cranelift/isle/veri/veri_engine/examples/broken/broken_uextend.isle
new file mode 100644
index 000000000000..556d5c8c3cd3
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/broken_uextend.isle
@@ -0,0 +1,33 @@
+(spec (lower arg)
+  (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+;; Instruction formats.
+(type MInst
+  (enum
+    ;; A sign- or zero-extend operation.
+    (Extend
+      (rd WritableReg)
+      (rn Reg)
+      (signed bool)
+      (from_bits u8)
+      (to_bits u8))
+))
+
+;; Helper for emitting `MInst.Extend` instructions.
+;; BROKEN: zero_ext and sign_ext swapped
+(spec (extend a b c d)
+  (provide
+    (if b
+        (= result (zero_ext (bv2int d) (conv_to (bv2int c) a)))
+        (= result (sign_ext (bv2int d) (conv_to (bv2int c) a))))))
+(decl extend (Reg bool u8 u8) Reg)
+(rule (extend rn signed from_bits to_bits)
+  (let ((dst WritableReg (temp_writable_reg $I64))
+        (_ Unit (emit (MInst.Extend dst rn signed from_bits to_bits))))
+    dst))
+
+;; General rule for extending input to an output which fits in a single
+;; register.
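+;;
+;; As a quick illustration (ours) of why the swap above matters: in Rust,
+;; zero- and sign-extension of the same 8-bit pattern disagree whenever the
+;; high bit is set.
+;; fn extend_demo() {
+;;     let x: u8 = 0x80;
+;;     assert_eq!(x as u64, 0x80);                                // zero-extend
+;;     assert_eq!(x as i8 as i64 as u64, 0xffff_ffff_ffff_ff80);  // sign-extend
+;; }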
+(rule (lower (has_type (fits_in_64 out) (uextend x @ (value_type in)))) + (extend x $false (ty_bits in) (ty_bits out))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/cls/broken_cls.isle b/cranelift/isle/veri/veri_engine/examples/broken/cls/broken_cls.isle new file mode 100644 index 000000000000..c238ad03bd4e --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/cls/broken_cls.isle @@ -0,0 +1,30 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (BitRR + (op BitOp)) +)) + +(type BitOp + (enum + (Cls) +)) + +(decl bit_rr (BitOp Type Reg) Reg) +(extern constructor bit_rr bit_rr) + +(spec (a64_cls ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (cls (extract 31 0 a))) + (cls a)))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_cls (Type Reg) Reg) +(rule (a64_cls ty x) (bit_rr (BitOp.Cls) ty x)) + +(rule (lower (has_type ty (cls x))) + (a64_cls ty x)) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/cls/broken_cls16.isle b/cranelift/isle/veri/veri_engine/examples/broken/cls/broken_cls16.isle new file mode 100644 index 000000000000..c0729935f9ac --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/cls/broken_cls16.isle @@ -0,0 +1,86 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (BitRR + (op BitOp)) + (Extend + (rd WritableReg) + (rn Reg) + (signed bool) + (from_bits u8) + (to_bits u8)) +)) + +(type ALUOp + (enum + (Sub) +)) + +(type BitOp + (enum + (Cls) +)) + +(type Imm12 (primitive Imm12)) + +(decl bit_rr (BitOp Type Reg) Reg) +(extern constructor bit_rr bit_rr) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +;; Note that 4094 = 0xffe and 16773119 = 0xffefff +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require (or (bvult (zero_ext 64 b) #x0000000000000fff) + (and (bvult (zero_ext 64 b) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 b)) #b000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) + +(spec (a64_cls ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (cls (extract 31 0 a))) + (cls a)))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_cls (Type Reg) Reg) +(rule (a64_cls ty x) (bit_rr (BitOp.Cls) ty x)) + +(decl extend (Reg bool u8 u8) Reg) +(rule (extend rn signed from_bits to_bits) + (let ((dst WritableReg (temp_writable_reg $I64)) + (_ Unit (emit (MInst.Extend dst rn signed from_bits to_bits)))) + dst)) + +;; Place a `Value` into a register, zero extending it to 32-bits +(spec (put_in_reg_zext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (zero_ext 32 arg)) + (conv_to 64 arg))))) +(decl put_in_reg_zext32 (Value) Reg) +(rule (put_in_reg_zext32 val @ (value_type (fits_in_32 ty))) + (extend val $false (ty_bits ty) 32)) +(rule (put_in_reg_zext32 val @ (value_type $I32)) val) +(rule (put_in_reg_zext32 val @ (value_type $I64)) val) + +(spec (u8_into_imm12 arg) (provide (= result (zero_ext 24 arg)))) +(decl u8_into_imm12 (u8) Imm12) +(extern constructor u8_into_imm12 u8_into_imm12) + +;; A reproduction of the previously reported bug: +;; https://github.com/bytecodealliance/wasmtime/issues/3248. +;; This rule should sign extend instead of zero extending. 
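+;; To see why (our worked illustration, not from the original file): the rule
+;; below computes cls32(extend(x)) - 16. Sign-extending an i16 prepends 16
+;; copies of its sign bit, which the subtraction cancels; zero-extending does
+;; not. A Rust sketch of 32-bit "count leading sign bits":
+;; fn cls32(x: i32) -> u32 {
+;;     // Leading bits equal to the sign bit, excluding the sign bit itself.
+;;     ((x ^ (x >> 1)) as u32).leading_zeros() - 1
+;; }
+;; // cls16(-1) should be 15:
+;; //   via sign-extend: cls32(-1) - 16 == 31 - 16 == 15    (correct)
+;; //   via zero-extend: cls32(0x0000ffff) - 16 == 15 - 16  (wrong: underflows)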
+(rule (lower (has_type $I16 (cls x))) + (sub_imm $I32 (a64_cls $I32 (put_in_reg_zext32 x)) (u8_into_imm12 16))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/cls/broken_cls8.isle b/cranelift/isle/veri/veri_engine/examples/broken/cls/broken_cls8.isle new file mode 100644 index 000000000000..744c1d6bbbc9 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/cls/broken_cls8.isle @@ -0,0 +1,86 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (BitRR + (op BitOp)) + (Extend + (rd WritableReg) + (rn Reg) + (signed bool) + (from_bits u8) + (to_bits u8)) +)) + +(type ALUOp + (enum + (Sub) +)) + +(type BitOp + (enum + (Cls) +)) + +(type Imm12 (primitive Imm12)) + +(decl bit_rr (BitOp Type Reg) Reg) +(extern constructor bit_rr bit_rr) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +;; Note that 4094 = 0xffe and 16773119 = 0xffefff +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require (or (bvult (zero_ext 64 b) #x0000000000000fff) + (and (bvult (zero_ext 64 b) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 b)) #b000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) + +(spec (a64_cls ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (cls (extract 31 0 a))) + (cls a)))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_cls (Type Reg) Reg) +(rule (a64_cls ty x) (bit_rr (BitOp.Cls) ty x)) + +(decl extend (Reg bool u8 u8) Reg) +(rule (extend rn signed from_bits to_bits) + (let ((dst WritableReg (temp_writable_reg $I64)) + (_ Unit (emit (MInst.Extend dst rn signed from_bits to_bits)))) + dst)) + +;; Place a `Value` into a register, zero extending it to 32-bits +(spec (put_in_reg_zext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (zero_ext 32 arg)) + (conv_to 64 arg))))) +(decl put_in_reg_zext32 (Value) Reg) +(rule (put_in_reg_zext32 val @ (value_type (fits_in_32 ty))) + (extend val $false (ty_bits ty) 32)) +(rule (put_in_reg_zext32 val @ (value_type $I32)) val) +(rule (put_in_reg_zext32 val @ (value_type $I64)) val) + +(spec (u8_into_imm12 arg) (provide (= result (zero_ext 24 arg)))) +(decl u8_into_imm12 (u8) Imm12) +(extern constructor u8_into_imm12 u8_into_imm12) + +;; A reproduction of the previously reported bug: +;; https://github.com/bytecodealliance/wasmtime/issues/3248. +;; This rule should sign extend instead of zero extending. 
+(rule (lower (has_type $I8 (cls x))) + (sub_imm $I32 (a64_cls $I32 (put_in_reg_zext32 x)) (u8_into_imm12 24))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/clz/broken_clz.isle b/cranelift/isle/veri/veri_engine/examples/broken/clz/broken_clz.isle new file mode 100644 index 000000000000..b6de2c2aef03 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/clz/broken_clz.isle @@ -0,0 +1,28 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (BitRR + (op BitOp)) +)) + +(type BitOp + (enum + (Clz) +)) + +(decl bit_rr (BitOp Type Reg) Reg) +(extern constructor bit_rr bit_rr) + +;; Broken: no distinction on ty +(spec (a64_clz ty a) + (provide + (= result (clz a))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_clz (Type Reg) Reg) +(rule (a64_clz ty x) (bit_rr (BitOp.Clz) ty x)) + +(rule (lower (has_type ty (clz x))) + (a64_clz ty x)) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/clz/broken_clz16.isle b/cranelift/isle/veri/veri_engine/examples/broken/clz/broken_clz16.isle new file mode 100644 index 000000000000..9fdf8d59309f --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/clz/broken_clz16.isle @@ -0,0 +1,83 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (BitRR + (op BitOp)) + (Extend + (rd WritableReg) + (rn Reg) + (signed bool) + (from_bits u8) + (to_bits u8)) +)) + +(type ALUOp + (enum + (Sub) +)) + +(type BitOp + (enum + (Clz) +)) + +(type Imm12 (primitive Imm12)) + +(decl bit_rr (BitOp Type Reg) Reg) +(extern constructor bit_rr bit_rr) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +;; Note that 4094 = 0xffe and 16773119 = 0xffefff +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require (or (bvult (zero_ext 64 b) #x0000000000000fff) + (and (bvult (zero_ext 64 b) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 b)) #b000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) + +(spec (a64_clz ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (clz (extract 31 0 a))) + (clz a)))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_clz (Type Reg) Reg) +(rule (a64_clz ty x) (bit_rr (BitOp.Clz) ty x)) + +(decl extend (Reg bool u8 u8) Reg) +(rule (extend rn signed from_bits to_bits) + (let ((dst WritableReg (temp_writable_reg $I64)) + (_ Unit (emit (MInst.Extend dst rn signed from_bits to_bits)))) + dst)) + +;; Place a `Value` into a register, zero extending it to 32-bits +(spec (put_in_reg_zext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (zero_ext 32 arg)) + (conv_to 64 arg))))) +(decl put_in_reg_zext32 (Value) Reg) +(rule (put_in_reg_zext32 val @ (value_type (fits_in_32 ty))) + (extend val $false (ty_bits ty) 32)) +(rule (put_in_reg_zext32 val @ (value_type $I32)) val) +(rule (put_in_reg_zext32 val @ (value_type $I64)) val) + +(spec (u8_into_imm12 arg) (provide (= result (zero_ext 24 arg)))) +(decl u8_into_imm12 (u8) Imm12) +(extern constructor u8_into_imm12 u8_into_imm12) + +(rule (lower (has_type $I16 (clz x))) + (sub_imm $I32 (a64_clz $I32 (put_in_reg_zext32 x)) (u8_into_imm12 15))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/clz/broken_clz8.isle 
b/cranelift/isle/veri/veri_engine/examples/broken/clz/broken_clz8.isle new file mode 100644 index 000000000000..bf5643d44177 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/clz/broken_clz8.isle @@ -0,0 +1,84 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (BitRR + (op BitOp)) + (Extend + (rd WritableReg) + (rn Reg) + (signed bool) + (from_bits u8) + (to_bits u8)) +)) + +(type ALUOp + (enum + (Sub) +)) + +(type BitOp + (enum + (Clz) +)) + +(type Imm12 (primitive Imm12)) + +(decl bit_rr (BitOp Type Reg) Reg) +(extern constructor bit_rr bit_rr) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +;; Note that 4094 = 0xffe and 16773119 = 0xffefff +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require (or (bvult (zero_ext 64 b) #x0000000000000fff) + (and (bvult (zero_ext 64 b) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 b)) #b000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) + +(spec (a64_clz ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (clz (extract 31 0 a))) + (clz a)))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_clz (Type Reg) Reg) +(rule (a64_clz ty x) (bit_rr (BitOp.Clz) ty x)) + +(decl extend (Reg bool u8 u8) Reg) +(rule (extend rn signed from_bits to_bits) + (let ((dst WritableReg (temp_writable_reg $I64)) + (_ Unit (emit (MInst.Extend dst rn signed from_bits to_bits)))) + dst)) + +;; BROKEN: swapped order in comparison +;; Place a `Value` into a register, zero extending it to 32-bits +(spec (put_in_reg_zext32 arg) + (provide + (= result + (if (> (widthof arg) 32) + (conv_to 64 (zero_ext 32 arg)) + (conv_to 64 arg))))) +(decl put_in_reg_zext32 (Value) Reg) +(rule (put_in_reg_zext32 val @ (value_type (fits_in_32 ty))) + (extend val $false (ty_bits ty) 32)) +(rule (put_in_reg_zext32 val @ (value_type $I32)) val) +(rule (put_in_reg_zext32 val @ (value_type $I64)) val) + +(spec (u8_into_imm12 arg) (provide (= result (zero_ext 24 arg)))) +(decl u8_into_imm12 (u8) Imm12) +(extern constructor u8_into_imm12 u8_into_imm12) + +(rule (lower (has_type $I8 (clz x))) + (sub_imm $I32 (a64_clz $I32 (put_in_reg_zext32 x)) (u8_into_imm12 24))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/ctz/broken_ctz.isle b/cranelift/isle/veri/veri_engine/examples/broken/ctz/broken_ctz.isle new file mode 100644 index 000000000000..b5f4ec3e2156 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/ctz/broken_ctz.isle @@ -0,0 +1,42 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (BitRR + (op BitOp)) +)) + +(type BitOp + (enum + (Clz) + (RBit) +)) + +(decl bit_rr (BitOp Type Reg) Reg) +(extern constructor bit_rr bit_rr) + +(spec (a64_clz ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (clz (extract 31 0 a))) + (clz a)))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_clz (Type Reg) Reg) +(rule (a64_clz ty x) (bit_rr (BitOp.Clz) ty x)) + +(spec (rbit ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (rev (extract 31 0 a))) + (rev a)))) + (require (or (= ty 32) (= ty 64)))) +(decl rbit (Type Reg) Reg) +(rule (rbit ty x) (bit_rr (BitOp.RBit) ty x)) + +;; Broken: starts with clz instead of ctz +(rule -1 (lower (has_type ty (clz x))) + (a64_clz ty (rbit ty x))) 
\ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/ctz/broken_ctz16.isle b/cranelift/isle/veri/veri_engine/examples/broken/ctz/broken_ctz16.isle new file mode 100644 index 000000000000..dc34bb90cf16 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/ctz/broken_ctz16.isle @@ -0,0 +1,88 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (BitRR + (op BitOp)) + (AluRRImmLogic + (alu_op ALUOp) + (size OperandSize) + (rd WritableReg) + (rn Reg) + (imml ImmLogic)) +)) + +(type ALUOp + (enum + (Orr) +)) + +(type BitOp + (enum + (Clz) + (RBit) +)) + +(type ImmLogic (primitive ImmLogic)) + +(type OperandSize extern + (enum Size32 + Size64)) + +(decl operand_size (Type) OperandSize) +(rule (operand_size (fits_in_32 _ty)) (OperandSize.Size32)) +(rule (operand_size (fits_in_64 _ty)) (OperandSize.Size64)) + +(decl bit_rr (BitOp Type Reg) Reg) +(extern constructor bit_rr bit_rr) + +(decl alu_rr_imm_logic (ALUOp Type Reg ImmLogic) Reg) +(rule (alu_rr_imm_logic op ty src imm) + (let ((dst WritableReg (temp_writable_reg $I64)) + (_ Unit (emit (MInst.AluRRImmLogic op (operand_size ty) dst src imm)))) + dst)) + +(spec (a64_clz ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (clz (extract 31 0 a))) + (clz a)))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_clz (Type Reg) Reg) +(rule (a64_clz ty x) (bit_rr (BitOp.Clz) ty x)) + +(spec (orr_imm ty x y) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvor (extract 31 0 x) (extract 31 0 y)))) + (64 (bvor x (zero_ext 64 y)))))) + (require + (or (<= (bv2int y) 4094) + (and (<= (bv2int y) 16773119) + (= (extract 2 0 y) #b000))))) +(decl orr_imm (Type Reg ImmLogic) Reg) +(rule (orr_imm ty x y) (alu_rr_imm_logic (ALUOp.Orr) ty x y)) + +(spec (rbit ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (rev (extract 31 0 a))) + (rev a)))) + (require (or (= ty 32) (= ty 64)))) +(decl rbit (Type Reg) Reg) +(rule (rbit ty x) (bit_rr (BitOp.RBit) ty x)) + +(spec (u64_into_imm_logic ty a) + (provide (= result a)) + (require (or (= ty 32) (= ty 64)))) +(decl u64_into_imm_logic (Type u64) ImmLogic) +(extern constructor u64_into_imm_logic u64_into_imm_logic) + +;; Broken: wrong constant +(rule (lower (has_type $I16 (ctz x))) + (a64_clz $I32 (orr_imm $I32 (rbit $I32 x) (u64_into_imm_logic $I32 0xFFFFFFFF)))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/ctz/broken_ctz8.isle b/cranelift/isle/veri/veri_engine/examples/broken/ctz/broken_ctz8.isle new file mode 100644 index 000000000000..4b99998b766b --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/ctz/broken_ctz8.isle @@ -0,0 +1,87 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (BitRR + (op BitOp)) + (AluRRImmLogic + (alu_op ALUOp) + (size OperandSize) + (rd WritableReg) + (rn Reg) + (imml ImmLogic)) +)) + +(type ALUOp + (enum + (Orr) +)) + +(type BitOp + (enum + (Clz) + (RBit) +)) + +(type ImmLogic (primitive ImmLogic)) + +(type OperandSize extern + (enum Size32 + Size64)) + +(decl operand_size (Type) OperandSize) +(rule (operand_size (fits_in_32 _ty)) (OperandSize.Size32)) +(rule (operand_size (fits_in_64 _ty)) (OperandSize.Size64)) + +(decl bit_rr (BitOp Type Reg) Reg) +(extern constructor bit_rr bit_rr) + +(decl alu_rr_imm_logic (ALUOp Type Reg ImmLogic) Reg) +(rule (alu_rr_imm_logic op ty src imm) + (let ((dst WritableReg (temp_writable_reg $I64)) + (_ Unit (emit 
(MInst.AluRRImmLogic op (operand_size ty) dst src imm)))) + dst)) + +(spec (a64_clz ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (clz (extract 31 0 a))) + (clz a)))) + (require (or (= ty 32) (= ty 64)))) +(decl a64_clz (Type Reg) Reg) +(rule (a64_clz ty x) (bit_rr (BitOp.Clz) ty x)) + +(spec (orr_imm ty x y) + (provide + (= result + (switch ty + (32 (conv_to 64 (bvor (extract 31 0 x) (extract 31 0 y)))) + (64 (bvor x (zero_ext 64 y)))))) + (require + (or (<= (bv2int y) 4094) + (and (<= (bv2int y) 16773119) + (= (extract 2 0 y) #b000))))) +(decl orr_imm (Type Reg ImmLogic) Reg) +(rule (orr_imm ty x y) (alu_rr_imm_logic (ALUOp.Orr) ty x y)) + +(spec (rbit ty a) + (provide + (= result + (if (= ty 32) + (conv_to 64 (rev (extract 31 0 a))) + (rev a)))) + (require (or (= ty 32) (= ty 64)))) +(decl rbit (Type Reg) Reg) +(rule (rbit ty x) (bit_rr (BitOp.RBit) ty x)) + +(spec (u64_into_imm_logic ty a) + (provide (= result a)) + (require (or (= ty 32) (= ty 64)))) +(decl u64_into_imm_logic (Type u64) ImmLogic) +(extern constructor u64_into_imm_logic u64_into_imm_logic) + +(rule (lower (has_type $I8 (ctz x))) + (a64_clz $I32 (orr_imm $I32 (rbit $I32 x) (u64_into_imm_logic $I32 0x80)))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_add_extend.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_add_extend.isle new file mode 100644 index 000000000000..9b93366567f8 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_add_extend.isle @@ -0,0 +1,109 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +(type ALUOp + (enum + (Add) +)) + +;; Helper type to represent a value and an extend operation fused together. +(model ExtendedValue (type (bv 67))) +(type ExtendedValue extern (enum)) + +;; We represent ExtendedValue as a bv +;; where the three most significant bits +;; encode an extend op as follows: +;; UXTB = 0b000, +;; UXTH = 0b001, +;; UXTW = 0b010, +;; UXTX = 0b011, +;; SXTB = 0b100, +;; SXTH = 0b101, +;; SXTW = 0b110, +;; SXTX = 0b111, +;; and the remaining bits encode the value. 
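+;;
+;; For example (our sketch, not from the original file), packing op SXTH
+;; (0b101) with value 5 yields 0b101 followed by the 64-bit value, i.e. the
+;; 67-bit vector (concat #b101 #x0000000000000005). In Rust:
+;; fn pack_extended_value(op: u8, value: u64) -> u128 {
+;;     // 3-bit extend-op tag in bits 66..64, value in bits 63..0.
+;;     (((op & 0b111) as u128) << 64) | value as u128
+;; }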
+ +(model ExtendOp (enum + (UXTB #b000) + (UXTH #b001) + (UXTW #b010) + (UXTX #b011) + (SXTB #b100) + (SXTH #b101) + (SXTW #b110) + (SXTX #b111) +)) + +(type ExtendOp extern + (enum + (UXTB) + (UXTH) + (UXTW) + (UXTX) + (SXTB) + (SXTH) + (SXTW) + (SXTX) +)) + +(decl alu_rr_extend_reg (ALUOp Type Reg ExtendedValue) Reg) +(extern constructor alu_rr_extend_reg alu_rr_extend_reg) + +;; (rule (alu_rr_extend_reg op ty src1 extended_reg) +;; (let ((src2 Reg (put_extended_in_reg extended_reg)) +;; (extend ExtendOp (get_extended_op extended_reg))) +;; (alu_rrr_extend op ty src1 src2 extend))) + +;; Only including the i8 to i32 opcodes, based on the impl of extended_value_from_value +(spec (extended_value_from_value x) + (provide + (switch (extract 66 64 x) + ((ExtendOp.UXTB) (= (extract 63 0 x) (zero_ext 64 (extract 7 0 (zero_ext 64 result))))) + ((ExtendOp.UXTH) (= (extract 63 0 x) (zero_ext 64 (extract 15 0 (zero_ext 64 result))))) + ((ExtendOp.UXTW) (= (extract 63 0 x) (zero_ext 64 (extract 31 0 (zero_ext 64 result))))) + ((ExtendOp.SXTB) (= (extract 63 0 x) (sign_ext 64 (extract 7 0 (zero_ext 64 result))))) + ((ExtendOp.SXTH) (= (extract 63 0 x) (sign_ext 64 (extract 15 0 (zero_ext 64 result))))) + ((ExtendOp.SXTW) (= (extract 63 0 x) (sign_ext 64 (extract 31 0 (zero_ext 64 result))))))) + (require + (bvult (extract 66 64 x) #b110) + (not (= (extract 66 64 x) #b011)) + (= result (conv_to (widthof result) x)) + (or (= 8 (widthof result)) (= 16 (widthof result)) (= 32 (widthof result))))) +(decl extended_value_from_value (ExtendedValue) Value) +(extern extractor extended_value_from_value extended_value_from_value) + +;; BROKEN: all sign_extend with no zero_extend +(spec (add_extend ty x y) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 x) + (switch (extract 66 64 y) + ((ExtendOp.UXTB) (sign_ext 32 (extract 7 0 y))) + ((ExtendOp.UXTH) (sign_ext 32 (extract 15 0 y))) + ((ExtendOp.UXTW) (sign_ext 32 (extract 31 0 y))) + ((ExtendOp.UXTX) (sign_ext 32 (extract 31 0 y))) + ((ExtendOp.SXTB) (sign_ext 32 (extract 7 0 y))) + ((ExtendOp.SXTH) (sign_ext 32 (extract 15 0 y))) + ((ExtendOp.SXTW) (sign_ext 32 (extract 31 0 y))) + ((ExtendOp.SXTX) (sign_ext 32 (extract 31 0 y)))))) + (bvadd x + (switch (extract 66 64 y) + ((ExtendOp.UXTB) (sign_ext 64 (extract 7 0 y))) + ((ExtendOp.UXTH) (sign_ext 64 (extract 15 0 y))) + ((ExtendOp.UXTW) (sign_ext 64 (extract 31 0 y))) + ((ExtendOp.UXTX) (sign_ext 64 (extract 63 0 y))) + ((ExtendOp.SXTB) (sign_ext 64 (extract 7 0 y))) + ((ExtendOp.SXTH) (sign_ext 64 (extract 15 0 y))) + ((ExtendOp.SXTW) (sign_ext 64 (extract 31 0 y))) + ((ExtendOp.SXTX) (sign_ext 64 (extract 63 0 y))))))))) +(decl add_extend (Type Reg ExtendedValue) Reg) +(rule (add_extend ty x y) (alu_rr_extend_reg (ALUOp.Add) ty x y)) + +(rule 0 (lower (has_type (fits_in_64 ty) (iadd x (extended_value_from_value y)))) + (add_extend ty x y)) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_base_case.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_base_case.isle new file mode 100644 index 000000000000..b3dee5138170 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_base_case.isle @@ -0,0 +1,27 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +(type ALUOp + (enum + (Add) +)) + +(decl alu_rrr (ALUOp Type Reg Reg) Reg) +(extern constructor alu_rrr alu_rrr) + +(spec (add ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 a) 
(extract 31 0 b))) + (bvadd a b))))) +(decl add (Type Reg Reg) Reg) +(rule (add ty x y) (alu_rrr (ALUOp.Add) ty x y)) + +(rule (lower (has_type (fits_in_64 ty) (iadd x y))) + (add ty x x)) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12.isle new file mode 100644 index 000000000000..17c1186bc70a --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12.isle @@ -0,0 +1,43 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +;; Imm12 modeled as the range of intermediates it can represent. +(model Imm12 (type (bv 24))) +(type Imm12 (primitive Imm12)) + +(type ALUOp + (enum + (Add) +)) + +;; Note that 4094 = 0xffe and 16773119 = 0xffefff +(spec (add_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 a) (zero_ext 32 b))) + (bvadd a (zero_ext 64 b))))) + (require (or (<= (bv2int b) 4094) + (and (<= (bv2int b) 16773119) + (= (extract 2 0 b) #b000))))) +(decl add_imm (Type Reg Imm12) Reg) +(rule (add_imm ty x y) (alu_rr_imm12 (ALUOp.Add) ty x y)) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +;; Broken: no require +(spec (imm12_from_value arg) + (provide + (= result (conv_to (widthof result) (zero_ext 64 arg))) + (= arg (conv_to (widthof arg) (zero_ext 64 result))))) +(decl imm12_from_value (Imm12) Value) +(extern extractor imm12_from_value imm12_from_value) + +(rule (lower (has_type (fits_in_64 ty) (iadd x (imm12_from_value y)))) + (add_imm ty x y)) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12_2.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12_2.isle new file mode 100644 index 000000000000..4315aa1271e3 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12_2.isle @@ -0,0 +1,47 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +;; Imm12 modeled as the range of intermediates it can represent. 
+(model Imm12 (type (bv 24))) +(type Imm12 (primitive Imm12)) + +(type ALUOp + (enum + (Add) +)) + +;; BROKEN: subtract instead of add +;; Note that 4094 = 0xffe and 16773119 = 0xffefff +(spec (add_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require (or (<= (bv2int b) 4094) + (and (<= (bv2int b) 16773119) + (= (extract 2 0 b) #b000))))) +(decl add_imm (Type Reg Imm12) Reg) +(rule (add_imm ty x y) (alu_rr_imm12 (ALUOp.Add) ty x y)) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +(spec (imm12_from_value arg) + (provide + (= result (conv_to (widthof result) (zero_ext 64 arg))) + (= arg (conv_to (widthof arg) (zero_ext 64 result)))) + (require + (or (bvult (zero_ext 64 result) #x0000000000000fff) + (and (bvult (zero_ext 64 result) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 result)) #b000))))) +(decl imm12_from_value (Imm12) Value) +(extern extractor imm12_from_value imm12_from_value) + +(rule (lower (has_type (fits_in_64 ty) (iadd (imm12_from_value x) y))) + (add_imm ty y x)) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12neg.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12neg.isle new file mode 100644 index 000000000000..7d3ea214f8cc --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12neg.isle @@ -0,0 +1,52 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +;; Imm12 modeled as the range of intermediates it can represent. +(model Imm12 (type (bv 24))) +(type Imm12 (primitive Imm12)) + +(type ALUOp + (enum + (Sub) +)) + +;; AVH TODO: why don't more obvious errors, like sext instead of zext, break things? 
+;; BROKEN: * instead of sub +;; Note that 4094 = 0xffe and 16773119 = 0xffefff +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvmul (extract 31 0 a) (zero_ext 32 b))) + (bvmul a (zero_ext 64 b))))) + (require (or (bvult (zero_ext 64 b) #x0000000000000fff) + (and (bvult (zero_ext 64 b) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 b)) #b000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +(spec (imm12_from_negated_value arg) + (provide (or (bvule (bvneg (zero_ext 64 result)) #x0000000000000fff) + (and (bvule (bvneg (zero_ext 64 result)) #x0000000000fff000) + (= (extract 2 0 (bvneg (zero_ext 64 result))) #b000))) + (= result (conv_to (widthof result) (bvneg (zero_ext 64 arg)))))) +(instantiate imm12_from_negated_value + ((args (bv 8)) (ret (bv 24)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 24)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 24)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 24)) (canon (bv 64))) +) +(decl imm12_from_negated_value (Imm12) Value) +(extern extractor imm12_from_negated_value imm12_from_negated_value) + +(rule (lower (has_type (fits_in_64 ty) (iadd x (imm12_from_negated_value y)))) + (sub_imm ty x y)) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12neg2.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12neg2.isle new file mode 100644 index 000000000000..7a8a734ece94 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_imm12neg2.isle @@ -0,0 +1,51 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +;; Imm12 modeled as the range of intermediates it can represent. 
+(model Imm12 (type (bv 24))) +(type Imm12 (primitive Imm12)) + +(type ALUOp + (enum + (Sub) +)) + +;; BROKEN: * instead of - +;; Note that 4094 = 0xffe and 16773119 = 0xffefff +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvmul (extract 31 0 a) (zero_ext 32 b))) + (bvmul a (zero_ext 64 b))))) + (require (or (bvult (zero_ext 64 b) #x0000000000000fff) + (and (bvult (zero_ext 64 b) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 b)) #b000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +(spec (imm12_from_negated_value arg) + (provide (or (bvule (bvneg (zero_ext 64 result)) #x0000000000000fff) + (and (bvule (bvneg (zero_ext 64 result)) #x0000000000fff000) + (= (extract 2 0 (bvneg (zero_ext 64 result))) #b000))) + (= result (conv_to (widthof result) (bvneg (zero_ext 64 arg)))))) +(instantiate imm12_from_negated_value + ((args (bv 8)) (ret (bv 24)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 24)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 24)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 24)) (canon (bv 64))) +) +(decl imm12_from_negated_value (Imm12) Value) +(extern extractor imm12_from_negated_value imm12_from_negated_value) + +(rule (lower (has_type (fits_in_64 ty) (iadd (imm12_from_negated_value x) y))) + (sub_imm ty y x)) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_madd.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_madd.isle new file mode 100644 index 000000000000..361e694635b7 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_madd.isle @@ -0,0 +1,28 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +(type ALUOp3 + (enum + ;; Multiply-add + (MAdd) +)) + +(spec (madd ty a b c) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 c) (bvmul (extract 31 0 a) (extract 31 0 b)))) + (bvadd c (bvmul a b)))))) +(decl madd (Type Reg Reg Reg) Reg) +(rule (madd ty x y z) (alu_rrrr (ALUOp3.MAdd) ty x y z)) + +(decl alu_rrrr (ALUOp3 Type Reg Reg Reg) Reg) +(extern constructor alu_rrrr alu_rrrr) + +(rule (lower (has_type (fits_in_64 ty) (iadd x (imul y z)))) + (madd ty x y z)) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_madd2.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_madd2.isle new file mode 100644 index 000000000000..86542aac6821 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_madd2.isle @@ -0,0 +1,28 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +;; Instruction formats. 
+(type MInst + (enum +)) + +(type ALUOp3 + (enum + (MAdd) +)) + +(spec (madd ty a b c) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 c) (bvmul (extract 31 0 a) (extract 31 0 b)))) + (bvadd c (bvmul a b)))))) +(decl madd (Type Reg Reg Reg) Reg) +(rule (madd ty x y z) (alu_rrrr (ALUOp3.MAdd) ty x y z)) + +(decl alu_rrrr (ALUOp3 Type Reg Reg Reg) Reg) +(extern constructor alu_rrrr alu_rrrr) + +(rule (lower (has_type (fits_in_64 ty) (iadd (imul x y) z))) + (madd ty x y x)) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_msub.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_msub.isle new file mode 100644 index 000000000000..030486e50598 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_msub.isle @@ -0,0 +1,28 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +;; Instruction formats. +(type MInst + (enum +)) + +(type ALUOp3 + (enum + (MSub) +)) + +(spec (msub ty a b c) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 c) (bvmul (extract 31 0 a) (extract 31 0 b)))) + (bvsub c (bvmul a b)))))) +(decl msub (Type Reg Reg Reg) Reg) +(rule (msub ty x y z) (alu_rrrr (ALUOp3.MSub) ty x y z)) + +(decl alu_rrrr (ALUOp3 Type Reg Reg Reg) Reg) +(extern constructor alu_rrrr alu_rrrr) + +(rule (lower (has_type (fits_in_64 ty) (isub (imul y z) x))) + (msub ty y z x)) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_shift.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_shift.isle new file mode 100644 index 000000000000..f9a851e32bf8 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_shift.isle @@ -0,0 +1,84 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (AluRRRShift + (shiftop ShiftOpAndAmt)) +)) + +;; ASSUMING 64 BIT MODE!!! 
+;; annotations will interpret this as a 10-bit field
+;; the two msb encode the type of shift as follows:
+;; 00: lsl
+;; 01: lsr
+;; 10: asr
+;; 11: invalid
+;; the rest will encode an 8-bit shift amount
+(type ShiftOpAndAmt (primitive ShiftOpAndAmt))
+
+(model ALUOp (enum
+  (Add #x00) ;; 0
+  (Sub #x01)
+  (Orr #x02)
+  (OrrNot #x03)
+  (And #x04)
+  (AndNot #x05)
+  (Eor #x06)
+  (EorNot #x07)
+  (SubS #x08)
+  (SDiv #x09)
+  (UDiv #x0a)
+  (RotR #x0b)
+  (Lsr #x0c)
+  (Asr #x0d)
+  (Lsl #x0e)))
+
+(type ALUOp (enum
+  (Add)
+  (Sub)
+  (Orr)
+  (OrrNot)
+  (And)
+  (AndNot)
+  (Eor)
+  (EorNot)
+  (SubS)
+  (SDiv)
+  (UDiv)
+  (RotR)
+  (Lsr)
+  (Asr)
+  (Lsl)))
+
+(decl alu_rrr_shift (ALUOp Type Reg Reg ShiftOpAndAmt) Reg)
+(extern constructor alu_rrr_shift alu_rrr_shift)
+
+;; BROKEN: swapped shl shr
+(spec (add_shift ty a b shift)
+  (provide
+    (= result (if (<= ty 32)
+      (conv_to 64 (bvadd (extract 31 0 a)
+        (switch (extract 15 8 shift)
+          ((ALUOp.Lsr) (bvshl (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+          ((ALUOp.Lsl) (bvlshr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+          ((ALUOp.Lsl) (bvashr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))))))
+      (bvadd a
+        (switch (extract 15 8 shift)
+          ((ALUOp.Lsr) (bvshl b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+          ((ALUOp.Lsl) (bvlshr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+          ((ALUOp.Lsl) (bvashr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))))))))
+(decl add_shift (Type Reg Reg ShiftOpAndAmt) Reg)
+(rule (add_shift ty x y z) (alu_rrr_shift (ALUOp.Add) ty x y z))
+
+(spec (lshl_from_imm64 ty a)
+  (provide (= result (concat #x0e (extract 7 0 a))))
+  (require (= (extract 63 8 a) #b00000000000000000000000000000000000000000000000000000000)))
+(decl pure lshl_from_imm64 (Type Imm64) ShiftOpAndAmt)
+(extern constructor lshl_from_imm64 lshl_from_imm64)
+
+(rule 7 (lower (has_type (fits_in_64 ty)
+                         (iadd x (ishl y (iconst k)))))
+  (if-let amt (lshl_from_imm64 ty k))
+  (add_shift ty x y amt))
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_shift2.isle b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_shift2.isle
new file mode 100644
index 000000000000..94118e758d47
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/iadd/broken_shift2.isle
@@ -0,0 +1,85 @@
+(spec (lower arg)
+  (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+(type MInst
+  (enum
+    (AluRRRShift
+      (shiftop ShiftOpAndAmt))
+))
+
+;; ASSUMING 64 BIT MODE!!!
+;; annotations will interpret this as a 10-bit field
+;; the two msb encode the type of shift as follows:
+;; 00: lsl
+;; 01: lsr
+;; 10: asr
+;; 11: invalid
+;; the rest will encode an 8-bit shift amount
+(type ShiftOpAndAmt (primitive ShiftOpAndAmt))
+
+(model ALUOp (enum
+  (Add #x00) ;; 0
+  (Sub #x01)
+  (Orr #x02)
+  (OrrNot #x03)
+  (And #x04)
+  (AndNot #x05)
+  (Eor #x06)
+  (EorNot #x07)
+  (SubS #x08)
+  (SDiv #x09)
+  (UDiv #x0a)
+  (RotR #x0b)
+  (Lsr #x0c)
+  (Asr #x0d)
+  (Lsl #x0e)))
+
+(type ALUOp (enum
+  (Add)
+  (Sub)
+  (Orr)
+  (OrrNot)
+  (And)
+  (AndNot)
+  (Eor)
+  (EorNot)
+  (SubS)
+  (SDiv)
+  (UDiv)
+  (RotR)
+  (Lsr)
+  (Asr)
+  (Lsl)))
+
+(decl alu_rrr_shift (ALUOp Type Reg Reg ShiftOpAndAmt) Reg)
+(extern constructor alu_rrr_shift alu_rrr_shift)
+
+(spec (add_shift ty a b shift)
+  (provide
+    (= result (if (<= ty 32)
+      (conv_to 64 (bvadd (extract 31 0 a)
+        (switch (extract 15 8 shift)
+          ((ALUOp.Lsl) (bvshl (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+          ((ALUOp.Lsr) (bvlshr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+          ((ALUOp.Asr) (bvashr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))))))
+      (bvadd a
+        (switch (extract 15 8 shift)
+          ((ALUOp.Lsl) (bvshl b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+          ((ALUOp.Lsr) (bvlshr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+          ((ALUOp.Asr) (bvashr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))))))))
+
+(decl add_shift (Type Reg Reg ShiftOpAndAmt) Reg)
+(rule (add_shift ty x y z) (alu_rrr_shift (ALUOp.Add) ty x y z))
+
+(spec (lshr_from_u64 ty a)
+  (provide (= result (concat (ALUOp.Lsr) (extract 7 0 a))))
+  (require (= (extract 63 8 a) #b00000000000000000000000000000000000000000000000000000000)))
+(decl pure lshr_from_u64 (Type Imm64) ShiftOpAndAmt)
+(extern constructor lshr_from_u64 lshr_from_u64)
+
+;; BROKEN: using lshr_from_u64 instead of lshl_from_imm64
+(rule 6 (lower (has_type (fits_in_64 ty)
+                         (iadd (ishl x (iconst k)) y)))
+  (if-let amt (lshr_from_u64 ty k))
+  (add_shift ty y x amt))
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/imul/broken_imul.isle b/cranelift/isle/veri/veri_engine/examples/broken/imul/broken_imul.isle
new file mode 100644
index 000000000000..afb301f8ef9c
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/imul/broken_imul.isle
@@ -0,0 +1,59 @@
+(spec (lower arg)
+  (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+;; Instruction formats.
+(type MInst
+  (enum
+    ;; An ALU operation with two register sources and a register destination.
+    (AluRRR
+      (alu_op ALUOp)
+      (size OperandSize)
+)))
+
+;; An ALU operation. This can be paired with several instruction formats
+;; below (see `Inst`) in any combination.
+(type ALUOp
+  (enum
+    (Add)
+))
+
+(type ALUOp3
+  (enum
+    ;; Multiply-add
+    (MAdd)
+))
+
+(type OperandSize extern
+  (enum Size32
+        Size64))
+
+(decl alu_rrr (ALUOp Type Reg Reg) Reg)
+(extern constructor alu_rrr alu_rrr)
+
+(decl alu_rrrr (ALUOp3 Type Reg Reg Reg) Reg)
+(extern constructor alu_rrrr alu_rrrr)
+
+(spec (zero_reg)
+  (provide (= (zero_ext 64 #x0000000000000000) result)))
+(decl zero_reg () Reg)
+(extern constructor zero_reg zero_reg)
+
+;; Helper for calculating the `OperandSize` corresponding to a type
+(decl operand_size (Type) OperandSize)
+(rule (operand_size (fits_in_32 _ty)) (OperandSize.Size32))
+(rule (operand_size (fits_in_64 _ty)) (OperandSize.Size64))
+
+;; Helpers for generating `madd` instructions.
+(spec (madd ty a b c)
+  (provide
+    (= result
+       (if (<= ty 32)
+           (conv_to 64 (bvadd (extract 31 0 c) (bvmul (extract 31 0 a) (extract 31 0 b))))
+           (bvadd c (bvmul a b))))))
+(decl madd (Type Reg Reg Reg) Reg)
+(rule (madd ty x y z) (alu_rrrr (ALUOp3.MAdd) ty x y z))
+
+;; `i64` and smaller.
+(rule (lower (has_type (fits_in_64 ty) (imul x y)))
+  (madd ty y y (zero_reg)))
\ No newline at end of file
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_base_case.isle b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_base_case.isle
new file mode 100644
index 000000000000..5637496d61cb
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_base_case.isle
@@ -0,0 +1,27 @@
+(spec (lower arg)
+  (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+(type MInst
+  (enum
+))
+
+(type ALUOp
+  (enum
+    (Sub)
+))
+
+(spec (sub ty a b)
+  (provide
+    (= result
+       (if (<= ty 32)
+           (conv_to 64 (bvsub (extract 31 0 a) (extract 31 0 b)))
+           (bvsub a b)))))
+(decl sub (Type Reg Reg) Reg)
+(extern constructor sub sub)
+
+(decl alu_rrr (ALUOp Type Reg Reg) Reg)
+(extern constructor alu_rrr alu_rrr)
+
+(rule (lower (has_type (fits_in_64 ty) (isub x y)))
+  (sub ty y x))
\ No newline at end of file
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_imm12.isle b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_imm12.isle
new file mode 100644
index 000000000000..06d2590c57d1
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_imm12.isle
@@ -0,0 +1,44 @@
+(spec (lower arg)
+  (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+(type MInst
+  (enum
+))
+
+;; Imm12 modeled as the range of immediates it can represent.
+(model Imm12 (type (bv 24)))
+(type Imm12 (primitive Imm12))
+
+(type ALUOp
+  (enum
+    (Sub)
+))
+
+;; Note that 4094 = 0xffe and 16773119 = 0xffefff
+(spec (sub_imm ty a b)
+  (provide
+    (= result
+       (if (<= ty 32)
+           (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b)))
+           (bvsub a (zero_ext 64 b)))))
+  (require (or (bvult (zero_ext 64 b) #x0000000000000fff)
+               (and (bvult (zero_ext 64 b) #x0000000000fff000)
+                    (= (extract 2 0 (zero_ext 64 b)) #b000)))
+           (= (widthof b) 24)))
+(decl sub_imm (Type Reg Imm12) Reg)
+(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y))
+
+(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg)
+(extern constructor alu_rr_imm12 alu_rr_imm12)
+
+;; Broken: no require
+(spec (imm12_from_value arg)
+  (provide
+    (= result (conv_to (widthof result) (zero_ext 64 arg)))
+    (= arg (conv_to (widthof arg) (zero_ext 64 result)))))
+(decl imm12_from_value (Imm12) Value)
+(extern extractor imm12_from_value imm12_from_value)
+
+(rule 0 (lower (has_type (fits_in_64 ty) (isub x (imm12_from_value y))))
+  (sub_imm ty x y))
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_imm12neg.isle b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_imm12neg.isle
new file mode 100644
index 000000000000..b9bbc1fa8b67
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_imm12neg.isle
@@ -0,0 +1,50 @@
+(spec (lower arg)
+  (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+(type MInst
+  (enum
+))
+
+;; Imm12 modeled as the range of immediates it can represent.
+(model Imm12 (type (bv 24)))
+(type Imm12 (primitive Imm12))
+
+(type ALUOp
+  (enum
+    (Add)
+))
+
+;; BROKEN: * instead of -
+;; Note that 4094 = 0xffe and 16773119 = 0xffefff
+(spec (add_imm ty a b)
+  (provide
+    (= result
+       (if (<= ty 32)
+           (conv_to 64 (bvmul (extract 31 0 a) (zero_ext 32 b)))
+           (bvmul a (zero_ext 64 b)))))
+  (require (or (<= (bv2int b) 4094)
+               (and (<= (bv2int b) 16773119)
+                    (= (extract 2 0 b) #b000)))))
+(decl add_imm (Type Reg Imm12) Reg)
+(rule (add_imm ty x y) (alu_rr_imm12 (ALUOp.Add) ty x y))
+
+(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg)
+(extern constructor alu_rr_imm12 alu_rr_imm12)
+
+(spec (imm12_from_negated_value arg)
+  (provide (or (bvule (bvneg (zero_ext 64 result)) #x0000000000000fff)
+               (and (bvule (bvneg (zero_ext 64 result)) #x0000000000fff000)
+                    (= (extract 2 0 (bvneg (zero_ext 64 result))) #b000)))
+           (= result (conv_to (widthof result) (bvneg (zero_ext 64 arg))))))
+(instantiate imm12_from_negated_value
+  ((args (bv 8)) (ret (bv 24)) (canon (bv 8)))
+  ((args (bv 16)) (ret (bv 24)) (canon (bv 16)))
+  ((args (bv 32)) (ret (bv 24)) (canon (bv 32)))
+  ((args (bv 64)) (ret (bv 24)) (canon (bv 64)))
+)
+(decl imm12_from_negated_value (Imm12) Value)
+(extern extractor imm12_from_negated_value imm12_from_negated_value)
+
+(rule 2 (lower (has_type (fits_in_64 ty) (isub x (imm12_from_negated_value y))))
+  (add_imm ty x y))
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_imm12neg_not_distinct.isle b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_imm12neg_not_distinct.isle
new file mode 100644
index 000000000000..762604f6443d
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_imm12neg_not_distinct.isle
@@ -0,0 +1,49 @@
+(spec (lower arg)
+  (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+(type MInst
+  (enum
+))
+
+;; Imm12 modeled as the range of immediates it can represent.
+(model Imm12 (type (bv 24))) +(type Imm12 (primitive Imm12)) + +(type ALUOp + (enum + (Add) +)) + +(spec (add_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 a) (zero_ext 32 b))) + (bvadd a (zero_ext 64 b))))) + (require (or (<= (bv2int b) 4094) + (and (<= (bv2int b) 16773119) + (= (extract 2 0 b) #b000))))) +(decl add_imm (Type Reg Imm12) Reg) +(rule (add_imm ty x y) (alu_rr_imm12 (ALUOp.Add) ty x y)) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +(spec (imm12_from_negated_value arg) + (provide (or (bvult (bvneg (zero_ext 64 result)) #x0000000000000fff) + (and (bvult (bvneg (zero_ext 64 result)) #x0000000000fff000) + (= (extract 2 0 (bvneg (zero_ext 64 result))) #b000))) + (= result (conv_to (widthof result) (bvneg (zero_ext 64 arg)))))) +(instantiate imm12_from_negated_value + ((args (bv 8)) (ret (bv 24)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 24)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 24)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 24)) (canon (bv 64))) +) +(decl imm12_from_negated_value (Imm12) Value) +(extern extractor imm12_from_negated_value imm12_from_negated_value) + +;; BROKEN: for ty < 64, this only matches on zero +(rule 2 (lower (has_type (fits_in_64 ty) (isub x (imm12_from_negated_value y)))) + (add_imm ty x y)) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_shift.isle b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_shift.isle new file mode 100644 index 000000000000..6b1143419325 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/isub/broken_shift.isle @@ -0,0 +1,84 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum + (AluRRRShift + (shiftop ShiftOpAndAmt)) +)) + +;; ASSUMING 64 BIT MODE!!! 
+;; annotations will interpret this as a 10-bit field
+;; the two msb encode the type of shift as follows:
+;; 00: lsl
+;; 01: lsr
+;; 10: asr
+;; 11: invalid
+;; the rest will encode an 8-bit shift amount
+(type ShiftOpAndAmt (primitive ShiftOpAndAmt))
+
+(model ALUOp (enum
+  (Add #x00) ;; 0
+  (Sub #x01)
+  (Orr #x02)
+  (OrrNot #x03)
+  (And #x04)
+  (AndNot #x05)
+  (Eor #x06)
+  (EorNot #x07)
+  (SubS #x08)
+  (SDiv #x09)
+  (UDiv #x0a)
+  (RotR #x0b)
+  (Lsr #x0c)
+  (Asr #x0d)
+  (Lsl #x0e)))
+
+(type ALUOp (enum
+  (Add)
+  (Sub)
+  (Orr)
+  (OrrNot)
+  (And)
+  (AndNot)
+  (Eor)
+  (EorNot)
+  (SubS)
+  (SDiv)
+  (UDiv)
+  (RotR)
+  (Lsr)
+  (Asr)
+  (Lsl)))
+
+
+(decl alu_rrr_shift (ALUOp Type Reg Reg ShiftOpAndAmt) Reg)
+(extern constructor alu_rrr_shift alu_rrr_shift)
+
+;; BROKEN: swapped shl, shr
+(spec (sub_shift ty a b shift)
+  (provide
+    (= result (if (<= ty 32)
+      (conv_to 64 (bvsub (extract 31 0 a) (switch (extract 15 8 shift)
+        ((ALUOp.Lsr) (bvshl (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+        ((ALUOp.Lsl) (bvlshr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+        ((ALUOp.Lsl) (bvashr (extract 31 0 b) (zero_ext 32 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift))))))))
+      (bvsub a (switch (extract 15 8 shift)
+        ((ALUOp.Lsr) (bvshl b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+        ((ALUOp.Lsl) (bvlshr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))
+        ((ALUOp.Lsl) (bvashr b (zero_ext 64 (bvand (bvsub (int2bv 8 ty) #x01) (extract 7 0 shift)))))))))))
+
+(decl sub_shift (Type Reg Reg ShiftOpAndAmt) Reg)
+(rule (sub_shift ty x y z) (alu_rrr_shift (ALUOp.Sub) ty x y z))
+
+(spec (lshl_from_imm64 ty a)
+  (provide (= result (concat #x0e (extract 7 0 a))))
+  (require (= (extract 63 8 a) #b00000000000000000000000000000000000000000000000000000000)))
+(decl pure lshl_from_imm64 (Type Imm64) ShiftOpAndAmt)
+(extern constructor lshl_from_imm64 lshl_from_imm64)
+
+(rule -3 (lower (has_type (fits_in_64 ty)
+                          (isub x (ishl y (iconst k)))))
+  (if-let amt (lshl_from_imm64 ty k))
+  (sub_shift ty x y amt))
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/sdiv/broken_sdiv.isle b/cranelift/isle/veri/veri_engine/examples/broken/sdiv/broken_sdiv.isle
new file mode 100644
index 000000000000..a43cdd8b78e9
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/sdiv/broken_sdiv.isle
@@ -0,0 +1,55 @@
+(spec (lower arg)
+  (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+(type MInst (enum))
+
+(type ALUOp
+  (enum
+    (SDiv)
+))
+
+(decl alu_rrr (ALUOp Type Reg Reg) Reg)
+(extern constructor alu_rrr alu_rrr)
+
+;; BROKEN: zero-extends instead of sign-extends
+;; Place a `Value` into a register, sign extending it to 64-bits
+(spec (put_in_reg_sext64 x) (provide (= (zero_ext 64 x) result)))
+(decl put_in_reg_sext64 (Value) Reg)
+(extern constructor put_in_reg_sext64 put_in_reg_sext64)
+
+;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero.
+ (spec (put_nonzero_in_reg_sext64 x)
+   (provide (= (sign_ext 64 x) result))
+   (require (not (= #x0000000000000000 result))))
+(decl put_nonzero_in_reg_sext64 (Value) Reg)
+(extern constructor put_nonzero_in_reg_sext64 put_nonzero_in_reg_sext64)
+
+(spec (a64_sdiv ty a b)
+  (provide
+    (= result
+       (if (<= ty 32)
+           (conv_to 64 (bvsdiv (extract 31 0 a) (extract 31 0 b)))
+           (bvsdiv a b)))))
+(decl a64_sdiv (Type Reg Reg) Reg)
+(rule (a64_sdiv ty x y) (alu_rrr (ALUOp.SDiv) ty x y))
+
+;; Check for signed overflow. The only case is min_value / -1.
+;; The following checks must be done in 32-bit or 64-bit, depending
+;; on the input type.
+(spec (trap_if_div_overflow ty x y)
+  (provide (= x result)
+           (if (= ty 32)
+               (not (and (= #x00000000 (extract 31 0 y))
+                         (= #x80000000 (extract 31 0 y))))
+               (not (and (= #x0000000000000000 y)
+                         (= #x8000000000000000 y))))))
+(decl trap_if_div_overflow (Type Reg Reg) Reg)
+(extern constructor trap_if_div_overflow trap_if_div_overflow)
+
+(rule (lower (has_type (fits_in_64 ty) (sdiv x y)))
+  (let ((x64 Reg (put_in_reg_sext64 x))
+        (y64 Reg (put_nonzero_in_reg_sext64 y))
+        (valid_x64 Reg (trap_if_div_overflow ty x64 y64))
+        (result Reg (a64_sdiv $I64 valid_x64 y64)))
+    result))
\ No newline at end of file
diff --git a/cranelift/isle/veri/veri_engine/examples/broken/sdiv/broken_sdiv_safe_const.isle b/cranelift/isle/veri/veri_engine/examples/broken/sdiv/broken_sdiv_safe_const.isle
new file mode 100644
index 000000000000..bb9d27089e48
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/examples/broken/sdiv/broken_sdiv_safe_const.isle
@@ -0,0 +1,76 @@
+(spec (lower arg)
+  (provide (= result arg)))
+(decl lower (Inst) InstOutput)
+
+(type MInst (enum))
+
+(type ALUOp
+  (enum
+    (UDiv)
+))
+
+;; Model ImmExtend as a 1-bit vector, where
+;; Sign == 0 and Zero == 1
+(type ImmExtend
+  (enum
+    (Zero)
+    (Sign)
+))
+
+(model ImmExtend
+  (enum
+    (Sign #b0)
+    (Zero #b1)))
+
+(decl alu_rrr (ALUOp Type Reg Reg) Reg)
+(extern constructor alu_rrr alu_rrr)
+
+(spec (a64_udiv ty a b)
+  (provide
+    (= result
+       (if (<= ty 32)
+           (conv_to 64 (bvudiv (extract 31 0 a) (extract 31 0 b)))
+           (bvudiv a b)))))
+(decl a64_udiv (Type Reg Reg) Reg)
+(rule (a64_udiv ty x y) (alu_rrr (ALUOp.UDiv) ty x y))
+
+(spec (imm ty ext x)
+  (provide
+    (= result
+       (switch ty
+         (8 (if (= ext #b1) (zero_ext 64 (extract 7 0 x)) (sign_ext 64 (extract 7 0 x))))
+         (16 (if (= ext #b1) (zero_ext 64 (extract 15 0 x)) (sign_ext 64 (extract 15 0 x))))
+         (32 (if (= ext #b1) (zero_ext 64 (extract 31 0 x)) (sign_ext 64 (extract 31 0 x))))
+         (64 x))))
+  (require (or (= ty 8) (= ty 16) (= ty 32) (= ty 64))))
+(decl imm (Type ImmExtend u64) Reg)
+(extern constructor imm imm)
+
+;; Place a `Value` into a register, sign extending it to 64-bits
+(spec (put_in_reg_sext64 x) (provide (= (sign_ext 64 x) result)))
+(decl put_in_reg_sext64 (Value) Reg)
+(extern constructor put_in_reg_sext64 put_in_reg_sext64)
+
+;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero.
+;; Broken: missing require
+ (spec (put_nonzero_in_reg_sext64 x)
+   (provide (= (sign_ext 64 x) result))
+   ;; (require (not (= #x0000000000000000 result)))
+   )
+(decl put_nonzero_in_reg_sext64 (Value) Reg)
+(extern constructor put_nonzero_in_reg_sext64 put_nonzero_in_reg_sext64)
+
+;; Helper for extracting an immediate that's not 0 and not -1 from an imm64.
+;; (spec (safe_divisor_from_imm64 x) +;; (provide (= (sign_ext 64 x) result)) +;; (require (not (= #x0000000000000000 result)) +;; (not (= #x1111111111111111 result)))) +;; (decl safe_divisor_from_imm64 (u64) Imm64) +;; (extern extractor safe_divisor_from_imm64 safe_divisor_from_imm64) + +;; Special case for `sdiv` where no checks are needed due to division by a +;; constant meaning the checks are always passed. +;; BROKEN: uses udiv instead of sdiv +(rule sdiv_safe_divisor 1 (lower (has_type (fits_in_64 ty) (sdiv x (iconst imm)))) + (if-let y (safe_divisor_from_imm64 ty imm)) + (a64_udiv $I64 (put_in_reg_sext64 x) (imm ty (ImmExtend.Sign) y))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_do_shift_32.isle b/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_do_shift_32.isle new file mode 100644 index 000000000000..bec3e94147fa --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_do_shift_32.isle @@ -0,0 +1,111 @@ +;; Instruction formats. +(type MInst + (enum +)) + +(model ALUOp (enum + (Add #x00) ;; 0 + (Sub #x01) + (Orr #x02) + (OrrNot #x03) + (And #x04) + (AndNot #x05) + (Eor #x06) + (EorNot #x07) + (SubS #x08) + (SDiv #x09) + (UDiv #x0a) + (RotR #x0b) + (Lsr #x0c) + (Asr #x0d) + (Lsl #x0e))) + +;; An ALU operation. This can be paired with several instruction formats +;; below (see `Inst`) in any combination. +(type ALUOp + (enum + (Add) + (Sub) + (Orr) + (OrrNot) + (And) + (AndS) + (AndNot) + ;; XOR (AArch64 calls this "EOR") + (Eor) + ;; XNOR (AArch64 calls this "EOR-NOT") + (EorNot) + ;; Add, setting flags + (AddS) + ;; Sub setting flags + (SubS) + ;; Signed multiplyhigh-word result + (SMulH) + ;; Unsigned multiplyhigh-word result + (UMulH) + (SDiv) + (UDiv) + (RotR) + (Lsr) + (Asr) + (Lsl) + ;; Add with carry + (Adc) + ;; Add with carrysettings flags + (AdcS) + ;; Subtract with carry + (Sbc) + ;; Subtract with carrysettings flags + (SbcS) +)) + +;; BROKEN: no restriction on op in annotation + (spec (do_shift op t a b) + (provide + (= result + (switch op + ((ALUOp.Lsr) (conv_to 64 + (bvlshr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Asr) (conv_to 64 + (bvashr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Lsl) (conv_to 64 + (bvshl (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b))))))))) + (require + (= t (widthof b)) + (or (= t 8) (= t 16) (= t 32) (= t 64)))) +(instantiate do_shift + ((args (bv 8) Int (bv 64) (bv 8)) (ret (bv 64)) (canon (bv 8))) + ((args (bv 8) Int (bv 64) (bv 16)) (ret (bv 64)) (canon (bv 16))) + ((args (bv 8) Int (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 32))) + ((args (bv 8) Int (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))) +) +(decl do_shift (ALUOp Type Reg Value) Reg) + +(spec (alu_rrr op t a b) + (provide + (= result (switch op + ((ALUOp.Lsr) + (if (<= t 32) + (conv_to 64 (bvlshr (extract 31 0 a) (bvand (bvsub (int2bv 32 32) #x00000001) (extract 31 0 b)))) + (bvlshr a (bvand (bvsub (int2bv 64 64) #x0000000000000001) b)))) + ((ALUOp.Asr) + (if (<= t 32) + (conv_to 64 (bvashr (extract 31 0 a) (bvand (bvsub (int2bv 32 32) #x00000001) (extract 31 0 b)))) + (bvashr a (bvand (bvsub (int2bv 64 64) #x0000000000000001) b)))) + ((ALUOp.Lsl) + (if (<= t 32) + (conv_to 64 (bvshl (extract 31 0 a) (bvand (bvsub 
(int2bv 32 32) #x00000001) (extract 31 0 b)))) + (bvshl a (bvand (bvsub (int2bv 64 64) #x0000000000000001) b))))))) + (require + (or (= op (ALUOp.Lsr)) (= op (ALUOp.Asr)) (= op (ALUOp.Lsl))) + (or (= t 8) (= t 16) (= t 32) (= t 64)))) +(decl alu_rrr (ALUOp Type Reg Reg) Reg) +(extern constructor alu_rrr alu_rrr) + +(rule (do_shift op $I32 x y) (alu_rrr op $I32 x (value_regs_get y 0))) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_ishl_to_do_shift_64.isle b/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_ishl_to_do_shift_64.isle new file mode 100644 index 000000000000..d3b135f655b5 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_ishl_to_do_shift_64.isle @@ -0,0 +1,110 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +;; Instruction formats. +(type MInst + (enum +)) + +(model ALUOp (enum + (Add #x00) ;; 0 + (Sub #x01) + (Orr #x02) + (OrrNot #x03) + (And #x04) + (AndNot #x05) + (Eor #x06) + (EorNot #x07) + (SubS #x08) + (SDiv #x09) + (UDiv #x0a) + (RotR #x0b) + (Lsr #x0c) + (Asr #x0d) + (Lsl #x0e))) + +;; An ALU operation. This can be paired with several instruction formats +;; below (see `Inst`) in any combination. +(type ALUOp + (enum + (Add) + (Sub) + (Orr) + (OrrNot) + (And) + (AndS) + (AndNot) + ;; XOR (AArch64 calls this "EOR") + (Eor) + ;; XNOR (AArch64 calls this "EOR-NOT") + (EorNot) + ;; Add, setting flags + (AddS) + ;; Sub setting flags + (SubS) + ;; Signed multiplyhigh-word result + (SMulH) + ;; Unsigned multiplyhigh-word result + (UMulH) + (SDiv) + (UDiv) + (RotR) + (Lsr) + (Asr) + (Lsl) + ;; Add with carry + (Adc) + ;; Add with carrysettings flags + (AdcS) + ;; Subtract with carry + (Sbc) + ;; Subtract with carrysettings flags + (SbcS) +)) + + (spec (do_shift op t a b) + (provide + (= result + (switch op + ((ALUOp.Lsr) (conv_to 64 + (bvlshr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Asr) (conv_to 64 + (bvashr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Lsl) (conv_to 64 + (bvshl (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b))))))))) + (require + (or (= op (ALUOp.Lsr)) (= op (ALUOp.Asr)) (= op (ALUOp.Lsl))) + (= t (widthof b)) + (or (= t 8) (= t 16) (= t 32) (= t 64)) + (switch op + ((ALUOp.Lsr) (switch t + (8 (= (extract 31 0 a) (zero_ext 32 (extract 7 0 a)))) + (16 (= (extract 31 0 a) (zero_ext 32 (extract 15 0 a)))) + (32 $true) + (64 $true))) + ((ALUOp.Asr) (switch t + (8 (= (extract 31 0 a) (sign_ext 32 (extract 7 0 a)))) + (16 (= (extract 31 0 a) (sign_ext 32 (extract 15 0 a)))) + (32 $true) + (64 $true))) + ((ALUOp.Lsl) $true)))) +(instantiate do_shift + ((args (bv 8) Int (bv 64) (bv 8)) (ret (bv 64)) (canon (bv 8))) + ((args (bv 8) Int (bv 64) (bv 16)) (ret (bv 64)) (canon (bv 16))) + ((args (bv 8) Int (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 32))) + ((args (bv 8) Int (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))) +) +(decl do_shift (ALUOp Type Reg Value) Reg) +(extern constructor do_shift do_shift) + +;; BROKEN: Asr instead of Lsl +;; Shift for i64. 
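+;; (A correct lowering would pass (ALUOp.Lsl) here; the verifier should be
+;; able to report a counterexample such as x = 1, y = 1, where ishl produces
+;; 2 but an arithmetic shift right produces 0.)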
+(rule (lower (has_type $I64 (ishl x y))) + (do_shift (ALUOp.Asr) $I64 x y)) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_sshr_to_do_shift_fits_in_32.isle b/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_sshr_to_do_shift_fits_in_32.isle new file mode 100644 index 000000000000..29d11e2db026 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_sshr_to_do_shift_fits_in_32.isle @@ -0,0 +1,108 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +;; Instruction formats. +(type MInst + (enum +)) + +(model ALUOp (enum + (Add #x00) ;; 0 + (Sub #x01) + (Orr #x02) + (OrrNot #x03) + (And #x04) + (AndNot #x05) + (Eor #x06) + (EorNot #x07) + (SubS #x08) + (SDiv #x09) + (UDiv #x0a) + (RotR #x0b) + (Lsr #x0c) + (Asr #x0d) + (Lsl #x0e))) + +;; An ALU operation. This can be paired with several instruction formats +;; below (see `Inst`) in any combination. +(type ALUOp + (enum + (Add) + (Sub) + (Orr) + (OrrNot) + (And) + (AndS) + (AndNot) + ;; XOR (AArch64 calls this "EOR") + (Eor) + ;; XNOR (AArch64 calls this "EOR-NOT") + (EorNot) + ;; Add, setting flags + (AddS) + ;; Sub setting flags + (SubS) + ;; Signed multiplyhigh-word result + (SMulH) + ;; Unsigned multiplyhigh-word result + (UMulH) + (SDiv) + (UDiv) + (RotR) + (Lsr) + (Asr) + (Lsl) + ;; Add with carry + (Adc) + ;; Add with carrysettings flags + (AdcS) + ;; Subtract with carry + (Sbc) + ;; Subtract with carrysettings flags + (SbcS) +)) + +;; BROKEN: missing extension part of the spec + (spec (do_shift op t a b) + (provide + (= result + (switch op + ((ALUOp.Lsr) (conv_to 64 + (bvlshr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Asr) (conv_to 64 + (bvashr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Lsl) (conv_to 64 + (bvshl (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b))))))))) + (require + (or (= op (ALUOp.Lsr)) (= op (ALUOp.Asr)) (= op (ALUOp.Lsl))) + (= t (widthof b)) + (or (= t 8) (= t 16) (= t 32) (= t 64)))) +(instantiate do_shift + ((args (bv 8) Int (bv 64) (bv 8)) (ret (bv 64)) (canon (bv 8))) + ((args (bv 8) Int (bv 64) (bv 16)) (ret (bv 64)) (canon (bv 16))) + ((args (bv 8) Int (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 32))) + ((args (bv 8) Int (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))) +) +(decl do_shift (ALUOp Type Reg Value) Reg) +(extern constructor do_shift do_shift) + +(spec (put_in_reg_sext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (sign_ext 32 arg)) + (conv_to 64 arg))))) +(decl put_in_reg_sext32 (Value) Reg) +(extern constructor put_in_reg_sext32 put_in_reg_sext32) + +;; BROKEN: Wrong opcode +;; Shift for i64. +(rule -2 (lower (has_type (fits_in_32 ty) (sshr x y))) + (do_shift (ALUOp.Lsr) ty (put_in_reg_sext32 x) y)) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_ushr_to_do_shift_fits_in_32.isle b/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_ushr_to_do_shift_fits_in_32.isle new file mode 100644 index 000000000000..451c795333ce --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/shifts/broken_ushr_to_do_shift_fits_in_32.isle @@ -0,0 +1,120 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +;; Instruction formats. 
+(type MInst + (enum +)) + +(model ALUOp (enum + (Add #x00) ;; 0 + (Sub #x01) + (Orr #x02) + (OrrNot #x03) + (And #x04) + (AndNot #x05) + (Eor #x06) + (EorNot #x07) + (SubS #x08) + (SDiv #x09) + (UDiv #x0a) + (RotR #x0b) + (Lsr #x0c) + (Asr #x0d) + (Lsl #x0e))) + +;; An ALU operation. This can be paired with several instruction formats +;; below (see `Inst`) in any combination. +(type ALUOp + (enum + (Add) + (Sub) + (Orr) + (OrrNot) + (And) + (AndS) + (AndNot) + ;; XOR (AArch64 calls this "EOR") + (Eor) + ;; XNOR (AArch64 calls this "EOR-NOT") + (EorNot) + ;; Add, setting flags + (AddS) + ;; Sub setting flags + (SubS) + ;; Signed multiplyhigh-word result + (SMulH) + ;; Unsigned multiplyhigh-word result + (UMulH) + (SDiv) + (UDiv) + (RotR) + (Lsr) + (Asr) + (Lsl) + ;; Add with carry + (Adc) + ;; Add with carrysettings flags + (AdcS) + ;; Subtract with carry + (Sbc) + ;; Subtract with carrysettings flags + (SbcS) +)) + + (spec (do_shift op t a b) + (provide + (= result + (switch op + ((ALUOp.Lsr) (conv_to 64 + (bvlshr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Asr) (conv_to 64 + (bvashr (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b)))))) + ((ALUOp.Lsl) (conv_to 64 + (bvshl (conv_to t a) + (conv_to t (zero_ext 64 + (bvand (conv_to (widthof b) (bvsub (int2bv 64 (widthof b)) #x0000000000000001)) b))))))))) + (require + (or (= op (ALUOp.Lsr)) (= op (ALUOp.Asr)) (= op (ALUOp.Lsl))) + (= t (widthof b)) + (or (= t 8) (= t 16) (= t 32) (= t 64)) + (switch op + ((ALUOp.Lsr) (switch t + (8 (= (extract 31 0 a) (zero_ext 32 (extract 7 0 a)))) + (16 (= (extract 31 0 a) (zero_ext 32 (extract 15 0 a)))) + (32 $true) + (64 $true))) + ((ALUOp.Asr) (switch t + (8 (= (extract 31 0 a) (sign_ext 32 (extract 7 0 a)))) + (16 (= (extract 31 0 a) (sign_ext 32 (extract 15 0 a)))) + (32 $true) + (64 $true))) + ((ALUOp.Lsl) $true)))) +(instantiate do_shift + ((args (bv 8) Int (bv 64) (bv 8)) (ret (bv 64)) (canon (bv 8))) + ((args (bv 8) Int (bv 64) (bv 16)) (ret (bv 64)) (canon (bv 16))) + ((args (bv 8) Int (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 32))) + ((args (bv 8) Int (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))) +) +(decl do_shift (ALUOp Type Reg Value) Reg) +(extern constructor do_shift do_shift) + +;; Place a `Value` into a register, zero extending it to 32-bits +(spec (put_in_reg_zext32 arg) + (provide + (= result + (if (<= (widthof arg) 32) + (conv_to 64 (zero_ext 32 arg)) + (conv_to 64 arg))))) +(decl put_in_reg_zext32 (Value) Reg) +(extern constructor put_in_reg_zext32 put_in_reg_zext32) + +;; BROKEN: wrong op +;; Shift for i8/i16/i32. 
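+;; (A correct lowering would use (ALUOp.Lsr); for example with x = 2, y = 1,
+;; ushr yields 1 while a left shift yields 4.)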
+(rule -1 (lower (has_type (fits_in_32 ty) (ushr x y))) + (do_shift (ALUOp.Lsl) ty (put_in_reg_zext32 x) y)) diff --git a/cranelift/isle/veri/veri_engine/examples/broken/udiv/broken_udiv.isle b/cranelift/isle/veri/veri_engine/examples/broken/udiv/broken_udiv.isle new file mode 100644 index 000000000000..02f7d59dd1c2 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/udiv/broken_udiv.isle @@ -0,0 +1,37 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst (enum)) + +(type ALUOp + (enum + (UDiv) +)) + +(decl alu_rrr (ALUOp Type Reg Reg) Reg) +(extern constructor alu_rrr alu_rrr) + +(spec (a64_udiv ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvudiv (extract 31 0 a) (extract 31 0 b))) + (bvudiv a b))))) +(decl a64_udiv (Type Reg Reg) Reg) +(rule (a64_udiv ty x y) (alu_rrr (ALUOp.UDiv) ty x y)) + +(spec (put_nonzero_in_reg_zext64 x) + (provide (= result (zero_ext 64 x))) + (require (not (= result #x0000000000000000)))) +(decl put_nonzero_in_reg_zext64 (Value) Reg) +(extern constructor put_nonzero_in_reg_zext64 put_nonzero_in_reg_zext64) + +(spec (put_in_reg_sext64 x) (provide (= (sign_ext 64 x) result))) +(decl put_in_reg_sext64 (Value) Reg) +(extern constructor put_in_reg_sext64 put_in_reg_sext64) + +;; Note that aarch64's `udiv` doesn't trap so to respect the semantics of +;; CLIF's `udiv` the check for zero needs to be manually performed. +(rule (lower (has_type (fits_in_64 ty) (udiv x y))) + (a64_udiv $I64 (put_in_reg_sext64 x) (put_nonzero_in_reg_zext64 y))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/udiv/udiv_cve.isle b/cranelift/isle/veri/veri_engine/examples/broken/udiv/udiv_cve.isle new file mode 100644 index 000000000000..bdc0504bef12 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/udiv/udiv_cve.isle @@ -0,0 +1,41 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst (enum)) + +(type ALUOp + (enum + (SDiv) +)) + +(decl alu_rrr (ALUOp Type Reg Reg) Reg) +(extern constructor alu_rrr alu_rrr) + +;; Helper for generating `udiv` instructions. +(spec (a64_udiv ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvudiv (extract 31 0 a) (extract 31 0 b))) + (bvudiv a b))))) +(decl a64_udiv (Type Reg Reg) Reg) +(extern constructor a64_udiv a64_udiv) + +(spec (imm ty x) (provide (= result (sign_ext 64 (conv_to ty x))))) +(decl imm (Type u64) Reg) +(extern constructor imm imm) + +(spec (put_in_reg_zext64 x) (provide (= result (zero_ext 64 x)))) +(decl put_in_reg_zext64 (Value) Reg) +(extern constructor put_in_reg_zext64 put_in_reg_zext64) + +;; Helper for placing a `Value` into a `Reg` and validating that it's nonzero. 
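+;; (The require clause below is what models the guard: a rule that uses this
+;; helper on its right-hand side may assume the zero-extended divisor is
+;; nonzero, so the bvudiv in `a64_udiv` is never asked to divide by zero.)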
+(spec (put_nonzero_in_reg_zext64 x) + (provide (= result (zero_ext 64 x))) + (require (not (= result #x0000000000000000)))) +(decl put_nonzero_in_reg_zext64 (Value) Reg) +(extern constructor put_nonzero_in_reg_zext64 put_nonzero_in_reg_zext64) + +(rule udiv (lower (has_type (fits_in_64 ty) (udiv x y))) + (a64_udiv $I64 (put_in_reg_zext64 x) (put_nonzero_in_reg_zext64 y))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/broken/udiv/udiv_cve_underlying.isle b/cranelift/isle/veri/veri_engine/examples/broken/udiv/udiv_cve_underlying.isle new file mode 100644 index 000000000000..1c913193c416 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/broken/udiv/udiv_cve_underlying.isle @@ -0,0 +1,26 @@ + +(type MInst (enum)) + +(spec (integral_ty ty) + (provide (= result ty)) + (require (or (= ty 8) (= ty 16) (= ty 32) (= ty 64)))) +(decl integral_ty (Type) Type) +(extern extractor integral_ty integral_ty) + +;; Try changing this "sign_ext" to "zero_ext": the test fails either way +;; (spec (imm ty x) (provide (= result (zero_ext 64 (conv_to ty x))))) +(spec (imm ty x) (provide (= result (sign_ext 64 (conv_to ty x))))) +(instantiate imm + ((args Int (bv 64)) (ret (bv 64)) (canon (bv 8))) + ((args Int (bv 64)) (ret (bv 64)) (canon (bv 16))) + ((args Int (bv 64)) (ret (bv 64)) (canon (bv 32))) + ((args Int (bv 64)) (ret (bv 64)) (canon (bv 64))) +) +(decl imm (Type u64) Reg) + +(spec (load_constant64_full x) (provide (= result x))) +(decl load_constant64_full (u64) Reg) +(extern constructor load_constant64_full load_constant64_full) + +(rule (imm (integral_ty _ty) n) + (load_constant64_full n)) diff --git a/cranelift/isle/veri/veri_engine/examples/constructs/if-let.isle b/cranelift/isle/veri/veri_engine/examples/constructs/if-let.isle new file mode 100644 index 000000000000..658116cd179b --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/constructs/if-let.isle @@ -0,0 +1,23 @@ +(spec (lower arg) (provide (= result arg))) +(decl lower (Inst) InstOutput) + +;; Instruction formats. +(type MInst (enum)) + +;; Constructor to test whether two values are same. +(spec (same_value x y) (provide (= result x ) (= x y))) +(decl pure same_value (Value Value) Value) +(extern constructor same_value same_value) + +(rule (lower (has_type (fits_in_64 ty) (iadd x y))) + (if-let z (same_value x y)) + (add ty z z)) + +(spec (add ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 a) (extract 31 0 b))) + (bvadd a b))))) +(decl add (Type Reg Reg) Reg) +(extern constructor add add) diff --git a/cranelift/isle/veri/veri_engine/examples/iadd/updated_iadd_imm12neg_left.isle b/cranelift/isle/veri/veri_engine/examples/iadd/updated_iadd_imm12neg_left.isle new file mode 100644 index 000000000000..53f33cb77e37 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/iadd/updated_iadd_imm12neg_left.isle @@ -0,0 +1,49 @@ +(spec (lower arg) (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +;; Imm12 modeled as the range of intermediates it can represent. 
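+;; (Background, for readers unfamiliar with AArch64: an Imm12 is a 12-bit
+;; constant optionally shifted left by 12, so it covers 0x000000..0x000fff
+;; plus the 4 KiB-aligned values up to 0xfff000; a 24-bit bitvector is wide
+;; enough to hold any such value directly.)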
+(model Imm12 (type (bv 24))) +(type Imm12 (primitive Imm12)) + +(type ALUOp + (enum + (Sub) +)) + +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require (or (bvult (zero_ext 64 b) #x0000000000000fff) + (and (bvult (zero_ext 64 b) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 b)) #b000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +(spec (imm12_from_negated_value arg) + (provide (or (bvult (bvneg (sign_ext 64 arg)) #x0000000000000fff) + (and (bvult (bvneg (sign_ext 64 arg)) #x0000000000fff000) + (= (extract 2 0 (bvneg (sign_ext 64 arg))) #b000))) + (= result (extract 23 0 (bvneg (sign_ext 64 arg)))))) +(instantiate imm12_from_negated_value + ((args (bv 8)) (ret (bv 24)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 24)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 24)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 24)) (canon (bv 64))) +) +(decl pure partial imm12_from_negated_value (Value) Imm12) +(extern extractor imm12_from_negated_value imm12_from_negated_value) + +(rule 3 (lower (has_type (fits_in_64 ty) (iadd x y))) + (if-let imm12_neg (imm12_from_negated_value x)) + (sub_imm ty y imm12_neg)) diff --git a/cranelift/isle/veri/veri_engine/examples/iadd/updated_iadd_imm12neg_right.isle b/cranelift/isle/veri/veri_engine/examples/iadd/updated_iadd_imm12neg_right.isle new file mode 100644 index 000000000000..79660ff3d235 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/iadd/updated_iadd_imm12neg_right.isle @@ -0,0 +1,49 @@ +(spec (lower arg) (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +;; Imm12 modeled as the range of intermediates it can represent. 
+(model Imm12 (type (bv 24))) +(type Imm12 (primitive Imm12)) + +(type ALUOp + (enum + (Sub) +)) + +(spec (sub_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvsub (extract 31 0 a) (zero_ext 32 b))) + (bvsub a (zero_ext 64 b))))) + (require (or (bvult (zero_ext 64 b) #x0000000000000fff) + (and (bvult (zero_ext 64 b) #x0000000000fff000) + (= (extract 2 0 (zero_ext 64 b)) #b000))) + (= (widthof b) 24))) +(decl sub_imm (Type Reg Imm12) Reg) +(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +(spec (imm12_from_negated_value arg) + (provide (or (bvult (bvneg (sign_ext 64 arg)) #x0000000000000fff) + (and (bvult (bvneg (sign_ext 64 arg)) #x0000000000fff000) + (= (extract 2 0 (bvneg (sign_ext 64 arg))) #b000))) + (= result (extract 23 0 (bvneg (sign_ext 64 arg)))))) +(instantiate imm12_from_negated_value + ((args (bv 8)) (ret (bv 24)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 24)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 24)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 24)) (canon (bv 64))) +) +(decl pure partial imm12_from_negated_value (Value) Imm12) +(extern extractor imm12_from_negated_value imm12_from_negated_value) + +(rule 2 (lower (has_type (fits_in_64 ty) (iadd x y))) + (if-let imm12_neg (imm12_from_negated_value y)) + (sub_imm ty x imm12_neg)) diff --git a/cranelift/isle/veri/veri_engine/examples/isub/imm12neg_new.isle b/cranelift/isle/veri/veri_engine/examples/isub/imm12neg_new.isle new file mode 100644 index 000000000000..64e8436a25fa --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/isub/imm12neg_new.isle @@ -0,0 +1,50 @@ +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(type MInst + (enum +)) + +;; Imm12 modeled as the range of intermediates it can represent. 
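+;; (The idea in this file: `isub x, y`, where the negation of the constant y
+;; fits in an Imm12, can be rewritten as an add of the negated immediate; the
+;; `imm12_from_negated_value` extractor below recovers that negated constant.)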
+(model Imm12 (type (bv 24))) +(type Imm12 (primitive Imm12)) + +(type ALUOp + (enum + (Add) +)) + +;; Note that 4094 = 0xffe and 16773119 = 0xffefff +(spec (add_imm ty a b) + (provide + (= result + (if (<= ty 32) + (conv_to 64 (bvadd (extract 31 0 a) (zero_ext 32 b))) + (bvadd a (zero_ext 64 b))))) + (require (or (<= (bv2int b) 4094) + (and (<= (bv2int b) 16773119) + (= (extract 2 0 b) #b000))))) +(decl add_imm (Type Reg Imm12) Reg) +(rule (add_imm ty x y) (alu_rr_imm12 (ALUOp.Add) ty x y)) + +(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) +(extern constructor alu_rr_imm12 alu_rr_imm12) + +(spec (imm12_from_negated_value arg) + (provide (or (bvult (bvneg (sign_ext 64 arg)) #x0000000000000fff) + (and (bvult (bvneg (sign_ext 64 arg)) #x0000000000fff000) + (= (extract 2 0 (bvneg (sign_ext 64 arg))) #b000))) + (= result (extract 23 0 (bvneg (sign_ext 64 arg)))))) +(instantiate imm12_from_negated_value + ((args (bv 8)) (ret (bv 24)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 24)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 24)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 24)) (canon (bv 64))) +) +(decl pure partial imm12_from_negated_value (Value) Imm12) +(extern extractor imm12_from_negated_value imm12_from_negated_value) + +(rule isub_imm12_neg 2 (lower (has_type (fits_in_64 ty) (isub x y))) + (if-let imm12_neg (imm12_from_negated_value y)) + (add_imm ty x imm12_neg)) diff --git a/cranelift/isle/veri/veri_engine/examples/load/load_add_panic.isle b/cranelift/isle/veri/veri_engine/examples/load/load_add_panic.isle new file mode 100644 index 000000000000..4d5bbea707e4 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/load/load_add_panic.isle @@ -0,0 +1,24 @@ +;; Instruction formats +(type MInst (enum)) + +;; Add with 2 loads spec +(form + lhs_form + ((args (bv 8) (bv 8)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 16) (bv 16)) (ret (bv 16)) (canon (bv 16))) + ((args (bv 32) (bv 32)) (ret (bv 32)) (canon (bv 32))) + ((args (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))) +) +(spec (lhs x y) + (provide (= result (bvadd (load_effect #x0000 (widthof x) x) (load #x0000 (widthof y) y))))) +(decl lhs (Value Value) Inst) +(extern extractor lhs lhs) +(instantiate lhs lhs_form) + +(spec (rhs x y) + (provide (= result (bvadd (load_effect #x0000 (widthof x) x) (load #x0000 (widthof y) y))))) +(decl rhs (Value Value) Inst) +(extern constructor rhs rhs) + +(rule (lhs x y) + (rhs x y)) diff --git a/cranelift/isle/veri/veri_engine/examples/load/load_conditional.isle b/cranelift/isle/veri/veri_engine/examples/load/load_conditional.isle new file mode 100644 index 000000000000..9f3238ebcd4d --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/load/load_conditional.isle @@ -0,0 +1,24 @@ +;; Instruction formats. 
+(type MInst (enum)) + +(form + lhs_form + ((args Bool (bv 8) (bv 8)) (ret (bv 8)) (canon (bv 8))) + ((args Bool (bv 16) (bv 16)) (ret (bv 16)) (canon (bv 16))) + ((args Bool (bv 32) (bv 32)) (ret (bv 32)) (canon (bv 32))) + ((args Bool (bv 64) (bv 64)) (ret (bv 64)) (canon (bv 64))) +) + +(spec (lhs cond x y) + (provide (= result (load_effect #x0000 (widthof (if cond x y)) (if cond x y))))) +(decl lhs (bool Value Value) Inst) +(extern extractor lhs lhs) +(instantiate lhs lhs_form) + +(spec (rhs x y) + (provide (= result (load_effect #x0000 (widthof x) x)))) +(decl rhs (Value Value) Inst) +(extern constructor rhs rhs) + +(rule (lhs $true x y) + (rhs x y)) diff --git a/cranelift/isle/veri/veri_engine/examples/mid-end/broken_bor_band_consts.isle b/cranelift/isle/veri/veri_engine/examples/mid-end/broken_bor_band_consts.isle new file mode 100644 index 000000000000..5dec3657b4ae --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/mid-end/broken_bor_band_consts.isle @@ -0,0 +1,74 @@ +(type Type (primitive Type)) +(type Value (primitive Value)) +(type u64 (primitive u64)) +(type Imm64 (primitive Imm64)) +(type bool (primitive bool)) +(extern const $true bool) + +(spec (simplify x) (provide (= x result))) +(instantiate simplify + ((args (bv 8)) (ret (bv 8)) (canon (bv 8))) + ((args (bv 16)) (ret (bv 16)) (canon (bv 16))) + ((args (bv 32)) (ret (bv 32)) (canon (bv 32))) + ((args (bv 64)) (ret (bv 64)) (canon (bv 64))) +) +(decl simplify (Value) Value) + +(spec (bor ty x y) + (provide (= (bvor x y) result)) + (require + (= ty (widthof x)) + (= ty (widthof y)))) +(decl bor (Type Value Value) Value) +(extern extractor bor bor) +(extern constructor bor bor) + +(spec (bnot ty x) + (provide (= (bvnot x) result)) + (require (= ty (widthof x)))) +(decl bnot (Type Value) Value) +(extern extractor bnot bnot) +(extern constructor bnot bnot) + +(spec (band ty x y) + (provide (= (bvand x y) result)) + (require + (= ty (widthof x)) + (= ty (widthof y)))) +(decl band (Type Value Value) Value) +(extern extractor band band) +(extern constructor band band) + +;; Specify to this rule with constants +(spec (iconst ty arg) + (provide (= arg (zero_ext 64 result))) + (require (= ty (widthof arg)))) +(decl iconst (Type Imm64) Value) +(extern constructor iconst iconst) +(extern extractor iconst iconst) + +;; Extract a `u64` from an `Imm64`. +(spec (u64_from_imm64 arg) (provide (= arg result))) +(decl u64_from_imm64 (u64) Imm64) +(extern extractor u64_from_imm64 u64_from_imm64) + +(spec (u64_eq x y) + (provide (= result (if (= x y) #x0000000000000000 #x0000000000000001)))) +(decl pure u64_eq (u64 u64) u64) +(extern constructor u64_eq u64_eq) + +(spec (u64_not arg) (provide (= (bvnot arg) result))) +(decl pure u64_not (u64) u64) +(extern constructor u64_not u64_not) + +;; `or(and(x, y), noty) == or(x, not(y))` specialized for constants, since +;; otherwise we may not know that `z == not(y)` since we don't generally expand +;; constants in the e-graph. +;; +;; (No need to duplicate for commutative `bor` for this constant version because +;; we move constants to the right.) 
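+;; (Worked instance of the identity: in 8 bits, with y = 0x0f and
+;; z = not(y) = 0xf0, (x & 0x0f) | 0xf0 keeps the low nibble of x and sets
+;; the high nibble, exactly like x | 0xf0.)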
+(rule (simplify (bor ty + (band ty x (iconst ty (u64_from_imm64 y))) + z @ (iconst ty (u64_from_imm64 zk)))) + (if (u64_eq zk (u64_not y))) + (bor ty x z)) diff --git a/cranelift/isle/veri/veri_engine/examples/store/broken_bvsub_store_with_load.isle b/cranelift/isle/veri/veri_engine/examples/store/broken_bvsub_store_with_load.isle new file mode 100644 index 000000000000..852321422230 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/store/broken_bvsub_store_with_load.isle @@ -0,0 +1,104 @@ +(type MInst (enum)) + +(type SinkableLoad extern (enum)) + +(type OperandSize extern + (enum Size8 + Size16 + Size32 + Size64)) + +(type Amode (enum + ;; Immediate sign-extended and a register + (ImmReg (simm32 i32) + (base Reg) + (flags MemFlags)) + + ;; Sign-extend-32-to-64(simm32) + base + (index << shift) + (ImmRegRegShift (simm32 i32) + (base Gpr) + (index Gpr) + (shift u8) + (flags MemFlags)) + + ;; Sign-extend-32-to-64(immediate) + RIP (instruction + ;; pointer). The appropriate relocation is emitted so + ;; that the resulting immediate makes this Amode refer to + ;; the given MachLabel. + (RipRelative (target MachLabel)))) + +(type Gpr (primitive Gpr)) + +(type RegMemImm extern + (enum + (Reg (reg Reg)) + (Mem (addr SyntheticAmode)) + (Imm (simm32 u32)))) + +(type SyntheticAmode extern (enum)) + +(convert SinkableLoad RegMemImm sink_load_to_reg_mem_imm) + +(convert Value Gpr put_in_gpr) + +(decl x64_add_mem (Type Amode Gpr) SideEffectNoResult) +(spec (x64_add_mem ty addr val) + (provide (= result (store_effect + (extract 79 64 addr) + ty + (conv_to ty (bvsub (load_effect (extract 79 64 addr) ty (extract 63 0 addr)) (conv_to ty val))) + (extract 63 0 addr)) + ) + ) + (require (or (= ty 32) (= ty 64))) +) +(extern constructor x64_add_mem x64_add_mem) + +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(decl sinkable_load (SinkableLoad) Value) +(spec (sinkable_load inst) + (provide (= result inst))) +(extern extractor sinkable_load sinkable_load) + +(decl sink_load_to_reg_mem_imm (SinkableLoad) RegMemImm) +(spec (sink_load_to_reg_mem_imm load) + (provide (= result load))) +(extern constructor sink_load_to_reg_mem_imm sink_load_to_reg_mem_imm) + +(spec (put_in_gpr arg) (provide (= result (conv_to 64 arg)))) +(decl put_in_gpr (Value) Gpr) +(extern constructor put_in_gpr put_in_gpr) + +(spec (to_amode flags val offset) + (provide (= result (concat flags (bvadd val (sign_ext 64 offset))))) + (require + (= (widthof val) 64))) +(decl to_amode (MemFlags Value Offset32) Amode) +(extern constructor to_amode to_amode) + +(decl operand_size_of_type_32_64 (Type) OperandSize) +(extern constructor operand_size_of_type_32_64 operand_size_of_type_32_64) + +(form store + ((args (bv 16) (bv 8) (bv 64) (bv 32)) (ret Unit) (canon (bv 8))) + ((args (bv 16) (bv 16) (bv 64) (bv 32)) (ret Unit) (canon (bv 16))) + ((args (bv 16) (bv 32) (bv 64) (bv 32)) (ret Unit) (canon (bv 32))) + ((args (bv 16) (bv 64) (bv 64) (bv 32)) (ret Unit) (canon (bv 64))) +) + + +(rule store_x64_add_mem 3 (lower + (store flags + (has_type (ty_32_or_64 ty) + (iadd (and + (sinkable_load sink) + (load flags addr offset)) + src2)) + addr + offset)) + (let ((_ RegMemImm sink)) + (side_effect + (x64_add_mem ty (to_amode flags addr offset) src2)))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/store/broken_isub_store_with_load.isle b/cranelift/isle/veri/veri_engine/examples/store/broken_isub_store_with_load.isle new file mode 100644 index 000000000000..a2a7ec5f2a3c --- 
/dev/null +++ b/cranelift/isle/veri/veri_engine/examples/store/broken_isub_store_with_load.isle @@ -0,0 +1,104 @@ +(type MInst (enum)) + +(type SinkableLoad extern (enum)) + +(type OperandSize extern + (enum Size8 + Size16 + Size32 + Size64)) + +(type Amode (enum + ;; Immediate sign-extended and a register + (ImmReg (simm32 i32) + (base Reg) + (flags MemFlags)) + + ;; Sign-extend-32-to-64(simm32) + base + (index << shift) + (ImmRegRegShift (simm32 i32) + (base Gpr) + (index Gpr) + (shift u8) + (flags MemFlags)) + + ;; Sign-extend-32-to-64(immediate) + RIP (instruction + ;; pointer). The appropriate relocation is emitted so + ;; that the resulting immediate makes this Amode refer to + ;; the given MachLabel. + (RipRelative (target MachLabel)))) + +(type Gpr (primitive Gpr)) + +(type RegMemImm extern + (enum + (Reg (reg Reg)) + (Mem (addr SyntheticAmode)) + (Imm (simm32 u32)))) + +(type SyntheticAmode extern (enum)) + +(convert SinkableLoad RegMemImm sink_load_to_reg_mem_imm) + +(convert Value Gpr put_in_gpr) + +(decl x64_add_mem (Type Amode Gpr) SideEffectNoResult) +(spec (x64_add_mem ty addr val) + (provide (= result (store_effect + (extract 79 64 addr) + ty + (conv_to ty (bvadd (load_effect (extract 79 64 addr) ty (extract 63 0 addr)) (conv_to ty val))) + (extract 63 0 addr)) + ) + ) + (require (or (= ty 32) (= ty 64))) +) +(extern constructor x64_add_mem x64_add_mem) + +(spec (lower arg) + (provide (= result arg))) +(decl lower (Inst) InstOutput) + +(decl sinkable_load (SinkableLoad) Value) +(spec (sinkable_load inst) + (provide (= result inst))) +(extern extractor sinkable_load sinkable_load) + +(decl sink_load_to_reg_mem_imm (SinkableLoad) RegMemImm) +(spec (sink_load_to_reg_mem_imm load) + (provide (= result load))) +(extern constructor sink_load_to_reg_mem_imm sink_load_to_reg_mem_imm) + +(spec (put_in_gpr arg) (provide (= result (conv_to 64 arg)))) +(decl put_in_gpr (Value) Gpr) +(extern constructor put_in_gpr put_in_gpr) + +(spec (to_amode flags val offset) + (provide (= result (concat flags (bvadd val (sign_ext 64 offset))))) + (require + (= (widthof val) 64))) +(decl to_amode (MemFlags Value Offset32) Amode) +(extern constructor to_amode to_amode) + +(decl operand_size_of_type_32_64 (Type) OperandSize) +(extern constructor operand_size_of_type_32_64 operand_size_of_type_32_64) + +(form store + ((args (bv 16) (bv 8) (bv 64) (bv 32)) (ret Unit) (canon (bv 8))) + ((args (bv 16) (bv 16) (bv 64) (bv 32)) (ret Unit) (canon (bv 16))) + ((args (bv 16) (bv 32) (bv 64) (bv 32)) (ret Unit) (canon (bv 32))) + ((args (bv 16) (bv 64) (bv 64) (bv 32)) (ret Unit) (canon (bv 64))) +) + + +(rule store_x64_add_mem 3 (lower + (store flags + (has_type (ty_32_or_64 ty) + (isub (and + (sinkable_load sink) + (load flags addr offset)) + src2)) + addr + offset)) + (let ((_ RegMemImm sink)) + (side_effect + (x64_add_mem ty (to_amode flags addr offset) src2)))) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/store/store_switch.isle b/cranelift/isle/veri/veri_engine/examples/store/store_switch.isle new file mode 100644 index 000000000000..5e88394d05ea --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/store/store_switch.isle @@ -0,0 +1,25 @@ +;; Instruction formats +(type MInst (enum)) + +(form + lhs_form + ((args Bool (bv 8) (bv 8)) (ret Unit) (canon (bv 8))) + ((args Bool (bv 16) (bv 16)) (ret Unit) (canon (bv 16))) + ((args Bool (bv 32) (bv 32)) (ret Unit) (canon (bv 32))) + ((args Bool (bv 64) (bv 64)) (ret Unit) (canon (bv 64))) +) + +(spec (lhs cond val2 
val3) + (provide (= result (store_effect #x0000 (widthof (switch cond ($true val2) ($false val3))) (switch cond ($true val2) ($false val3)) #x0000000000000000))) + ) +(decl lhs (bool Value Value) Inst) +(extern extractor lhs lhs) +(instantiate lhs lhs_form) + +(spec (rhs val2 val3) + (provide (= result (store_effect #x0000 (widthof val2) val2 #x0000000000000000)))) +(decl rhs (Value Value) Inst) +(extern constructor rhs rhs) + +(rule (lhs $true val2 val3) + (rhs val2 val3)) diff --git a/cranelift/isle/veri/veri_engine/examples/x86/amode_add_shl.isle b/cranelift/isle/veri/veri_engine/examples/x86/amode_add_shl.isle new file mode 100644 index 000000000000..aec20bc42c81 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/x86/amode_add_shl.isle @@ -0,0 +1,160 @@ +(type Reg (primitive Reg)) +(type Value (primitive Value)) +(type Type (primitive Type)) +(type Inst (primitive Inst)) +(type Unit (primitive Unit)) +(type MemFlags (primitive MemFlags)) +(type Gpr (primitive Gpr)) +(type Imm64 (primitive Imm64)) +(type u32 (primitive u32)) +(type u8 (primitive u8)) + +(type MInst (enum)) + +(extern const $I64 Type) + +(type ExtendKind (enum Sign Zero)) + +(spec (iconst arg) (provide (= arg (zero_ext 64 result)))) +(decl iconst (Imm64) Inst) +(extern extractor iconst iconst) + +;; fn uimm8(&mut self, x: Imm64) -> Option { +;; let x64: i64 = x.into(); +;; let x8: u8 = x64.try_into().ok()?; +;; Some(x8) +;; } +(spec (uimm8 arg) + (provide (= result (zero_ext 64 arg))) + (require (bvslt result #x0000000000000100) + (= (widthof arg) 8))) +(decl uimm8 (u8) Imm64) +(extern extractor uimm8 uimm8) + +(spec (u8_as_u32 arg) + (provide (= result (zero_ext 32 arg))) + (require + (= (widthof arg) 8) + (= (widthof result) 32))) +(decl pure u8_as_u32 (u8) u32) +(extern constructor u8_as_u32 u8_as_u32) + +(spec (def_inst arg) (provide (= result arg))) +(decl def_inst (Inst) Value) +(extern extractor def_inst def_inst) +(convert Inst Value def_inst) + +(spec (put_in_reg arg) (provide (= result (conv_to 62 arg)))) +(decl put_in_reg (Value) Reg) +(extern constructor put_in_reg put_in_reg) +(convert Value Reg put_in_reg) + +(spec (gpr_to_reg arg) (provide (= result arg))) +(decl gpr_to_reg (Gpr) Reg) +(extern constructor gpr_to_reg gpr_to_reg) +(convert Gpr Reg gpr_to_reg) + +(spec (gpr_new arg) (provide (= result arg))) +(decl gpr_new (Reg) Gpr) +(extern constructor gpr_new gpr_new) +(convert Reg Gpr gpr_new) + +;; To make this case study specific to Wasm, contrain to 32 or 64 +(spec (uextend arg) + (provide (= result (zero_ext (widthof result) arg))) + (require + (or (= (widthof result) 32) (= (widthof result) 64)) + (or (= (widthof result) 32) (= (widthof result) 64)) + (<= (widthof arg) (widthof result)))) +(decl uextend (Value) Inst) +(extern extractor uextend uextend) + +;; fn shift_mask(&mut self, ty: Type) -> ImmLogic { +;; let mask = (ty.lane_bits() - 1) as u64; +;; ImmLogic::maybe_from_u64(mask, I32).unwrap() +;; } +(spec (ishl x y) + (provide + (= result + (bvshl x + (bvand (conv_to (widthof y) (bvsub (int2bv 64 (widthof y)) + #x0000000000000001)) + y))))) +(decl ishl (Value Value) Inst) +(extern extractor ishl ishl) + +;; NOTE: partial spec: ignoring the `flags` argument +;; NOTE: to get an easier counterexample, set base to 0 +;; Immediate sign-extended and a register +(spec (Amode.ImmReg simm base flags) + (provide (= result (bvadd base (sign_ext 64 simm)))) + (require + (= (widthof simm) 32) + (= (widthof base) 64) + (= base #x0000000000000000) + (= (widthof flags) 4))) + +;; NOTE: partial spec: 
ignoring the `flags` argument +;; Sign-extend-32-to-64(simm32) + base + (index << shift) +(spec (Amode.ImmRegRegShift simm base index shift flags) + (provide + (= result + (bvadd + (bvadd base (sign_ext 64 simm)) + (bvshl index (zero_ext 64 shift))))) + (require + (= (widthof simm) 32) + (= (widthof base) 64) + (= flags flags))) + +;; An `Amode` represents a possible addressing mode that can be used +;; in instructions. These denote a 64-bit value only. +(type Amode (enum + ;; Immediate sign-extended and a register + (ImmReg (simm32 u32) + (base Reg) + (flags MemFlags)) + + ;; Sign-extend-32-to-64(simm32) + base + (index << shift) + (ImmRegRegShift (simm32 u32) + (base Gpr) + (index Gpr) + (shift u8) + (flags MemFlags)) + ) +) + +(spec (amode_add x y) (provide (= result (bvadd x (zero_ext 64 y))))) +(form + amode + ((args (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 32))) + ((args (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 64))) +) +(instantiate amode_add amode) +(decl amode_add (Amode Value) Amode) +(extern extractor amode_add amode_add) + +(spec (valid_reg arg) (provide (= result arg))) +(decl valid_reg (Reg) Reg) +(extern extractor valid_reg valid_reg) + +(spec (u32_lteq a b) + (provide (= result ())) + (require (<= a b) + (= (widthof a) 32) + (= (widthof b) 32))) +(decl pure u32_lteq (u32 u32) Unit) +(extern constructor u32_lteq u32_lteq) + +(spec (ExtendKind.Zero) (provide (= result #x0000000000000000))) +(spec (ExtendKind.Sign) (provide (= result #x0000000000000001))) + +(spec (put_in_gpr arg) (provide (= result (conv_to 64 arg)))) +(decl put_in_gpr (Value) Gpr) +(extern constructor put_in_gpr put_in_gpr) +(convert Value Gpr put_in_gpr) + +;; The problematic rule itself +(rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (ishl index (iconst (uimm8 shift)))) + (if (u32_lteq (u8_as_u32 shift) 3)) + (Amode.ImmRegRegShift off base index shift flags)) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/examples/x86/amode_add_uextend_shl.isle b/cranelift/isle/veri/veri_engine/examples/x86/amode_add_uextend_shl.isle new file mode 100644 index 000000000000..f6675a3b2771 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/examples/x86/amode_add_uextend_shl.isle @@ -0,0 +1,176 @@ +;; We need to redefine some primitive ISLE types for this case study, since the source code +;; has changed to remove this bug on the current commit. 
+(type Reg (primitive Reg)) +(type Value (primitive Value)) +(type Type (primitive Type)) +(type Inst (primitive Inst)) +(type Unit (primitive Unit)) +(type MemFlags (primitive MemFlags)) +(type Gpr (primitive Gpr)) +(type Imm64 (primitive Imm64)) +(type u32 (primitive u32)) +(type u8 (primitive u8)) + +(type MInst (enum)) + +(extern const $I64 Type) + +(type ExtendKind (enum Sign Zero)) + +;; An Crocus specification; potentially-narrow IR values are zero-extended to 64 bits +(spec (iconst arg) (provide (= arg (zero_ext 64 result)))) +(decl iconst (Imm64) Inst) +(extern extractor iconst iconst) + +;; An Crocus specification modeling the following logic: +;; fn uimm8(&mut self, x: Imm64) -> Option { +;; let x64: i64 = x.into(); +;; let x8: u8 = x64.try_into().ok()?; +;; Some(x8) +;; } +(spec (uimm8 arg) + (provide (= result (zero_ext 64 arg))) + (require (bvslt result #x0000000000000100) + (= (widthof arg) 8))) +(decl uimm8 (u8) Imm64) +(extern extractor uimm8 uimm8) + +(spec (u8_as_u32 arg) + (provide (= result (zero_ext 32 arg))) + (require + (= (widthof arg) 8) + (= (widthof result) 32))) +(decl pure u8_as_u32 (u8) u32) +(extern constructor u8_as_u32 u8_as_u32) + +(spec (def_inst arg) (provide (= result arg))) +(decl def_inst (Inst) Value) +(extern extractor def_inst def_inst) +(convert Inst Value def_inst) + +(spec (put_in_reg arg) (provide (= result (conv_to 62 arg)))) +(decl put_in_reg (Value) Reg) +(extern constructor put_in_reg put_in_reg) +(convert Value Reg put_in_reg) + +(spec (gpr_to_reg arg) (provide (= result arg))) +(decl gpr_to_reg (Gpr) Reg) +(extern constructor gpr_to_reg gpr_to_reg) +(convert Gpr Reg gpr_to_reg) + +(spec (gpr_new arg) (provide (= result arg))) +(decl gpr_new (Reg) Gpr) +(extern constructor gpr_new gpr_new) +(convert Reg Gpr gpr_new) + +;; To make this case study specific to Wasm, contrain the widths of +;; the argument and returned value to 32 or 64 +(spec (uextend arg) + (provide (= result (zero_ext (widthof result) arg))) + (require + (or (= (widthof arg) 32) (= (widthof arg) 64)) + (or (= (widthof result) 32) (= (widthof result) 64)) + (<= (widthof arg) (widthof result)))) +(decl uextend (Value) Inst) +(extern extractor uextend uextend) + +;; Crocus specification to model the Wasm shift semantics: +;; fn shift_mask(&mut self, ty: Type) -> ImmLogic { +;; let mask = (ty.lane_bits() - 1) as u64; +;; ImmLogic::maybe_from_u64(mask, I32).unwrap() +;; } +;; NOTE: restricted to Wasm types for this case study +(spec (ishl x y) + (provide + (= result + (bvshl x + (bvand (conv_to (widthof y) (bvsub (int2bv 64 (widthof y)) + #x0000000000000001)) + y)))) + (require + (or (= (widthof x) 32) (= (widthof x) 64)) + (or (= (widthof y) 32) (= (widthof y) 64)))) +(decl ishl (Value Value) Inst) +(extern extractor ishl ishl) + +;; NOTE: partial spec: ignoring the `flags` argument +;; NOTE: to get an easier counterexample, set base to 0 +;; Immediate sign-extended and a register +(spec (Amode.ImmReg simm base flags) + (provide (= result (bvadd base (sign_ext 64 simm)))) + (require + (= (widthof simm) 32) + (= (widthof base) 64) + (= base #x0000000000000000) + (= (widthof flags) 4))) + +;; NOTE: partial spec: ignoring the `flags` argument +;; Sign-extend-32-to-64(simm32) + base + (index << shift) +(spec (Amode.ImmRegRegShift simm base index shift flags) + (provide + (= result + (bvadd + (bvadd base (sign_ext 64 simm)) + (bvshl index (zero_ext 64 shift))))) + (require + (= (widthof simm) 32) + (= (widthof base) 64) + (= (widthof base) 64) + (= flags flags))) + +;; An `Amode` 
represents a possible addressing mode that can be used +;; in instructions. These denote a 64-bit value only. +(type Amode (enum + ;; Immediate sign-extended and a register + (ImmReg (simm32 u32) + (base Reg) + (flags MemFlags)) + + ;; Sign-extend-32-to-64(simm32) + base + (index << shift) + (ImmRegRegShift (simm32 u32) + (base Gpr) + (index Gpr) + (shift u8) + (flags MemFlags)) + ) +) + +(spec (amode_add x y) (provide (= result (bvadd x (zero_ext 64 y))))) +(form + amode + ((args (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 32))) + ((args (bv 64) (bv 32)) (ret (bv 64)) (canon (bv 64))) +) +(instantiate amode_add amode) +(decl amode_add (Amode Value) Amode) +(extern extractor amode_add amode_add) + +(spec (valid_reg arg) (provide (= result arg))) +(decl valid_reg (Reg) Reg) +(extern extractor valid_reg valid_reg) + +(spec (u32_lteq a b) + (provide (= result ())) + (require (<= a b) + (= (widthof a) 32) + (= (widthof b) 32))) +(decl pure u32_lteq (u32 u32) Unit) +(extern constructor u32_lteq u32_lteq) + +(spec (ExtendKind.Zero) (provide (= result #x0000000000000000))) +(spec (ExtendKind.Sign) (provide (= result #x0000000000000001))) + +(spec (extend_to_gpr v ty ext) + (provide + (= result + (if (= ext #x0000000000000000) + (zero_ext ty v) + (sign_ext ty v))))) +(decl extend_to_gpr (Value Type ExtendKind) Gpr) +(extern constructor extend_to_gpr extend_to_gpr) + +;; The problematic rule itself +(rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) + (uextend (ishl index (iconst (uimm8 shift))))) + (if (u32_lteq (u8_as_u32 shift) 3)) + (Amode.ImmRegRegShift off base (extend_to_gpr index $I64 (ExtendKind.Zero)) shift flags)) \ No newline at end of file diff --git a/cranelift/isle/veri/veri_engine/src/annotations.rs b/cranelift/isle/veri/veri_engine/src/annotations.rs new file mode 100644 index 000000000000..0f8d2aa65b76 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/src/annotations.rs @@ -0,0 +1,479 @@ +use cranelift_isle::ast::{self, Signature}; +use std::collections::HashMap; +use veri_ir::annotation_ir; + +use cranelift_isle::ast::{Def, Ident, Model, ModelType, SpecExpr, SpecOp}; +use cranelift_isle::lexer::Pos; +use cranelift_isle::sema::{TermEnv, TermId, TypeEnv, TypeId}; +use veri_ir::annotation_ir::Width; +use veri_ir::annotation_ir::{BoundVar, Const, Expr, TermAnnotation, TermSignature, Type}; +use veri_ir::TermSignature as TermTypeSignature; + +static RESULT: &str = "result"; + +#[derive(Clone, Debug)] +pub struct ParsingEnv<'a> { + pub typeenv: &'a TypeEnv, + pub enums: HashMap, +} + +#[derive(Clone, Debug)] +pub struct AnnotationEnv { + pub annotation_map: HashMap, + + // Mapping from ISLE term to its signature instantiations. + pub instantiations_map: HashMap>, + + // Mapping from ISLE type to its model (the annotation used to represent + // it). 
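+ // (For example, an ISLE declaration `(model Imm12 (type (bv 24)))` maps
+ // the `Imm12` TypeId to the 24-bit-wide bitvector annotation type.)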
+ pub model_map: HashMap<TypeId, annotation_ir::Type>,
+}
+
+impl AnnotationEnv {
+ pub fn get_annotation_for_term(&self, term_id: &TermId) -> Option<TermAnnotation> {
+ if self.annotation_map.contains_key(term_id) {
+ return Some(self.annotation_map[term_id].clone());
+ }
+ None
+ }
+
+ pub fn get_term_signatures_by_name(
+ &self,
+ termenv: &TermEnv,
+ typeenv: &TypeEnv,
+ ) -> HashMap<String, Vec<TermTypeSignature>> {
+ let mut term_signatures_by_name = HashMap::new();
+ for (term_id, term_sigs) in &self.instantiations_map {
+ let sym = termenv.terms[term_id.index()].name;
+ let name = typeenv.syms[sym.index()].clone();
+ term_signatures_by_name.insert(name, term_sigs.clone());
+ }
+ term_signatures_by_name
+ }
+}
+
+pub fn spec_to_annotation_bound_var(i: &Ident) -> BoundVar {
+ BoundVar {
+ name: i.0.clone(),
+ ty: None,
+ }
+}
+
+fn spec_to_usize(s: &SpecExpr) -> Option<usize> {
+ match s {
+ SpecExpr::ConstInt { val, pos: _ } => Some(*val as usize),
+ _ => None,
+ }
+}
+
+fn spec_op_to_expr(s: &SpecOp, args: &[SpecExpr], pos: &Pos, env: &ParsingEnv) -> Expr {
+ fn unop<F: Fn(Box<Expr>) -> Expr>(
+ u: F,
+ args: &[SpecExpr],
+ pos: &Pos,
+ env: &ParsingEnv,
+ ) -> Expr {
+ assert_eq!(
+ args.len(),
+ 1,
+ "Unexpected number of args for unary operator {:?}",
+ pos
+ );
+ u(Box::new(spec_to_expr(&args[0], env)))
+ }
+ fn binop<F: Fn(Box<Expr>, Box<Expr>) -> Expr>(
+ b: F,
+ args: &[SpecExpr],
+ _pos: &Pos,
+ env: &ParsingEnv,
+ ) -> Expr {
+ assert_eq!(
+ args.len(),
+ 2,
+ "Unexpected number of args for binary operator {:?}",
+ args
+ );
+ b(
+ Box::new(spec_to_expr(&args[0], env)),
+ Box::new(spec_to_expr(&args[1], env)),
+ )
+ }
+
+ fn variadic_binop<F: Fn(Box<Expr>, Box<Expr>) -> Expr>(
+ b: F,
+ args: &[SpecExpr],
+ pos: &Pos,
+ env: &ParsingEnv,
+ ) -> Expr {
+ assert!(
+ !args.is_empty(),
+ "Unexpected number of args for variadic binary operator {:?}",
+ pos
+ );
+ let mut expr_args: Vec<Expr> = args.iter().map(|a| spec_to_expr(a, env)).collect();
+ let last = expr_args.remove(expr_args.len() - 1);
+
+ // Reverse to keep the order of the original list
+ expr_args
+ .iter()
+ .rev()
+ .fold(last, |acc, a| b(Box::new(a.clone()), Box::new(acc)))
+ }
+
+ match s {
+ // Unary
+ SpecOp::Not => unop(Expr::Not, args, pos, env),
+ SpecOp::BVNot => unop(Expr::BVNot, args, pos, env),
+ SpecOp::BVNeg => unop(Expr::BVNeg, args, pos, env),
+ SpecOp::Rev => unop(Expr::Rev, args, pos, env),
+ SpecOp::Clz => unop(Expr::CLZ, args, pos, env),
+ SpecOp::Cls => unop(Expr::CLS, args, pos, env),
+ SpecOp::Popcnt => unop(Expr::BVPopcnt, args, pos, env),
+ SpecOp::BV2Int => unop(Expr::BVToInt, args, pos, env),
+
+ // Variadic binops
+ SpecOp::And => variadic_binop(Expr::And, args, pos, env),
+ SpecOp::Or => variadic_binop(Expr::Or, args, pos, env),
+
+ // Binary
+ SpecOp::Eq => binop(Expr::Eq, args, pos, env),
+ SpecOp::Lt => binop(Expr::Lt, args, pos, env),
+ SpecOp::Lte => binop(Expr::Lte, args, pos, env),
+ SpecOp::Gt => binop(|x, y| Expr::Lt(y, x), args, pos, env),
+ SpecOp::Gte => binop(|x, y| Expr::Lte(y, x), args, pos, env),
+ SpecOp::Imp => binop(Expr::Imp, args, pos, env),
+ SpecOp::BVAnd => binop(Expr::BVAnd, args, pos, env),
+ SpecOp::BVOr => binop(Expr::BVOr, args, pos, env),
+ SpecOp::BVXor => binop(Expr::BVXor, args, pos, env),
+ SpecOp::BVAdd => binop(Expr::BVAdd, args, pos, env),
+ SpecOp::BVSub => binop(Expr::BVSub, args, pos, env),
+ SpecOp::BVMul => binop(Expr::BVMul, args, pos, env),
+ SpecOp::BVUdiv => binop(Expr::BVUDiv, args, pos, env),
+ SpecOp::BVUrem => binop(Expr::BVUrem, args, pos, env),
+ SpecOp::BVSdiv => binop(Expr::BVSDiv, args, pos, env),
+ SpecOp::BVSrem => binop(Expr::BVSrem, args, pos, env),
+ SpecOp::BVShl =>
binop(Expr::BVShl, args, pos, env), + SpecOp::BVLshr => binop(Expr::BVShr, args, pos, env), + SpecOp::BVAshr => binop(Expr::BVAShr, args, pos, env), + SpecOp::BVSaddo => binop(Expr::BVSaddo, args, pos, env), + SpecOp::BVUle => binop(Expr::BVUlte, args, pos, env), + SpecOp::BVUlt => binop(Expr::BVUlt, args, pos, env), + SpecOp::BVUgt => binop(Expr::BVUgt, args, pos, env), + SpecOp::BVUge => binop(Expr::BVUgte, args, pos, env), + SpecOp::BVSlt => binop(Expr::BVSlt, args, pos, env), + SpecOp::BVSle => binop(Expr::BVSlte, args, pos, env), + SpecOp::BVSgt => binop(Expr::BVSgt, args, pos, env), + SpecOp::BVSge => binop(Expr::BVSgte, args, pos, env), + SpecOp::Rotr => binop(Expr::BVRotr, args, pos, env), + SpecOp::Rotl => binop(Expr::BVRotl, args, pos, env), + SpecOp::ZeroExt => match spec_to_usize(&args[0]) { + Some(i) => Expr::BVZeroExtTo( + Box::new(Width::Const(i)), + Box::new(spec_to_expr(&args[1], env)), + ), + None => binop(Expr::BVZeroExtToVarWidth, args, pos, env), + }, + SpecOp::SignExt => match spec_to_usize(&args[0]) { + Some(i) => Expr::BVSignExtTo( + Box::new(Width::Const(i)), + Box::new(spec_to_expr(&args[1], env)), + ), + None => binop(Expr::BVSignExtToVarWidth, args, pos, env), + }, + SpecOp::ConvTo => binop(Expr::BVConvTo, args, pos, env), + SpecOp::Concat => { + let cases: Vec = args.iter().map(|a| spec_to_expr(a, env)).collect(); + Expr::BVConcat(cases) + } + SpecOp::Extract => { + assert_eq!( + args.len(), + 3, + "Unexpected number of args for extract operator {:?}", + pos + ); + Expr::BVExtract( + spec_to_usize(&args[0]).unwrap(), + spec_to_usize(&args[1]).unwrap(), + Box::new(spec_to_expr(&args[2], env)), + ) + } + SpecOp::Int2BV => { + assert_eq!( + args.len(), + 2, + "Unexpected number of args for Int2BV operator {:?}", + pos + ); + Expr::BVIntToBv( + spec_to_usize(&args[0]).unwrap(), + Box::new(spec_to_expr(&args[1], env)), + ) + } + SpecOp::Subs => { + assert_eq!( + args.len(), + 3, + "Unexpected number of args for subs operator {:?}", + pos + ); + Expr::BVSubs( + Box::new(spec_to_expr(&args[0], env)), + Box::new(spec_to_expr(&args[1], env)), + Box::new(spec_to_expr(&args[2], env)), + ) + } + SpecOp::WidthOf => unop(Expr::WidthOf, args, pos, env), + SpecOp::If => { + assert_eq!( + args.len(), + 3, + "Unexpected number of args for extract operator {:?}", + pos + ); + Expr::Conditional( + Box::new(spec_to_expr(&args[0], env)), + Box::new(spec_to_expr(&args[1], env)), + Box::new(spec_to_expr(&args[2], env)), + ) + } + SpecOp::Switch => { + assert!( + args.len() > 1, + "Unexpected number of args for switch operator {:?}", + pos + ); + let swith_on = spec_to_expr(&args[0], env); + let arms: Vec<(Expr, Expr)> = args[1..] 
+ .iter() + .map(|a| match a { + SpecExpr::Pair { l, r } => { + let l_expr = spec_to_expr(l, env); + let r_expr = spec_to_expr(r, env); + (l_expr, r_expr) + } + _ => unreachable!(), + }) + .collect(); + Expr::Switch(Box::new(swith_on), arms) + } + SpecOp::LoadEffect => { + assert_eq!( + args.len(), + 3, + "Unexpected number of args for load operator {:?}", + pos + ); + Expr::LoadEffect( + Box::new(spec_to_expr(&args[0], env)), + Box::new(spec_to_expr(&args[1], env)), + Box::new(spec_to_expr(&args[2], env)), + ) + } + SpecOp::StoreEffect => { + assert_eq!( + args.len(), + 4, + "Unexpected number of args for store operator {:?}", + pos + ); + Expr::StoreEffect( + Box::new(spec_to_expr(&args[0], env)), + Box::new(spec_to_expr(&args[1], env)), + Box::new(spec_to_expr(&args[2], env)), + Box::new(spec_to_expr(&args[3], env)), + ) + } + } +} + +fn spec_to_expr(s: &SpecExpr, env: &ParsingEnv) -> Expr { + match s { + SpecExpr::ConstUnit { pos: _ } => Expr::Const(Const { + ty: Type::Unit, + value: 0, + width: 0, + }), + SpecExpr::ConstInt { val, pos: _ } => Expr::Const(Const { + ty: Type::Int, + value: *val, + width: 0, + }), + SpecExpr::ConstBitVec { val, width, pos: _ } => Expr::Const(Const { + ty: Type::BitVectorWithWidth(*width as usize), + value: *val, + width: (*width as usize), + }), + SpecExpr::ConstBool { val, pos: _ } => Expr::Const(Const { + ty: Type::Bool, + value: *val as i128, + width: 0, + }), + SpecExpr::Var { var, pos: _ } => Expr::Var(var.0.clone()), + SpecExpr::Op { op, args, pos } => spec_op_to_expr(op, args, pos, env), + SpecExpr::Pair { l, r } => { + unreachable!( + "pairs currently only parsed as part of Switch statements, {:?} {:?}", + l, r + ) + } + SpecExpr::Enum { name } => { + if let Some(e) = env.enums.get(&name.0) { + e.clone() + } else { + panic!("Can't find model for enum {}", name.0); + } + } + } +} + +fn model_type_to_type(model_type: &ModelType) -> veri_ir::Type { + match model_type { + ModelType::Int => veri_ir::Type::Int, + ModelType::Unit => veri_ir::Type::Unit, + ModelType::Bool => veri_ir::Type::Bool, + ModelType::BitVec(size) => veri_ir::Type::BitVector(*size), + } +} + +fn signature_to_term_type_signature(sig: &Signature) -> TermTypeSignature { + TermTypeSignature { + args: sig.args.iter().map(model_type_to_type).collect(), + ret: model_type_to_type(&sig.ret), + canonical_type: Some(model_type_to_type(&sig.canonical)), + } +} + +pub fn parse_annotations(defs: &[Def], termenv: &TermEnv, typeenv: &TypeEnv) -> AnnotationEnv { + let mut annotation_map = HashMap::new(); + let mut model_map = HashMap::new(); + + let mut env = ParsingEnv { + typeenv, + enums: HashMap::new(), + }; + + // Traverse models to process spec annotations for enums + for def in defs { + if let &ast::Def::Model(Model { ref name, ref val }) = def { + match val { + ast::ModelValue::TypeValue(model_type) => { + let type_id = typeenv.get_type_by_name(name).unwrap(); + let ir_type = match model_type { + ModelType::Int => annotation_ir::Type::Int, + ModelType::Unit => annotation_ir::Type::Unit, + ModelType::Bool => annotation_ir::Type::Bool, + ModelType::BitVec(None) => annotation_ir::Type::BitVector, + ModelType::BitVec(Some(size)) => { + annotation_ir::Type::BitVectorWithWidth(*size) + } + }; + model_map.insert(type_id, ir_type); + } + ast::ModelValue::EnumValues(vals) => { + for (v, e) in vals { + let ident = ast::Ident(format!("{}.{}", name.0, v.0), v.1); + let term_id = termenv.get_term_by_name(typeenv, &ident).unwrap(); + let val = spec_to_expr(e, &env); + let ty = match val { + 
Expr::Const(Const { ref ty, .. }) => ty,
+                            _ => unreachable!(),
+                        };
+                        env.enums.insert(ident.0.clone(), val.clone());
+                        let result = BoundVar {
+                            name: RESULT.to_string(),
+                            ty: Some(ty.clone()),
+                        };
+                        let sig = TermSignature {
+                            args: vec![],
+                            ret: result,
+                        };
+                        let annotation = TermAnnotation {
+                            sig,
+                            assumptions: vec![Box::new(Expr::Eq(
+                                Box::new(Expr::Var(RESULT.to_string())),
+                                Box::new(val),
+                            ))],
+                            assertions: vec![],
+                        };
+                        annotation_map.insert(term_id, annotation);
+                    }
+                }
+            }
+        }
+    }
+
+    // Traverse defs to process spec annotations
+    for def in defs {
+        if let ast::Def::Spec(spec) = def {
+            let termname = spec.term.0.clone();
+            let term_id = termenv
+                .get_term_by_name(typeenv, &spec.term)
+                .unwrap_or_else(|| panic!("Spec provided for unknown decl {termname}"));
+            assert!(
+                !annotation_map.contains_key(&term_id),
+                "duplicate spec for {}",
+                termname
+            );
+            let sig = TermSignature {
+                args: spec.args.iter().map(spec_to_annotation_bound_var).collect(),
+                ret: BoundVar {
+                    name: RESULT.to_string(),
+                    ty: None,
+                },
+            };
+
+            let mut assumptions = vec![];
+            let mut assertions = vec![];
+            for a in &spec.provides {
+                assumptions.push(Box::new(spec_to_expr(a, &env)));
+            }
+
+            for a in &spec.requires {
+                assertions.push(Box::new(spec_to_expr(a, &env)));
+            }
+
+            let annotation = TermAnnotation {
+                sig,
+                assumptions,
+                assertions,
+            };
+            annotation_map.insert(term_id, annotation);
+        }
+    }
+
+    // Collect term instantiations.
+    let mut forms_map = HashMap::new();
+    for def in defs {
+        if let ast::Def::Form(form) = def {
+            let term_type_signatures: Vec<_> = form
+                .signatures
+                .iter()
+                .map(signature_to_term_type_signature)
+                .collect();
+            forms_map.insert(form.name.0.clone(), term_type_signatures);
+        }
+    }
+
+    let mut instantiations_map = HashMap::new();
+    for def in defs {
+        if let ast::Def::Instantiation(inst) = def {
+            let term_id = termenv.get_term_by_name(typeenv, &inst.term).unwrap();
+            let sigs = match &inst.form {
+                Some(form) => forms_map[&form.0].clone(),
+                None => inst
+                    .signatures
+                    .iter()
+                    .map(signature_to_term_type_signature)
+                    .collect(),
+            };
+            instantiations_map.insert(term_id, sigs);
+        }
+    }
+
+    AnnotationEnv {
+        annotation_map,
+        instantiations_map,
+        model_map,
+    }
+}
diff --git a/cranelift/isle/veri/veri_engine/src/interp.rs b/cranelift/isle/veri/veri_engine/src/interp.rs
new file mode 100644
index 000000000000..35aaafcb4b9a
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/src/interp.rs
@@ -0,0 +1,50 @@
+/// Interpret and build an assumption context from the LHS and RHS of rules.
+use crate::type_inference::RuleSemantics;
+use veri_ir::{BoundVar, Expr};
+
+use std::collections::HashMap;
+use std::fmt::Debug;
+
+use cranelift_isle as isle;
+use isle::sema::{RuleId, VarId};
+
+/// An assumption consists of a single verification IR expression, which must
+/// have boolean type.
+#[derive(Clone, Debug)]
+pub struct Assumption {
+    assume: Expr,
+}
+
+impl Assumption {
+    /// Create a new assumption, checking type.
+    pub fn new(assume: Expr) -> Self {
+        // assert!(assume.ty().is_bool());
+        Self { assume }
+    }
+
+    /// Get the assumption as an expression.
+    pub fn assume(&self) -> &Expr {
+        &self.assume
+    }
+}
+
+pub struct Context<'ctx> {
+    pub quantified_vars: Vec<BoundVar>,
+    pub free_vars: Vec<BoundVar>,
+    pub assumptions: Vec<Assumption>,
+    pub var_map: HashMap<VarId, Expr>,
+
+    // For type checking
+    pub typesols: &'ctx HashMap<RuleId, RuleSemantics>,
+}
+
+impl<'ctx> Context<'ctx> {
+    pub fn new(typesols: &'ctx HashMap<RuleId, RuleSemantics>) -> Context<'ctx> {
+        Context {
+            quantified_vars: vec![],
+            free_vars: vec![],
+            assumptions: vec![],
+            var_map: HashMap::new(),
+            typesols,
+        }
+    }
+}
diff --git a/cranelift/isle/veri/veri_engine/src/lib.rs b/cranelift/isle/veri/veri_engine/src/lib.rs
new file mode 100644
index 000000000000..7314839fe1e6
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/src/lib.rs
@@ -0,0 +1,44 @@
+use easy_smt::SExpr;
+
+pub mod annotations;
+pub mod interp;
+pub mod solver;
+pub mod termname;
+pub mod type_inference;
+pub mod verify;
+
+pub const REG_WIDTH: usize = 64;
+
+// Use a distinct width as the maximum width any value should have within type inference
+pub const MAX_WIDTH: usize = 2 * REG_WIDTH;
+
+pub const FLAGS_WIDTH: usize = 4;
+
+pub const WIDTHS: [usize; 4] = [8, 16, 32, 64];
+
+// Closure arguments: SMT context, arguments to the term, lhs, rhs
+type CustomCondition = dyn Fn(&easy_smt::Context, Vec<SExpr>, SExpr, SExpr) -> SExpr;
+
+// Closure arguments: SMT context, arguments to the term
+type CustomAssumption = dyn Fn(&easy_smt::Context, Vec<SExpr>) -> SExpr;
+
+pub struct Config {
+    pub term: String,
+    pub names: Option<Vec<String>>,
+    pub distinct_check: bool,
+
+    pub custom_verification_condition: Option<Box<CustomCondition>>,
+    pub custom_assumptions: Option<Box<CustomAssumption>>,
+}
+
+impl Config {
+    pub fn with_term_and_name(term: &str, name: &str) -> Self {
+        Config {
+            term: term.to_string(),
+            distinct_check: true,
+            custom_verification_condition: None,
+            custom_assumptions: None,
+            names: Some(vec![name.to_string()]),
+        }
+    }
+}
diff --git a/cranelift/isle/veri/veri_engine/src/main.rs b/cranelift/isle/veri/veri_engine/src/main.rs
new file mode 100644
index 000000000000..59908986a241
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/src/main.rs
@@ -0,0 +1,123 @@
+//! Prototype verification tool for Cranelift's ISLE lowering rules.
+
+use clap::{ArgAction, Parser};
+use cranelift_codegen_meta::{generate_isle, isle::get_isle_compilations};
+use std::path::PathBuf;
+use std::{env, fs};
+use veri_engine_lib::verify::verify_rules;
+use veri_engine_lib::Config;
+
+#[derive(Parser)]
+#[clap(about, version, author)]
+struct Args {
+    /// Path to codegen crate directory.
+    #[clap(long, required = true)]
+    codegen: std::path::PathBuf,
+
+    /// Sets the input file
+    #[clap(short, long)]
+    input: Option<String>,
+
+    /// Which LHS root to verify
+    #[clap(short, long, default_value = "lower")]
+    term: String,
+
+    /// Which width types to verify
+    #[clap(long)]
+    widths: Option<Vec<String>>,
+
+    /// Which named rule to verify
+    #[clap(long)]
+    names: Option<Vec<String>>,
+
+    /// Don't use the prelude ISLE files
+    #[clap(short, long, action=ArgAction::SetTrue)]
+    noprelude: bool,
+
+    /// Include the aarch64 files
+    #[clap(short, long, action=ArgAction::SetTrue)]
+    aarch64: bool,
+
+    /// Include the x64 files
+    #[clap(short, long, action=ArgAction::SetTrue)]
+    x64: bool,
+
+    /// Don't check for distinct possible models
+    #[clap(long, action=ArgAction::SetTrue)]
+    nodistinct: bool,
+}
+
+impl Args {
+    fn isle_input_files(&self) -> anyhow::Result<Vec<PathBuf>> {
+        // Generate ISLE files.
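+        // The generated ISLE sources are written into an `output` directory
+        // under the current working directory, then combined with the
+        // backend's hand-written ISLE inputs and the instruction specs below.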
+        let cur_dir = env::current_dir().expect("Can't access current working directory");
+        let gen_dir = cur_dir.join("output");
+        if !std::path::Path::new(gen_dir.as_path()).exists() {
+            fs::create_dir_all(gen_dir.as_path()).unwrap();
+        }
+        generate_isle(gen_dir.as_path()).expect("Can't generate ISLE");
+
+        let inst_specs_isle = self.codegen.join("src").join("inst_specs.isle");
+
+        // Lookup ISLE compilations.
+        let compilations = get_isle_compilations(&self.codegen, gen_dir.as_path());
+
+        let name = match (self.aarch64, self.x64) {
+            (true, false) => "aarch64",
+            (false, true) => "x64",
+            _ => panic!("aarch64 or x64 backend must be provided"),
+        };
+
+        let mut inputs = compilations
+            .lookup(name)
+            .ok_or(anyhow::format_err!("unknown ISLE compilation: {}", name))?
+            .inputs();
+        inputs.push(inst_specs_isle);
+
+        // Return inputs from the matching compilation, if any.
+        Ok(inputs)
+    }
+}
+
+fn main() -> anyhow::Result<()> {
+    env_logger::init();
+    let args = Args::parse();
+
+    let valid_widths = ["I8", "I16", "I32", "I64"];
+    if let Some(widths) = &args.widths {
+        for w in widths {
+            let w_str = w.as_str();
+            if !valid_widths.contains(&w_str) {
+                panic!("Invalid width type: {}", w);
+            }
+        }
+    }
+
+    let inputs = if args.noprelude {
+        vec![PathBuf::from(
+            args.input.expect("Missing input file in noprelude mode"),
+        )]
+    } else {
+        args.isle_input_files()?
+    };
+
+    let names = if let Some(names) = args.names {
+        let mut names = names;
+        names.sort();
+        names.dedup();
+        Some(names)
+    } else {
+        None
+    };
+
+    let config = Config {
+        term: args.term,
+        names,
+        distinct_check: !args.nodistinct,
+        custom_verification_condition: None,
+        custom_assumptions: None,
+    };
+
+    verify_rules(inputs, &config, &args.widths)
+        .map_err(|e| anyhow::anyhow!("failed to compile ISLE: {:?}", e))
+}
diff --git a/cranelift/isle/veri/veri_engine/src/solver.rs b/cranelift/isle/veri/veri_engine/src/solver.rs
new file mode 100644
index 000000000000..ffd13ce4ba91
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/src/solver.rs
@@ -0,0 +1,2110 @@
+/// Convert our internal Verification IR to an external SMT AST and pass
+/// queries to that solver.
+///
+/// This uses the easy-smt crate to interact with any solver.
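+///
+/// Solving happens in two stages: a first pass (with `find_widths` set)
+/// uses the solver itself to resolve any bitvector widths that basic type
+/// inference left symbolic, then the full equivalence query is re-run with
+/// every width fixed to a concrete value.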
+///
+use cranelift_isle as isle;
+use isle::sema::{Pattern, Rule, TermEnv, TypeEnv};
+
+use crate::solver::encoded_ops::popcnt::popcnt;
+use crate::type_inference::RuleSemantics;
+use crate::Config;
+use easy_smt::{Response, SExpr};
+use std::cmp::Ordering;
+use std::collections::HashMap;
+use veri_ir::{
+    BinaryOp, ConcreteTest, Counterexample, Expr, TermSignature, Terminal, Type, TypeContext,
+    UnaryOp, VerificationResult,
+};
+
+mod encoded_ops;
+
+use encoded_ops::cls;
+use encoded_ops::clz;
+use encoded_ops::rev;
+use encoded_ops::subs;
+
+use crate::MAX_WIDTH;
+
+pub struct SolverCtx {
+    smt: easy_smt::Context,
+    pub find_widths: bool,
+    tyctx: TypeContext,
+    pub bitwidth: usize,
+    var_map: HashMap<String, SExpr>,
+    width_vars: HashMap<u32, String>,
+    width_assumptions: Vec<SExpr>,
+    pub additional_decls: Vec<(String, SExpr)>,
+    pub additional_assumptions: Vec<SExpr>,
+    pub additional_assertions: Vec<SExpr>,
+    fresh_bits_idx: usize,
+    lhs_load_args: Option<Vec<SExpr>>,
+    rhs_load_args: Option<Vec<SExpr>>,
+    lhs_store_args: Option<Vec<SExpr>>,
+    rhs_store_args: Option<Vec<SExpr>>,
+    load_return: Option<SExpr>,
+    lhs_flag: bool,
+}
+
+pub struct RuleCtx<'a> {
+    rule_sem: &'a RuleSemantics,
+    rule: &'a Rule,
+    termenv: &'a TermEnv,
+    typeenv: &'a TypeEnv,
+    config: &'a Config,
+}
+
+impl SolverCtx {
+    pub fn new_fresh_bits(&mut self, width: usize) -> SExpr {
+        let name = format!("fresh{}", self.fresh_bits_idx);
+        self.fresh_bits_idx += 1;
+        self.additional_decls
+            .push((name.clone(), self.smt.bit_vec_sort(self.smt.numeral(width))));
+        self.smt.atom(name)
+    }
+
+    fn new_fresh_int(&mut self) -> SExpr {
+        let name = format!("fresh{}", self.fresh_bits_idx);
+        self.fresh_bits_idx += 1;
+        self.additional_decls
+            .push((name.clone(), self.smt.int_sort()));
+        self.smt.atom(name)
+    }
+
+    fn new_fresh_bool(&mut self) -> SExpr {
+        let name = format!("fresh{}", self.fresh_bits_idx);
+        self.fresh_bits_idx += 1;
+        self.additional_decls
+            .push((name.clone(), self.smt.bool_sort()));
+        self.smt.atom(name)
+    }
+
+    fn declare(&mut self, name: String, typ: SExpr) -> SExpr {
+        let atom = self.smt.atom(&name);
+        self.additional_decls.push((name, typ));
+        atom
+    }
+
+    fn assume(&mut self, expr: SExpr) {
+        self.additional_assumptions.push(expr);
+    }
+
+    fn assert(&mut self, expr: SExpr) {
+        self.additional_assertions.push(expr);
+    }
+
+    /// Construct a constant bit-vector value of the given width. (This is used so pervasively that
+    /// perhaps we should submit it for inclusion in the easy_smt library...)
+    fn bv(&self, value: i128, width: usize) -> SExpr {
+        if value < 0 {
+            return self
+                .smt
+                .list(vec![self.smt.atom("bvneg"), self.bv(-value, width)]);
+        }
+        self.smt.list(vec![
+            self.smt.atoms().und,
+            self.smt.atom(format!("bv{}", value)),
+            self.smt.numeral(width),
+        ])
+    }
+
+    /// Convert an SMT integer to a bit vector of a given width.
+    fn int2bv(&self, width: usize, value: SExpr) -> SExpr {
+        self.smt.list(vec![
+            self.smt.list(vec![
+                self.smt.atoms().und,
+                self.smt.atom("int2bv"),
+                self.smt.numeral(width),
+            ]),
+            value,
+        ])
+    }
+
+    /// Convert an SMT bit vector to a nat.
+    fn bv2nat(&self, value: SExpr) -> SExpr {
+        self.smt.list(vec![self.smt.atom("bv2nat"), value])
+    }
+
+    /// Zero-extend an SMT bit vector to a wider bit vector by adding `padding` zeroes to the
+    /// front.
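+    /// For example, zero-extending the 8-bit value `#xff` by 4 bits of
+    /// padding yields the 12-bit value `#x0ff`, i.e., the SMT-LIB term
+    /// `((_ zero_extend 4) #xff)`.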
+    fn zero_extend(&self, padding: usize, value: SExpr) -> SExpr {
+        if padding == 0 {
+            return value;
+        }
+        self.smt.list(vec![
+            self.smt.list(vec![
+                self.smt.atoms().und,
+                self.smt.atom("zero_extend"),
+                self.smt.numeral(padding),
+            ]),
+            value,
+        ])
+    }
+
+    /// Sign-extend an SMT bit vector to a wider bit vector by adding `padding` copies of the
+    /// sign bit to the front.
+    fn sign_extend(&self, padding: usize, value: SExpr) -> SExpr {
+        self.smt.list(vec![
+            self.smt.list(vec![
+                self.smt.atoms().und,
+                self.smt.atom("sign_extend"),
+                self.smt.numeral(padding),
+            ]),
+            value,
+        ])
+    }
+
+    // Extend with concrete source and destination sizes. Includes extracting relevant bits.
+    fn extend_concrete(
+        &mut self,
+        dest_width: usize,
+        source: SExpr,
+        source_width: usize,
+        op: &str,
+    ) -> SExpr {
+        if dest_width < source_width {
+            log::warn!(
+                "Unexpected extend widths for {}: dest {} source {}",
+                self.smt.display(source),
+                dest_width,
+                source_width,
+            );
+            self.assert(self.smt.false_());
+            return self.bv(
+                0,
+                if self.find_widths {
+                    self.bitwidth
+                } else {
+                    dest_width
+                },
+            );
+        }
+
+        let delta = dest_width - source_width;
+        if !self.find_widths {
+            return self.smt.list(vec![
+                self.smt.list(vec![
+                    self.smt.atoms().und,
+                    self.smt.atom(op),
+                    self.smt.numeral(delta),
+                ]),
+                source,
+            ]);
+        }
+
+        // Extract the relevant bits of the source (which is modeled with a wider,
+        // register-width bitvector).
+        let extract = self
+            .smt
+            .extract(source_width.wrapping_sub(1).try_into().unwrap(), 0, source);
+
+        // Do the extend itself.
+        let extend = self.smt.list(vec![
+            self.smt.list(vec![
+                self.smt.atoms().und,
+                self.smt.atom(op),
+                self.smt.numeral(delta),
+            ]),
+            extract,
+        ]);
+
+        // Pad the extended result back to the full register bitwidth. Use the bits
+        // that were already in the source register. That is, given:
+        //   reg - source width    source width
+        //        |                |
+        // SOURCE: [ don't care bits | care bits ]
+        //
+        //   dest width
+        //        |
+        // OUT: [ same don't care bits | defined extend | care bits ]
+        let mut unconstrained_bits = 0;
+        if dest_width < self.bitwidth {
+            unconstrained_bits = self
+                .bitwidth
+                .checked_sub(delta)
+                .unwrap()
+                .checked_sub(source_width)
+                .unwrap();
+        }
+
+        // If we are extending to the full register width, no padding needed
+        if unconstrained_bits == 0 {
+            extend
+        } else {
+            let padding = self.smt.extract(
+                self.bitwidth.checked_sub(1).unwrap().try_into().unwrap(),
+                self.bitwidth
+                    .checked_sub(unconstrained_bits)
+                    .unwrap()
+                    .try_into()
+                    .unwrap(),
+                source,
+            );
+            self.smt.concat(padding, extend)
+        }
+    }
+
+    // SMT-LIB only supports extends (zero or sign) by concrete amounts, but we
+    // need symbolic ones. This method essentially does if-conversion over possible
+    // concrete forms, outputting nested ITE blocks. We consider both the starting
+    // width and the destination width to be potentially symbolic.
+    // For safety, we add an assertion that some arm of this ITE must match.
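+    //
+    // Schematically, the result (for a zero extend) looks like:
+    //   (ite (and (= shift 1) (= source_width 1)) ((_ zero_extend 1) x)
+    //     (ite (and (= shift 1) (= source_width 2)) ((_ zero_extend 1) x)
+    //       ... source))
+    // where `shift` is the symbolic difference between the two widths.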
+ fn extend_symbolic( + &mut self, + dest_width: SExpr, + source: SExpr, + source_width: SExpr, + op: &str, + ) -> SExpr { + if self.find_widths { + return source; + } + // Symbolic expression for amount to shift + let shift = self.smt.sub(dest_width, source_width); + + let mut some_match = vec![]; + let mut ite_str = source; + + // Special case: if we are asked to extend by 0, just return the source + let matching = self.smt.eq(self.smt.numeral(0), shift); + some_match.push(matching); + ite_str = self.smt.ite(matching, source, ite_str); + + // Possible amounts to extend by + for possible_delta in 1..self.bitwidth + 1 { + // Possible starting widths + for possible_source in 1..self.bitwidth + 1 { + // For now, ignore extends beyond the bitwidth. This is safe because + // we will fail the rule feasibility check if this is violated. + if possible_source + possible_delta > self.bitwidth { + continue; + } + + // Statement meaning the symbolic case matches this concrete case + let matching = self.smt.and( + self.smt.eq(self.smt.numeral(possible_delta), shift), + self.smt.eq(self.smt.numeral(possible_source), source_width), + ); + some_match.push(matching); + let extend = self.extend_concrete( + possible_source + possible_delta, + source, + possible_source, + op, + ); + ite_str = self.smt.ite(matching, extend, ite_str); + } + } + let some_shift_matches = self.smt.or_many(some_match); + self.width_assumptions.push(some_shift_matches); + ite_str + } + + fn encode_rotate(&self, op: &str, source: SExpr, amount: SExpr, width: usize) -> SExpr { + // SMT bitvector rotate_left requires that the rotate amount be + // statically specified. Instead, to use a dynamic amount, desugar + // to shifts and bit arithmetic. + let width_as_bv = self.bv(width.try_into().unwrap(), width); + let wrapped_amount = self.smt.bvurem(amount, width_as_bv); + let wrapped_delta = self.smt.bvsub(width_as_bv, wrapped_amount); + match op { + "rotate_left" => self.smt.bvor( + self.smt.bvshl(source, wrapped_amount), + self.smt.bvlshr(source, wrapped_delta), + ), + "rotate_right" => self.smt.bvor( + self.smt.bvshl(source, wrapped_delta), + self.smt.bvlshr(source, wrapped_amount), + ), + _ => unreachable!(), + } + } + + // SMT bitvector rotate requires that the rotate amount be + // statically specified. Instead, to use a dynamic amount, desugar + // to shifts and bit arithmetic. + fn rotate_symbolic( + &mut self, + source: SExpr, + source_width: usize, + amount: SExpr, + op: &str, + ) -> SExpr { + if self.find_widths { + return source; + } + let (s, a) = if self.find_widths { + // Extract the relevant bits of the source (which is modeled with a wider, + // register-width bitvector). + let extract_source = self.smt.extract( + source_width.checked_sub(1).unwrap().try_into().unwrap(), + 0, + source, + ); + + let extract_amount = self.smt.extract( + source_width.checked_sub(1).unwrap().try_into().unwrap(), + 0, + amount, + ); + (extract_source, extract_amount) + } else { + (source, amount) + }; + + // Do the rotate itself. + let rotate = self.encode_rotate(op, s, a, source_width); + + // Pad the extended result back to the full register bitwidth. Use the bits + // that were already in the source register. 
That is, given: + // reg - source width source width + // | | + // SOURCE: [ don't care bits | care bits ] + // + // dest width + // | + // OUT: [ same don't care bits | care bits ] + let unconstrained_bits = self.bitwidth.checked_sub(source_width).unwrap(); + + // If we are extending to the full register width, no padding needed + if unconstrained_bits == 0 || !self.find_widths { + rotate + } else { + let padding = self.smt.extract( + self.bitwidth.checked_sub(1).unwrap().try_into().unwrap(), + self.bitwidth + .checked_sub(unconstrained_bits) + .unwrap() + .try_into() + .unwrap(), + source, + ); + self.smt.concat(padding, rotate) + } + } + + // SMT-LIB only supports rotates by concrete amounts, but we + // need symbolic ones. This method essentially does if-conversion over possible + // concrete forms, outputting nested ITE blocks. We consider both the starting + // width and the rotate amount to be potentially symbolic. + // For safety, we add an assertion that some arm of this ITE must match. + fn rotate_symbolic_dyn_source_width( + &mut self, + source: SExpr, + source_width: SExpr, + amount: SExpr, + op: &str, + ) -> SExpr { + if self.find_widths { + return source; + } + let mut some_match = vec![]; + let mut ite_str = source; + + // Special case: if we are asked to rotate by 0, just return the source + let matching = self.smt.eq(self.bv(0, self.bitwidth), amount); + some_match.push(matching); + ite_str = self.smt.ite(matching, source, ite_str); + + // Possible starting widths + for possible_source in [8usize, 16, 32, 64] { + // Statement meaning the symbolic case matches this concrete case + let matching = self.smt.eq(self.smt.numeral(possible_source), source_width); + some_match.push(matching); + + // Extract the relevant bits of the source (which is modeled with a wider, + // register-width bitvector). + let extract_source = self.smt.extract( + possible_source.checked_sub(1).unwrap().try_into().unwrap(), + 0, + source, + ); + let extract_amount = self.smt.extract( + possible_source.checked_sub(1).unwrap().try_into().unwrap(), + 0, + amount, + ); + + // SMT bitvector rotate_left requires that the rotate amount be + // statically specified. Instead, to use a dynamic amount, desugar + // to shifts and bit arithmetic. + let rotate = self.encode_rotate(op, extract_source, extract_amount, possible_source); + + // Pad the extended result back to the full register bitwidth. Use the bits + // that were already in the source register. 
That is, given:
+            //   reg - source width    source width
+            //        |                |
+            // SOURCE: [ don't care bits | care bits ]
+            //
+            //   dest width
+            //        |
+            // OUT: [ same don't care bits | care bits ]
+            let unconstrained_bits = self.bitwidth.checked_sub(possible_source).unwrap();
+
+            // If we are extending to the full register width, no padding needed
+            let rotate = if unconstrained_bits == 0 {
+                rotate
+            } else {
+                let padding = self.smt.extract(
+                    self.bitwidth.checked_sub(1).unwrap().try_into().unwrap(),
+                    self.bitwidth
+                        .checked_sub(unconstrained_bits)
+                        .unwrap()
+                        .try_into()
+                        .unwrap(),
+                    source,
+                );
+                self.smt.concat(padding, rotate)
+            };
+
+            ite_str = self.smt.ite(matching, rotate, ite_str);
+        }
+        let some_shift_matches = self.smt.or_many(some_match);
+        self.width_assumptions.push(some_shift_matches);
+        ite_str
+    }
+
+    pub fn widen_to_register_width(
+        &mut self,
+        tyvar: u32,
+        narrow_width: usize,
+        narrow_decl: SExpr,
+        name: Option<String>,
+    ) -> SExpr {
+        let width = self.bitwidth.checked_sub(narrow_width).unwrap();
+        if width > 0 {
+            let mut narrow_name = format!("narrow__{}", tyvar);
+            let mut wide_name = format!("wide__{}", tyvar);
+            if let Some(s) = name {
+                narrow_name = format!("{}_{}", s, narrow_name);
+                wide_name = format!("{}_{}", s, wide_name);
+            }
+            self.assume(self.smt.eq(self.smt.atom(&narrow_name), narrow_decl));
+            self.additional_decls.push((
+                narrow_name.clone(),
+                self.smt.bit_vec_sort(self.smt.numeral(narrow_width)),
+            ));
+            self.additional_decls.push((
+                wide_name.clone(),
+                self.smt.bit_vec_sort(self.smt.numeral(self.bitwidth)),
+            ));
+            let padding = self.new_fresh_bits(width);
+            self.assume(self.smt.eq(
+                self.smt.atom(&wide_name),
+                self.smt.concat(padding, self.smt.atom(narrow_name)),
+            ));
+            self.smt.atom(wide_name)
+        } else if let Some(s) = name {
+            self.assume(self.smt.eq(self.smt.atom(&s), narrow_decl));
+            self.smt.atom(&s)
+        } else {
+            narrow_decl
+        }
+    }
+
+    pub fn get_expr_width_var(&self, e: &Expr) -> Option<SExpr> {
+        if let Some(tyvar) = self.tyctx.tyvars.get(e) {
+            self.width_vars.get(tyvar).map(|s| self.smt.atom(s))
+        } else {
+            None
+        }
+    }
+
+    pub fn vir_to_smt_ty(&self, ty: &Type) -> SExpr {
+        match ty {
+            Type::BitVector(w) => {
+                let width = w.unwrap_or(self.bitwidth);
+                self.smt.bit_vec_sort(self.smt.numeral(width))
+            }
+            Type::Int => self.smt.int_sort(),
+            Type::Bool | Type::Unit => self.smt.bool_sort(),
+        }
+    }
+
+    pub fn get_type(&self, x: &Expr) -> Option<&Type> {
+        self.tyctx.tymap.get(self.tyctx.tyvars.get(x)?)
+    }
+
+    pub fn get_expr_value(&self, e: &Expr) -> Option<i128> {
+        if let Some(tyvar) = self.tyctx.tyvars.get(e) {
+            self.tyctx.tyvals.get(tyvar).copied()
+        } else {
+            None
+        }
+    }
+
+    pub fn static_width(&self, x: &Expr) -> Option<usize> {
+        match self.get_type(x) {
+            Some(Type::BitVector(w)) => *w,
+            _ => None,
+        }
+    }
+
+    pub fn assume_same_width(&mut self, x: &Expr, y: &Expr) {
+        let xw = self.get_expr_width_var(x).unwrap();
+        let yw = self.get_expr_width_var(y).unwrap();
+        self.width_assumptions.push(self.smt.eq(xw, yw));
+    }
+
+    pub fn assume_same_width_from_sexpr(&mut self, x: SExpr, y: &Expr) {
+        let yw = self.get_expr_width_var(y).unwrap();
+        self.width_assumptions.push(self.smt.eq(x, yw));
+    }
+
+    pub fn assume_comparable_types(&mut self, x: &Expr, y: &Expr) {
+        match (self.get_type(x), self.get_type(y)) {
+            (None, _) | (_, None) => panic!("Missing type(s) {:?} {:?}", x, y),
+            (Some(Type::Bool), Some(Type::Bool))
+            | (Some(Type::Int), Some(Type::Int))
+            | (Some(Type::Unit), Some(Type::Unit)) => (),
+            (Some(Type::BitVector(Some(xw))), Some(Type::BitVector(Some(yw)))) => {
+                assert_eq!(xw, yw, "incompatible {:?} {:?}", x, y)
+            }
+            (_, _) => self.assume_same_width(x, y),
+        }
+    }
+
+    pub fn vir_expr_to_sexp(&mut self, e: Expr) -> SExpr {
+        let tyvar = self.tyctx.tyvars.get(&e);
+        let ty = self.get_type(&e);
+        let width = self.get_expr_width_var(&e);
+        let static_expr_width = self.static_width(&e);
+        match e {
+            Expr::Terminal(t) => match t {
+                Terminal::Literal(v, tyvar) => {
+                    let lit = self.smt.atom(v);
+                    if self.find_widths && matches!(ty.unwrap(), Type::BitVector(_)) {
+                        self.widen_to_register_width(tyvar, static_expr_width.unwrap(), lit, None)
+                    } else {
+                        lit
+                    }
+                }
+                Terminal::Var(v) => match self.var_map.get(&v) {
+                    Some(o) => *o,
+                    None => self.smt.atom(v),
+                },
+                Terminal::Const(i, _) => match ty.unwrap() {
+                    Type::BitVector(w) => {
+                        let width = w.unwrap_or(self.bitwidth);
+                        let narrow_decl = self.bv(i, width);
+                        if self.find_widths {
+                            self.zero_extend(self.bitwidth - width, narrow_decl)
+                        } else {
+                            narrow_decl
+                        }
+                    }
+                    Type::Int => self.smt.numeral(i),
+                    Type::Bool => {
+                        if i == 0 {
+                            self.smt.false_()
+                        } else {
+                            self.smt.true_()
+                        }
+                    }
+                    Type::Unit => self.smt.true_(),
+                },
+                Terminal::True => self.smt.true_(),
+                Terminal::False => self.smt.false_(),
+                Terminal::Wildcard(_) => match ty.unwrap() {
+                    Type::BitVector(Some(w)) if !self.find_widths => self.new_fresh_bits(*w),
+                    Type::BitVector(_) => self.new_fresh_bits(self.bitwidth),
+                    Type::Int => self.new_fresh_int(),
+                    Type::Bool => self.new_fresh_bool(),
+                    Type::Unit => self.smt.true_(),
+                },
+            },
+            Expr::Unary(op, arg) => {
+                let op = match op {
+                    UnaryOp::Not => "not",
+                    UnaryOp::BVNeg => {
+                        if self.find_widths {
+                            self.assume_same_width_from_sexpr(width.unwrap(), &arg);
+                        }
+                        "bvneg"
+                    }
+                    UnaryOp::BVNot => {
+                        if self.find_widths {
+                            self.assume_same_width_from_sexpr(width.unwrap(), &arg);
+                        }
+                        "bvnot"
+                    }
+                };
+                let subexp = self.vir_expr_to_sexp(*arg);
+                self.smt.list(vec![self.smt.atom(op), subexp])
+            }
+            Expr::Binary(op, x, y) => {
+                if self.find_widths {
+                    match op {
+                        BinaryOp::BVMul
+                        | BinaryOp::BVUDiv
+                        | BinaryOp::BVSDiv
+                        | BinaryOp::BVUrem
+                        | BinaryOp::BVSrem
+                        | BinaryOp::BVAdd
+                        | BinaryOp::BVSub
+                        | BinaryOp::BVAnd
+                        | BinaryOp::BVOr
+                        | BinaryOp::BVShl
+                        | BinaryOp::BVShr
+                        | BinaryOp::BVAShr
+                        | BinaryOp::BVRotl
+                        | BinaryOp::BVRotr => self.assume_same_width_from_sexpr(width.unwrap(), &x),
+                        BinaryOp::Eq => {
+                            if let Some(Type::BitVector(_)) = self.get_type(&x) {
+                                self.assume_comparable_types(&x, &y)
+ } + } + _ => (), + }; + self.assume_comparable_types(&x, &y); + } + match op { + BinaryOp::BVRotl => { + let source_width = self.static_width(&x); + match source_width { + Some(w) => { + let xs = self.vir_expr_to_sexp(*x); + let ys = self.vir_expr_to_sexp(*y); + return self.rotate_symbolic(xs, w, ys, "rotate_left"); + } + None => { + let arg_width = self.get_expr_width_var(&x).unwrap(); + let xs = self.vir_expr_to_sexp(*x); + let ys = self.vir_expr_to_sexp(*y); + return self.rotate_symbolic_dyn_source_width( + xs, + arg_width, + ys, + "rotate_left", + ); + } + } + } + BinaryOp::BVRotr => { + let source_width = self.static_width(&x); + match source_width { + Some(w) => { + let xs = self.vir_expr_to_sexp(*x); + let ys = self.vir_expr_to_sexp(*y); + return self.rotate_symbolic(xs, w, ys, "rotate_right"); + } + None => { + let arg_width = self.get_expr_width_var(&x).unwrap(); + let xs = self.vir_expr_to_sexp(*x); + let ys = self.vir_expr_to_sexp(*y); + return self.rotate_symbolic_dyn_source_width( + xs, + arg_width, + ys, + "rotate_right", + ); + } + } + } + // To shift right, we need to make sure the bits to the right get zeroed. Shift left first. + BinaryOp::BVShr => { + let arg_width = if self.find_widths { + self.get_expr_width_var(&x).unwrap() + } else { + self.smt.numeral(self.static_width(&x).unwrap()) + }; + let xs = self.vir_expr_to_sexp(*x); + + // Strategy: shift left by (bitwidth - arg width) to zero bits to the right + // of the bits in the argument size. Then shift right by (amt + (bitwidth - arg width)) + + // Width math + if self.find_widths { + // The shift arg needs to be extracted to the right width, default to 8 if unknown + let y_static_width = self.static_width(&y).unwrap_or(8); + let y_rec = self.vir_expr_to_sexp(*y); + if self.find_widths { + return xs; + } + let extract = self.smt.extract( + y_static_width.checked_sub(1).unwrap().try_into().unwrap(), + 0, + y_rec, + ); + let ys = self.zero_extend(self.bitwidth - y_static_width, extract); + let arg_width_as_bv = self.int2bv(self.bitwidth, arg_width); + let bitwidth_as_bv = + self.bv(self.bitwidth.try_into().unwrap(), self.bitwidth); + let extra_shift = self.smt.bvsub(bitwidth_as_bv, arg_width_as_bv); + let shl_to_zero = self.smt.bvshl(xs, extra_shift); + + let amt_plus_extra = self.smt.bvadd(ys, extra_shift); + return self.smt.bvlshr(shl_to_zero, amt_plus_extra); + } else { + let ys = self.vir_expr_to_sexp(*y); + return self.smt.bvlshr(xs, ys); + } + } + BinaryOp::BVAShr => { + let arg_width = if self.find_widths { + self.get_expr_width_var(&x).unwrap() + } else { + self.smt.numeral(self.static_width(&x).unwrap()) + }; + let xs = self.vir_expr_to_sexp(*x); + + // Strategy: shift left by (bitwidth - arg width) to eliminate bits to the left + // of the bits in the argument size. 
Then shift right by (amt + (bitwidth - arg width)) + + // Width math + if self.find_widths { + // The shift arg needs to be extracted to the right width, default to 8 if unknown + let y_static_width = self.static_width(&y).unwrap_or(8); + let ys = self.vir_expr_to_sexp(*y); + let extract = self.smt.extract( + y_static_width.checked_sub(1).unwrap().try_into().unwrap(), + 0, + ys, + ); + let ysext = self.zero_extend(self.bitwidth - y_static_width, extract); + + let arg_width_as_bv = self.int2bv(self.bitwidth, arg_width); + let bitwidth_as_bv = + self.bv(self.bitwidth.try_into().unwrap(), self.bitwidth); + let extra_shift = self.smt.bvsub(bitwidth_as_bv, arg_width_as_bv); + let shl_to_zero = self.smt.bvshl(xs, extra_shift); + + let amt_plus_extra = self.smt.bvadd(ysext, extra_shift); + return self.smt.bvashr(shl_to_zero, amt_plus_extra); + } else { + let ys = self.vir_expr_to_sexp(*y); + return self.smt.bvashr(xs, ys); + } + } + _ => (), + }; + let op_str = match op { + BinaryOp::And => "and", + BinaryOp::Or => "or", + BinaryOp::Imp => "=>", + BinaryOp::Eq => "=", + BinaryOp::Lte => match (self.get_type(&x), self.get_type(&y)) { + (Some(Type::Int), Some(Type::Int)) => "<=", + (Some(Type::BitVector(_)), Some(Type::BitVector(_))) => "bvule", + _ => unreachable!(), + }, + BinaryOp::Lt => match (self.get_type(&x), self.get_type(&y)) { + (Some(Type::Int), Some(Type::Int)) => "<", + (Some(Type::BitVector(_)), Some(Type::BitVector(_))) => "bvult", + _ => unreachable!(), + }, + BinaryOp::BVSgt => "bvsgt", + BinaryOp::BVSgte => "bvsge", + BinaryOp::BVSlt => "bvslt", + BinaryOp::BVSlte => "bvsle", + BinaryOp::BVUgt => "bvugt", + BinaryOp::BVUgte => "bvuge", + BinaryOp::BVUlt => "bvult", + BinaryOp::BVUlte => "bvule", + BinaryOp::BVMul => "bvmul", + BinaryOp::BVUDiv => "bvudiv", + BinaryOp::BVSDiv => "bvsdiv", + BinaryOp::BVAdd => "bvadd", + BinaryOp::BVSub => "bvsub", + BinaryOp::BVUrem => "bvurem", + BinaryOp::BVSrem => "bvsrem", + BinaryOp::BVAnd => "bvand", + BinaryOp::BVOr => "bvor", + BinaryOp::BVXor => "bvxor", + BinaryOp::BVShl => "bvshl", + BinaryOp::BVSaddo => "bvsaddo", + _ => unreachable!("{:?}", op), + }; + // If we have some static width that isn't the bitwidth, extract based on it + // before performing the operation for the dynamic case. 
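+                // For example, with a 128-bit register model, an 8-bit `bvadd`
+                // becomes:
+                //   ((_ zero_extend 120)
+                //     (bvadd ((_ extract 7 0) x) ((_ extract 7 0) y)))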
+ match static_expr_width { + Some(w) if w < self.bitwidth && self.find_widths => { + let h: i32 = (w - 1).try_into().unwrap(); + let x_sexp = self.vir_expr_to_sexp(*x); + let y_sexp = self.vir_expr_to_sexp(*y); + self.zero_extend( + self.bitwidth.checked_sub(w).unwrap(), + self.smt.list(vec![ + self.smt.atom(op_str), + self.smt.extract(h, 0, x_sexp), + self.smt.extract(h, 0, y_sexp), + ]), + ) + } + _ => { + let x_sexp = self.vir_expr_to_sexp(*x); + let y_sexp = self.vir_expr_to_sexp(*y); + self.smt.list(vec![self.smt.atom(op_str), x_sexp, y_sexp]) + } + } + } + Expr::BVIntToBV(w, x) => { + let x_sexp = self.vir_expr_to_sexp(*x); + if self.find_widths { + let padded_width = self.bitwidth - w; + self.zero_extend(padded_width, self.int2bv(w, x_sexp)) + } else { + self.int2bv(w, x_sexp) + } + } + Expr::BVToInt(x) => { + let x_sexp = self.vir_expr_to_sexp(*x); + self.bv2nat(x_sexp) + } + Expr::BVZeroExtTo(i, x) => { + let arg_width = if self.find_widths { + let expr_width = width.unwrap(); + self.width_assumptions + .push(self.smt.eq(expr_width, self.smt.numeral(i))); + self.get_expr_width_var(&x).unwrap() + } else { + self.smt.numeral(self.static_width(&x).unwrap()) + }; + let static_width = self.static_width(&x); + let xs = self.vir_expr_to_sexp(*x); + if let Some(size) = static_width { + self.extend_concrete(i, xs, size, "zero_extend") + } else { + self.extend_symbolic(self.smt.numeral(i), xs, arg_width, "zero_extend") + } + } + Expr::BVZeroExtToVarWidth(i, x) => { + let static_arg_width = self.static_width(&x); + let arg_width = self.get_expr_width_var(&x); + let is = self.vir_expr_to_sexp(*i); + let xs = self.vir_expr_to_sexp(*x); + if self.find_widths { + let expr_width = width.unwrap(); + self.width_assumptions.push(self.smt.eq(expr_width, is)); + } + if let (Some(arg_size), Some(e_size)) = (static_arg_width, static_expr_width) { + self.extend_concrete(e_size, xs, arg_size, "zero_extend") + } else { + self.extend_symbolic(is, xs, arg_width.unwrap(), "zero_extend") + } + } + Expr::BVSignExtTo(i, x) => { + let arg_width = if self.find_widths { + let expr_width = width.unwrap(); + self.width_assumptions + .push(self.smt.eq(expr_width, self.smt.numeral(i))); + self.get_expr_width_var(&x).unwrap() + } else { + self.smt.numeral(self.static_width(&x).unwrap()) + }; + let static_width = self.static_width(&x); + let xs = self.vir_expr_to_sexp(*x); + if let Some(size) = static_width { + self.extend_concrete(i, xs, size, "sign_extend") + } else { + self.extend_symbolic(self.smt.numeral(i), xs, arg_width, "sign_extend") + } + } + Expr::BVSignExtToVarWidth(i, x) => { + let static_arg_width = self.static_width(&x); + let arg_width = self.get_expr_width_var(&x); + let is = self.vir_expr_to_sexp(*i); + let xs = self.vir_expr_to_sexp(*x); + if self.find_widths { + let expr_width = width.unwrap(); + self.width_assumptions.push(self.smt.eq(expr_width, is)); + } + if let (Some(arg_size), Some(e_size)) = (static_arg_width, static_expr_width) { + self.extend_concrete(e_size, xs, arg_size, "sign_extend") + } else { + self.extend_symbolic(is, xs, arg_width.unwrap(), "sign_extend") + } + } + Expr::BVConvTo(x, y) => { + if self.find_widths { + let expr_width = width.unwrap(); + let dyn_width = self.vir_expr_to_sexp(*x); + let eq = self.smt.eq(expr_width, dyn_width); + self.width_assumptions.push(eq); + self.vir_expr_to_sexp(*y) + } else { + let arg_width = self.static_width(&y).unwrap(); + match ty { + Some(Type::BitVector(Some(w))) => match arg_width.cmp(w) { + Ordering::Less => { + let padding = + 
self.new_fresh_bits(w.checked_sub(arg_width).unwrap());
+                            let ys = self.vir_expr_to_sexp(*y);
+                            self.smt.concat(padding, ys)
+                        }
+                        Ordering::Greater => {
+                            let new = (w - 1).try_into().unwrap();
+                            let ys = self.vir_expr_to_sexp(*y);
+                            self.smt.extract(new, 0, ys)
+                        }
+                        Ordering::Equal => self.vir_expr_to_sexp(*y),
+                    },
+                    _ => unreachable!("{:?}, {:?}", x, y),
+                }
+            }
+        }
+        Expr::WidthOf(x) => {
+            if self.find_widths {
+                self.get_expr_width_var(&x).unwrap()
+            } else {
+                self.smt.numeral(self.static_width(&x).unwrap())
+            }
+        }
+        Expr::BVExtract(i, j, x) => {
+            assert!(i >= j);
+            if self.get_type(&x).is_some() {
+                let xs = self.vir_expr_to_sexp(*x);
+                // No-op if we are extracting exactly the full bitwidth
+                if j == 0 && i == self.bitwidth - 1 && self.find_widths {
+                    return xs;
+                }
+                let extract =
+                    self.smt
+                        .extract(i.try_into().unwrap(), j.try_into().unwrap(), xs);
+                let new_width = i - j + 1;
+                if new_width < self.bitwidth && self.find_widths {
+                    let padding =
+                        self.new_fresh_bits(self.bitwidth.checked_sub(new_width).unwrap());
+                    self.smt.concat(padding, extract)
+                } else {
+                    extract
+                }
+            } else {
+                unreachable!("Must perform extraction on bv with known width")
+            }
+        }
+        Expr::Conditional(c, t, e) => {
+            if self.find_widths && matches!(ty, Some(Type::BitVector(_))) {
+                self.assume_same_width_from_sexpr(width.unwrap(), &t);
+                self.assume_same_width_from_sexpr(width.unwrap(), &e);
+            }
+            let cs = self.vir_expr_to_sexp(*c);
+            let ts = self.vir_expr_to_sexp(*t);
+            let es = self.vir_expr_to_sexp(*e);
+            self.smt.ite(cs, ts, es)
+        }
+        Expr::Switch(c, cases) => {
+            if self.find_widths {
+                if matches!(ty, Some(Type::BitVector(_))) {
+                    for (_, b) in &cases {
+                        self.assume_same_width_from_sexpr(width.unwrap(), b);
+                    }
+                }
+                let cty = self.get_type(&c);
+                if matches!(cty, Some(Type::BitVector(_))) {
+                    let cwidth = self.get_expr_width_var(&c);
+                    for (m, _) in &cases {
+                        self.assume_same_width_from_sexpr(cwidth.unwrap(), m);
+                    }
+                }
+            }
+            let cs = self.vir_expr_to_sexp(*c);
+            let mut case_sexprs: Vec<(SExpr, SExpr)> = cases
+                .iter()
+                .map(|(m, b)| {
+                    (
+                        self.vir_expr_to_sexp(m.clone()),
+                        self.vir_expr_to_sexp(b.clone()),
+                    )
+                })
+                .collect();
+
+            // Assert that some case must match
+            let some_case_matches: Vec<SExpr> = case_sexprs
+                .iter()
+                .map(|(m, _)| self.smt.eq(cs, *m))
+                .collect();
+            self.assert(self.smt.or_many(some_case_matches.clone()));
+
+            let (_, last_body) = case_sexprs.remove(case_sexprs.len() - 1);
+
+            // Reverse to keep the order of the switch
+            case_sexprs.iter().rev().fold(last_body, |acc, (m, b)| {
+                self.smt.ite(self.smt.eq(cs, *m), *b, acc)
+            })
+        }
+        Expr::CLZ(e) => {
+            let tyvar = *tyvar.unwrap();
+            if self.find_widths {
+                self.assume_same_width_from_sexpr(width.unwrap(), &e);
+            }
+            let es = self.vir_expr_to_sexp(*e);
+            match static_expr_width {
+                Some(1) => clz::clz1(self, es, tyvar),
+                Some(8) => clz::clz8(self, es, tyvar),
+                Some(16) => clz::clz16(self, es, tyvar),
+                Some(32) => clz::clz32(self, es, tyvar),
+                Some(64) => clz::clz64(self, es, tyvar),
+                Some(w) => unreachable!("Unexpected CLZ width {}", w),
+                None => unreachable!("Need static CLZ width"),
+            }
+        }
+        Expr::CLS(e) => {
+            let tyvar = *tyvar.unwrap();
+            if self.find_widths {
+                self.assume_same_width_from_sexpr(width.unwrap(), &e);
+            }
+            let es = self.vir_expr_to_sexp(*e);
+            match static_expr_width {
+                Some(1) => cls::cls1(self, tyvar),
+                Some(8) => cls::cls8(self, es, tyvar),
+                Some(16) => cls::cls16(self, es, tyvar),
+                Some(32) => cls::cls32(self, es, tyvar),
+                Some(64) => cls::cls64(self, es, tyvar),
+                Some(w) => unreachable!("Unexpected CLS width {}", w),
+                None => unreachable!("Need static CLS width"),
+            }
+        }
+        Expr::Rev(e) => {
+            let tyvar = *tyvar.unwrap();
+            if self.find_widths {
+                self.assume_same_width_from_sexpr(width.unwrap(), &e);
+            }
+            let es = self.vir_expr_to_sexp(*e);
+            match static_expr_width {
+                Some(1) => rev::rev1(self, es, tyvar),
+                Some(8) => rev::rev8(self, es, tyvar),
+                Some(16) => rev::rev16(self, es, tyvar),
+                Some(32) => rev::rev32(self, es, tyvar),
+                Some(64) => rev::rev64(self, es, tyvar),
+                Some(w) => unreachable!("Unexpected rev width {}", w),
+                None => unreachable!("Need static rev width"),
+            }
+        }
+        Expr::BVSubs(ty, x, y) => {
+            let tyvar = *tyvar.unwrap();
+            if self.find_widths {
+                self.assume_comparable_types(&x, &y);
+            }
+            let ety = self.vir_expr_to_sexp(*ty);
+            let ex = self.vir_expr_to_sexp(*x);
+            let ey = self.vir_expr_to_sexp(*y);
+
+            let encoded_32 = subs::subs(self, 32, ex, ey, tyvar);
+            let encoded_64 = subs::subs(self, 64, ex, ey, tyvar);
+
+            self.smt.ite(
+                self.smt.eq(ety, self.smt.numeral(32)),
+                encoded_32,
+                encoded_64,
+            )
+        }
+        Expr::BVPopcnt(x) => {
+            let tyvar = *tyvar.unwrap();
+            if self.find_widths {
+                self.assume_same_width_from_sexpr(width.unwrap(), &x);
+            }
+            let ex = self.vir_expr_to_sexp(*x);
+
+            match static_expr_width {
+                Some(8) => {
+                    let p = popcnt(self, 8, ex, tyvar);
+                    if self.find_widths {
+                        self.zero_extend(self.bitwidth - 8, p)
+                    } else {
+                        p
+                    }
+                }
+                Some(16) => {
+                    let p = popcnt(self, 16, ex, tyvar);
+                    if self.find_widths {
+                        self.zero_extend(self.bitwidth - 8, p)
+                    } else {
+                        self.zero_extend(8, p)
+                    }
+                }
+                Some(32) => {
+                    let p = popcnt(self, 32, ex, tyvar);
+                    if self.find_widths {
+                        self.zero_extend(self.bitwidth - 8, p)
+                    } else {
+                        self.zero_extend(24, p)
+                    }
+                }
+                Some(64) => {
+                    let p = popcnt(self, 64, ex, tyvar);
+                    if self.find_widths {
+                        self.zero_extend(self.bitwidth - 8, p)
+                    } else {
+                        self.zero_extend(56, p)
+                    }
+                }
+                Some(w) => unreachable!("Unexpected popcnt width {}", w),
+                None => unreachable!("Need static popcnt width"),
+            }
+        }
+        Expr::BVConcat(xs) => {
+            if self.find_widths {
+                let widths: Vec<SExpr> = xs
+                    .iter()
+                    .map(|x| self.get_expr_width_var(x).unwrap())
+                    .collect();
+                let sum = self.smt.plus_many(widths);
+                self.width_assumptions
+                    .push(self.smt.eq(width.unwrap(), sum));
+            }
+            let mut sexprs: Vec<SExpr> = xs
+                .iter()
+                .map(|x| self.vir_expr_to_sexp(x.clone()))
+                .collect();
+            let last = sexprs.remove(sexprs.len() - 1);
+
+            // Width hack for now
+            if self.find_widths {
+                return sexprs[0];
+            }
+            // Reverse to keep the order of the cases
+            sexprs
+                .iter()
+                .rev()
+                .fold(last, |acc, x| self.smt.concat(*x, acc))
+        }
+        Expr::LoadEffect(x, y, z) => {
+            let ex = self.vir_expr_to_sexp(*x);
+            let ey = self.vir_expr_to_sexp(*y);
+            let ez = self.vir_expr_to_sexp(*z);
+
+            if self.find_widths {
+                self.width_assumptions.push(self.smt.eq(width.unwrap(), ey));
+            }
+
+            if self.lhs_flag {
+                if self.lhs_load_args.is_some() {
+                    panic!("Only one load on the LHS currently supported, found multiple.")
+                }
+                self.lhs_load_args = Some(vec![ex, ey, ez]);
+                let load_ret = if self.find_widths {
+                    self.new_fresh_bits(self.bitwidth)
+                } else {
+                    self.new_fresh_bits(static_expr_width.unwrap())
+                };
+                self.load_return = Some(load_ret);
+                load_ret
+            } else {
+                if self.rhs_load_args.is_some() {
+                    panic!("Only one load on the RHS currently supported, found multiple.")
+                }
+                self.rhs_load_args = Some(vec![ex, ey, ez]);
+                self.load_return.unwrap()
+            }
+        }
+        Expr::StoreEffect(w, x, y, z) => {
+            let ew = self.vir_expr_to_sexp(*w);
+            let ex = self.vir_expr_to_sexp(*x);
+            let ez = self.vir_expr_to_sexp(*z);
+
+            if self.find_widths {
+                let y_width = self.get_expr_width_var(&y).unwrap();
+                self.width_assumptions.push(self.smt.eq(y_width, ex));
+            }
+            let ey = self.vir_expr_to_sexp(*y);
+
+            if self.lhs_flag {
+                self.lhs_store_args = Some(vec![ew, ex, ey, ez]);
+            } else {
+                self.rhs_store_args = Some(vec![ew, ex, ey, ez]);
+            }
+            self.smt.atom("true")
+        }
+        }
+    }
+
+    // Checks whether the assumption list is always false
+    fn check_assumptions_feasibility(
+        &mut self,
+        assumptions: &[SExpr],
+        term_input_bs: &[String],
+        config: &Config,
+    ) -> VerificationResult {
+        log::debug!("Checking assumption feasibility");
+        self.smt.push().unwrap();
+        for (i, a) in assumptions.iter().enumerate() {
+            self.smt
+                .assert(self.smt.named(format!("assum{i}"), *a))
+                .unwrap();
+        }
+
+        let res = match self.smt.check() {
+            Ok(Response::Sat) => {
+                if !config.distinct_check || term_input_bs.is_empty() {
+                    log::debug!("Assertion list is feasible for at least one input!");
+                    self.smt.pop().unwrap();
+                    return VerificationResult::Success;
+                }
+                // Check that there is a model with distinct bitvector inputs
+                let mut not_all_same = vec![];
+                let atoms: Vec<SExpr> = term_input_bs.iter().map(|n| self.smt.atom(n)).collect();
+                let solution = self.smt.get_value(atoms).unwrap();
+                assert_eq!(term_input_bs.len(), solution.len());
+                for (variable, value) in solution {
+                    not_all_same.push(self.smt.not(self.smt.eq(variable, value)));
+                }
+                match not_all_same.len().cmp(&1) {
+                    Ordering::Equal => self.smt.assert(not_all_same[0]).unwrap(),
+                    Ordering::Greater => self.smt.assert(self.smt.and_many(not_all_same)).unwrap(),
+                    Ordering::Less => unreachable!("must have some BV inputs"),
+                }
+                match self.smt.check() {
+                    Ok(Response::Sat) => {
+                        log::debug!("Assertion list is feasible for two distinct inputs");
+                        VerificationResult::Success
+                    }
+                    Ok(Response::Unsat) => {
+                        log::debug!("Assertion list is only feasible for one input with distinct BV values!");
+                        VerificationResult::NoDistinctModels
+                    }
+                    Ok(Response::Unknown) => {
+                        panic!("Solver said 'unk'");
+                    }
+                    Err(err) => {
+                        unreachable!("Error! {:?}", err);
+                    }
+                }
+            }
+            Ok(Response::Unsat) => {
+                log::debug!("Assertion list is infeasible!");
+                let unsat = self.smt.get_unsat_core().unwrap();
+                log::debug!("Unsat core:\n{}", self.smt.display(unsat));
+                VerificationResult::InapplicableRule
+            }
+            Ok(Response::Unknown) => {
+                panic!("Solver said 'unk'");
+            }
+            Err(err) => {
+                unreachable!("Error! {:?}", err);
+            }
+        };
+        self.smt.pop().unwrap();
+        res
+    }
+
+    fn display_hex_to_bin(&self, value: SExpr) -> String {
+        let sexpr_hex_prefix = "#x";
+        let val_str = self.smt.display(value).to_string();
+        if val_str.starts_with(sexpr_hex_prefix) {
+            let without_prefix = val_str.trim_start_matches("#x");
+            let as_unsigned = u128::from_str_radix(without_prefix, 16).unwrap();
+            // SMT-LIB: bvhexX where X is a hexadecimal numeral of length m defines the bitvector
+            // constant with value X and size 4*m.
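+            // For example, the 2-digit value #x2a denotes an 8-bit constant, so
+            // it is displayed alongside its 8-bit binary form: #x2a|0b00101010.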
+            match without_prefix.len() {
+                2 => format!("{}|{:#010b}", self.smt.display(value), as_unsigned),
+                3 => format!("{}|{:#014b}", self.smt.display(value), as_unsigned),
+                4 => format!("{}|{:#018b}", self.smt.display(value), as_unsigned),
+                8 => format!("{}|{:#034b}", self.smt.display(value), as_unsigned),
+                16 => format!("{}|{:#068b}", self.smt.display(value), as_unsigned),
+                17 => format!("{}|{:#070b}", self.smt.display(value), as_unsigned),
+                32 => format!("{}|{:#0130b}", self.smt.display(value), as_unsigned),
+                _ => {
+                    format!("{}|{:#b}", self.smt.display(value), as_unsigned)
+                }
+            }
+        } else {
+            val_str
+        }
+    }
+
+    fn display_value(&self, variable: SExpr, value: SExpr) -> (String, String) {
+        let var_str = self.smt.display(variable).to_string();
+        (var_str, self.display_hex_to_bin(value))
+    }
+
+    fn display_isle_pattern(
+        &mut self,
+        termenv: &TermEnv,
+        typeenv: &TypeEnv,
+        vars: &Vec<(String, String)>,
+        rule: &Rule,
+        pat: &Pattern,
+    ) -> SExpr {
+        let mut to_sexpr = |p| self.display_isle_pattern(termenv, typeenv, vars, rule, p);
+
+        match pat {
+            isle::sema::Pattern::Term(_, term_id, args) => {
+                let sym = termenv.terms[term_id.index()].name;
+                let name = typeenv.syms[sym.index()].clone();
+
+                let mut sexprs = args.iter().map(&mut to_sexpr).collect::<Vec<SExpr>>();
+
+                sexprs.insert(0, self.smt.atom(name));
+                self.smt.list(sexprs)
+            }
+            isle::sema::Pattern::Var(_, var_id) => {
+                let sym = rule.vars[var_id.index()].name;
+                let ident = typeenv.syms[sym.index()].clone();
+                let smt_ident_prefix = format!("{}__clif{}__", ident, var_id.index());
+
+                let var = self.display_var_from_smt_prefix(vars, &ident, &smt_ident_prefix);
+                self.smt.atom(var)
+            }
+            isle::sema::Pattern::BindPattern(_, var_id, subpat) => {
+                let sym = rule.vars[var_id.index()].name;
+                let ident = &typeenv.syms[sym.index()];
+                let smt_ident_prefix = format!("{}__clif{}__", ident, var_id.index());
+                let subpat_node = to_sexpr(subpat);
+
+                let var = self.display_var_from_smt_prefix(vars, ident, &smt_ident_prefix);
+
+                // Special case: elide bind patterns to wildcards
+                if matches!(**subpat, isle::sema::Pattern::Wildcard(_)) {
+                    self.smt.atom(var)
+                } else {
+                    self.smt
+                        .list(vec![self.smt.atom(var), self.smt.atom("@"), subpat_node])
+                }
+            }
+            isle::sema::Pattern::Wildcard(_) => self.smt.list(vec![self.smt.atom("_")]),
+            isle::sema::Pattern::ConstPrim(_, sym) => {
+                let name = typeenv.syms[sym.index()].clone();
+                self.smt.list(vec![self.smt.atom(name)])
+            }
+            isle::sema::Pattern::ConstInt(_, num) => {
+                let _smt_name_prefix = format!("{}__", num);
+                self.smt.list(vec![self.smt.atom(num.to_string())])
+            }
+            isle::sema::Pattern::And(_, subpats) => {
+                let mut sexprs = subpats.iter().map(to_sexpr).collect::<Vec<SExpr>>();
+
+                sexprs.insert(0, self.smt.atom("and"));
+                self.smt.list(sexprs)
+            }
+        }
+    }
+
+    fn display_var_from_smt_prefix(
+        &self,
+        vars: &Vec<(String, String)>,
+        ident: &str,
+        prefix: &str,
+    ) -> String {
+        let matches: Vec<&(String, String)> =
+            vars.iter().filter(|(v, _)| v.starts_with(prefix)).collect();
+        if matches.is_empty() {
+            panic!("Can't find match for: {}\n{:?}", prefix, vars);
+        } else if matches.len() == 3 {
+            assert!(
+                self.find_widths,
+                "Only expect multiple matches with dynamic widths"
+            );
+            for (name, model) in matches {
+                if name.contains("narrow") {
+                    return format!("[{}|{}]", self.smt.display(self.smt.atom(ident)), model);
+                }
+            }
+            panic!("narrow not found");
+        } else if matches.len() == 1 {
+            let model = &matches.first().unwrap().1;
+            format!("[{}|{}]", self.smt.display(self.smt.atom(ident)), model)
+        } else {
+            panic!("Unexpected number of matches!")
+        }
+    }
+
+    fn display_isle_expr(
+        &self,
+        termenv: &TermEnv,
+        typeenv: &TypeEnv,
+        vars: &Vec<(String, String)>,
+        rule: &Rule,
+        expr: &isle::sema::Expr,
+    ) -> SExpr {
+        let to_sexpr = |e| self.display_isle_expr(termenv, typeenv, vars, rule, e);
+
+        match expr {
+            isle::sema::Expr::Term(_, term_id, args) => {
+                let sym = termenv.terms[term_id.index()].name;
+                let name = typeenv.syms[sym.index()].clone();
+
+                let mut sexprs = args.iter().map(to_sexpr).collect::<Vec<SExpr>>();
+
+                sexprs.insert(0, self.smt.atom(name));
+                self.smt.list(sexprs)
+            }
+            isle::sema::Expr::Var(_, var_id) => {
+                let sym = rule.vars[var_id.index()].name;
+                let ident = typeenv.syms[sym.index()].clone();
+                let smt_ident_prefix = format!("{}__clif{}__", ident, var_id.index());
+
+                let var = self.display_var_from_smt_prefix(vars, &ident, &smt_ident_prefix);
+                self.smt.atom(var)
+            }
+            isle::sema::Expr::ConstPrim(_, sym) => {
+                let name = typeenv.syms[sym.index()].clone();
+                self.smt.list(vec![self.smt.atom(name)])
+            }
+            isle::sema::Expr::ConstInt(_, num) => {
+                let _smt_name_prefix = format!("{}__", num);
+                self.smt.list(vec![self.smt.atom(num.to_string())])
+            }
+            isle::sema::Expr::Let { bindings, body, .. } => {
+                let mut sexprs = vec![];
+                for (varid, _, expr) in bindings {
+                    let sym = rule.vars[varid.index()].name;
+                    let ident = typeenv.syms[sym.index()].clone();
+                    let smt_prefix = format!("{}__clif{}__", ident, varid.index());
+                    let var = self.display_var_from_smt_prefix(vars, &ident, &smt_prefix);
+
+                    sexprs.push(self.smt.list(vec![self.smt.atom(var), to_sexpr(expr)]));
+                }
+                self.smt.list(vec![
+                    self.smt.atom("let"),
+                    self.smt.list(sexprs),
+                    to_sexpr(body),
+                ])
+            }
+        }
+    }
+
+    fn display_model(
+        &mut self,
+        termenv: &TermEnv,
+        typeenv: &TypeEnv,
+        rule: &Rule,
+        lhs_sexpr: SExpr,
+        rhs_sexpr: SExpr,
+    ) {
+        let mut vars = vec![];
+        let mut lhs_value = None;
+        let mut rhs_value = None;
+        for (name, atom) in &self.var_map {
+            let solution = self
+                .smt
+                .get_value(vec![self.smt.atom(name), *atom])
+                .unwrap();
+            for (variable, value) in solution {
+                let display = self.display_value(variable, value);
+                vars.push(display.clone());
+                if variable == lhs_sexpr {
+                    lhs_value = Some(display.1);
+                } else if variable == rhs_sexpr {
+                    rhs_value = Some(display.1);
+                }
+            }
+        }
+        for (name, _) in &self.additional_decls {
+            let solution = self.smt.get_value(vec![self.smt.atom(name)]).unwrap();
+            for (variable, value) in solution {
+                vars.push(self.display_value(variable, value));
+            }
+        }
+        vars.sort_by_key(|x| x.0.clone());
+        vars.dedup();
+
+        // TODO VERBOSE
+        println!("Counterexample summary");
+        let lhs = self.display_isle_pattern(
+            termenv,
+            typeenv,
+            &vars,
+            rule,
+            &Pattern::Term(
+                cranelift_isle::sema::TypeId(0),
+                rule.root_term,
+                rule.args.clone(),
+            ),
+        );
+        println!("{}", self.smt.display(lhs));
+
+        // if-let statement processing
+        if !rule.iflets.is_empty() {
+            print!("(if-let ");
+        }
+        for if_let_struct in &rule.iflets {
+            let if_lhs = &if_let_struct.lhs;
+            let if_rhs: &cranelift_isle::sema::Expr = &if_let_struct.rhs;
+
+            let if_lhs_expr = self.display_isle_pattern(termenv, typeenv, &vars, rule, if_lhs);
+
+            let if_rhs_expr = self.display_isle_expr(termenv, typeenv, &vars, rule, if_rhs);
+
+            println!(
+                "({} {})",
+                self.smt.display(if_lhs_expr),
+                self.smt.display(if_rhs_expr)
+            );
+        }
+        if !rule.iflets.is_empty() {
+            println!(")");
+        }
+
+        println!("=>");
+        let rhs = self.display_isle_expr(termenv, typeenv, &vars, rule, &rule.rhs);
+        println!("{}", self.smt.display(rhs));
+
+        println!("\n{} =>\n{}\n", lhs_value.unwrap(), rhs_value.unwrap());
+    }
+
+    fn declare_variables(
+        &mut self,
+        rule_sem: &RuleSemantics,
+        config: &Config,
+    ) -> (Vec<SExpr>, Vec<SExpr>) {
+        let mut assumptions: Vec<SExpr> = vec![];
+        log::trace!("Declaring quantified variables");
+        for v in &rule_sem.quantified_vars {
+            let name = &v.name;
+            let ty = self.tyctx.tymap[&v.tyvar];
+            let var_ty = self.vir_to_smt_ty(&ty);
+            log::trace!("\t{} : {}", name, self.smt.display(var_ty));
+            if let Type::BitVector(w) = ty {
+                if self.find_widths {
+                    let wide = self.widen_to_register_width(
+                        v.tyvar,
+                        w.unwrap_or(self.bitwidth),
+                        self.smt.atom(name),
+                        Some(name.to_string()),
+                    );
+                    self.var_map.insert(name.clone(), wide);
+                } else {
+                    self.var_map.insert(name.clone(), self.smt.atom(name));
+                }
+            } else {
+                self.var_map.insert(name.clone(), self.smt.atom(name));
+            }
+            self.smt.declare_const(name, var_ty).unwrap();
+        }
+        self.lhs_flag = true;
+        for a in &rule_sem.lhs_assumptions {
+            let p = self.vir_expr_to_sexp(a.clone());
+            assumptions.push(p)
+        }
+        self.lhs_flag = false;
+        for a in &rule_sem.rhs_assumptions {
+            let p = self.vir_expr_to_sexp(a.clone());
+            assumptions.push(p)
+        }
+        if self.find_widths {
+            for a in &self.width_assumptions {
+                assumptions.push(*a);
+            }
+        }
+        for a in &self.additional_assumptions {
+            assumptions.push(*a);
+        }
+        // Look at RHS assertions, which are checked, not trusted
+        let assertions: Vec<SExpr> = rule_sem
+            .rhs_assertions
+            .iter()
+            .map(|a| self.vir_expr_to_sexp(a.clone()))
+            .collect();
+
+        for (name, ty) in &self.additional_decls {
+            self.smt.declare_const(name, *ty).unwrap();
+        }
+
+        if let Some(a) = &config.custom_assumptions {
+            let term_args = rule_sem
+                .term_args
+                .iter()
+                .map(|s| self.smt.atom(s))
+                .collect();
+            let custom_assumptions = a(&self.smt, term_args);
+            log::debug!(
+                "Custom assumptions:\n\t{}\n",
+                self.smt.display(custom_assumptions)
+            );
+            assumptions.push(custom_assumptions);
+        }
+        (assumptions, assertions)
+    }
+}
+
+/// Overall query for a single rule:
+/// <declare vars>
+/// (not (=> <assumptions> (= <LHS> <RHS>)))
+pub fn run_solver(
+    rule_sem: &RuleSemantics,
+    rule: &Rule,
+    termenv: &TermEnv,
+    typeenv: &TypeEnv,
+    concrete: &Option<ConcreteTest>,
+    config: &Config,
+    _types: &TermSignature,
+) -> VerificationResult {
+    if std::env::var("SKIP_SOLVER").is_ok() {
+        log::debug!("Environment variable SKIP_SOLVER set, returning Unknown");
+        return VerificationResult::Unknown;
+    }
+
+    let mut solver = easy_smt::ContextBuilder::new()
+        .replay_file(Some(std::fs::File::create("dynamic_widths.smt2").unwrap()))
+        .solver("z3", ["-smt2", "-in"])
+        .build()
+        .unwrap();
+
+    solver
+        .set_option(":produce-unsat-cores", solver.true_())
+        .unwrap();
+
+    // We start with logic to determine the width of all bitvectors
+    let mut ctx = SolverCtx {
+        smt: solver,
+        // Always find widths at first
+        find_widths: true,
+        tyctx: rule_sem.tyctx.clone(),
+        bitwidth: MAX_WIDTH,
+        var_map: HashMap::new(),
+        width_vars: HashMap::new(),
+        width_assumptions: vec![],
+        additional_decls: vec![],
+        additional_assumptions: vec![],
+        additional_assertions: vec![],
+        fresh_bits_idx: 0,
+        lhs_load_args: None,
+        rhs_load_args: None,
+        lhs_store_args: None,
+        rhs_store_args: None,
+        load_return: None,
+        lhs_flag: true,
+    };
+
+    let mut unresolved_widths = vec![];
+
+    // Check whether the non-solver type inference was able to resolve all bitvector widths,
+    // and add assumptions for known widths
+    for (_e, t) in &ctx.tyctx.tyvars {
+        let ty = &ctx.tyctx.tymap[t];
+        if let Type::BitVector(w) = ty {
+            let width_name =
format!("width__{}", t);
+            ctx.additional_decls
+                .push((width_name.clone(), ctx.smt.int_sort()));
+            match *w {
+                Some(bitwidth) => {
+                    let eq = ctx
+                        .smt
+                        .eq(ctx.smt.atom(&width_name), ctx.smt.numeral(bitwidth));
+                    ctx.width_assumptions.push(eq);
+                }
+                None => {
+                    log::debug!("Unresolved width: {:?} ({})", &_e, *t);
+                    ctx.width_assumptions
+                        .push(ctx.smt.gt(ctx.smt.atom(&width_name), ctx.smt.numeral(0)));
+                    unresolved_widths.push(width_name.clone());
+                }
+            };
+            ctx.width_vars.insert(*t, width_name.clone());
+        }
+    }
+
+    if unresolved_widths.is_empty() {
+        log::debug!("All widths resolved after basic type inference");
+        return run_solver_with_static_widths(
+            &RuleCtx {
+                rule_sem,
+                rule,
+                termenv,
+                typeenv,
+                config,
+            },
+            &ctx.tyctx,
+            concrete,
+        );
+    }
+
+    log::debug!("Some unresolved widths after basic type inference");
+    log::debug!("Finding widths from the solver");
+    ctx.find_widths = true;
+    let (assumptions, _) = ctx.declare_variables(rule_sem, config);
+    ctx.smt.push().unwrap();
+    for (i, a) in assumptions.iter().enumerate() {
+        ctx.smt
+            .assert(ctx.smt.named(format!("dyn{i}"), *a))
+            .unwrap();
+    }
+
+    resolve_dynamic_widths(
+        RuleCtx {
+            rule_sem,
+            rule,
+            termenv,
+            typeenv,
+            config,
+        },
+        concrete,
+        &mut ctx,
+        unresolved_widths,
+        0,
+    )
+}
+
+fn resolve_dynamic_widths(
+    rulectx: RuleCtx,
+    concrete: &Option<ConcreteTest>,
+    ctx: &mut SolverCtx,
+    unresolved_widths: Vec<String>,
+    attempt: usize,
+) -> VerificationResult {
+    if attempt > 10 {
+        panic!("Unexpected number of attempts to resolve dynamic widths!")
+    }
+    match ctx.smt.check() {
+        Ok(Response::Sat) => {
+            let mut cur_tyctx = ctx.tyctx.clone();
+            let mut width_resolutions = HashMap::new();
+            for (e, t) in &ctx.tyctx.tyvars {
+                let ty = &ctx.tyctx.tymap[t];
+                if let Type::BitVector(w) = ty {
+                    let width_name = format!("width__{}", t);
+                    let atom = ctx.smt.atom(&width_name);
+                    let width = ctx.smt.get_value(vec![atom]).unwrap().first().unwrap().1;
+                    let width_int = u8::try_from(ctx.smt.get(width)).unwrap();
+
+                    // Check that we haven't contradicted previous widths
+                    if let Some(before_width) = w {
+                        assert_eq!(*before_width, width_int as usize)
+                    };
+
+                    // Check that the width is nonzero
+                    if width_int == 0 {
+                        panic!("Unexpected, zero width! {} {:?}", t, e);
+                    }
{} {:?}", t, e); + } + + if unresolved_widths.contains(&width_name) { + log::debug!("\tResolved width: {}, {}", width_name, width_int); + width_resolutions.insert(width_name, width_int); + cur_tyctx + .tymap + .insert(*t, Type::BitVector(Some(width_int as usize))); + } + } + } + let static_result = run_solver_with_static_widths(&rulectx, &cur_tyctx, concrete); + + // If we have a failure or unknown, return right away + if !matches!(static_result, VerificationResult::Success) { + return static_result; + } + + // Otherwise, try again, but adding the assertion that some width is + // different than our current assigment + let not_equals = width_resolutions.iter().map(|(s, w)| { + ctx.smt.not( + ctx.smt + .eq(ctx.smt.atom(s.clone()), ctx.smt.atom((*w).to_string())), + ) + }); + ctx.smt.assert(ctx.smt.or_many(not_equals)).unwrap(); + + resolve_dynamic_widths(rulectx, concrete, ctx, unresolved_widths, attempt + 1) + } + Ok(Response::Unsat) => { + if attempt == 0 { + log::warn!( + "Rule not applicable as written for rule assumptions, skipping full query" + ); + let unsat = ctx.smt.get_unsat_core().unwrap(); + log::warn!("Unsat core:\n{}", ctx.smt.display(unsat)); + VerificationResult::InapplicableRule + } else { + // If this is not the first attempt, some previous width assignment must + // have succeeded. + VerificationResult::Success + } + } + Ok(Response::Unknown) => { + panic!("Solver said 'unk'"); + } + Err(err) => { + unreachable!("Error! {:?}", err); + } + } +} + +pub fn run_solver_with_static_widths( + rulectx: &RuleCtx, + tyctx: &TypeContext, + concrete: &Option, +) -> VerificationResult { + // Declare variables again, this time with all static widths + let mut solver = easy_smt::ContextBuilder::new() + .replay_file(Some(std::fs::File::create("static_widths.smt2").unwrap())) + .solver("z3", ["-smt2", "-in"]) + .build() + .unwrap(); + solver + .set_option(":produce-unsat-cores", solver.true_()) + .unwrap(); + let mut ctx = SolverCtx { + smt: solver, + find_widths: false, + tyctx: tyctx.clone(), + bitwidth: MAX_WIDTH, + var_map: HashMap::new(), + width_vars: HashMap::new(), + width_assumptions: vec![], + additional_decls: vec![], + additional_assumptions: vec![], + additional_assertions: vec![], + fresh_bits_idx: 0, + lhs_load_args: None, + rhs_load_args: None, + lhs_store_args: None, + rhs_store_args: None, + load_return: None, + lhs_flag: true, + }; + let (assumptions, mut assertions) = ctx.declare_variables(rulectx.rule_sem, rulectx.config); + + let lhs = ctx.vir_expr_to_sexp(rulectx.rule_sem.lhs.clone()); + ctx.lhs_flag = false; + let rhs = ctx.vir_expr_to_sexp(rulectx.rule_sem.rhs.clone()); + + // For debugging + let unnamed_rule = String::from(""); + let rulename = rulectx + .rule + .name + .map(|name| &rulectx.typeenv.syms[name.index()]) + .unwrap_or(&unnamed_rule); + let unit = "()".to_string(); + let widthname = ctx + .static_width(&rulectx.rule_sem.lhs) + .map_or(unit, |s| format!("width {}", s)); + + // Check whether the assumptions are possible + let feasibility = ctx.check_assumptions_feasibility( + &assumptions, + &rulectx.rule_sem.term_input_bvs, + rulectx.config, + ); + if feasibility != VerificationResult::Success { + log::warn!("Rule not applicable as written for rule assumptions, skipping full query"); + return feasibility; + } + + // Correctness query + // Verification condition: first rule's LHS and RHS are equal + if let Some(concrete) = concrete { + return test_concrete_with_static_widths( + rulectx, + concrete, + lhs, + rhs, + &mut ctx, + assumptions, + ); + } + + 
let condition = if let Some(condition) = &rulectx.config.custom_verification_condition { + let term_args = rulectx + .rule_sem + .term_args + .iter() + .map(|s| ctx.smt.atom(s)) + .collect(); + let custom_condition = condition(&ctx.smt, term_args, lhs, rhs); + log::debug!( + "Custom verification condition:\n\t{}\n", + ctx.smt.display(custom_condition) + ); + custom_condition + } else { + // Note: this is where we ask if the LHS and the RHS are equal + let side_equality = ctx.smt.eq(lhs, rhs); + log::debug!( + "LHS and RHS equality condition:{}", + ctx.smt.display(side_equality) + ); + side_equality + }; + + for a in &ctx.additional_assertions { + assertions.push(*a); + } + + let assumption_conjunction = ctx.smt.and_many(assumptions); + let mut full_condition = if !assertions.is_empty() { + let assertion_conjunction = ctx.smt.and_many(assertions.clone()); + ctx.smt.and(condition, assertion_conjunction) + } else { + condition + }; + + let mut load_conditions = vec![]; + match (&ctx.lhs_load_args, &ctx.rhs_load_args) { + (Some(_), Some(_)) => { + let lhs_args_vec = ctx.lhs_load_args.clone().unwrap(); + let rhs_args_vec = ctx.rhs_load_args.clone().unwrap(); + log::debug!("Load argument conditions:"); + for i in 0..lhs_args_vec.len() { + let arg_equal = ctx.smt.eq(lhs_args_vec[i], rhs_args_vec[i]); + load_conditions.push(arg_equal); + log::debug!("\t{}", ctx.smt.display(arg_equal)); + full_condition = ctx.smt.and(full_condition, arg_equal); + } + } + (None, None) => (), + (Some(_), None) => { + log::error!("Verification failed for {}, {}", rulename, widthname); + log::error!("Left hand side has load statement but right hand side does not."); + return VerificationResult::Failure(Counterexample {}); + } + (None, Some(_)) => { + log::error!("Verification failed for {}, {}", rulename, widthname); + log::error!("Right hand side has load statement but left hand side does not."); + return VerificationResult::Failure(Counterexample {}); + } + } + + let mut store_conditions = vec![]; + match (&ctx.lhs_store_args, &ctx.rhs_store_args) { + (Some(_), Some(_)) => { + let lhs_args_vec = ctx.lhs_store_args.clone().unwrap(); + let rhs_args_vec = ctx.rhs_store_args.clone().unwrap(); + log::debug!("Store argument conditions:"); + + for i in 0..lhs_args_vec.len() { + let arg_equal = ctx.smt.eq(lhs_args_vec[i], rhs_args_vec[i]); + store_conditions.push(arg_equal); + log::debug!("\t{}", ctx.smt.display(arg_equal)); + full_condition = ctx.smt.and(full_condition, arg_equal) + } + } + (None, None) => (), + (Some(_), None) => { + log::error!("Verification failed for {}, {}", rulename, widthname); + log::error!("Left hand side has store statement but right hand side does not."); + return VerificationResult::Failure(Counterexample {}); + } + (None, Some(_)) => { + log::error!("Verification failed for {}, {}", rulename, widthname); + log::error!("Right hand side has store statement but left hand side does not."); + return VerificationResult::Failure(Counterexample {}); + } + } + + log::trace!( + "Full verification condition:{}", + ctx.smt.display(full_condition) + ); + let query = ctx + .smt + .not(ctx.smt.imp(assumption_conjunction, full_condition)); + log::trace!("Running query"); + ctx.smt.assert(query).unwrap(); + + match ctx.smt.check() { + Ok(Response::Sat) => { + println!("Verification failed for {}, {}", rulename, widthname); + ctx.display_model(rulectx.termenv, rulectx.typeenv, rulectx.rule, lhs, rhs); + let vals = ctx.smt.get_value(vec![condition]).unwrap(); + for (variable, value) in vals { + if value == 
ctx.smt.false_() { + println!("Failed condition:\n{}", ctx.smt.display(variable)); + } else if value == ctx.smt.true_() { + println!("Condition met, but failed some assertion(s).") + } + } + + if !assertions.is_empty() { + let vals = ctx.smt.get_value(assertions).unwrap(); + for (variable, value) in vals { + if value == ctx.smt.false_() { + println!("Failed assertion:\n{}", ctx.smt.display(variable)); + } + } + } + + if !load_conditions.is_empty() { + let vals = ctx.smt.get_value(load_conditions).unwrap(); + for (variable, value) in vals { + if value == ctx.smt.false_() { + log::error!("Failed load condition:\n{}", ctx.smt.display(variable)); + } + } + } + VerificationResult::Failure(Counterexample {}) + } + Ok(Response::Unsat) => { + println!("Verification succeeded for {}, {}", rulename, widthname); + VerificationResult::Success + } + Ok(Response::Unknown) => { + panic!("Solver said 'unk'"); + } + Err(err) => { + unreachable!("Error! {:?}", err); + } + } +} + +pub fn test_concrete_with_static_widths( + rulectx: &RuleCtx, + concrete: &ConcreteTest, + lhs: SExpr, + rhs: SExpr, + ctx: &mut SolverCtx, + assumptions: Vec<SExpr>, +) -> VerificationResult { + // Test code only: test against concrete input/output + // Check that our expected output is valid + for (i, a) in assumptions.iter().enumerate() { + ctx.smt + .assert(ctx.smt.named(format!("conc{i}"), *a)) + .unwrap(); + } + for (i, e) in ctx.additional_assertions.iter().enumerate() { + ctx.smt + .assert(ctx.smt.named(format!("conc_assert{i}"), *e)) + .unwrap(); + } + ctx.smt.push().unwrap(); + let eq = ctx + .smt + .eq(rhs, ctx.smt.atom(concrete.output.literal.clone())); + + ctx.smt + .assert(ctx.smt.named("conceq".to_string(), eq)) + .unwrap(); + + for (i, a) in rulectx.rule_sem.rhs_assertions.iter().enumerate() { + let p = ctx.vir_expr_to_sexp(a.clone()); + ctx.smt + .assert(ctx.smt.named(format!("rhs_assert{i}"), p)) + .unwrap(); + } + + if !matches!(ctx.smt.check(), Ok(Response::Sat)) { + // Bad! This is a bug! 
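+        // The assumptions were already checked for feasibility before this
+        // function was called, so reaching this branch means no model lets
+        // the RHS equal the expected literal: the concrete test fails.
+        // Re-check without the output equality to recover a model, so the
+        // panic below can report what the rule actually produces.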
+ // Pop the output assertion + ctx.smt.pop().unwrap(); + // Try again + assert!(matches!(ctx.smt.check(), Ok(Response::Sat))); + // Get the value of the output so we can panic with a useful message + let val = ctx.smt.get_value(vec![rhs]).unwrap()[0].1; + ctx.display_model(rulectx.termenv, rulectx.typeenv, rulectx.rule, lhs, rhs); + panic!( + "Expected {}, got {}", + concrete.output.literal, + ctx.display_hex_to_bin(val) + ); + } else { + log::debug!( + "Expected concrete result matched: {}", + concrete.output.literal + ); + ctx.smt.pop().unwrap(); + } + + // Check that there is no other possible output + ctx.smt.push().unwrap(); + ctx.smt + .assert( + ctx.smt.not( + ctx.smt + .eq(rhs, ctx.smt.atom(concrete.output.literal.clone())), + ), + ) + .unwrap(); + if !matches!(ctx.smt.check(), Ok(Response::Unsat)) { + // Get the value of the output so we can panic with a useful message + let val = ctx.smt.get_value(vec![rhs]).unwrap()[0].1; + ctx.display_model(rulectx.termenv, rulectx.typeenv, rulectx.rule, lhs, rhs); + // AVH TODO: should probably elevate back to an error with custom verification condition + log::error!( + "WARNING: Expected ONLY {}, got POSSIBLE {}", + concrete.output.literal, + ctx.display_hex_to_bin(val) + ); + } + ctx.smt.pop().unwrap(); + VerificationResult::Success +} diff --git a/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/cls.rs b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/cls.rs new file mode 100644 index 000000000000..8f38718e8ab9 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/cls.rs @@ -0,0 +1,3501 @@ +use crate::solver::SolverCtx; +use easy_smt::SExpr; + +// Future work: possibly move these into the annotation language or an SMTLIB prelude +// Adapted from https://stackoverflow.com/questions/23856596/how-to-count-leading-zeros-in-a-32-bit-unsigned-integer + +pub fn cls64(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + // Generated code. 
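+    // What follows unrolls a branch-free count-leading-zeros loop: logically
+    // shift by 32, 16, 8, 4, 2, then 1, and whenever the shifted value is all
+    // zeros, add that shift amount to the running count (the zret* variables).
+    // The same rounds are then repeated with arithmetic shifts, comparing
+    // against all ones, to count leading one bits (the sret* variables). A
+    // final ite on (bvsle (_ bv0 64) x) picks the zero count for non-negative
+    // inputs and the ones count otherwise, and the trailing decrement (unless
+    // the count is already zero) excludes the sign bit itself, matching cls
+    // semantics: e.g. cls64(#x0000000000000001) = 62 and
+    // cls64(#xffffffffffffffff) = 63.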
+ // total zeros counter + let zret0 = solver.declare( + format!("zret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + zret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + )); + // round 1 + let zret1 = solver.declare( + format!("zret1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zy32 = solver.declare( + format!("zy32_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zx32 = solver.declare( + format!("zx32_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + zy32, + solver.smt.bvlshr(x, solver.smt.atom("#x0000000000000020")), + )); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy32, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zret1, zret0), + solver.smt.eq( + zret1, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv32"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy32, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zx32, zy32), + solver.smt.eq(zx32, x), + ])); + // round 2 + let zret2 = solver.declare( + format!("zret2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zy16 = solver.declare( + format!("zy16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zx16 = solver.declare( + format!("zx16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + zy16, + solver + .smt + .bvlshr(zx32, solver.smt.atom("#x0000000000000010")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zret2, zret1), + solver.smt.eq( + zret2, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv16"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zx16, zy16), + solver.smt.eq(zx16, zx32), + ])); + // round 3 + let zret3 = solver.declare( + format!("zret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zy8 = 
solver.declare( + format!("zy8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zx8 = solver.declare( + format!("zx8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + zy8, + solver + .smt + .bvlshr(zx16, solver.smt.atom("#x0000000000000008")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zret3, zret2), + solver.smt.eq( + zret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv8"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zx8, zy8), + solver.smt.eq(zx8, zx16), + ])); + // round 4 + let zret4 = solver.declare( + format!("zret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zy4 = solver.declare( + format!("zy4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zx4 = solver.declare( + format!("zx4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + zy4, + solver + .smt + .bvlshr(zx8, solver.smt.atom("#x0000000000000004")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zret4, zret3), + solver.smt.eq( + zret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zx4, zy4), + solver.smt.eq(zx4, zx8), + ])); + // round 5 + let zret5 = solver.declare( + format!("zret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zy2 = solver.declare( + format!("zy2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zx2 = solver.declare( + format!("zx2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + zy2, + solver + .smt + .bvlshr(zx4, solver.smt.atom("#x0000000000000002")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + 
solver.smt.eq( + zy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zret5, zret4), + solver.smt.eq( + zret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zx2, zy2), + solver.smt.eq(zx2, zx4), + ])); + // round 6 + let zret6 = solver.declare( + format!("zret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zy1 = solver.declare( + format!("zy1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let zx1 = solver.declare( + format!("zx1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + zy1, + solver + .smt + .bvlshr(zx2, solver.smt.atom("#x0000000000000001")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zret6, zret5), + solver.smt.eq( + zret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zx1, zy1), + solver.smt.eq(zx1, zx2), + ])); + // last round + let zret7 = solver.declare( + format!("zret7_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zx1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(zret7, zret6), + solver.smt.eq( + zret7, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret6, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + let clzret = solver.declare( + format!("clzret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.eq( + zret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + solver.smt.eq(clzret, zret7), + solver.smt.eq( + clzret, + solver.smt.list(vec![ + solver.smt.atom("bvsub"), + zret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + // total zeros counter + let sret0 
= solver.declare( + format!("sret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + sret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + )); + // round 1 + let sret1 = solver.declare( + format!("sret1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sy32 = solver.declare( + format!("sy32_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sx32 = solver.declare( + format!("sx32_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + sy32, + solver.smt.bvashr(x, solver.smt.atom("#x0000000000000020")), + )); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy32, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sret1, sret0), + solver.smt.eq( + sret1, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv32"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy32, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sx32, sy32), + solver.smt.eq(sx32, x), + ])); + // round 2 + let sret2 = solver.declare( + format!("sret2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sy16 = solver.declare( + format!("sy16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sx16 = solver.declare( + format!("sx16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + sy16, + solver + .smt + .bvashr(sx32, solver.smt.atom("#x0000000000000010")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sret2, sret1), + solver.smt.eq( + sret2, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv16"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sx16, sy16), + solver.smt.eq(sx16, sx32), + ])); + // round 3 + let sret3 = solver.declare( + format!("sret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + 
solver.smt.numeral(64), + ]), + ); + let sy8 = solver.declare( + format!("sy8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sx8 = solver.declare( + format!("sx8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + sy8, + solver + .smt + .bvashr(sx16, solver.smt.atom("#x0000000000000008")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sret3, sret2), + solver.smt.eq( + sret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv8"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sx8, sy8), + solver.smt.eq(sx8, sx16), + ])); + // round 4 + let sret4 = solver.declare( + format!("sret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sy4 = solver.declare( + format!("sy4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sx4 = solver.declare( + format!("sx4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + sy4, + solver + .smt + .bvashr(sx8, solver.smt.atom("#x0000000000000004")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sret4, sret3), + solver.smt.eq( + sret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sx4, sy4), + solver.smt.eq(sx4, sx8), + ])); + // round 5 + let sret5 = solver.declare( + format!("sret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sy2 = solver.declare( + format!("sy2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sx2 = solver.declare( + format!("sx2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + sy2, + solver + .smt + .bvashr(sx4, solver.smt.atom("#x0000000000000002")), + ), 
+ ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sret5, sret4), + solver.smt.eq( + sret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sx2, sy2), + solver.smt.eq(sx2, sx4), + ])); + // round 6 + let sret6 = solver.declare( + format!("sret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sy1 = solver.declare( + format!("sy1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let sx1 = solver.declare( + format!("sx1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + sy1, + solver + .smt + .bvashr(sx2, solver.smt.atom("#x0000000000000001")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sret6, sret5), + solver.smt.eq( + sret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sx1, sy1), + solver.smt.eq(sx1, sx2), + ])); + // last round + let sret7 = solver.declare( + format!("sret7_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sx1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv18446744073709551615"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(sret7, sret6), + solver.smt.eq( + sret7, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret6, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + let clsret = solver.declare( + format!("clsret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.eq( + sret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + solver.smt.eq(clsret, sret7), + solver.smt.eq( + clsret, + 
solver.smt.list(vec![ + solver.smt.atom("bvsub"), + sret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + let cls64ret = solver.declare( + format!("cls64ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("bvsle"), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + x, + ]), + solver.smt.eq(cls64ret, clzret), + solver.smt.eq(cls64ret, clsret), + ])); + + cls64ret +} + +pub fn cls32(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(31, 0, x); + + // Generated code. + // total zeros counter + let zret0 = solver.declare( + format!("zret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.eq( + zret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + )); + // round 1 + let zret2 = solver.declare( + format!("zret2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zy16 = solver.declare( + format!("zy16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zx16 = solver.declare( + format!("zx16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(zy16, solver.smt.bvlshr(x, solver.smt.atom("#x00000010"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zret2, zret0), + solver.smt.eq( + zret2, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv16"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zx16, zy16), + solver.smt.eq(zx16, x), + ])); + // round 2 + let zret3 = solver.declare( + format!("zret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zy8 = solver.declare( + format!("zy8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zx8 = solver.declare( + format!("zx8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(zy8, solver.smt.bvlshr(zx16, solver.smt.atom("#x00000008"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + 
solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zret3, zret2), + solver.smt.eq( + zret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv8"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zx8, zy8), + solver.smt.eq(zx8, zx16), + ])); + // round 3 + let zret4 = solver.declare( + format!("zret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zy4 = solver.declare( + format!("zy4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zx4 = solver.declare( + format!("zx4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(zy4, solver.smt.bvlshr(zx8, solver.smt.atom("#x00000004"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zret4, zret3), + solver.smt.eq( + zret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zx4, zy4), + solver.smt.eq(zx4, zx8), + ])); + // round 4 + let zret5 = solver.declare( + format!("zret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zy2 = solver.declare( + format!("zy2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zx2 = solver.declare( + format!("zx2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(zy2, solver.smt.bvlshr(zx4, solver.smt.atom("#x00000002"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zret5, zret4), + solver.smt.eq( + zret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zx2, zy2), + solver.smt.eq(zx2, zx4), + ])); + // round 5 
+ let zret6 = solver.declare( + format!("zret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zy1 = solver.declare( + format!("zy1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let zx1 = solver.declare( + format!("zx1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(zy1, solver.smt.bvlshr(zx2, solver.smt.atom("#x00000001"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zret6, zret5), + solver.smt.eq( + zret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zx1, zy1), + solver.smt.eq(zx1, zx2), + ])); + // last round + let zret7 = solver.declare( + format!("zret7_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zx1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(zret7, zret6), + solver.smt.eq( + zret7, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret6, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + let clzret = solver.declare( + format!("clzret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.eq( + zret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + solver.smt.eq(clzret, zret7), + solver.smt.eq( + clzret, + solver.smt.list(vec![ + solver.smt.atom("bvsub"), + zret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + // total zeros counter + let sret0 = solver.declare( + format!("sret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.eq( + sret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + )); + // round 1 + let sret2 = solver.declare( + format!("sret2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sy16 = solver.declare( + format!("sy16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sx16 = solver.declare( + 
format!("sx16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(sy16, solver.smt.bvashr(x, solver.smt.atom("#x00000010"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sret2, sret0), + solver.smt.eq( + sret2, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv16"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sx16, sy16), + solver.smt.eq(sx16, x), + ])); + // round 2 + let sret3 = solver.declare( + format!("sret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sy8 = solver.declare( + format!("sy8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sx8 = solver.declare( + format!("sx8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(sy8, solver.smt.bvashr(sx16, solver.smt.atom("#x00000008"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sret3, sret2), + solver.smt.eq( + sret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv8"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sx8, sy8), + solver.smt.eq(sx8, sx16), + ])); + // round 3 + let sret4 = solver.declare( + format!("sret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sy4 = solver.declare( + format!("sy4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sx4 = solver.declare( + format!("sx4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(sy4, solver.smt.bvashr(sx8, solver.smt.atom("#x00000004"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sret4, sret3), + solver.smt.eq( + sret4, 
+ solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sx4, sy4), + solver.smt.eq(sx4, sx8), + ])); + // round 4 + let sret5 = solver.declare( + format!("sret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sy2 = solver.declare( + format!("sy2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sx2 = solver.declare( + format!("sx2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(sy2, solver.smt.bvashr(sx4, solver.smt.atom("#x00000002"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sret5, sret4), + solver.smt.eq( + sret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sx2, sy2), + solver.smt.eq(sx2, sx4), + ])); + // round 5 + let sret6 = solver.declare( + format!("sret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sy1 = solver.declare( + format!("sy1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let sx1 = solver.declare( + format!("sx1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(sy1, solver.smt.bvashr(sx2, solver.smt.atom("#x00000001"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sret6, sret5), + solver.smt.eq( + sret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sx1, sy1), + solver.smt.eq(sx1, sx2), + ])); + // last round + let sret7 = solver.declare( + 
format!("sret7_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sx1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4294967295"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(sret7, sret6), + solver.smt.eq( + sret7, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret6, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + let clsret = solver.declare( + format!("clsret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.eq( + sret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + solver.smt.eq(clsret, sret7), + solver.smt.eq( + clsret, + solver.smt.list(vec![ + solver.smt.atom("bvsub"), + sret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + let cls32ret = solver.declare( + format!("cls32ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("bvsle"), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + x, + ]), + solver.smt.eq(cls32ret, clzret), + solver.smt.eq(cls32ret, clsret), + ])); + + if solver.find_widths { + let padding = solver.new_fresh_bits(solver.bitwidth - 32); + solver.smt.concat(padding, cls32ret) + } else { + cls32ret + } +} + +pub fn cls16(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(15, 0, x); + + // Generated code. 
+ // total zeros counter + let zret0 = solver.declare( + format!("zret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.eq( + zret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + )); + // round 1 + let zret3 = solver.declare( + format!("zret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let zy8 = solver.declare( + format!("zy8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let zx8 = solver.declare( + format!("zx8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(zy8, solver.smt.bvlshr(x, solver.smt.atom("#x0008"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(zret3, zret0), + solver.smt.eq( + zret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv8"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(zx8, zy8), + solver.smt.eq(zx8, x), + ])); + // round 2 + let zret4 = solver.declare( + format!("zret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let zy4 = solver.declare( + format!("zy4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let zx4 = solver.declare( + format!("zx4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(zy4, solver.smt.bvlshr(zx8, solver.smt.atom("#x0004"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(zret4, zret3), + solver.smt.eq( + zret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(zx4, zy4), + solver.smt.eq(zx4, zx8), + ])); + // round 3 + let zret5 = solver.declare( + format!("zret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let zy2 = solver.declare( + format!("zy2_{id}", id = id), + 
solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let zx2 = solver.declare( + format!("zx2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(zy2, solver.smt.bvlshr(zx4, solver.smt.atom("#x0002"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(zret5, zret4), + solver.smt.eq( + zret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(zx2, zy2), + solver.smt.eq(zx2, zx4), + ])); + // round 4 + let zret6 = solver.declare( + format!("zret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let zy1 = solver.declare( + format!("zy1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let zx1 = solver.declare( + format!("zx1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(zy1, solver.smt.bvlshr(zx2, solver.smt.atom("#x0001"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(zret6, zret5), + solver.smt.eq( + zret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(zx1, zy1), + solver.smt.eq(zx1, zx2), + ])); + // last round + let zret7 = solver.declare( + format!("zret7_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zx1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(zret7, zret6), + solver.smt.eq( + zret7, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret6, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + let clzret = solver.declare( + format!("clzret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); 
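+    // As in cls64, subtract one from the raw leading-zero count (unless it is
+    // already zero): cls does not count the sign bit itself.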
+ solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.eq( + zret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + solver.smt.eq(clzret, zret7), + solver.smt.eq( + clzret, + solver.smt.list(vec![ + solver.smt.atom("bvsub"), + zret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + // total zeros counter + let sret0 = solver.declare( + format!("sret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.eq( + sret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + )); + // round 1 + let sret3 = solver.declare( + format!("sret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let sy8 = solver.declare( + format!("sy8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let sx8 = solver.declare( + format!("sx8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(sy8, solver.smt.bvashr(x, solver.smt.atom("#x0008"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv65535"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(sret3, sret0), + solver.smt.eq( + sret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv8"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv65535"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(sx8, sy8), + solver.smt.eq(sx8, x), + ])); + // round 2 + let sret4 = solver.declare( + format!("sret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let sy4 = solver.declare( + format!("sy4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let sx4 = solver.declare( + format!("sx4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(sy4, solver.smt.bvashr(sx8, solver.smt.atom("#x0004"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv65535"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(sret4, sret3), + solver.smt.eq( + sret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + 
solver.smt.eq( + sy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv65535"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(sx4, sy4), + solver.smt.eq(sx4, sx8), + ])); + // round 3 + let sret5 = solver.declare( + format!("sret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let sy2 = solver.declare( + format!("sy2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let sx2 = solver.declare( + format!("sx2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(sy2, solver.smt.bvashr(sx4, solver.smt.atom("#x0002"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv65535"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(sret5, sret4), + solver.smt.eq( + sret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv65535"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(sx2, sy2), + solver.smt.eq(sx2, sx4), + ])); + // round 4 + let sret6 = solver.declare( + format!("sret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let sy1 = solver.declare( + format!("sy1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let sx1 = solver.declare( + format!("sx1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(sy1, solver.smt.bvashr(sx2, solver.smt.atom("#x0001"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv65535"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(sret6, sret5), + solver.smt.eq( + sret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv65535"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(sx1, sy1), + solver.smt.eq(sx1, sx2), + ])); + // last round + let sret7 = solver.declare( + format!("sret7_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sx1, + solver.smt.list(vec![ + solver.smt.atoms().und, + 
solver.smt.atom("bv65535"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(sret7, sret6), + solver.smt.eq( + sret7, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret6, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + let clsret = solver.declare( + format!("clsret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.eq( + sret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + solver.smt.eq(clsret, sret7), + solver.smt.eq( + clsret, + solver.smt.list(vec![ + solver.smt.atom("bvsub"), + sret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + let cls16ret = solver.declare( + format!("cls16ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("bvsle"), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + x, + ]), + solver.smt.eq(cls16ret, clzret), + solver.smt.eq(cls16ret, clsret), + ])); + + if solver.find_widths { + let padding = solver.new_fresh_bits(solver.bitwidth - 16); + solver.smt.concat(padding, cls16ret) + } else { + cls16ret + } +} + +pub fn cls8(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(7, 0, x); + + // Generated code. + // total zeros counter + let zret0 = solver.declare( + format!("zret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.eq( + zret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + )); + // round 1 + let zret4 = solver.declare( + format!("zret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let zy4 = solver.declare( + format!("zy4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let zx4 = solver.declare( + format!("zx4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume( + solver + .smt + .eq(zy4, solver.smt.bvlshr(x, solver.smt.atom("#x04"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(zret4, zret0), + solver.smt.eq( + zret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(zx4, zy4), + solver.smt.eq(zx4, x), + ])); + // round 2 
+ let zret5 = solver.declare( + format!("zret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let zy2 = solver.declare( + format!("zy2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let zx2 = solver.declare( + format!("zx2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume( + solver + .smt + .eq(zy2, solver.smt.bvlshr(zx4, solver.smt.atom("#x02"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(zret5, zret4), + solver.smt.eq( + zret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(zx2, zy2), + solver.smt.eq(zx2, zx4), + ])); + // round 3 + let zret6 = solver.declare( + format!("zret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let zy1 = solver.declare( + format!("zy1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let zx1 = solver.declare( + format!("zx1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume( + solver + .smt + .eq(zy1, solver.smt.bvlshr(zx2, solver.smt.atom("#x01"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(zret6, zret5), + solver.smt.eq( + zret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(zx1, zy1), + solver.smt.eq(zx1, zx2), + ])); + // last round + let zret7 = solver.declare( + format!("zret7_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + zx1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(zret7, zret6), + solver.smt.eq( + zret7, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + zret6, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + 
solver.smt.numeral(8), + ]), + ]), + ), + ])); + let clzret = solver.declare( + format!("clzret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.eq( + zret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + solver.smt.eq(clzret, zret7), + solver.smt.eq( + clzret, + solver.smt.list(vec![ + solver.smt.atom("bvsub"), + zret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + // total zeros counter + let sret0 = solver.declare( + format!("sret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.eq( + sret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + )); + // round 1 + let sret4 = solver.declare( + format!("sret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let sy4 = solver.declare( + format!("sy4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let sx4 = solver.declare( + format!("sx4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume( + solver + .smt + .eq(sy4, solver.smt.bvashr(x, solver.smt.atom("#x04"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv255"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(sret4, sret0), + solver.smt.eq( + sret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv255"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(sx4, sy4), + solver.smt.eq(sx4, x), + ])); + // round 2 + let sret5 = solver.declare( + format!("sret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let sy2 = solver.declare( + format!("sy2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let sx2 = solver.declare( + format!("sx2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume( + solver + .smt + .eq(sy2, solver.smt.bvashr(sx4, solver.smt.atom("#x02"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv255"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(sret5, sret4), + solver.smt.eq( + sret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret4, + solver.smt.list(vec![ + 
solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv255"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(sx2, sy2), + solver.smt.eq(sx2, sx4), + ])); + // round 3 + let sret6 = solver.declare( + format!("sret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let sy1 = solver.declare( + format!("sy1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let sx1 = solver.declare( + format!("sx1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume( + solver + .smt + .eq(sy1, solver.smt.bvashr(sx2, solver.smt.atom("#x01"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv255"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(sret6, sret5), + solver.smt.eq( + sret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sy1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv255"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(sx1, sy1), + solver.smt.eq(sx1, sx2), + ])); + // last round + let sret7 = solver.declare( + format!("sret7_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + sx1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv255"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(sret7, sret6), + solver.smt.eq( + sret7, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + sret6, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + let clsret = solver.declare( + format!("clsret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.eq( + sret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + solver.smt.eq(clsret, sret7), + solver.smt.eq( + clsret, + solver.smt.list(vec![ + solver.smt.atom("bvsub"), + sret7, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + let cls8ret = solver.declare( + format!("cls8ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("bvsle"), + solver.smt.list(vec![ + solver.smt.atoms().und, + 
solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + x, + ]), + solver.smt.eq(cls8ret, clzret), + solver.smt.eq(cls8ret, clsret), + ])); + + if solver.find_widths { + let padding = solver.new_fresh_bits(solver.bitwidth - 8); + solver.smt.concat(padding, cls8ret) + } else { + cls8ret + } +} + +pub fn cls1(solver: &mut SolverCtx, id: u32) -> SExpr { + // Generated code. + let cls1ret = solver.declare( + format!("cls1ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(1), + ]), + ); + solver.assume(solver.smt.eq( + cls1ret, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(1), + ]), + )); + + if solver.find_widths { + let padding = solver.new_fresh_bits(solver.bitwidth - 1); + solver.smt.concat(padding, cls1ret) + } else { + cls1ret + } +} diff --git a/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/clz.rs b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/clz.rs new file mode 100644 index 000000000000..0e7aaa9607db --- /dev/null +++ b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/clz.rs @@ -0,0 +1,1607 @@ +use crate::solver::SolverCtx; +use easy_smt::SExpr; + +// Future work: possibly move these into the annotation language or an SMTLIB prelude +// Adapted from https://stackoverflow.com/questions/23856596/how-to-count-leading-zeros-in-a-32-bit-unsigned-integer + +pub fn clz64(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + // Generated code. + // total zeros counter + let ret0 = solver.declare( + format!("ret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + ret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + )); + // round 1 + let ret1 = solver.declare( + format!("ret1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let y32 = solver.declare( + format!("y32_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let x32 = solver.declare( + format!("x32_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + y32, + solver.smt.bvlshr(x, solver.smt.atom("#x0000000000000020")), + )); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y32, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(ret1, ret0), + solver.smt.eq( + ret1, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv32"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y32, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(x32, y32), + solver.smt.eq(x32, x), + ])); + // round 2 + let ret2 = solver.declare( + format!("ret2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let y16 = solver.declare( + 
format!("y16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let x16 = solver.declare( + format!("x16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + y16, + solver + .smt + .bvlshr(x32, solver.smt.atom("#x0000000000000010")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(ret2, ret1), + solver.smt.eq( + ret2, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv16"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(x16, y16), + solver.smt.eq(x16, x32), + ])); + // round 3 + let ret3 = solver.declare( + format!("ret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let y8 = solver.declare( + format!("y8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let x8 = solver.declare( + format!("x8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume( + solver.smt.eq( + y8, + solver + .smt + .bvlshr(x16, solver.smt.atom("#x0000000000000008")), + ), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(ret3, ret2), + solver.smt.eq( + ret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv8"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(x8, y8), + solver.smt.eq(x8, x16), + ])); + // round 4 + let ret4 = solver.declare( + format!("ret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let y4 = solver.declare( + format!("y4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let x4 = solver.declare( + format!("x4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + y4, + solver.smt.bvlshr(x8, solver.smt.atom("#x0000000000000004")), + )); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y4, + solver.smt.list(vec![ + 
solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(ret4, ret3), + solver.smt.eq( + ret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(x4, y4), + solver.smt.eq(x4, x8), + ])); + // round 5 + let ret5 = solver.declare( + format!("ret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let y2 = solver.declare( + format!("y2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let x2 = solver.declare( + format!("x2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + y2, + solver.smt.bvlshr(x4, solver.smt.atom("#x0000000000000002")), + )); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(ret5, ret4), + solver.smt.eq( + ret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(x2, y2), + solver.smt.eq(x2, x4), + ])); + // round 6 + let ret6 = solver.declare( + format!("ret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let y1 = solver.declare( + format!("y1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + let x1 = solver.declare( + format!("x1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + y1, + solver.smt.bvlshr(x2, solver.smt.atom("#x0000000000000001")), + )); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(ret6, ret5), + solver.smt.eq( + ret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(x1, y1), + solver.smt.eq(x1, x2), + 
])); + + // last round + let ret7 = solver.declare( + format!("ret7_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + x1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(64), + ]), + ), + ]), + solver.smt.eq(ret7, ret6), + solver.smt.eq( + ret7, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret6, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(64), + ]), + ]), + ), + ])); + + ret7 +} + +pub fn clz32(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(31, 0, x); + + // Generated code. + // total zeros counter + let ret0 = solver.declare( + format!("ret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.eq( + ret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + )); + // round 1 + let ret1 = solver.declare( + format!("ret1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let y16 = solver.declare( + format!("y16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let x16 = solver.declare( + format!("x16_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(y16, solver.smt.bvlshr(x, solver.smt.atom("#x00000010"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(ret1, ret0), + solver.smt.eq( + ret1, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv16"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y16, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(x16, y16), + solver.smt.eq(x16, x), + ])); + // round 2 + let ret2 = solver.declare( + format!("ret2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let y8 = solver.declare( + format!("y8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let x8 = solver.declare( + format!("x8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(y8, solver.smt.bvlshr(x16, solver.smt.atom("#x00000008"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + 
]), + solver.smt.eq(ret2, ret1), + solver.smt.eq( + ret2, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv8"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(x8, y8), + solver.smt.eq(x8, x16), + ])); + // round 3 + let ret3 = solver.declare( + format!("ret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let y4 = solver.declare( + format!("y4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let x4 = solver.declare( + format!("x4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(y4, solver.smt.bvlshr(x8, solver.smt.atom("#x00000004"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(ret3, ret2), + solver.smt.eq( + ret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(x4, y4), + solver.smt.eq(x4, x8), + ])); + // round 4 + let ret4 = solver.declare( + format!("ret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let y2 = solver.declare( + format!("y2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let x2 = solver.declare( + format!("x2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(y2, solver.smt.bvlshr(x4, solver.smt.atom("#x00000002"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(ret4, ret3), + solver.smt.eq( + ret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(x2, y2), + solver.smt.eq(x2, x4), + ])); + // round 5 + let ret5 = solver.declare( + format!("ret5_{id}", id = id), + 
solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let y1 = solver.declare( + format!("y1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + let x1 = solver.declare( + format!("x1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume( + solver + .smt + .eq(y1, solver.smt.bvlshr(x2, solver.smt.atom("#x00000001"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(ret5, ret4), + solver.smt.eq( + ret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(x1, y1), + solver.smt.eq(x1, x2), + ])); + + // last round + let ret6 = solver.declare( + format!("ret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + x1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(32), + ]), + ), + ]), + solver.smt.eq(ret6, ret5), + solver.smt.eq( + ret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(32), + ]), + ]), + ), + ])); + + if solver.find_widths { + let padding = solver.new_fresh_bits(solver.bitwidth - 32); + solver.smt.concat(padding, ret6) + } else { + ret6 + } +} + +pub fn clz16(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(15, 0, x); + + // Generated code. 
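+    // Editor's note, not part of the generated encoding: the constraints below
+    // unroll the branch-free clz loop from the Stack Overflow link at the top
+    // of this file. An illustrative Rust model of what is encoded, for
+    // reference only:
+    //
+    //     fn clz16_model(mut x: u16) -> u16 {
+    //         let mut n = 0;
+    //         for s in [8, 4, 2, 1] {
+    //             let y = x >> s;
+    //             if y != 0 { x = y } else { n += s }
+    //         }
+    //         if x == 0 { n + 1 } else { n } // input 0 yields 16
+    //     }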
+ // total zeros counter + let ret1 = solver.declare( + format!("ret1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.eq( + ret1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + )); + // round 1 + let ret2 = solver.declare( + format!("ret2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let y8 = solver.declare( + format!("y8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let x8 = solver.declare( + format!("x8_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(y8, solver.smt.bvlshr(x, solver.smt.atom("#x0008"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(ret2, ret1), + solver.smt.eq( + ret2, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv8"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y8, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(x8, y8), + solver.smt.eq(x8, x), + ])); + // round 2 + let ret3 = solver.declare( + format!("ret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let y4 = solver.declare( + format!("y4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let x4 = solver.declare( + format!("x4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(y4, solver.smt.bvlshr(x8, solver.smt.atom("#x0004"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(ret3, ret2), + solver.smt.eq( + ret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(x4, y4), + solver.smt.eq(x4, x8), + ])); + // round 3 + let ret4 = solver.declare( + format!("ret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let y2 = solver.declare( + format!("y2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + 
solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let x2 = solver.declare( + format!("x2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(y2, solver.smt.bvlshr(x4, solver.smt.atom("#x0002"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(ret4, ret3), + solver.smt.eq( + ret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(x2, y2), + solver.smt.eq(x2, x4), + ])); + // round 4 + let ret5 = solver.declare( + format!("ret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let y1 = solver.declare( + format!("y1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + let x1 = solver.declare( + format!("x1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume( + solver + .smt + .eq(y1, solver.smt.bvlshr(x2, solver.smt.atom("#x0001"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(ret5, ret4), + solver.smt.eq( + ret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(x1, y1), + solver.smt.eq(x1, x2), + ])); + + // last round + let ret6 = solver.declare( + format!("ret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + x1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(16), + ]), + ), + ]), + solver.smt.eq(ret6, ret5), + solver.smt.eq( + ret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(16), + ]), + ]), + ), + ])); + + if solver.find_widths { + let padding = solver.new_fresh_bits(solver.bitwidth - 16); + solver.smt.concat(padding, ret6) + } else { + ret6 + } +} + +pub fn clz8(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(7, 0, x); + + // 
Generated code. + // total zeros counter + let ret0 = solver.declare( + format!("ret0_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.eq( + ret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + )); + // round 1 + let ret3 = solver.declare( + format!("ret3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let y4 = solver.declare( + format!("y4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let x4 = solver.declare( + format!("x4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume( + solver + .smt + .eq(y4, solver.smt.bvlshr(x, solver.smt.atom("#x04"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(ret3, ret0), + solver.smt.eq( + ret3, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret0, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv4"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(x4, y4), + solver.smt.eq(x4, x), + ])); + // round 2 + let ret4 = solver.declare( + format!("ret4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let y2 = solver.declare( + format!("y2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let x2 = solver.declare( + format!("x2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume( + solver + .smt + .eq(y2, solver.smt.bvlshr(x4, solver.smt.atom("#x02"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(ret4, ret3), + solver.smt.eq( + ret4, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret3, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv2"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y2, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(x2, y2), + solver.smt.eq(x2, x4), + ])); + // round 3 + let ret5 = solver.declare( + format!("ret5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let y1 = solver.declare( + format!("y1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + 
solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + let x1 = solver.declare( + format!("x1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume( + solver + .smt + .eq(y1, solver.smt.bvlshr(x2, solver.smt.atom("#x01"))), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(ret5, ret4), + solver.smt.eq( + ret5, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret4, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + y1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(x1, y1), + solver.smt.eq(x1, x2), + ])); + // last round + let ret6 = solver.declare( + format!("ret6_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.list(vec![ + solver.smt.atom("ite"), + solver.smt.list(vec![ + solver.smt.atom("not"), + solver.smt.eq( + x1, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv0"), + solver.smt.numeral(8), + ]), + ), + ]), + solver.smt.eq(ret6, ret5), + solver.smt.eq( + ret6, + solver.smt.list(vec![ + solver.smt.atom("bvadd"), + ret5, + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("bv1"), + solver.smt.numeral(8), + ]), + ]), + ), + ])); + + if solver.find_widths { + let padding = solver.new_fresh_bits(solver.bitwidth - 8); + solver.smt.concat(padding, ret6) + } else { + ret6 + } +} + +pub fn clz1(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(0, 0, x); + + // Generated code. 
+ let clz1ret = solver.declare( + format!("clz1ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(1), + ]), + ); + solver.assume( + solver + .smt + .eq(clz1ret, solver.smt.list(vec![solver.smt.atom("bvnot"), x])), + ); + + if solver.find_widths { + let padding = solver.new_fresh_bits(solver.bitwidth - 1); + solver.smt.concat(padding, clz1ret) + } else { + clz1ret + } +} diff --git a/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/mod.rs b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/mod.rs new file mode 100644 index 000000000000..44c4a9cbd445 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/mod.rs @@ -0,0 +1,444 @@ +pub mod cls; +pub mod clz; +pub mod popcnt; +pub mod rev; +pub mod subs; + +#[cfg(test)] +mod tests { + use super::*; + use crate::solver::SolverCtx; + use easy_smt::{Response, SExpr}; + use std::collections::HashMap; + use veri_ir::TypeContext; + + fn get_ctx() -> SolverCtx { + let smt = easy_smt::ContextBuilder::new() + .replay_file(Some(std::fs::File::create("encoding_tests.smt2").unwrap())) + .solver("z3", ["-smt2", "-in"]) + .build() + .unwrap(); + SolverCtx { + smt, + find_widths: false, + tyctx: TypeContext { + tyvars: HashMap::new(), + tymap: HashMap::new(), + tyvals: HashMap::new(), + bv_unknown_width_sets: HashMap::new(), + }, + bitwidth: 64, + var_map: HashMap::new(), + width_vars: HashMap::new(), + width_assumptions: vec![], + additional_decls: vec![], + additional_assumptions: vec![], + additional_assertions: vec![], + fresh_bits_idx: 0, + lhs_load_args: None, + rhs_load_args: None, + lhs_store_args: None, + rhs_store_args: None, + lhs_flag: true, + load_return: None, + } + } + + /// Check that the solver encoding meets expectations for the given input and output. + /// Right now, only works for encodings with a single argument that return a value with + /// the same width as the input. + /// Check that the output is equal to the expected output, and no other output is possible. + fn check_unary_encoding_with_solver(encoding: &str, input: &str, output: &str, width: usize) { + let mut ctx = get_ctx(); + + // Set up an input variable + let ty = ctx.smt.bit_vec_sort(ctx.smt.numeral(width)); + let input_var = ctx.declare("input".to_string(), ty); + + // Set the input equal to our expected input + ctx.additional_assumptions + .push(ctx.smt.eq(input_var, ctx.smt.atom(input))); + + // Call the encoding function to be tested + let output_from_call = match (encoding, width) { + ("rev", 8) => rev::rev8(&mut ctx, input_var, 0), + ("rev", 16) => rev::rev16(&mut ctx, input_var, 0), + ("rev", 32) => rev::rev32(&mut ctx, input_var, 0), + ("rev", 64) => rev::rev64(&mut ctx, input_var, 0), + + ("clz", 8) => clz::clz8(&mut ctx, input_var, 0), + ("clz", 16) => clz::clz16(&mut ctx, input_var, 0), + ("clz", 32) => clz::clz32(&mut ctx, input_var, 0), + ("clz", 64) => clz::clz64(&mut ctx, input_var, 0), + + ("cls", 8) => cls::cls8(&mut ctx, input_var, 0), + ("cls", 16) => cls::cls16(&mut ctx, input_var, 0), + ("cls", 32) => cls::cls32(&mut ctx, input_var, 0), + ("cls", 64) => cls::cls64(&mut ctx, input_var, 0), + + ("popcnt", ty) => popcnt::popcnt(&mut ctx, ty, input_var, 0), + _ => unreachable!(), + }; + check_encoding_with_solver(ctx, output_from_call, output, width) + } + + fn check_encoding_with_solver(mut ctx: SolverCtx, call: SExpr, output: &str, width: usize) { + // Extract the width of bits that we care about. 
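+        // Editor's note, not part of the patch: the encodings either return
+        // the narrow result directly or, when `find_widths` is set, pad it to
+        // `ctx.bitwidth` with fresh bits. Extracting the low `width` bits here
+        // makes the comparison against the expected output uniform in both
+        // modes.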
+        let output_care_bits = ctx.smt.extract((width - 1).try_into().unwrap(), 0, call);
+
+        // Bookkeeping: declare constants, assert assumptions
+        for (name, ty) in &ctx.additional_decls {
+            ctx.smt.declare_const(name, *ty).unwrap();
+        }
+        if !ctx.additional_assumptions.is_empty() {
+            ctx.smt
+                .assert(ctx.smt.and_many(ctx.additional_assumptions.clone()))
+                .unwrap();
+        }
+
+        // Check that our expected output is valid
+        ctx.smt.push().unwrap();
+        ctx.smt
+            .assert(ctx.smt.eq(output_care_bits, ctx.smt.atom(output)))
+            .unwrap();
+        if !matches!(ctx.smt.check(), Ok(Response::Sat)) {
+            // The expected output is unreachable: the encoding has a bug.
+            // Pop the output assertion and re-check so we can pull a model
+            // of the output the encoding actually produces.
+            ctx.smt.pop().unwrap();
+            assert!(matches!(ctx.smt.check(), Ok(Response::Sat)));
+
+            let model = ctx.smt.get_model().unwrap();
+            println!("{}", ctx.smt.display(model));
+
+            // Get the actual output value to panic with a useful message
+            let val = ctx.smt.get_value(vec![output_care_bits]).unwrap()[0].1;
+
+            panic!("Expected {}, got {}", output, ctx.display_hex_to_bin(val));
+        } else {
+            ctx.smt.pop().unwrap();
+        }
+
+        // Check that there is no other possible output
+        ctx.smt.push().unwrap();
+        ctx.smt
+            .assert(
+                ctx.smt
+                    .not(ctx.smt.eq(output_care_bits, ctx.smt.atom(output))),
+            )
+            .unwrap();
+        if !matches!(ctx.smt.check(), Ok(Response::Unsat)) {
+            let model = ctx.smt.get_model().unwrap();
+            println!("{}", ctx.smt.display(model));
+
+            // Get the diverging output value to panic with a useful message
+            let val = ctx.smt.get_value(vec![output_care_bits]).unwrap()[0].1;
+            panic!(
+                "Multiple possible outputs! Expected only {}, got {}",
+                output,
+                ctx.display_hex_to_bin(val)
+            );
+        }
+        ctx.smt.pop().unwrap();
+    }
+
+    fn check(ctx: &SolverCtx, expr: SExpr, expected: &str) {
+        let expr_s = format!("{}", ctx.smt.display(expr));
+        assert_eq!(expr_s, expected);
+    }
+
+    #[test]
+    fn rev1_test() {
+        let mut ctx = get_ctx();
+
+        let x = ctx.smt.atom("x");
+        let res = rev::rev1(&mut ctx, x, 42);
+
+        check(&ctx, res, "(concat fresh0 rev1ret_42)");
+        check(&ctx, ctx.additional_decls[0].1, "(_ BitVec 1)");
+        check(
+            &ctx,
+            ctx.additional_assumptions[0],
+            "(= rev1ret_42 ((_ extract 0 0) x))",
+        );
+    }
+
+    #[test]
+    fn test_rev8_with_solver() {
+        check_unary_encoding_with_solver("rev", "#b01010101", "#b10101010", 8);
+        check_unary_encoding_with_solver("rev", "#b11110000", "#b00001111", 8);
+        check_unary_encoding_with_solver("rev", "#b00000000", "#b00000000", 8);
+        check_unary_encoding_with_solver("rev", "#b11111111", "#b11111111", 8);
+    }
+
+    #[test]
+    fn test_rev16_with_solver() {
+        check_unary_encoding_with_solver("rev", "#b0101010101010101", "#b1010101010101010", 16);
+        check_unary_encoding_with_solver("rev", "#b1111111100000000", "#b0000000011111111", 16);
+        check_unary_encoding_with_solver("rev", "#b0000000000000000", "#b0000000000000000", 16);
+        check_unary_encoding_with_solver("rev", "#b1111111111111111", "#b1111111111111111", 16);
+    }
+
+    #[test]
+    fn test_rev32_with_solver() {
+        check_unary_encoding_with_solver(
+            "rev",
+            "#b01010101010101010101010101010101",
+            "#b10101010101010101010101010101010",
+            32,
+        );
+        check_unary_encoding_with_solver(
+            "rev",
+            "#b11111111111111110000000000000000",
+            "#b00000000000000001111111111111111",
+            32,
+        );
+        check_unary_encoding_with_solver(
+            "rev",
+            "#b00000000000000000000000000000000",
+            "#b00000000000000000000000000000000",
+            32,
+        );
+        check_unary_encoding_with_solver(
+            "rev",
+            "#b11111111111111111111111111111111",
"#b11111111111111111111111111111111", + 32, + ); + } + + #[test] + fn test_rev64_with_solver() { + check_unary_encoding_with_solver( + "rev", + "#b0101010101010101010101010101010101010101010101010101010101010101", + "#b1010101010101010101010101010101010101010101010101010101010101010", + 64, + ); + check_unary_encoding_with_solver( + "rev", + "#b1111111111111111111111111111111100000000000000000000000000000000", + "#b0000000000000000000000000000000011111111111111111111111111111111", + 64, + ); + check_unary_encoding_with_solver( + "rev", + "#b0000000000000000000000000000000000000000000000000000000000000000", + "#b0000000000000000000000000000000000000000000000000000000000000000", + 64, + ); + check_unary_encoding_with_solver( + "rev", + "#b1111111111111111111111111111111111111111111111111111111111111111", + "#b1111111111111111111111111111111111111111111111111111111111111111", + 64, + ); + } + + #[test] + fn test_clz8_with_solver() { + check_unary_encoding_with_solver("clz", "#b00000000", "#b00001000", 8); + check_unary_encoding_with_solver("clz", "#b01111111", "#b00000001", 8); + check_unary_encoding_with_solver("clz", "#b11111111", "#b00000000", 8); + } + + #[test] + fn test_clz16_with_solver() { + check_unary_encoding_with_solver("clz", "#b0000000000000000", "#b0000000000010000", 16); + check_unary_encoding_with_solver("clz", "#b0000000000000001", "#b0000000000001111", 16); + check_unary_encoding_with_solver("clz", "#b0111111111111111", "#b0000000000000001", 16); + check_unary_encoding_with_solver("clz", "#b1111111111111111", "#b0000000000000000", 16); + } + + #[test] + fn test_clz32_with_solver() { + check_unary_encoding_with_solver( + "clz", + "#b00000000000000000000000000000000", + "#b00000000000000000000000000100000", + 32, + ); + check_unary_encoding_with_solver( + "clz", + "#b00000000000000000000000000000001", + "#b00000000000000000000000000011111", + 32, + ); + check_unary_encoding_with_solver( + "clz", + "#b01000000000000000000000000000000", + "#b00000000000000000000000000000001", + 32, + ); + check_unary_encoding_with_solver( + "clz", + "#b11111111111111111111111111111111", + "#b00000000000000000000000000000000", + 32, + ); + } + + #[test] + fn test_clz64_with_solver() { + check_unary_encoding_with_solver( + "clz", + "#b0000000000000000000000000000000000000000000000000000000000000000", + "#b0000000000000000000000000000000000000000000000000000000001000000", + 64, + ); + check_unary_encoding_with_solver( + "clz", + "#b0000000000000000000000000000000000000000000000000000000000000001", + "#b0000000000000000000000000000000000000000000000000000000000111111", + 64, + ); + check_unary_encoding_with_solver( + "clz", + "#b0100000000000000000000000000000000000000000000000000000000000000", + "#b0000000000000000000000000000000000000000000000000000000000000001", + 64, + ); + check_unary_encoding_with_solver( + "clz", + "#b1111111111111111111111111111111111111111111111111111111111111111", + "#b0000000000000000000000000000000000000000000000000000000000000000", + 64, + ); + } + + #[test] + fn test_cls8_with_solver() { + check_unary_encoding_with_solver("cls", "#b00000000", "#b00000111", 8); + check_unary_encoding_with_solver("cls", "#b01111111", "#b00000000", 8); + check_unary_encoding_with_solver("cls", "#b00111111", "#b00000001", 8); + check_unary_encoding_with_solver("cls", "#b11000000", "#b00000001", 8); + check_unary_encoding_with_solver("cls", "#b11111111", "#b00000111", 8); + } + + #[test] + fn test_cls16_with_solver() { + check_unary_encoding_with_solver("cls", "#b0000000000000000", 
"#b0000000000001111", 16); + check_unary_encoding_with_solver("cls", "#b0111111111111111", "#b0000000000000000", 16); + check_unary_encoding_with_solver("cls", "#b0011111111111111", "#b0000000000000001", 16); + check_unary_encoding_with_solver("cls", "#b1111111111111111", "#b0000000000001111", 16); + } + + #[test] + fn test_cls32_with_solver() { + check_unary_encoding_with_solver( + "cls", + "#b00000000000000000000000000000000", + "#b00000000000000000000000000011111", + 32, + ); + check_unary_encoding_with_solver( + "cls", + "#b01111111111111111111111111111111", + "#b00000000000000000000000000000000", + 32, + ); + check_unary_encoding_with_solver( + "cls", + "#b00100000000000000000000000000000", + "#b00000000000000000000000000000001", + 32, + ); + check_unary_encoding_with_solver( + "cls", + "#b11111111111111111111111111111111", + "#b00000000000000000000000000011111", + 32, + ); + } + + #[test] + fn test_cls64_with_solver() { + check_unary_encoding_with_solver( + "cls", + "#b0000000000000000000000000000000000000000000000000000000000000000", + "#b0000000000000000000000000000000000000000000000000000000000111111", + 64, + ); + check_unary_encoding_with_solver( + "cls", + "#b0010000000000000000000000000000000000000000000000000000000000000", + "#b0000000000000000000000000000000000000000000000000000000000000001", + 64, + ); + check_unary_encoding_with_solver( + "cls", + "#b0111111111111111111111111111111111111111111111111111111111111111", + "#b0000000000000000000000000000000000000000000000000000000000000000", + 64, + ); + check_unary_encoding_with_solver( + "cls", + "#b1111111111111111111111111111111111111111111111111111111111111111", + "#b0000000000000000000000000000000000000000000000000000000000111111", + 64, + ); + } + + #[test] + fn test_popcnt_8_with_solver() { + check_unary_encoding_with_solver("popcnt", "#b00000000", "#b00000000", 8); + check_unary_encoding_with_solver("popcnt", "#b11111111", "#b00001000", 8); + check_unary_encoding_with_solver("popcnt", "#b01010101", "#b00000100", 8); + } + + fn check_subs_with_solver(width: usize, x_str: &str, y_str: &str, output: &str) { + let mut ctx = get_ctx(); + + // Set up an input variable + let ty = ctx.smt.bit_vec_sort(ctx.smt.numeral(width)); + let x = ctx.declare("x".to_string(), ty); + let y = ctx.declare("y".to_string(), ty); + + // Set the input equal to our expected input + ctx.additional_assumptions + .push(ctx.smt.eq(x, ctx.smt.atom(x_str))); + ctx.additional_assumptions + .push(ctx.smt.eq(y, ctx.smt.atom(y_str))); + + // Call the encoding function to be tested + let call = subs::subs(&mut ctx, width, x, y, 0); + + // Output width always 68 bits + check_encoding_with_solver(ctx, call, output, 68) + } + + #[test] + fn test_subs_32_with_solver() { + check_subs_with_solver( + 32, + "#b00000000000000000000000000000000", + "#b00000000000000000000000000000000", + "#b01100000000000000000000000000000000000000000000000000000000000000000", + ); + + check_subs_with_solver( + 32, + "#b11111111111111111111111111111111", + "#b00000000000000000000000000000000", + "#b10100000000000000000000000000000000011111111111111111111111111111111", + ); + + check_subs_with_solver( + 32, + "#b10000000000010000000000000000000", + "#b00100111110000100011111110111000", + "#b00110000000000000000000000000000000001011000010001011100000001001000", + ); + } + + #[test] + fn test_subs_64_with_solver() { + check_subs_with_solver( + 64, + "#b0000000000000000000000000000000000000000000000000000000000000000", + 
"#b0000000000000000000000000000000000000000000000000000000000000000", + "#b01100000000000000000000000000000000000000000000000000000000000000000", + ); + } +} diff --git a/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/popcnt.rs b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/popcnt.rs new file mode 100644 index 000000000000..283fc27ec337 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/popcnt.rs @@ -0,0 +1,26 @@ +use crate::solver::SolverCtx; +use easy_smt::SExpr; + +// Future work: possibly move these into the annotation language or an SMTLIB prelude + +// Encoding strategy borrowed from +// https://github.com/fitzgen/synth-loop-free-prog/blob/6d04857693e4688eff4a36537840ba682353c2f3/src/component.rs#L219 +pub fn popcnt(s: &mut SolverCtx, ty: usize, x: SExpr, id: u32) -> SExpr { + let mut bits: Vec<_> = (0..ty) + .map(|i| s.zero_extend(7, s.smt.extract(i as i32, i as i32, x))) + .collect(); + let initial = bits.pop().unwrap(); + let r = bits.iter().fold(initial, |a, b| s.smt.bvadd(a, *b)); + + let id = format!("{ty}_{id}"); + let result = s.declare( + format!("popcnt_{id}"), + s.smt.list(vec![ + s.smt.atoms().und, + s.smt.atom("BitVec"), + s.smt.numeral(8), + ]), + ); + s.assume(s.smt.eq(result, r)); + result +} diff --git a/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/rev.rs b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/rev.rs new file mode 100644 index 000000000000..16543f7b734a --- /dev/null +++ b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/rev.rs @@ -0,0 +1,408 @@ +use crate::solver::SolverCtx; +use easy_smt::SExpr; + +// Future work: possibly move these into the annotation language or an SMTLIB prelude + +pub fn rev64(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + // Generated code. 
+ let x1 = solver.declare( + format!("x1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + x1, + solver.smt.bvor( + solver.smt.bvlshr(x, solver.smt.atom("#x0000000000000020")), + solver.smt.bvshl(x, solver.smt.atom("#x0000000000000020")), + ), + )); + let x2 = solver.declare( + format!("x2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + x2, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x1, solver.smt.atom("#xffff0000ffff0000")), + solver.smt.atom("#x0000000000000010"), + ), + solver.smt.bvshl( + solver.smt.bvand(x1, solver.smt.atom("#x0000ffff0000ffff")), + solver.smt.atom("#x0000000000000010"), + ), + ), + )); + let x3 = solver.declare( + format!("x3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + x3, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x2, solver.smt.atom("#xff00ff00ff00ff00")), + solver.smt.atom("#x0000000000000008"), + ), + solver.smt.bvshl( + solver.smt.bvand(x2, solver.smt.atom("#x00ff00ff00ff00ff")), + solver.smt.atom("#x0000000000000008"), + ), + ), + )); + let x4 = solver.declare( + format!("x4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + x4, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x3, solver.smt.atom("#xf0f0f0f0f0f0f0f0")), + solver.smt.atom("#x0000000000000004"), + ), + solver.smt.bvshl( + solver.smt.bvand(x3, solver.smt.atom("#x0f0f0f0f0f0f0f0f")), + solver.smt.atom("#x0000000000000004"), + ), + ), + )); + let x5 = solver.declare( + format!("x5_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + x5, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x4, solver.smt.atom("#xcccccccccccccccc")), + solver.smt.atom("#x0000000000000002"), + ), + solver.smt.bvshl( + solver.smt.bvand(x4, solver.smt.atom("#x3333333333333333")), + solver.smt.atom("#x0000000000000002"), + ), + ), + )); + let rev64ret = solver.declare( + format!("rev64ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(64), + ]), + ); + solver.assume(solver.smt.eq( + rev64ret, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x5, solver.smt.atom("#xaaaaaaaaaaaaaaaa")), + solver.smt.atom("#x0000000000000001"), + ), + solver.smt.bvshl( + solver.smt.bvand(x5, solver.smt.atom("#x5555555555555555")), + solver.smt.atom("#x0000000000000001"), + ), + ), + )); + + rev64ret +} + +pub fn rev32(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(31, 0, x); + + // Generated code. 
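+    // Same swap-network as rev64 above, restricted to the low 32 bits.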
+ let x1 = solver.declare( + format!("x1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.eq( + x1, + solver.smt.bvor( + solver.smt.bvlshr(x, solver.smt.atom("#x00000010")), + solver.smt.bvshl(x, solver.smt.atom("#x00000010")), + ), + )); + let x2 = solver.declare( + format!("x2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.eq( + x2, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x1, solver.smt.atom("#xff00ff00")), + solver.smt.atom("#x00000008"), + ), + solver.smt.bvshl( + solver.smt.bvand(x1, solver.smt.atom("#x00ff00ff")), + solver.smt.atom("#x00000008"), + ), + ), + )); + let x3 = solver.declare( + format!("x3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.eq( + x3, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x2, solver.smt.atom("#xf0f0f0f0")), + solver.smt.atom("#x00000004"), + ), + solver.smt.bvshl( + solver.smt.bvand(x2, solver.smt.atom("#x0f0f0f0f")), + solver.smt.atom("#x00000004"), + ), + ), + )); + let x4 = solver.declare( + format!("x4_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.eq( + x4, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x3, solver.smt.atom("#xcccccccc")), + solver.smt.atom("#x00000002"), + ), + solver.smt.bvshl( + solver.smt.bvand(x3, solver.smt.atom("#x33333333")), + solver.smt.atom("#x00000002"), + ), + ), + )); + let rev32ret = solver.declare( + format!("rev32ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(32), + ]), + ); + solver.assume(solver.smt.eq( + rev32ret, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x4, solver.smt.atom("#xaaaaaaaa")), + solver.smt.atom("#x00000001"), + ), + solver.smt.bvshl( + solver.smt.bvand(x4, solver.smt.atom("#x55555555")), + solver.smt.atom("#x00000001"), + ), + ), + )); + + rev32ret +} + +pub fn rev16(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(15, 0, x); + + // Generated code. 
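+    // Same pattern at 16 bits: swap bytes, then nibbles, bit-pairs, and bits.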
+ let x1 = solver.declare( + format!("x1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.eq( + x1, + solver.smt.bvor( + solver.smt.bvlshr(x, solver.smt.atom("#x0008")), + solver.smt.bvshl(x, solver.smt.atom("#x0008")), + ), + )); + let x2 = solver.declare( + format!("x2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.eq( + x2, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x1, solver.smt.atom("#xf0f0")), + solver.smt.atom("#x0004"), + ), + solver.smt.bvshl( + solver.smt.bvand(x1, solver.smt.atom("#x0f0f")), + solver.smt.atom("#x0004"), + ), + ), + )); + let x3 = solver.declare( + format!("x3_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.eq( + x3, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x2, solver.smt.atom("#xcccc")), + solver.smt.atom("#x0002"), + ), + solver.smt.bvshl( + solver.smt.bvand(x2, solver.smt.atom("#x3333")), + solver.smt.atom("#x0002"), + ), + ), + )); + let rev16ret = solver.declare( + format!("rev16ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(16), + ]), + ); + solver.assume(solver.smt.eq( + rev16ret, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x3, solver.smt.atom("#xaaaa")), + solver.smt.atom("#x0001"), + ), + solver.smt.bvshl( + solver.smt.bvand(x3, solver.smt.atom("#x5555")), + solver.smt.atom("#x0001"), + ), + ), + )); + + let padding = solver.new_fresh_bits(solver.bitwidth - 16); + solver.smt.concat(padding, rev16ret) +} + +pub fn rev8(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(7, 0, x); + + // Generated code. + let x1 = solver.declare( + format!("x1_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.eq( + x1, + solver.smt.bvor( + solver.smt.bvlshr(x, solver.smt.atom("#x04")), + solver.smt.bvshl(x, solver.smt.atom("#x04")), + ), + )); + let x2 = solver.declare( + format!("x2_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.eq( + x2, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x1, solver.smt.atom("#xcc")), + solver.smt.atom("#x02"), + ), + solver.smt.bvshl( + solver.smt.bvand(x1, solver.smt.atom("#x33")), + solver.smt.atom("#x02"), + ), + ), + )); + let rev8ret = solver.declare( + format!("rev8ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(8), + ]), + ); + solver.assume(solver.smt.eq( + rev8ret, + solver.smt.bvor( + solver.smt.bvlshr( + solver.smt.bvand(x2, solver.smt.atom("#xaa")), + solver.smt.atom("#x01"), + ), + solver.smt.bvshl( + solver.smt.bvand(x2, solver.smt.atom("#x55")), + solver.smt.atom("#x01"), + ), + ), + )); + + let padding = solver.new_fresh_bits(solver.bitwidth - 8); + solver.smt.concat(padding, rev8ret) +} + +pub fn rev1(solver: &mut SolverCtx, x: SExpr, id: u32) -> SExpr { + let x = solver.smt.extract(0, 0, x); + + // Generated code. 
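+    // Reversing a single bit is the identity, so the result is the extracted
+    // bit itself, padded back up to the full bitwidth below.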
+ let rev1ret = solver.declare( + format!("rev1ret_{id}", id = id), + solver.smt.list(vec![ + solver.smt.atoms().und, + solver.smt.atom("BitVec"), + solver.smt.numeral(1), + ]), + ); + solver.assume(solver.smt.eq(rev1ret, x)); + + let padding = solver.new_fresh_bits(solver.bitwidth - 1); + solver.smt.concat(padding, rev1ret) +} diff --git a/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/subs.rs b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/subs.rs new file mode 100644 index 000000000000..042836ee7d6f --- /dev/null +++ b/cranelift/isle/veri/veri_engine/src/solver/encoded_ops/subs.rs @@ -0,0 +1,147 @@ +use crate::solver::SolverCtx; +use easy_smt::SExpr; + +// Future work: likely remove this when we add rule-chaining + +// Build the results of a subtraction with flags. Put the 4 flags in the high bits. +// Encoding adapted from SAIL ISLA: https://github.com/rems-project/isla +// +// N: Set to 1 when the result of the operation is negative +// Z: Set to 1 when the result of the operation is zero +// C: Set to 1 when the operation results in a carry, or when a subtraction results in no borrow +// V: Set to 1 when the operation causes overflow +// +// 67 66 65 64 63 ... 0 +// [ N | Z | C | V | ... result ... ] +pub fn subs(s: &mut SolverCtx, ty: usize, x: SExpr, y: SExpr, id: u32) -> SExpr { + let id = format!("{ty}_{id}"); + let (size, wide_size, x, y, zero, one, w_minus_one) = match ty { + 32 => ( + s.smt.numeral(32), + s.smt.numeral(32 * 2), + s.smt.extract(31, 0, x), + s.smt.extract(31, 0, y), + s.bv(0, 32), + s.bv(1, 32 * 2), + s.bv(31, 32), + ), + 64 => ( + s.smt.numeral(64), + s.smt.numeral(64 * 2), + s.smt.extract(63, 0, x), + s.smt.extract(63, 0, y), + s.bv(0, 64), + s.bv(1, 64 * 2), + s.bv(63, 64), + ), + _ => unreachable!(), + }; + + let b0 = s.bv(0, 1); + let b1 = s.bv(1, 1); + + // (define-const ynot (bvnot y)) + let ynot = s.declare( + format!("ynot_{id}", id = id), + s.smt + .list(vec![s.smt.atoms().und, s.smt.atom("BitVec"), size]), + ); + s.assume(s.smt.eq(ynot, s.smt.bvnot(y))); + + // (define-const + // subs_wide + // (bvadd (bvadd ((_ zero_extend 64) x) ((_ zero_extend 64) ynot)) #x00000000000000000000000000000001)) + let subs_wide = s.declare( + format!("subs_wide_{id}", id = id), + s.smt + .list(vec![s.smt.atoms().und, s.smt.atom("BitVec"), wide_size]), + ); + s.assume(s.smt.eq( + subs_wide, + s.smt.bvadd( + s.smt.bvadd(s.zero_extend(ty, x), s.zero_extend(ty, ynot)), + one, + ), + )); + + // (define-const subs ((_ extract 63 0) subs_wide)) + let subs = s.declare( + format!("subs_{id}", id = id), + s.smt + .list(vec![s.smt.atoms().und, s.smt.atom("BitVec"), size]), + ); + s.assume(s.smt.eq( + subs, + s.smt.extract((ty - 1).try_into().unwrap(), 0, subs_wide), + )); + + // (define-const flags + // (concat (concat (concat + // ((_ extract 0 0) (bvlshr subs #x000000000000003f)) + // (ite (= subs #x0000000000000000) #b1 #b0)) + // (ite (= ((_ zero_extend 64) subs) subs_wide) #b0 #b1)) + // (ite (= ((_ sign_extend 64) subs) (bvadd (bvadd ((_ sign_extend 64) x) ((_ sign_extend 64) ynot)) #x00000000000000000000000000000001)) #b0 #b1))) + let flags = s.declare( + format!("flags_{id}", id = id), + s.smt.list(vec![ + s.smt.atoms().und, + s.smt.atom("BitVec"), + s.smt.numeral(4), + ]), + ); + + // N: Set to 1 when the result of the operation is negative + // Z: Set to 1 when the result of the operation is zero + // C: Set to 1 when the operation results in a carry, or when a subtraction results in no borrow + // V: Set to 1 when the operation causes overflow + 
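// For example (matching the subs tests in encoded_ops/mod.rs), subs(32, 0, 0)
+    // yields a zero result with no borrow and no overflow, so the flag nibble
+    // is NZCV = #b0110 and the full 68-bit output is #b0110 followed by 64 zeros.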
+    s.assume(
+        s.smt.eq(
+            flags,
+            s.smt.concat(
+                s.smt.concat(
+                    s.smt.concat(
+                        // N flag: result is negative
+                        s.smt.extract(0, 0, s.smt.bvlshr(subs, w_minus_one)),
+                        // Z flag: result is zero
+                        s.smt.ite(s.smt.eq(subs, zero), b1, b0),
+                    ),
+                    // C flag: result has carry/subtraction has no borrow
+                    s.smt
+                        .ite(s.smt.eq(s.zero_extend(ty, subs), subs_wide), b0, b1),
+                ),
+                // V: operation causes overflow
+                s.smt.ite(
+                    s.smt.eq(
+                        s.sign_extend(ty, subs),
+                        s.smt.bvadd(
+                            s.smt.bvadd(s.sign_extend(ty, x), s.sign_extend(ty, ynot)),
+                            one,
+                        ),
+                    ),
+                    b0,
+                    b1,
+                ),
+            ),
+        ),
+    );
+
+    let ret = s.declare(
+        format!("subs_ret_{id}", id = id),
+        s.smt.list(vec![
+            s.smt.atoms().und,
+            s.smt.atom("BitVec"),
+            s.smt.numeral(68),
+        ]),
+    );
+
+    s.assume(s.smt.eq(
+        ret,
+        match ty {
+            // Pad 32 back to full reg width of 64 before adding flags to the left
+            32 => s.smt.concat(flags, s.zero_extend(ty, subs)),
+            64 => s.smt.concat(flags, subs),
+            _ => unreachable!(),
+        },
+    ));
+    ret
+}
diff --git a/cranelift/isle/veri/veri_engine/src/termname.rs b/cranelift/isle/veri/veri_engine/src/termname.rs
new file mode 100644
index 000000000000..7e3b2cb9b6db
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/src/termname.rs
@@ -0,0 +1,30 @@
+use cranelift_isle as isle;
+use isle::sema::{Pattern, TermEnv, TypeEnv};
+
+/// Check whether the pattern (the LHS term) contains a given term name,
+/// including in any subterms.
+pub fn pattern_contains_termname(
+    pat: &Pattern,
+    name: &str,
+    termenv: &TermEnv,
+    typeenv: &TypeEnv,
+) -> bool {
+    match pat {
+        Pattern::BindPattern(..)
+        | Pattern::Var(..)
+        | Pattern::ConstInt(..)
+        | Pattern::ConstPrim(..)
+        | Pattern::Wildcard(..) => false,
+        Pattern::Term(_, termid, arg_patterns) => {
+            let term = &termenv.terms[termid.index()];
+            let term_name = &typeenv.syms[term.name.index()];
+            (term_name == name)
+                || arg_patterns
+                    .iter()
+                    .any(|p| pattern_contains_termname(p, name, termenv, typeenv))
+        }
+        Pattern::And(_, children) => children
+            .iter()
+            .any(|p| pattern_contains_termname(p, name, termenv, typeenv)),
+    }
+}
diff --git a/cranelift/isle/veri/veri_engine/src/type_inference.rs b/cranelift/isle/veri/veri_engine/src/type_inference.rs
new file mode 100644
index 000000000000..85e79a362ca2
--- /dev/null
+++ b/cranelift/isle/veri/veri_engine/src/type_inference.rs
@@ -0,0 +1,2420 @@
+use itertools::Itertools;
+use std::collections::{HashMap, HashSet};
+use std::hash::Hash;
+
+use crate::annotations::AnnotationEnv;
+use crate::termname::pattern_contains_termname;
+use cranelift_isle as isle;
+use isle::sema::{Pattern, TermEnv, TermId, TypeEnv, VarId};
+use itertools::izip;
+use veri_ir::{annotation_ir, ConcreteTest, Expr, TermSignature, Type, TypeContext};
+
+use crate::{Config, FLAGS_WIDTH, REG_WIDTH};
+
+#[derive(Clone, Debug)]
+struct RuleParseTree {
+    // a map of var name to type variable, where var could be
+    // Pattern::Var or var used in Pattern::BindPattern
+    varid_to_type_var_map: HashMap<VarId, u32>,
+    // a map of type var to value, if known
+    type_var_to_val_map: HashMap<u32, i128>,
+    // bookkeeping that tells the next unused type var
+    next_type_var: u32,
+    // combined constraints from all nodes
+    concrete_constraints: HashSet<TypeExpr>,
+    var_constraints: HashSet<TypeExpr>,
+    bv_constraints: HashSet<TypeExpr>,
+
+    ty_vars: HashMap<veri_ir::Expr, u32>,
+    quantified_vars: HashSet<(String, u32)>,
+    free_vars: HashSet<(String, u32)>,
+    // Used to check distinct models
+    term_input_bvs: Vec<String>,
+    // Used for custom verification conditions
+    term_args: Vec<String>,
+    lhs_assumptions: Vec<veri_ir::Expr>,
+    rhs_assumptions: Vec<veri_ir::Expr>,
+
+    rhs_assertions: Vec<veri_ir::Expr>,
+    concrete: Option<ConcreteTest>,
+}
+
+#[derive(Clone, Debug)]
+pub enum TypeVarConstruct {
+    Var,
+    BindPattern,
+    Wildcard(u32),
+    Term(TermId),
+    Const(i128),
+    Let(Vec<String>),
+    And,
+}
+
+#[derive(Clone, Debug)]
+pub struct TypeVarNode {
+    ident: String,
+    construct: TypeVarConstruct,
+    type_var: u32,
+    children: Vec<TypeVarNode>,
+    assertions: Vec<veri_ir::Expr>,
+}
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+// Constraints either assign concrete types to type variables
+// or set them equal to other type variables
+enum TypeExpr {
+    Concrete(u32, annotation_ir::Type),
+    Variable(u32, u32),
+    // The type variable of the first arg is equal to the value of the second
+    WidthInt(u32, u32),
+}
+
+#[derive(Debug)]
+pub struct AnnotationTypeInfo {
+    // map of annotation variable to assigned type var
+    pub term: String,
+    pub var_to_type_var: HashMap<String, u32>,
+}
+
+#[derive(Debug)]
+pub struct RuleSemantics {
+    pub annotation_infos: Vec<AnnotationTypeInfo>,
+
+    // map of type var to solved type
+    pub type_var_to_type: HashMap<u32, annotation_ir::Type>,
+
+    pub lhs: veri_ir::Expr,
+    pub rhs: veri_ir::Expr,
+    pub quantified_vars: Vec<veri_ir::BoundVar>,
+    pub free_vars: Vec<veri_ir::BoundVar>,
+    pub term_input_bvs: Vec<String>,
+    // Used for custom verification conditions
+    pub term_args: Vec<String>,
+    pub lhs_assumptions: Vec<veri_ir::Expr>,
+    pub rhs_assumptions: Vec<veri_ir::Expr>,
+    pub rhs_assertions: Vec<veri_ir::Expr>,
+    pub tyctx: TypeContext,
+}
+
+pub fn type_rules_with_term_and_types(
+    termenv: &TermEnv,
+    typeenv: &TypeEnv,
+    annotation_env: &AnnotationEnv,
+    config: &Config,
+    types: &TermSignature,
+    concrete: &Option<ConcreteTest>,
+) -> HashMap<isle::sema::RuleId, RuleSemantics> {
+    let mut solutions = HashMap::new();
+
+    for rule in &termenv.rules {
+        // Only type rules with the given term on the LHS
+        if !pattern_contains_termname(
+            // Hack for now: typeid not used
+            &Pattern::Term(
+                cranelift_isle::sema::TypeId(0),
+                rule.root_term,
+                rule.args.clone(),
+            ),
+            &config.term,
+            termenv,
+            typeenv,
+        ) {
+            continue;
+        }
+        if let Some(names) = &config.names {
+            if rule.name.is_none() {
+                continue;
+            }
+            let name = &typeenv.syms[rule.name.unwrap().index()];
+            if !names.contains(name) {
+                continue;
+            }
+        }
+        if let Some(s) = type_annotations_using_rule(
+            rule,
+            annotation_env,
+            typeenv,
+            termenv,
+            &config.term,
+            types,
+            concrete,
+        ) {
+            solutions.insert(rule.id, s);
+        }
+    }
+    solutions
+}
+
+fn convert_type(aty: &annotation_ir::Type) -> veri_ir::Type {
+    match aty {
+        annotation_ir::Type::BitVectorUnknown(..) => veri_ir::Type::BitVector(None),
+        annotation_ir::Type::BitVector => veri_ir::Type::BitVector(None),
+        annotation_ir::Type::BitVectorWithWidth(w) => veri_ir::Type::BitVector(Some(*w)),
+        annotation_ir::Type::Int => veri_ir::Type::Int,
+        annotation_ir::Type::Bool => veri_ir::Type::Bool,
+        annotation_ir::Type::Unit => veri_ir::Type::Unit,
+        annotation_ir::Type::Poly(_) => veri_ir::Type::BitVector(None),
+    }
+}
+
+fn type_annotations_using_rule<'a>(
+    rule: &'a isle::sema::Rule,
+    annotation_env: &'a AnnotationEnv,
+    typeenv: &'a TypeEnv,
+    termenv: &'a TermEnv,
+    term: &String,
+    types: &TermSignature,
+    concrete: &'a Option<ConcreteTest>,
+) -> Option<RuleSemantics> {
+    let mut parse_tree = RuleParseTree {
+        varid_to_type_var_map: HashMap::new(),
+        type_var_to_val_map: HashMap::new(),
+        next_type_var: 1,
+        concrete_constraints: HashSet::new(),
+        var_constraints: HashSet::new(),
+        bv_constraints: HashSet::new(),
+        ty_vars: HashMap::new(),
+        quantified_vars: HashSet::new(),
+        free_vars: HashSet::new(),
+        term_input_bvs: vec![],
+        term_args: vec![],
+        lhs_assumptions: vec![],
+        rhs_assumptions: vec![],
+        rhs_assertions: vec![],
+        concrete: concrete.clone(),
+    };
+
+    let mut annotation_infos = vec![];
+    if !rule.iflets.is_empty() {
+        for iflet in &rule.iflets {
+            let iflet_lhs = &mut create_parse_tree_pattern(
+                rule,
+                &iflet.lhs,
+                &mut parse_tree,
+                typeenv,
+                termenv,
+                term,
+                types,
+            );
+            let iflet_rhs =
+                &mut create_parse_tree_expr(rule, &iflet.rhs, &mut parse_tree, typeenv, termenv);
+
+            let iflet_lhs_expr = add_rule_constraints(
+                &mut parse_tree,
+                iflet_lhs,
+                termenv,
+                typeenv,
+                annotation_env,
+                &mut annotation_infos,
+                false,
+            );
+            iflet_lhs_expr.as_ref()?;
+
+            let iflet_rhs_expr = add_rule_constraints(
+                &mut parse_tree,
+                iflet_rhs,
+                termenv,
+                typeenv,
+                annotation_env,
+                &mut annotation_infos,
+                false,
+            );
+            iflet_rhs_expr.as_ref()?;
+            parse_tree
+                .var_constraints
+                .insert(TypeExpr::Variable(iflet_lhs.type_var, iflet_rhs.type_var));
+            // Add if-lets to the LHS
+            parse_tree.lhs_assumptions.push(veri_ir::Expr::Binary(
+                veri_ir::BinaryOp::Eq,
+                Box::new(iflet_lhs_expr.unwrap()),
+                Box::new(iflet_rhs_expr.unwrap()),
+            ));
+        }
+    }
+    let lhs = &mut create_parse_tree_pattern(
+        rule,
+        // Hack for now: typeid not used
+        &isle::sema::Pattern::Term(
+            cranelift_isle::sema::TypeId(0),
+            rule.root_term,
+            rule.args.clone(),
+        ),
+        &mut parse_tree,
+        typeenv,
+        termenv,
+        term,
+        types,
+    );
+    let rhs = &mut create_parse_tree_expr(rule, &rule.rhs, &mut parse_tree, typeenv, termenv);
+
+    log::trace!("LHS:");
+    let lhs_expr = add_rule_constraints(
+        &mut parse_tree,
+        lhs,
+        termenv,
+        typeenv,
+        annotation_env,
+        &mut annotation_infos,
+        false,
+    );
+    lhs_expr.as_ref()?;
+    log::trace!("\n\tRHS:");
+    let rhs_expr = add_rule_constraints(
+        &mut parse_tree,
+        rhs,
+        termenv,
+        typeenv,
+        annotation_env,
+        &mut annotation_infos,
+        true,
+    );
+    rhs_expr.as_ref()?;
+
+    match (lhs_expr, rhs_expr) {
+        (Some(lhs_expr), Some(rhs_expr)) => {
+            parse_tree
+                .var_constraints
+                .insert(TypeExpr::Variable(lhs.type_var, rhs.type_var));
+
+            let (solution, bv_unknown_width_sets) = solve_constraints(
+                parse_tree.concrete_constraints,
+                parse_tree.var_constraints,
+                parse_tree.bv_constraints,
+                &mut parse_tree.type_var_to_val_map,
+                Some(&parse_tree.ty_vars),
+            );
+
+            let mut tymap = HashMap::new();
+
+            for (expr, t) in &parse_tree.ty_vars {
+                if let Some(ty) = solution.get(t) {
+                    tymap.insert(*t, convert_type(ty));
+                } else {
+                    panic!("missing type variable {} in solution for: {:?}", t, expr);
+                }
+            }
+            let mut quantified_vars = vec![];
+            for (s, t) in parse_tree.quantified_vars.iter().sorted() {
+                let expr = veri_ir::Expr::Terminal(veri_ir::Terminal::Var(s.clone()));
+                if let Some(ty) = solution.get(t) {
+                    let ty = convert_type(ty);
+                    parse_tree.ty_vars.insert(expr, *t);
+                    tymap.insert(*t, ty);
+                    quantified_vars.push(veri_ir::BoundVar {
+                        name: s.clone(),
+                        tyvar: *t,
+                    });
+                } else {
+                    panic!("missing type variable {} in solution for: {:?}", t, expr);
+                }
+            }
+            let mut free_vars = vec![];
+            for (s, t) in parse_tree.free_vars {
+                let expr = veri_ir::Expr::Terminal(veri_ir::Terminal::Var(s.clone()));
+                if let Some(ty) = solution.get(&t) {
+                    let ty = convert_type(ty);
+                    parse_tree.ty_vars.insert(expr, t);
+                    tymap.insert(t, ty);
+                    free_vars.push(veri_ir::BoundVar { name: s, tyvar: t });
+                } else {
+                    panic!("missing type variable {} in solution for: {:?}", t, expr);
+                }
+            }
+
+            Some(RuleSemantics {
+                annotation_infos,
+                type_var_to_type: solution,
+                lhs: lhs_expr,
+                rhs: rhs_expr,
+                lhs_assumptions: parse_tree.lhs_assumptions,
+                rhs_assumptions: parse_tree.rhs_assumptions,
+                rhs_assertions: parse_tree.rhs_assertions,
+                quantified_vars,
+                free_vars,
+                term_input_bvs: parse_tree.term_input_bvs,
+                term_args: parse_tree.term_args,
+                tyctx: TypeContext {
+                    tyvars: parse_tree.ty_vars.clone(),
+                    tymap,
+                    tyvals: parse_tree.type_var_to_val_map,
+                    bv_unknown_width_sets,
+                },
+            })
+        }
+        _ => None,
+    }
+}
+
+fn const_fold_to_int(e: &veri_ir::Expr) -> Option<i128> {
+    match e {
+        Expr::Terminal(veri_ir::Terminal::Const(c, _)) => Some(*c),
+        _ => None,
+    }
+}
+
+fn add_annotation_constraints(
+    expr: annotation_ir::Expr,
+    tree: &mut RuleParseTree,
+    annotation_info: &mut AnnotationTypeInfo,
+) -> (veri_ir::Expr, u32) {
+    let (e, t) = match expr {
+        annotation_ir::Expr::Var(x, ..) => {
+            if !annotation_info.var_to_type_var.contains_key(&x) {
+                panic!("Error: unbound variable: {}", x);
+            }
+            let t = annotation_info.var_to_type_var[&x];
+            let name = format!("{}__{}__{}", annotation_info.term, x, t);
+            (veri_ir::Expr::Terminal(veri_ir::Terminal::Var(name)), t)
+        }
+        annotation_ir::Expr::Const(c, ..) => {
+            let t = tree.next_type_var;
+            let e = veri_ir::Expr::Terminal(veri_ir::Terminal::Const(c.value, t));
+            match c.ty {
+                annotation_ir::Type::BitVector => {
+                    let ty = annotation_ir::Type::BitVectorWithWidth(c.width);
+                    tree.concrete_constraints.insert(TypeExpr::Concrete(t, ty));
+                }
+                _ => {
+                    tree.concrete_constraints
+                        .insert(TypeExpr::Concrete(t, c.ty.clone()));
+                }
+            }
+            tree.next_type_var += 1;
+
+            // If constant is known, add the value to the tree.
Useful for + // capturing isleTypes + tree.type_var_to_val_map.insert(t, c.value); + (e, t) + } + annotation_ir::Expr::True => { + let t = tree.next_type_var; + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + + tree.next_type_var += 1; + (veri_ir::Expr::Terminal(veri_ir::Terminal::True), t) + } + annotation_ir::Expr::False => { + let t = tree.next_type_var; + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + + tree.next_type_var += 1; + (veri_ir::Expr::Terminal(veri_ir::Terminal::False), t) + } + + annotation_ir::Expr::WidthOf(x) => { + let (ex, tx) = add_annotation_constraints(*x.clone(), tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + tree.bv_constraints + .insert(TypeExpr::Concrete(tx, annotation_ir::Type::BitVector)); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Int)); + tree.concrete_constraints.insert(TypeExpr::WidthInt(tx, t)); + (veri_ir::Expr::WidthOf(Box::new(ex)), t) + } + + annotation_ir::Expr::Eq(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::Eq, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::Imp(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::Bool)); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::Bool)); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::Imp, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::Lte(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::Lte, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::Not(x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::Bool)); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + + tree.next_type_var += 1; + (veri_ir::Expr::Unary(veri_ir::UnaryOp::Not, Box::new(e1)), t) + } + annotation_ir::Expr::Or(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::Bool)); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::Bool)); + + tree.next_type_var += 1; + ( 
+ veri_ir::Expr::Binary(veri_ir::BinaryOp::Or, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::And(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::Bool)); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::Bool)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::And, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVSgt(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVSgt, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVSgte(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVSgte, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVSlt(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVSlt, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVSlte(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVSlte, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVUgt(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVUgt, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVUgte(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( 
+ veri_ir::Expr::Binary(veri_ir::BinaryOp::BVUgte, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVUlt(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVUlt, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVUlte(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVUlte, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVSaddo(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVSaddo, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVNeg(x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + + let t = tree.next_type_var; + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Unary(veri_ir::UnaryOp::BVNeg, Box::new(e1)), + t, + ) + } + annotation_ir::Expr::BVNot(x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + + let t = tree.next_type_var; + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Unary(veri_ir::UnaryOp::BVNot, Box::new(e1)), + t, + ) + } + + annotation_ir::Expr::BVMul(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVMul, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVUDiv(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + 
tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVUDiv, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVSDiv(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVSDiv, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVAdd(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVAdd, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVSub(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVSub, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVUrem(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVUrem, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVSrem(x, 
y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVSrem, Box::new(e1), Box::new(e2)), + t, + ) + } + + annotation_ir::Expr::BVAnd(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVAnd, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVOr(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVOr, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVXor(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVXor, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVRotl(x, a) => { + let (xe, xt) = add_annotation_constraints(*x, tree, annotation_info); + let (ae, at) = add_annotation_constraints(*a, tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + + tree.bv_constraints + .insert(TypeExpr::Concrete(xt, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(at, annotation_ir::Type::BitVector)); + 
tree.var_constraints.insert(TypeExpr::Variable(t, xt)); + + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVRotl, Box::new(xe), Box::new(ae)), + t, + ) + } + annotation_ir::Expr::BVRotr(x, a) => { + let (xe, xt) = add_annotation_constraints(*x, tree, annotation_info); + let (ae, at) = add_annotation_constraints(*a, tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + + tree.bv_constraints + .insert(TypeExpr::Concrete(xt, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(at, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t, xt)); + + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVRotr, Box::new(xe), Box::new(ae)), + t, + ) + } + annotation_ir::Expr::BVShl(x, a) => { + let (xe, xt) = add_annotation_constraints(*x, tree, annotation_info); + let (ae, at) = add_annotation_constraints(*a, tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + + tree.bv_constraints + .insert(TypeExpr::Concrete(xt, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(at, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t, xt)); + tree.var_constraints.insert(TypeExpr::Variable(xt, at)); + + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVShl, Box::new(xe), Box::new(ae)), + t, + ) + } + annotation_ir::Expr::BVShr(x, a) => { + let (xe, xt) = add_annotation_constraints(*x, tree, annotation_info); + let (ae, at) = add_annotation_constraints(*a, tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + + tree.bv_constraints + .insert(TypeExpr::Concrete(xt, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(at, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t, xt)); + tree.var_constraints.insert(TypeExpr::Variable(xt, at)); + + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVShr, Box::new(xe), Box::new(ae)), + t, + ) + } + annotation_ir::Expr::BVAShr(x, a) => { + let (xe, xt) = add_annotation_constraints(*x, tree, annotation_info); + let (ae, at) = add_annotation_constraints(*a, tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + + tree.bv_constraints + .insert(TypeExpr::Concrete(xt, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(at, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t, xt)); + tree.var_constraints.insert(TypeExpr::Variable(at, xt)); + + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::BVAShr, Box::new(xe), Box::new(ae)), + t, + ) + } + annotation_ir::Expr::Lt(x, y) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Binary(veri_ir::BinaryOp::Lt, Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVConvTo(w, x) => { + let (we, wt) = add_annotation_constraints(*w, tree, annotation_info); + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + + // In the dynamic case, we don't know the width at this point + tree.concrete_constraints + .insert(TypeExpr::Concrete(wt, annotation_ir::Type::Int)); + + if let Some(w) = 
const_fold_to_int(&we) { + tree.concrete_constraints.insert(TypeExpr::Concrete( + t, + annotation_ir::Type::BitVectorWithWidth(w.try_into().unwrap()), + )); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + let t2 = tree.next_type_var; + tree.next_type_var += 1; + let width = Expr::Terminal(veri_ir::Terminal::Const(w, t2)); + tree.type_var_to_val_map.insert(t2, w); + tree.ty_vars.insert(width.clone(), t2); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::Int)); + (veri_ir::Expr::BVConvTo(Box::new(width), Box::new(e1)), t) + } else { + tree.concrete_constraints.insert(TypeExpr::WidthInt(t, wt)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + + (veri_ir::Expr::BVConvTo(Box::new(we), Box::new(e1)), t) + } + } + annotation_ir::Expr::BVSignExtToVarWidth(w, x) => { + let (we, wt) = add_annotation_constraints(*w, tree, annotation_info); + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + + // In the dynamic case, we don't know the width at this point + tree.concrete_constraints + .insert(TypeExpr::Concrete(wt, annotation_ir::Type::Int)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + + ( + veri_ir::Expr::BVSignExtToVarWidth(Box::new(we), Box::new(e1)), + t, + ) + } + annotation_ir::Expr::BVZeroExtTo(w, x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + + let width = match *w { + veri_ir::annotation_ir::Width::Const(c) => c, + veri_ir::annotation_ir::Width::RegWidth => REG_WIDTH, + }; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.concrete_constraints.insert(TypeExpr::Concrete( + t, + annotation_ir::Type::BitVectorWithWidth(width), + )); + + (veri_ir::Expr::BVZeroExtTo(width, Box::new(e1)), t) + } + annotation_ir::Expr::BVZeroExtToVarWidth(w, x) => { + let (we, wt) = add_annotation_constraints(*w, tree, annotation_info); + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let t = tree.next_type_var; + tree.next_type_var += 1; + + // In the dynamic case, we don't know the width at this point + tree.concrete_constraints + .insert(TypeExpr::Concrete(wt, annotation_ir::Type::Int)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + + ( + veri_ir::Expr::BVZeroExtToVarWidth(Box::new(we), Box::new(e1)), + t, + ) + } + annotation_ir::Expr::BVSignExtTo(w, x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let t = tree.next_type_var; + + let width = match *w { + veri_ir::annotation_ir::Width::Const(c) => c, + veri_ir::annotation_ir::Width::RegWidth => REG_WIDTH, + }; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.concrete_constraints.insert(TypeExpr::Concrete( + t, + annotation_ir::Type::BitVectorWithWidth(width), + )); + + tree.next_type_var += 1; + + (veri_ir::Expr::BVSignExtTo(width, Box::new(e1)), t) + } + annotation_ir::Expr::BVExtract(l, r, x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let t = 
tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.concrete_constraints.insert(TypeExpr::Concrete( + t, + annotation_ir::Type::BitVectorWithWidth(l - r + 1), + )); + + tree.next_type_var += 1; + + (veri_ir::Expr::BVExtract(l, r, Box::new(e1)), t) + } + annotation_ir::Expr::BVConcat(xs) => { + // AVH todo: doesn't sum the various widths, has to be done in the solver + let t = tree.next_type_var; + tree.next_type_var += 1; + + let mut exprs = vec![]; + for x in xs { + let (xe, xt) = add_annotation_constraints(x, tree, annotation_info); + tree.bv_constraints + .insert(TypeExpr::Concrete(xt, annotation_ir::Type::BitVector)); + exprs.push(xe); + } + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + + tree.next_type_var += 1; + + (veri_ir::Expr::BVConcat(exprs), t) + } + annotation_ir::Expr::BVIntToBv(w, x) => { + let (ex, tx) = add_annotation_constraints(*x.clone(), tree, annotation_info); + + let t = tree.next_type_var; + tree.next_type_var += 1; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(tx, annotation_ir::Type::Int)); + + tree.concrete_constraints.insert(TypeExpr::Concrete( + t, + annotation_ir::Type::BitVectorWithWidth(w), + )); + + (veri_ir::Expr::BVIntToBV(w, Box::new(ex)), t) + } + annotation_ir::Expr::BVToInt(x) => { + let (ex, tx) = add_annotation_constraints(*x.clone(), tree, annotation_info); + + let t = tree.next_type_var; + tree.next_type_var += 1; + + tree.bv_constraints + .insert(TypeExpr::Concrete(tx, annotation_ir::Type::BitVector)); + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Int)); + + (veri_ir::Expr::BVToInt(Box::new(ex)), t) + } + annotation_ir::Expr::Conditional(c, t, e) => { + let (e1, t1) = add_annotation_constraints(*c, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*t, tree, annotation_info); + let (e3, t3) = add_annotation_constraints(*e, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::Bool)); + tree.var_constraints.insert(TypeExpr::Variable(t2, t3)); + tree.var_constraints.insert(TypeExpr::Variable(t, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::Conditional(Box::new(e1), Box::new(e2), Box::new(e3)), + t, + ) + } + annotation_ir::Expr::Switch(c, cases) => { + let (c_expr, c_t) = add_annotation_constraints(*c, tree, annotation_info); + + let t = tree.next_type_var; + tree.next_type_var += 1; + + let mut case_exprs = vec![]; + for (m, b) in cases { + let (case_expr, case_t) = + add_annotation_constraints(m.clone(), tree, annotation_info); + let (body_expr, body_t) = + add_annotation_constraints(b.clone(), tree, annotation_info); + + tree.var_constraints.insert(TypeExpr::Variable(c_t, case_t)); + tree.var_constraints.insert(TypeExpr::Variable(t, body_t)); + case_exprs.push((case_expr, body_expr)); + } + (veri_ir::Expr::Switch(Box::new(c_expr), case_exprs), t) + } + annotation_ir::Expr::CLZ(x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + + let t = tree.next_type_var; + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + + tree.next_type_var += 1; + (veri_ir::Expr::CLZ(Box::new(e1)), t) + } + annotation_ir::Expr::CLS(x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, 
annotation_info); + + let t = tree.next_type_var; + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + + tree.next_type_var += 1; + (veri_ir::Expr::CLS(Box::new(e1)), t) + } + annotation_ir::Expr::Rev(x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + + let t = tree.next_type_var; + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + + tree.next_type_var += 1; + (veri_ir::Expr::Rev(Box::new(e1)), t) + } + annotation_ir::Expr::BVSubs(ty, x, y) => { + let (e0, t0) = add_annotation_constraints(*ty, tree, annotation_info); + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + + let t = tree.next_type_var; + + // For aarch64, subs sets 4 flags. Model these as 4 bits appended to the left of the + // register. + tree.concrete_constraints.insert(TypeExpr::Concrete( + t, + annotation_ir::Type::BitVectorWithWidth(REG_WIDTH + FLAGS_WIDTH), + )); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t0, annotation_ir::Type::Int)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t1, t2)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::BVSubs(Box::new(e0), Box::new(e1), Box::new(e2)), + t, + ) + } + annotation_ir::Expr::BVPopcnt(x) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.var_constraints.insert(TypeExpr::Variable(t, t1)); + + tree.next_type_var += 1; + (veri_ir::Expr::BVPopcnt(Box::new(e1)), t) + } + annotation_ir::Expr::LoadEffect(x, y, z) => { + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let (e3, t3) = add_annotation_constraints(*z, tree, annotation_info); + let t = tree.next_type_var; + + tree.bv_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::BitVector)); + tree.concrete_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::Int)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t3, annotation_ir::Type::BitVector)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::LoadEffect(Box::new(e1), Box::new(e2), Box::new(e3)), + t, + ) + } + annotation_ir::Expr::StoreEffect(w, x, y, z) => { + let (e0, t0) = add_annotation_constraints(*w, tree, annotation_info); + let (e1, t1) = add_annotation_constraints(*x, tree, annotation_info); + let (e2, t2) = add_annotation_constraints(*y, tree, annotation_info); + let (e3, t3) = add_annotation_constraints(*z, tree, annotation_info); + let t = tree.next_type_var; + + tree.concrete_constraints + .insert(TypeExpr::Concrete(t, annotation_ir::Type::Unit)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t0, annotation_ir::Type::BitVector)); + 
tree.concrete_constraints + .insert(TypeExpr::Concrete(t1, annotation_ir::Type::Int)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t2, annotation_ir::Type::BitVector)); + tree.bv_constraints + .insert(TypeExpr::Concrete(t3, annotation_ir::Type::BitVector)); + + tree.next_type_var += 1; + ( + veri_ir::Expr::StoreEffect(Box::new(e0), Box::new(e1), Box::new(e2), Box::new(e3)), + t, + ) + } + }; + tree.ty_vars.insert(e.clone(), t); + // let fmt = format!("{}:\t{:?}", t, e); + // dbg!(fmt); + (e, t) +} + +fn add_isle_constraints( + term: &isle::sema::Term, + tree: &mut RuleParseTree, + annotation_env: &AnnotationEnv, + annotation_info: &mut AnnotationTypeInfo, + annotation: annotation_ir::TermSignature, +) { + let mut annotation_vars = vec![]; + for a in annotation.args { + annotation_vars.push(a.name); + } + annotation_vars.push(annotation.ret.name); + + let mut isle_types = vec![]; + for arg_ty in term.arg_tys.iter() { + isle_types.push(*arg_ty); + } + isle_types.push(term.ret_ty); + assert_eq!(annotation_vars.len(), isle_types.len()); + + for (isle_type_id, annotation_var) in isle_types.iter().zip(annotation_vars) { + // in case the var was not in the annotation + if !annotation_info + .var_to_type_var + .contains_key(&annotation_var) + { + let type_var = tree.next_type_var; + tree.next_type_var += 1; + + annotation_info + .var_to_type_var + .insert(annotation_var.clone(), type_var); + } + + if let Some(ir_type) = annotation_env.model_map.get(isle_type_id) { + let type_var = annotation_info.var_to_type_var[&annotation_var]; + match ir_type { + annotation_ir::Type::BitVector => tree + .bv_constraints + .insert(TypeExpr::Concrete(type_var, ir_type.clone())), + _ => tree + .concrete_constraints + .insert(TypeExpr::Concrete(type_var, ir_type.clone())), + }; + } + } +} + +fn add_rule_constraints( + tree: &mut RuleParseTree, + curr: &mut TypeVarNode, + termenv: &TermEnv, + typeenv: &TypeEnv, + annotation_env: &AnnotationEnv, + annotation_infos: &mut Vec<AnnotationTypeInfo>, + rhs: bool, +) -> Option<veri_ir::Expr> { + // Only relate args to annotations for terms. For leaves, return immediately. + // For recursive definitions without annotations (like And and Let), recur. + let mut children = vec![]; + for child in &mut curr.children { + if let Some(e) = add_rule_constraints( + tree, + child, + termenv, + typeenv, + annotation_env, + annotation_infos, + rhs, + ) { + children.push(e); + } else { + return None; + } + } + let e = match &curr.construct { + TypeVarConstruct::Var => { + tree.quantified_vars + .insert((curr.ident.clone(), curr.type_var)); + tree.free_vars.insert((curr.ident.clone(), curr.type_var)); + Some(veri_ir::Expr::Terminal(veri_ir::Terminal::Var( + curr.ident.clone(), + ))) + } + TypeVarConstruct::BindPattern => { + assert_eq!(children.len(), 2); + let eq = veri_ir::Expr::Binary( + veri_ir::BinaryOp::Eq, + Box::new(children[0].clone()), + Box::new(children[1].clone()), + ); + if rhs { + tree.rhs_assumptions.push(eq); + } else { + tree.lhs_assumptions.push(eq); + } + Some(children[0].clone()) + } + TypeVarConstruct::Wildcard(i) => { + Some(veri_ir::Expr::Terminal(veri_ir::Terminal::Wildcard(*i))) + } + TypeVarConstruct::Const(i) => { + // If constant is known, add the value to the tree. 
Useful for + // capturing ISLE types + tree.type_var_to_val_map.insert(curr.type_var, *i); + + Some(veri_ir::Expr::Terminal(veri_ir::Terminal::Const( + *i, + curr.type_var, + ))) + } + TypeVarConstruct::And => { + tree.quantified_vars + .insert((curr.ident.clone(), curr.type_var)); + let first = &children[0]; + for (i, e) in children.iter().enumerate() { + if i != 0 { + let eq = veri_ir::Expr::Binary( + veri_ir::BinaryOp::Eq, + Box::new(first.clone()), + Box::new(e.clone()), + ); + if rhs { + tree.rhs_assumptions.push(eq); + } else { + tree.lhs_assumptions.push(eq); + } + } + } + Some(first.to_owned()) + } + TypeVarConstruct::Let(bound) => { + tree.quantified_vars + .insert((curr.ident.clone(), curr.type_var)); + for (e, s) in children.iter().zip(bound) { + let eq = veri_ir::Expr::Binary( + veri_ir::BinaryOp::Eq, + Box::new(veri_ir::Expr::Terminal(veri_ir::Terminal::Var( + s.to_owned(), + ))), + Box::new(e.to_owned()), + ); + if rhs { + tree.rhs_assumptions.push(eq); + } else { + tree.lhs_assumptions.push(eq); + } + } + children.last().cloned() + } + TypeVarConstruct::Term(term_id) => { + let term = &termenv.terms[term_id.index()]; + let term_name = typeenv.syms[term.name.index()].clone(); + + // Print term for debugging + log::trace!(" {}", term_name); + + tree.quantified_vars + .insert((curr.ident.clone(), curr.type_var)); + let a = annotation_env.get_annotation_for_term(term_id); + if a.is_none() { + log::error!("\nSkipping rule with unannotated term: {}", term_name); + return None; + } + let annotation = a.unwrap(); + + // Test code only: support providing concrete inputs + if let Some(concrete) = &tree.concrete { + if concrete.termname == term_name { + for (child, node, input) in + izip!(&children, curr.children.iter(), &concrete.args) + { + let type_var = tree.next_type_var; + tree.next_type_var += 1; + let lit = veri_ir::Expr::Terminal(veri_ir::Terminal::Literal( + input.literal.clone(), + type_var, + )); + tree.var_constraints + .insert(TypeExpr::Variable(node.type_var, type_var)); + tree.ty_vars.insert(lit.clone(), type_var); + let eq = veri_ir::Expr::Binary( + veri_ir::BinaryOp::Eq, + Box::new(child.clone()), + Box::new(lit), + ); + curr.assertions.push(eq.clone()); + if rhs { + tree.rhs_assumptions.push(eq); + } else { + tree.lhs_assumptions.push(eq); + } + } + } + } + + // Use a fresh mapping for each term, but keep the same mapping + // between assertions in the same annotation. + let mut annotation_info = AnnotationTypeInfo { + term: curr.ident.clone(), + var_to_type_var: HashMap::new(), + }; + for arg in &annotation.sig.args { + annotation_info + .var_to_type_var + .insert(arg.name.clone(), tree.next_type_var); + tree.next_type_var += 1; + } + annotation_info + .var_to_type_var + .insert(annotation.sig.ret.name.clone(), tree.next_type_var); + tree.next_type_var += 1; + + for expr in annotation.assumptions { + let (typed_expr, _) = add_annotation_constraints(*expr, tree, &mut annotation_info); + curr.assertions.push(typed_expr.clone()); + if rhs { + tree.rhs_assumptions.push(typed_expr); + } else { + tree.lhs_assumptions.push(typed_expr); + } + add_isle_constraints( + term, + tree, + annotation_env, + &mut annotation_info, + annotation.sig.clone(), + ); + } + // For assertions: assume globally when not on the RHS, otherwise assert + for expr in annotation.assertions { + let (typed_expr, _) = add_annotation_constraints(*expr, tree, &mut annotation_info); + curr.assertions.push(typed_expr.clone()); + add_isle_constraints( + term, + tree, + annotation_env, + &mut annotation_info, + 
annotation.sig.clone(), + ); + if rhs { + tree.rhs_assertions.push(typed_expr); + } else { + tree.lhs_assumptions.push(typed_expr); + } + } + + // set args in rule equal to args in annotation + for (child, arg) in curr.children.iter().zip(&annotation.sig.args) { + let rule_type_var = child.type_var; + if !annotation_info.var_to_type_var.contains_key(&arg.name) { + continue; + } + let annotation_type_var = annotation_info.var_to_type_var[&arg.name]; + + // essentially constant propagate: if we know the value from the rule arg being + // provided as a literal, propagate this to the annotation. + if let Some(c) = tree.type_var_to_val_map.get(&rule_type_var) { + tree.type_var_to_val_map.insert(annotation_type_var, *c); + } + tree.var_constraints + .insert(TypeExpr::Variable(rule_type_var, annotation_type_var)); + } + + for (child, arg) in children.iter().zip(&annotation.sig.args) { + let annotation_type_var = annotation_info.var_to_type_var[&arg.name]; + let arg_name = format!( + "{}__{}__{}", + annotation_info.term, arg.name, annotation_type_var + ); + tree.quantified_vars + .insert((arg_name.clone(), annotation_type_var)); + let eq = veri_ir::Expr::Binary( + veri_ir::BinaryOp::Eq, + Box::new(child.clone()), + Box::new(veri_ir::Expr::Terminal(veri_ir::Terminal::Var(arg_name))), + ); + if rhs { + tree.rhs_assumptions.push(eq); + } else { + tree.lhs_assumptions.push(eq); + } + } + // set term ret var equal to annotation ret var + let ret_var = annotation_info.var_to_type_var[&annotation.sig.ret.name]; + tree.var_constraints + .insert(TypeExpr::Variable(curr.type_var, ret_var)); + let ret_name = format!( + "{}__{}__{}", + annotation_info.term, annotation.sig.ret.name, ret_var + ); + tree.quantified_vars.insert((ret_name.clone(), ret_var)); + let eq = veri_ir::Expr::Binary( + veri_ir::BinaryOp::Eq, + Box::new(veri_ir::Expr::Terminal(veri_ir::Terminal::Var( + curr.ident.clone(), + ))), + Box::new(veri_ir::Expr::Terminal(veri_ir::Terminal::Var(ret_name))), + ); + if rhs { + tree.rhs_assumptions.push(eq); + } else { + tree.lhs_assumptions.push(eq); + } + + annotation_infos.push(annotation_info); + Some(veri_ir::Expr::Terminal(veri_ir::Terminal::Var( + curr.ident.clone(), + ))) + } + }; + if let Some(e) = e { + tree.ty_vars.insert(e.clone(), curr.type_var); + Some(e) + } else { + None + } +} + +// Solve constraints as follows: +// - process concrete constraints first +// - then process variable constraints +// - constraints involving bv without widths are last priority +// +// for example: +// t2 = bv16 +// t3 = bv8 +// +// t5 = t4 +// t6 = t1 +// t3 = t4 +// t1 = t2 +// t7 = t8 +// +// t4 = bv +// t1 = bv +// t7 = bv +// +// would result in: +// bv16 -> t2, t6, t1 +// bv8 -> t3, t5, t4 +// poly(0) -> t5, t4 (intermediate group that gets removed) +// poly(1) -> t6, t1 (intermediate group that gets removed) +// poly(2) -> t7, t8 (intermediate group that gets removed) +// bv -> t7, t8 + +// TODO: clean up +fn solve_constraints( + concrete: HashSet<TypeExpr>, + var: HashSet<TypeExpr>, + bv: HashSet<TypeExpr>, + vals: &mut HashMap<u32, i128>, + ty_vars: Option<&HashMap<veri_ir::Expr, u32>>, +) -> (HashMap<u32, annotation_ir::Type>, HashMap<u32, u32>) { + // maintain a union find that maps types to sets of type vars that have that type + let mut union_find = HashMap::new(); + let mut poly = 0; + + let mut iterate = || { + // initialize union find with groups corresponding to concrete constraints + for c in &concrete { + match c { + TypeExpr::Concrete(v, t) => { + if !union_find.contains_key(t) { + union_find.insert(t.clone(), HashSet::new()); + } + if let Some(group) = union_find.get_mut(t) { + 
group.insert(*v); + } + } + TypeExpr::WidthInt(v, w) => { + if let Some(c) = vals.get(w) { + let width: usize = (*c).try_into().unwrap(); + let ty = annotation_ir::Type::BitVectorWithWidth(width); + if !union_find.contains_key(&ty) { + union_find.insert(ty.clone(), HashSet::new()); + } + if let Some(group) = union_find.get_mut(&ty) { + group.insert(*v); + } + } + } + _ => panic!( + "Non-concrete constraint found in concrete constraints: {:#?}", + c + ), + }; + } + + // process variable constraints as follows: + // if t1 = t2 and only t1 has been typed, add t2 to the same set as t1 + // if t1 = t2 and only t2 has been typed, add t1 to the same set t2 + // if t1 = t2 and neither has been typed, create a new poly type and add both to the set + // if t1 = t2 and both have been typed, union appropriately + for v in &var { + match v { + TypeExpr::Variable(v1, v2) => { + let t1 = get_var_type(*v1, &union_find); + let t2 = get_var_type(*v2, &union_find); + + match (t1, t2) { + (Some(x), Some(y)) => { + match (x.is_poly(), y.is_poly()) { + (false, false) => { + if x != y { + let e1 = ty_vars.unwrap().iter().find_map(|(k, &v)| { + if v == *v1 { + Some(k) + } else { + None + } + }); + let e2 = ty_vars.unwrap().iter().find_map(|(k, &v)| { + if v == *v2 { + Some(k) + } else { + None + } + }); + match (e1, e2) { + (Some(e1), Some(e2)) => + panic!( + "type conflict\n\t{}\nhas type\n\t{}\nbut\n\t{}\nhas type\n\t{}", + e1, x, e2, y + ), + _ => continue, + } + } + } + // union t1 and t2, keeping t2 as the leader + (true, false) => { + let g1 = + union_find.remove(&x).expect("expected key in union find"); + let g2 = + union_find.get_mut(&y).expect("expected key in union find"); + g2.extend(g1.iter()); + } + // union t1 and t2, keeping t1 as the leader + (_, true) => { + // guard against the case where x and y have the same poly type + // so we remove the key and can't find it in the next line + if x != y { + let g2 = union_find + .remove(&y) + .expect("expected key in union find"); + let g1 = union_find + .get_mut(&x) + .expect("expected key in union find"); + g1.extend(g2.iter()); + } + } + }; + } + (Some(x), None) => { + if let Some(group) = union_find.get_mut(&x) { + group.insert(*v2); + } + } + (None, Some(x)) => { + if let Some(group) = union_find.get_mut(&x) { + group.insert(*v1); + } + } + (None, None) => { + let t = annotation_ir::Type::Poly(poly); + union_find.insert(t.clone(), HashSet::new()); + if let Some(group) = union_find.get_mut(&t) { + group.insert(*v1); + group.insert(*v2); + } + poly += 1; + } + } + } + _ => panic!("Non-variable constraint found in var constraints: {:#?}", v), + } + } + + for b in &bv { + match b { + TypeExpr::Concrete(v, ref t) => { + match t { + annotation_ir::Type::BitVector => { + // if there's a bv constraint and the var has already + // been typed (with a width), ignore the constraint + if let Some(var_type) = get_var_type_concrete(*v, &union_find) { + match var_type { + annotation_ir::Type::BitVectorWithWidth(_) => { + continue; + } + annotation_ir::Type::BitVectorUnknown(_) => { + continue; + } + _ => { + let e = ty_vars + .unwrap() + .iter() + .find_map( + |(k, &u)| if u == *v { Some(k) } else { None }, + ) + .unwrap(); + panic!("Var was already typed as {:#?} but currently processing constraint: {:#?}\n{:?}", var_type, b, e) + } + } + + // otherwise add it to a generic bv bucket + } else { + // if !union_find.contains_key(t) { + // union_find.insert(t.clone(), HashSet::new()); + // } + // if let Some(group) = union_find.get_mut(t) { + // group.insert(v); + // } + 
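+                            // Key the unknown-width type by this type var so each var starts in its own bucket.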
let unknown_by_tyvar = annotation_ir::Type::BitVectorUnknown(*v); + let mut set = HashSet::new(); + set.insert(*v); + union_find.insert(unknown_by_tyvar.clone(), set); + + // if this type var also has a polymorphic type, union + if let Some(var_type) = get_var_type_poly(*v, &union_find) { + let poly_bucket = union_find + .remove(&var_type) + .expect("expected key in union find"); + let bv_bucket = union_find + .get_mut(&unknown_by_tyvar) + .expect("expected key in union find"); + bv_bucket.extend(poly_bucket.iter()); + } + } + } + _ => panic!("Non-bv constraint found in bv constraints: {:#?}", b), + } + } + TypeExpr::Variable(_, _) => { + panic!("Non-bv constraint found in bv constraints: {:#?}", b) + } + TypeExpr::WidthInt(_, _) => { + panic!("Non-bv constraint found in bv constraints: {:#?}", b) + } + } + } + for c in &concrete { + if let TypeExpr::WidthInt(v, w) = c { + if let Some(annotation_ir::Type::BitVectorWithWidth(width)) = + get_var_type_concrete(*v, &union_find) + { + vals.insert(*w, width as i128); + } + } + } + }; + + iterate(); + + let mut result = HashMap::new(); + let mut bv_unknown_width_sets = HashMap::new(); + let mut bv_unknown_width_idx = 0u32; + for (t, vars) in union_find { + for var in &vars { + result.insert(*var, t.clone()); + } + if matches!(t, annotation_ir::Type::BitVectorUnknown(..)) { + for var in &vars { + bv_unknown_width_sets.insert(*var, bv_unknown_width_idx); + } + bv_unknown_width_idx += 1; + } + } + (result, bv_unknown_width_sets) +} + +// if the union find already contains the type var, return its type +// otherwise return None +fn get_var_type( + t: u32, + u: &HashMap<annotation_ir::Type, HashSet<u32>>, +) -> Option<annotation_ir::Type> { + for (ty, vars) in u { + if vars.contains(&t) { + return Some(ty.clone()); + } + } + None +} + +// If the union find contains the type var and it has a non-polymorphic, specific type +// return it. Otherwise return None. +fn get_var_type_concrete( + t: u32, + u: &HashMap<annotation_ir::Type, HashSet<u32>>, +) -> Option<annotation_ir::Type> { + for (ty, vars) in u { + match ty { + annotation_ir::Type::Poly(_) | annotation_ir::Type::BitVector => continue, + _ => { + if vars.contains(&t) { + return Some(ty.clone()); + } + } + } + } + None +} + +// If the union find contains the type var and it has a polymorphic type, +// return the polymorphic type. Otherwise return None. 
+fn get_var_type_poly( + t: u32, + u: &HashMap<annotation_ir::Type, HashSet<u32>>, +) -> Option<annotation_ir::Type> { + for (ty, vars) in u { + match ty { + annotation_ir::Type::Poly(_) => { + if vars.contains(&t) { + return Some(ty.clone()); + } + } + _ => continue, + } + } + None +} + +fn annotation_type_for_vir_type(ty: &Type) -> annotation_ir::Type { + match ty { + Type::BitVector(Some(x)) => annotation_ir::Type::BitVectorWithWidth(*x), + Type::BitVector(None) => annotation_ir::Type::BitVector, + Type::Bool => annotation_ir::Type::Bool, + Type::Int => annotation_ir::Type::Int, + Type::Unit => annotation_ir::Type::Unit, + } +} + +fn create_parse_tree_pattern( + rule: &isle::sema::Rule, + pattern: &isle::sema::Pattern, + tree: &mut RuleParseTree, + typeenv: &TypeEnv, + termenv: &TermEnv, + term: &String, + types: &TermSignature, +) -> TypeVarNode { + match pattern { + isle::sema::Pattern::Term(_, term_id, args) => { + let sym = termenv.terms[term_id.index()].name; + let name = typeenv.syms[sym.index()].clone(); + + let mut assertions = vec![]; + // process children first + let mut children = vec![]; + for (i, arg) in args.iter().enumerate() { + let child = + create_parse_tree_pattern(rule, arg, tree, typeenv, termenv, term, types); + + // Our specified input term, use external types + if name.eq(term) { + tree.concrete_constraints.insert(TypeExpr::Concrete( + child.type_var, + annotation_type_for_vir_type(&types.args[i]), + )); + + // If this is a bitvector, mark the name for the assumption feasibility check + if let Type::BitVector(Some(w)) = &types.args[i] { + tree.term_input_bvs.push(child.ident.clone()); + + // Hack: width matching + let lit = veri_ir::Expr::Terminal(veri_ir::Terminal::Const(*w as i128, 0)); + let eq = veri_ir::Expr::Binary( + veri_ir::BinaryOp::Eq, + Box::new(veri_ir::Expr::WidthOf(Box::new(veri_ir::Expr::Terminal( + veri_ir::Terminal::Var(child.ident.clone()), + )))), + Box::new(lit), + ); + assertions.push(eq); + } + tree.term_args.push(child.ident.clone()) + } + children.push(child); + } + let type_var = tree.next_type_var; + tree.next_type_var += 1; + + if name.eq(term) { + tree.concrete_constraints.insert(TypeExpr::Concrete( + type_var, + annotation_type_for_vir_type(&types.ret), + )); + // Hack: width matching + if let Type::BitVector(Some(w)) = &types.ret { + let lit = veri_ir::Expr::Terminal(veri_ir::Terminal::Const(*w as i128, 0)); + let eq = veri_ir::Expr::Binary( + veri_ir::BinaryOp::Eq, + Box::new(veri_ir::Expr::WidthOf(Box::new(veri_ir::Expr::Terminal( + veri_ir::Terminal::Var(format!("{}__{}", name, type_var)), + )))), + Box::new(lit), + ); + assertions.push(eq); + } + } + + TypeVarNode { + ident: format!("{}__{}", name, type_var), + construct: TypeVarConstruct::Term(*term_id), + type_var, + children, + assertions, + } + } + isle::sema::Pattern::Var(_, var_id) => { + let sym = rule.vars[var_id.index()].name; + let ident = typeenv.syms[sym.index()].clone(); + + let type_var = tree + .varid_to_type_var_map + .entry(*var_id) + .or_insert(tree.next_type_var); + if *type_var == tree.next_type_var { + tree.next_type_var += 1; + } + let ident = format!("{}__clif{}__{}", ident, var_id.index(), *type_var); + // this is a base case so there are no children + TypeVarNode { + ident, + construct: TypeVarConstruct::Var, + type_var: *type_var, + children: vec![], + assertions: vec![], + } + } + isle::sema::Pattern::BindPattern(_, var_id, subpat) => { + let sym = rule.vars[var_id.index()].name; + + let type_var = *tree + .varid_to_type_var_map + .entry(*var_id) + .or_insert(tree.next_type_var); + if type_var == 
tree.next_type_var { + tree.next_type_var += 1; + } + + let ident = format!( + "{}__clif{}__{}", + typeenv.syms[sym.index()], + var_id.index(), + type_var + ); + + // this is a base case so there are no children + let var_node = TypeVarNode { + ident: ident.clone(), + construct: TypeVarConstruct::Var, + type_var, + children: vec![], + assertions: vec![], + }; + + let subpat_node = + create_parse_tree_pattern(rule, subpat, tree, typeenv, termenv, term, types); + + let bind_type_var = tree.next_type_var; + tree.next_type_var += 1; + + tree.var_constraints + .insert(TypeExpr::Variable(type_var, subpat_node.type_var)); + tree.var_constraints + .insert(TypeExpr::Variable(bind_type_var, type_var)); + tree.var_constraints + .insert(TypeExpr::Variable(bind_type_var, subpat_node.type_var)); + + TypeVarNode { + ident, + construct: TypeVarConstruct::BindPattern, + type_var, + children: vec![var_node, subpat_node], + assertions: vec![], + } + } + isle::sema::Pattern::Wildcard(_) => { + let type_var = tree.next_type_var; + tree.next_type_var += 1; + TypeVarNode { + ident: format!("wildcard__{}", type_var), + construct: TypeVarConstruct::Wildcard(type_var), + type_var, + children: vec![], + assertions: vec![], + } + } + isle::sema::Pattern::ConstPrim(_, sym) => { + let type_var = tree.next_type_var; + tree.next_type_var += 1; + let name = typeenv.syms[sym.index()].clone(); + let val = match name.as_str() { + "I64" => 64, + "I32" => 32, + "I16" => 16, + "I8" => 8, + "true" => 1, + "false" => 0, + // Not currently used, but parsed + "I128" => 16, + _ => todo!("{:?}", &name), + }; + let name = format!("{}__{}", name, type_var); + + TypeVarNode { + ident: name, + construct: TypeVarConstruct::Const(val), + type_var, + children: vec![], + assertions: vec![], + } + } + isle::sema::Pattern::ConstInt(_, num) => { + let type_var = tree.next_type_var; + tree.next_type_var += 1; + let name = format!("{}__{}", num, type_var); + TypeVarNode { + ident: name, + construct: TypeVarConstruct::Const(*num), + type_var, + children: vec![], + assertions: vec![], + } + } + isle::sema::Pattern::And(_, subpats) => { + let mut children = vec![]; + let mut ty_vars = vec![]; + for p in subpats { + let child = create_parse_tree_pattern(rule, p, tree, typeenv, termenv, term, types); + ty_vars.push(child.type_var); + children.push(child); + } + let type_var = tree.next_type_var; + tree.next_type_var += 1; + + // Assert all sub type constraints are equivalent to the first subexpression + let first = ty_vars[0]; + for e in &ty_vars[1..] 
{ + tree.var_constraints + .insert(TypeExpr::Variable(first, e.to_owned())); + } + + TypeVarNode { + ident: String::from("and"), + construct: TypeVarConstruct::And, + type_var, + children, + assertions: vec![], + } + } + } +} + +fn create_parse_tree_expr( + rule: &isle::sema::Rule, + expr: &isle::sema::Expr, + tree: &mut RuleParseTree, + typeenv: &TypeEnv, + termenv: &TermEnv, +) -> TypeVarNode { + match expr { + isle::sema::Expr::Term(_, term_id, args) => { + let sym = termenv.terms[term_id.index()].name; + let name = typeenv.syms[sym.index()].clone(); + + // process children first + let mut children = vec![]; + for arg in args { + let child = create_parse_tree_expr(rule, arg, tree, typeenv, termenv); + children.push(child); + } + let type_var = tree.next_type_var; + tree.next_type_var += 1; + + TypeVarNode { + ident: format!("{}__{}", name, type_var), + construct: TypeVarConstruct::Term(*term_id), + type_var, + children, + assertions: vec![], + } + } + isle::sema::Expr::Var(_, var_id) => { + let mut ident = var_id.0.to_string(); + if var_id.index() < rule.vars.len() { + let sym = rule.vars[var_id.index()].name; + ident.clone_from(&typeenv.syms[sym.index()]) + } else { + println!("var {} not found, using var id instead", var_id.0); + ident = format!("v{ident}"); + } + + let type_var = tree + .varid_to_type_var_map + .entry(*var_id) + .or_insert(tree.next_type_var); + if *type_var == tree.next_type_var { + tree.next_type_var += 1; + } + let ident = format!("{}__clif{}__{}", ident, var_id.index(), *type_var); + // this is a base case so there are no children + TypeVarNode { + ident, + construct: TypeVarConstruct::Var, + type_var: *type_var, + children: vec![], + assertions: vec![], + } + } + isle::sema::Expr::ConstPrim(_, sym) => { + let type_var = tree.next_type_var; + tree.next_type_var += 1; + let name = typeenv.syms[sym.index()].clone(); + let val = match name.as_str() { + "I8" => 8, + "I16" => 16, + "I64" => 64, + "I32" => 32, + "false" => 0, + "true" => 1, + _ => todo!("{:?}", &name), + }; + let name = format!("{}__{}", name, type_var); + TypeVarNode { + ident: name, + construct: TypeVarConstruct::Const(val), + type_var, + children: vec![], + assertions: vec![], + } + } + isle::sema::Expr::ConstInt(_, num) => { + let type_var = tree.next_type_var; + tree.next_type_var += 1; + let name = format!("{}__{}", num, type_var); + TypeVarNode { + ident: name, + construct: TypeVarConstruct::Const(*num), + type_var, + children: vec![], + assertions: vec![], + } + } + isle::sema::Expr::Let { bindings, body, .. 
} => { + let mut children = vec![]; + let mut bound = vec![]; + for (varid, _, expr) in bindings { + let sym = rule.vars[varid.index()].name; + let var = typeenv.syms[sym.index()].clone(); + let subpat_node = create_parse_tree_expr(rule, expr, tree, typeenv, termenv); + + let ty_var = tree.next_type_var; + tree.next_type_var += 1; + + tree.var_constraints + .insert(TypeExpr::Variable(ty_var, subpat_node.type_var)); + + tree.varid_to_type_var_map.insert(*varid, ty_var); + children.push(subpat_node); + let ident = format!("{}__clif{}__{}", var, varid.index(), ty_var); + tree.quantified_vars.insert((ident.clone(), ty_var)); + bound.push(ident); + } + let body = create_parse_tree_expr(rule, body, tree, typeenv, termenv); + let body_var = body.type_var; + children.push(body); + + let type_var = tree.next_type_var; + tree.next_type_var += 1; + + let name = format!("let__{}", type_var); + + // The let should have the same type as the body + tree.var_constraints + .insert(TypeExpr::Variable(type_var, body_var)); + + TypeVarNode { + ident: name, + construct: TypeVarConstruct::Let(bound), + type_var, + children, + assertions: vec![], + } + } + } +} + +// TODO mod tests? +#[test] +fn test_solve_constraints() { + // simple with specific and generic bvs + let concrete = HashSet::from([ + TypeExpr::Concrete(2, annotation_ir::Type::BitVectorWithWidth(16)), + TypeExpr::Concrete(3, annotation_ir::Type::BitVectorWithWidth(8)), + ]); + let var = HashSet::from([ + TypeExpr::Variable(5, 4), + TypeExpr::Variable(6, 1), + TypeExpr::Variable(3, 4), + TypeExpr::Variable(1, 2), + ]); + let bv = HashSet::from([ + TypeExpr::Concrete(1, annotation_ir::Type::BitVector), + TypeExpr::Concrete(4, annotation_ir::Type::BitVector), + ]); + let expected = HashMap::from([ + (1, annotation_ir::Type::BitVectorWithWidth(16)), + (2, annotation_ir::Type::BitVectorWithWidth(16)), + (3, annotation_ir::Type::BitVectorWithWidth(8)), + (4, annotation_ir::Type::BitVectorWithWidth(8)), + (5, annotation_ir::Type::BitVectorWithWidth(8)), + (6, annotation_ir::Type::BitVectorWithWidth(16)), + ]); + let (sol, bvsets) = solve_constraints(concrete, var, bv, &mut HashMap::new(), None); + assert_eq!(expected, sol); + assert!(bvsets.is_empty()); + + // slightly more complicated with specific and generic bvs + let concrete = HashSet::from([ + TypeExpr::Concrete(2, annotation_ir::Type::BitVectorWithWidth(16)), + TypeExpr::Concrete(3, annotation_ir::Type::BitVectorWithWidth(8)), + ]); + let var = HashSet::from([ + TypeExpr::Variable(5, 4), + TypeExpr::Variable(6, 1), + TypeExpr::Variable(3, 4), + TypeExpr::Variable(1, 2), + TypeExpr::Variable(7, 8), + ]); + let bv = HashSet::from([ + TypeExpr::Concrete(1, annotation_ir::Type::BitVector), + TypeExpr::Concrete(4, annotation_ir::Type::BitVector), + TypeExpr::Concrete(7, annotation_ir::Type::BitVector), + ]); + let expected = HashMap::from([ + (1, annotation_ir::Type::BitVectorWithWidth(16)), + (2, annotation_ir::Type::BitVectorWithWidth(16)), + (3, annotation_ir::Type::BitVectorWithWidth(8)), + (4, annotation_ir::Type::BitVectorWithWidth(8)), + (5, annotation_ir::Type::BitVectorWithWidth(8)), + (6, annotation_ir::Type::BitVectorWithWidth(16)), + (7, annotation_ir::Type::BitVectorUnknown(7)), + (8, annotation_ir::Type::BitVectorUnknown(7)), + ]); + let expected_bvsets = HashMap::from([(7, 0), (8, 0)]); + let (sol, bvsets) = solve_constraints(concrete, var, bv, &mut HashMap::new(), None); + assert_eq!(expected, sol); + assert_eq!(expected_bvsets, bvsets); +} + +#[test] +#[should_panic] +fn 
test_solve_constraints_ill_typed() { + // ill-typed + let concrete = HashSet::from([ + TypeExpr::Concrete(2, annotation_ir::Type::BitVectorWithWidth(16)), + TypeExpr::Concrete(3, annotation_ir::Type::BitVectorWithWidth(8)), + ]); + let var = HashSet::from([ + TypeExpr::Variable(5, 4), + TypeExpr::Variable(6, 1), + TypeExpr::Variable(4, 6), + TypeExpr::Variable(3, 4), + TypeExpr::Variable(1, 2), + ]); + let bv = HashSet::from([ + TypeExpr::Concrete(1, annotation_ir::Type::BitVector), + TypeExpr::Concrete(4, annotation_ir::Type::BitVector), + ]); + solve_constraints(concrete, var, bv, &mut HashMap::new(), None); +} diff --git a/cranelift/isle/veri/veri_engine/src/verify.rs b/cranelift/isle/veri/veri_engine/src/verify.rs new file mode 100644 index 000000000000..4cd846cd98f1 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/src/verify.rs @@ -0,0 +1,136 @@ +use crate::type_inference::type_rules_with_term_and_types; +use crate::Config; +use cranelift_isle::error::Errors; +use cranelift_isle::{self as isle}; +use isle::compile::create_envs; +use isle::sema::{Pattern, RuleId, TermEnv, TypeEnv}; +use std::collections::HashMap; +use std::path::PathBuf; + +use crate::annotations::parse_annotations; +use crate::solver::run_solver; +use crate::type_inference::RuleSemantics; +use crate::{interp::Context, termname::pattern_contains_termname}; +use veri_ir::{ConcreteTest, TermSignature, VerificationResult}; + +pub fn verify_rules( + inputs: Vec<PathBuf>, + config: &Config, + widths: &Option<Vec<String>>, +) -> Result<(), Errors> { + // Produces environments including terms, rules, and maps from symbols and + // names to types + let (typeenv, termenv, defs) = create_envs(inputs).unwrap(); + + let annotation_env = parse_annotations(&defs, &termenv, &typeenv); + + // Get the types/widths for this particular term + let types = annotation_env + .get_term_signatures_by_name(&termenv, &typeenv) + .get(&config.term as &str) + .unwrap_or_else(|| panic!("Missing term type instantiation for {}", config.term)) + .clone(); + + let types_filtered = if let Some(widths) = widths { + let mut width_types = Vec::new(); + + for w in widths { + let width_type = match w.as_str() { + "I8" => veri_ir::Type::BitVector(Some(8)), + "I16" => veri_ir::Type::BitVector(Some(16)), + "I32" => veri_ir::Type::BitVector(Some(32)), + "I64" => veri_ir::Type::BitVector(Some(64)), + _ => panic!("Invalid width type: {}", w), + }; + width_types.push(width_type); + } + + types + .into_iter() + .filter(|t| { + if let Some(canonical_type) = &t.canonical_type { + width_types.contains(canonical_type) + } else { + false + } + }) + .collect::<Vec<_>>() + } else { + types + }; + + for type_instantiation in types_filtered { + let type_sols = type_rules_with_term_and_types( + &termenv, + &typeenv, + &annotation_env, + config, + &type_instantiation, + &None, + ); + verify_rules_for_term( + &termenv, + &typeenv, + &type_sols, + type_instantiation, + &None, + config, + ); + } + Ok(()) +} + +pub fn verify_rules_for_term( + termenv: &TermEnv, + typeenv: &TypeEnv, + typesols: &HashMap<RuleId, RuleSemantics>, + types: TermSignature, + concrete: &Option<ConcreteTest>, + config: &Config, +) -> VerificationResult { + let mut rules_checked = 0; + for rule in &termenv.rules { + // Only type rules with the given term on the LHS + if !pattern_contains_termname( + // Hack for now: typeid not used + &Pattern::Term( + cranelift_isle::sema::TypeId(0), + rule.root_term, + rule.args.clone(), + ), + &config.term, + termenv, + typeenv, + ) { + continue; + } + if let Some(names) = &config.names { + if rule.name.is_none() { + continue; + } + 
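+            // Check this rule's name against the set requested in the config.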
let name = &typeenv.syms[rule.name.unwrap().index()]; + if !names.contains(name) { + continue; + } else { + log::debug!("Verifying rule: {}", name); + } + } + let ctx = Context::new(typesols); + if ctx.typesols.get(&rule.id).is_none() { + continue; + } + let rule_sem = &ctx.typesols[&rule.id]; + log::debug!("Term: {}", config.term); + log::debug!("Type instantiation: {}", types); + let result = run_solver(rule_sem, rule, termenv, typeenv, concrete, config, &types); + rules_checked += 1; + if result != VerificationResult::Success { + return result; + } + } + if rules_checked > 0 { + VerificationResult::Success + } else { + panic!("No rules checked!") + } +} diff --git a/cranelift/isle/veri/veri_engine/tests/utils/mod.rs b/cranelift/isle/veri/veri_engine/tests/utils/mod.rs new file mode 100644 index 000000000000..a9a225cbb252 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/tests/utils/mod.rs @@ -0,0 +1,347 @@ +use cranelift_codegen_meta::{ + generate_isle, + isle::{get_isle_compilations, shared_isle_lower_paths}, +}; +use cranelift_isle::compile::create_envs; +use std::env; +use std::path::PathBuf; +use strum::IntoEnumIterator; +use strum_macros::EnumIter; +use veri_engine_lib::annotations::parse_annotations; +use veri_engine_lib::type_inference::type_rules_with_term_and_types; +use veri_engine_lib::verify::verify_rules_for_term; +use veri_engine_lib::Config; +use veri_ir::{ConcreteTest, Counterexample, TermSignature, VerificationResult}; + +#[derive(Debug, EnumIter, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] +#[repr(usize)] +pub enum Bitwidth { + I8 = 8, + I16 = 16, + I32 = 32, + I64 = 64, +} + +pub enum TestResult { + Simple(Vec<(Bitwidth, VerificationResult)>), + Expect(fn(&TermSignature) -> VerificationResult), +} + +type TestResultBuilder = dyn Fn(Bitwidth) -> (Bitwidth, VerificationResult); + +use std::sync::Once; + +static INIT: Once = Once::new(); + +pub fn get_isle_files(name: &str) -> Vec<PathBuf> { + let cur_dir = env::current_dir().expect("Can't access current working directory"); + let gen_dir = cur_dir.join("test_output"); + INIT.call_once(|| { + // Logger + env_logger::init(); + // Test directory + if !gen_dir.is_dir() { + std::fs::create_dir(gen_dir.as_path()).unwrap(); + } + // Generate ISLE files. + generate_isle(gen_dir.as_path()).expect("Can't generate ISLE"); + }); + + let codegen_crate_dir = cur_dir.join("../../../codegen"); + let inst_specs_isle = codegen_crate_dir.join("src").join("inst_specs.isle"); + + match name { + "shared_lower" => { + let mut shared = shared_isle_lower_paths(codegen_crate_dir.as_path()); + shared.push(gen_dir.join("clif_lower.isle")); + shared + } + _ => { + // Look up the matching ISLE compilation. + let compilations = + get_isle_compilations(codegen_crate_dir.as_path(), gen_dir.as_path()); + + // Return inputs from the matching compilation, if any. 
+ let mut inputs = compilations.lookup(name).unwrap().inputs(); + inputs.push(inst_specs_isle); + inputs + } + } +} + +// Some examples of functions we might need +#[allow(dead_code)] +pub fn just_8_result() -> TestResult { + TestResult::Simple(vec![(Bitwidth::I8, VerificationResult::Success)]) +} + +#[allow(dead_code)] +pub fn just_16_result() -> TestResult { + TestResult::Simple(vec![(Bitwidth::I16, VerificationResult::Success)]) +} + +#[allow(dead_code)] +pub fn just_32_result() -> TestResult { + TestResult::Simple(vec![(Bitwidth::I32, VerificationResult::Success)]) +} + +#[allow(dead_code)] +pub fn just_64_result() -> TestResult { + TestResult::Simple(vec![(Bitwidth::I64, VerificationResult::Success)]) +} + +/// All bitwidths verify +#[allow(dead_code)] +pub fn all_success_result() -> Vec<(Bitwidth, VerificationResult)> { + custom_result(&|w| (w, VerificationResult::Success)) +} + +/// All bitwidths fail +#[allow(dead_code)] +pub fn all_failure_result() -> Vec<(Bitwidth, VerificationResult)> { + custom_result(&|w| (w, VerificationResult::Failure(Counterexample {}))) +} + +/// Specify a custom set of expected results (helpful if you want to test all the bitwidths and expect +/// a range of different success, failure, and inapplicable outcomes) +pub fn custom_result(f: &TestResultBuilder) -> Vec<(Bitwidth, VerificationResult)> { + Bitwidth::iter().map(f).collect() +} + +fn test_rules_with_term(inputs: Vec<PathBuf>, tr: TestResult, config: Config) { + let (typeenv, termenv, defs) = create_envs(inputs).unwrap(); + let annotation_env = parse_annotations(&defs, &termenv, &typeenv); + + let term_signatures = annotation_env + .get_term_signatures_by_name(&termenv, &typeenv) + .get(config.term.as_str()) + .unwrap_or_else(|| panic!("Missing term type instantiation for {}", config.term)) + .clone(); + let instantiations = match tr { + TestResult::Simple(s) => { + let mut res = vec![]; + for (width, result) in s { + let ty = match width { + Bitwidth::I8 => veri_ir::Type::BitVector(Some(8)), + Bitwidth::I16 => veri_ir::Type::BitVector(Some(16)), + Bitwidth::I32 => veri_ir::Type::BitVector(Some(32)), + Bitwidth::I64 => veri_ir::Type::BitVector(Some(64)), + }; + // Find the type instantiations with this as the canonical type + let all_instantiations: Vec<&TermSignature> = term_signatures + .iter() + .filter(|sig| sig.canonical_type.unwrap() == ty) + .collect(); + if all_instantiations.is_empty() { + panic!("Missing type instantiation for width {:?}", width); + } + for i in all_instantiations { + res.push((i.clone(), result.clone())); + } + } + res + } + TestResult::Expect(expect) => term_signatures + .iter() + .map(|sig| (sig.clone(), expect(sig))) + .collect(), + }; + + for (type_instantiation, expected_result) in instantiations { + log::debug!("Expected result: {:?}", expected_result); + let type_sols = type_rules_with_term_and_types( + &termenv, + &typeenv, + &annotation_env, + &config, + &type_instantiation, + &None, + ); + let result = verify_rules_for_term( + &termenv, + &typeenv, + &type_sols, + type_instantiation, + &None, + &config, + ); + assert_eq!(result, expected_result); + } +} + +pub fn test_from_file_with_lhs_termname_simple( + file: &str, + termname: String, + tr: Vec<(Bitwidth, VerificationResult)>, +) { + test_from_file_with_lhs_termname(file, termname, TestResult::Simple(tr)) +} + +pub fn test_from_file_with_lhs_termname(file: &str, termname: String, tr: TestResult) { + println!("Verifying {} rules in file: {}", termname, file); + let mut inputs = get_isle_files("shared_lower"); + 
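+    // Layer the file under test on top of the shared lowering definitions.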
inputs.push(PathBuf::from(file)); + let config = Config { + term: termname, + distinct_check: true, + custom_verification_condition: None, + custom_assumptions: None, + names: None, + }; + test_rules_with_term(inputs, tr, config); +} + +pub fn test_aarch64_rule_with_lhs_termname_simple( + rulename: &str, + termname: &str, + tr: Vec<(Bitwidth, VerificationResult)>, +) { + test_aarch64_rule_with_lhs_termname(rulename, termname, TestResult::Simple(tr)) +} + +pub fn test_aarch64_rule_with_lhs_termname(rulename: &str, termname: &str, tr: TestResult) { + println!("Verifying rule `{}` with termname {} ", rulename, termname); + let inputs = get_isle_files("aarch64"); + let config = Config { + term: termname.to_string(), + distinct_check: true, + custom_verification_condition: None, + custom_assumptions: None, + names: Some(vec![rulename.to_string()]), + }; + test_rules_with_term(inputs, tr, config); +} + +pub fn test_x64_rule_with_lhs_termname_simple( + rulename: &str, + termname: &str, + tr: Vec<(Bitwidth, VerificationResult)>, +) { + test_x64_rule_with_lhs_termname(rulename, termname, TestResult::Simple(tr)) +} + +pub fn test_x64_rule_with_lhs_termname(rulename: &str, termname: &str, tr: TestResult) { + println!("Verifying rule `{}` with termname {} ", rulename, termname); + let inputs = get_isle_files("x64"); + let config = Config { + term: termname.to_string(), + distinct_check: true, + custom_verification_condition: None, + custom_assumptions: None, + names: Some(vec![rulename.to_string()]), + }; + test_rules_with_term(inputs, tr, config); +} + +pub fn test_from_file_with_config_simple( + file: &str, + config: Config, + tr: Vec<(Bitwidth, VerificationResult)>, +) { + test_from_file_with_config(file, config, TestResult::Simple(tr)) +} +pub fn test_from_file_with_config(file: &str, config: Config, tr: TestResult) { + println!("Verifying {} rules in file: {}", config.term, file); + let mut inputs = get_isle_files("shared_lower"); + inputs.push(PathBuf::from(file)); + test_rules_with_term(inputs, tr, config); +} + +pub fn test_aarch64_with_config_simple(config: Config, tr: Vec<(Bitwidth, VerificationResult)>) { + test_aarch64_with_config(config, TestResult::Simple(tr)) +} + +pub fn test_aarch64_with_config(config: Config, tr: TestResult) { + println!( + "Verifying rules {:?} with termname {}", + config.names, config.term + ); + let inputs = get_isle_files("aarch64"); + test_rules_with_term(inputs, tr, config); +} + +pub fn test_concrete_aarch64_rule_with_lhs_termname( + rulename: &str, + termname: &str, + concrete: ConcreteTest, +) { + println!( + "Verifying concrete input rule `{}` with termname {} ", + rulename, termname + ); + let inputs = get_isle_files("aarch64"); + let (typeenv, termenv, defs) = create_envs(inputs).unwrap(); + let annotation_env = parse_annotations(&defs, &termenv, &typeenv); + + let config = Config { + term: termname.to_string(), + distinct_check: false, + custom_verification_condition: None, + custom_assumptions: None, + names: Some(vec![rulename.to_string()]), + }; + + // Get the types/widths for this particular term + let args = concrete.args.iter().map(|i| i.ty).collect(); + let ret = concrete.output.ty; + let t = TermSignature { + args, + ret, + canonical_type: None, + }; + + let type_sols = type_rules_with_term_and_types( + &termenv, + &typeenv, + &annotation_env, + &config, + &t, + &Some(concrete.clone()), + ); + let result = verify_rules_for_term(&termenv, &typeenv, &type_sols, t, &Some(concrete), &config); + assert_eq!(result, VerificationResult::Success); +} + 
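+// Editor's illustrative sketch, not part of the upstream patch: how a caller
+// might use `TestResult::Expect` (rather than `TestResult::Simple`) to vary
+// the expected outcome per type instantiation. The per-width expectations
+// mirror those listed for `iadd_extend_right` in veri.rs below; the function
+// name is hypothetical.
+#[allow(dead_code)]
+fn example_expect_usage() {
+    test_aarch64_rule_with_lhs_termname(
+        "iadd_extend_right",
+        "iadd",
+        TestResult::Expect(|sig| match &sig.canonical_type {
+            // The extend-based rule does not apply at the full register width.
+            Some(veri_ir::Type::BitVector(Some(64))) => VerificationResult::InapplicableRule,
+            _ => VerificationResult::Success,
+        }),
+    );
+}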
+pub fn test_concrete_input_from_file_with_lhs_termname( + file: &str, + termname: String, + concrete: ConcreteTest, +) { + println!( + "Verifying concrete input {} rule in file: {}", + termname, file + ); + let mut inputs = get_isle_files("shared_lower"); + inputs.push(PathBuf::from(file)); + + let (typeenv, termenv, defs) = create_envs(inputs).unwrap(); + let annotation_env = parse_annotations(&defs, &termenv, &typeenv); + + let config = Config { + term: termname.clone(), + distinct_check: false, + custom_verification_condition: None, + custom_assumptions: None, + names: None, + }; + + // Get the types/widths for this particular term + let args = concrete.args.iter().map(|i| i.ty).collect(); + let ret = concrete.output.ty; + let t = TermSignature { + args, + ret, + canonical_type: None, + }; + + let type_sols = type_rules_with_term_and_types( + &termenv, + &typeenv, + &annotation_env, + &config, + &t, + &Some(concrete.clone()), + ); + let result = verify_rules_for_term(&termenv, &typeenv, &type_sols, t, &Some(concrete), &config); + assert_eq!(result, VerificationResult::Success); +} diff --git a/cranelift/isle/veri/veri_engine/tests/veri.rs b/cranelift/isle/veri/veri_engine/tests/veri.rs new file mode 100644 index 000000000000..4fe0f83545e2 --- /dev/null +++ b/cranelift/isle/veri/veri_engine/tests/veri.rs @@ -0,0 +1,3224 @@ +mod utils; +use utils::{all_failure_result, all_success_result}; +use utils::{ + test_aarch64_rule_with_lhs_termname_simple, test_aarch64_with_config_simple, + test_concrete_aarch64_rule_with_lhs_termname, test_concrete_input_from_file_with_lhs_termname, + test_from_file_with_config_simple, test_from_file_with_lhs_termname, + test_from_file_with_lhs_termname_simple, test_x64_rule_with_lhs_termname_simple, Bitwidth, + TestResult, +}; +use veri_engine_lib::Config; +use veri_ir::{ConcreteInput, ConcreteTest, Counterexample, VerificationResult}; + +#[test] +fn test_named_iadd_base_concrete() { + test_concrete_aarch64_rule_with_lhs_termname( + "iadd_base_case", + "iadd", + ConcreteTest { + termname: "iadd".to_string(), + args: vec![ + ConcreteInput { + literal: "#b00000001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00000001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ], + output: ConcreteInput { + literal: "#b00000010".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + }, + ) +} + +#[test] +fn test_named_iadd_base() { + test_aarch64_rule_with_lhs_termname_simple("iadd_base_case", "iadd", all_success_result()) +} + +#[test] +fn test_named_iadd_imm12_right() { + test_aarch64_rule_with_lhs_termname_simple("iadd_imm12_right", "iadd", all_success_result()) +} + +#[test] +fn test_named_iadd_imm12_left() { + test_aarch64_rule_with_lhs_termname_simple("iadd_imm12_left", "iadd", all_success_result()) +} + +#[test] +fn test_named_iadd_imm12_neg_left() { + test_aarch64_rule_with_lhs_termname_simple( + "iadd_imm12_neg_left", + "iadd", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_iadd_imm12_neg_right() { + test_aarch64_rule_with_lhs_termname_simple( + "iadd_imm12_neg_right", + "iadd", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +// Need a file test 
because this is a change on top of our latest rebase +#[test] +fn test_named_imm12_from_negated_value() { + test_aarch64_rule_with_lhs_termname_simple( + "imm12_from_negated_value", + "imm12_from_negated_value", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +// Need a file test because this is a change on top of our latest rebase +#[test] +fn test_updated_iadd_imm12neg_right() { + test_from_file_with_lhs_termname_simple( + "./examples/iadd/updated_iadd_imm12neg_right.isle", + "iadd".to_string(), + all_success_result(), + ) +} + +// Need a file test because this is a change on top of our latest rebase +#[test] +fn test_updated_iadd_imm12neg_left() { + test_from_file_with_lhs_termname_simple( + "./examples/iadd/updated_iadd_imm12neg_left.isle", + "iadd".to_string(), + all_success_result(), + ) +} + +#[test] +fn test_named_iadd_extend_right() { + test_aarch64_rule_with_lhs_termname_simple( + "iadd_extend_right", + "iadd", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_iadd_extend_right_concrete() { + test_concrete_aarch64_rule_with_lhs_termname( + "iadd_extend_right", + "iadd", + ConcreteTest { + termname: "iadd".to_string(), + args: vec![ + ConcreteInput { + literal: "#b0000000000000001".to_string(), + ty: veri_ir::Type::BitVector(Some(16)), + }, + ConcreteInput { + literal: "#b1111111111111111".to_string(), + ty: veri_ir::Type::BitVector(Some(16)), + }, + ], + output: ConcreteInput { + literal: "#b0000000000000000".to_string(), + ty: veri_ir::Type::BitVector(Some(16)), + }, + }, + ); + test_concrete_aarch64_rule_with_lhs_termname( + "iadd_extend_right", + "iadd", + ConcreteTest { + termname: "iadd".to_string(), + args: vec![ + ConcreteInput { + literal: "#b01000000000000000000000000000000".to_string(), + ty: veri_ir::Type::BitVector(Some(32)), + }, + ConcreteInput { + literal: "#b00000000000000001111111111111111".to_string(), + ty: veri_ir::Type::BitVector(Some(32)), + }, + ], + output: ConcreteInput { + literal: "#b01000000000000001111111111111111".to_string(), + ty: veri_ir::Type::BitVector(Some(32)), + }, + }, + ) +} + +#[test] +fn test_named_iadd_extend_left() { + test_aarch64_rule_with_lhs_termname_simple( + "iadd_extend_left", + "iadd", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_broken_iadd_extend() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_add_extend.isle", + "iadd".to_string(), + vec![ + // The type of the iadd is the destination type, so for i8 there is no bad extend-to + (Bitwidth::I8, VerificationResult::Success), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_iadd_ishl_left() { + test_aarch64_rule_with_lhs_termname_simple("iadd_ishl_left", "iadd", all_success_result()) +} + +#[test] +fn test_named_iadd_ishl_right() { + test_aarch64_rule_with_lhs_termname_simple("iadd_ishl_right", "iadd", 
all_success_result()) +} + +#[test] +fn test_named_iadd_imul_right() { + test_aarch64_rule_with_lhs_termname_simple( + "iadd_imul_right", + "iadd", + vec![ + (Bitwidth::I8, VerificationResult::Success), + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +#[ignore] +fn test_named_slow_iadd_imul_right() { + test_aarch64_rule_with_lhs_termname_simple( + "iadd_imul_right", + "iadd", + vec![ + (Bitwidth::I16, VerificationResult::Unknown), + (Bitwidth::I32, VerificationResult::Unknown), + (Bitwidth::I64, VerificationResult::Unknown), + ], + ) +} + +#[test] +fn test_named_iadd_imul_left() { + test_aarch64_rule_with_lhs_termname_simple( + "iadd_imul_left", + "iadd", + vec![ + (Bitwidth::I8, VerificationResult::Success), + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +#[ignore] +fn test_named_slow_iadd_imul_left() { + test_aarch64_rule_with_lhs_termname_simple( + "iadd_imul_left", + "iadd", + vec![ + (Bitwidth::I16, VerificationResult::Unknown), + (Bitwidth::I32, VerificationResult::Unknown), + (Bitwidth::I64, VerificationResult::Unknown), + ], + ) +} + +#[test] +fn test_named_isub_imul() { + test_aarch64_rule_with_lhs_termname_simple( + "isub_imul", + "isub", + vec![ + (Bitwidth::I8, VerificationResult::Success), + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +#[ignore] +fn test_named_slow_isub_imul() { + test_aarch64_rule_with_lhs_termname_simple( + "isub_imul", + "isub", + vec![ + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + (Bitwidth::I16, VerificationResult::Unknown), + (Bitwidth::I32, VerificationResult::Unknown), + (Bitwidth::I64, VerificationResult::Unknown), + ], + ) +} + +#[test] +fn test_broken_iadd_base_case() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_base_case.isle", + "iadd".to_string(), + all_failure_result(), + ) +} + +#[test] +fn test_broken_iadd_imm12() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_imm12.isle", + "iadd".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Success), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +#[test] +fn test_broken_iadd_imm12_2() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_imm12_2.isle", + "iadd".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +#[test] +fn test_broken_iadd_imm12neg_not_distinct() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_imm12neg.isle", + "iadd".to_string(), + vec![ + (Bitwidth::I8, 
VerificationResult::NoDistinctModels), + (Bitwidth::I16, VerificationResult::NoDistinctModels), + (Bitwidth::I32, VerificationResult::NoDistinctModels), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +#[test] +fn test_broken_iadd_imm12neg_2_not_distinct() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_imm12neg2.isle", + "iadd".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::NoDistinctModels), + (Bitwidth::I16, VerificationResult::NoDistinctModels), + (Bitwidth::I32, VerificationResult::NoDistinctModels), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +#[test] +fn test_broken_iadd_imul_right() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_madd.isle", + "iadd".to_string(), + all_failure_result(), + ) +} + +#[test] +fn test_broken_iadd_imul_left() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_madd2.isle", + "iadd".to_string(), + all_failure_result(), + ) +} + +#[test] +fn test_broken_iadd_msub() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_msub.isle", + "isub".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +#[test] +fn test_broken_iadd_shift() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_shift.isle", + "iadd".to_string(), + all_failure_result(), + ) +} + +#[test] +fn test_broken_iadd_shift2() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/iadd/broken_shift2.isle", + "iadd".to_string(), + all_failure_result(), + ) +} + +#[test] +fn test_named_isub_base_case() { + test_aarch64_rule_with_lhs_termname_simple("isub_base_case", "isub", all_success_result()) +} + +#[test] +fn test_named_isub_imm12() { + test_aarch64_rule_with_lhs_termname_simple("isub_imm12", "isub", all_success_result()) +} + +#[test] +fn test_named_isub_imm12_concrete() { + test_concrete_aarch64_rule_with_lhs_termname( + "isub_imm12", + "isub", + ConcreteTest { + termname: "isub".to_string(), + args: vec![ + ConcreteInput { + literal: "#b00000001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b11111111".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ], + output: ConcreteInput { + literal: "#b00000010".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + }, + ) +} + +#[test] +fn test_named_isub_imm12_neg() { + test_aarch64_rule_with_lhs_termname_simple( + "isub_imm12_neg", + "isub", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ); +} + +// The older version, which did not have distinct models for i8, i16, or i32. 
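+//
+// A NoDistinctModels result means the distinctness pre-check failed: before
+// verifying a rule, the engine asks the solver whether the rule's inputs can
+// take on at least two distinct values under the rule's assumptions. If they
+// cannot, a Success result would be vacuous, so it is reported separately.
+// Roughly, the pre-check query has this shape (an illustrative sketch, not
+// the engine's exact SMT output):
+//
+//   (declare-const x (_ BitVec 8))
+//   (declare-const y (_ BitVec 8))
+//   (assert <rule assumptions instantiated for x>)
+//   (assert <rule assumptions instantiated for y>)
+//   (assert (distinct x y))
+//   (check-sat)   ; unsat => NoDistinctModels
+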
+#[test]
+fn test_isub_imm12_neg_not_distinct() {
+    test_from_file_with_lhs_termname_simple(
+        "./examples/broken/isub/broken_imm12neg_not_distinct.isle",
+        "isub".to_string(),
+        vec![
+            (Bitwidth::I8, VerificationResult::NoDistinctModels),
+            (Bitwidth::I16, VerificationResult::NoDistinctModels),
+            (Bitwidth::I32, VerificationResult::NoDistinctModels),
+            (Bitwidth::I64, VerificationResult::Success),
+        ],
+    );
+}
+
+#[test]
+fn test_isub_imm12_neg_not_distinct_16_32() {
+    test_from_file_with_lhs_termname_simple(
+        "./examples/broken/isub/broken_imm12neg_not_distinct.isle",
+        "isub".to_string(),
+        vec![
+            (Bitwidth::I16, VerificationResult::NoDistinctModels),
+            (Bitwidth::I32, VerificationResult::NoDistinctModels),
+        ],
+    );
+}
+
+// Need a file test because this is a change on top of our latest rebase
+#[test]
+fn test_isub_imm12neg_new() {
+    test_from_file_with_lhs_termname_simple(
+        "./examples/isub/imm12neg_new.isle",
+        "isub".to_string(),
+        all_success_result(),
+    );
+}
+
+#[test]
+fn test_named_isub_imm12_neg_concrete32() {
+    test_concrete_aarch64_rule_with_lhs_termname(
+        "isub_imm12_neg",
+        "isub",
+        ConcreteTest {
+            termname: "isub".to_string(),
+            args: vec![
+                ConcreteInput {
+                    literal: "#b00000000000000000000000000000001".to_string(),
+                    ty: veri_ir::Type::BitVector(Some(32)),
+                },
+                ConcreteInput {
+                    literal: "#b11111111111111111111111111111111".to_string(),
+                    ty: veri_ir::Type::BitVector(Some(32)),
+                },
+            ],
+            output: ConcreteInput {
+                literal: "#b00000000000000000000000000000010".to_string(),
+                ty: veri_ir::Type::BitVector(Some(32)),
+            },
+        },
+    )
+}
+
+#[test]
+fn test_named_isub_imm12_neg_concrete64() {
+    test_concrete_aarch64_rule_with_lhs_termname(
+        "isub_imm12_neg",
+        "isub",
+        ConcreteTest {
+            termname: "isub".to_string(),
+            args: vec![
+                ConcreteInput {
+                    literal: "#b0000000000000000000000000000000000000000000000000000000000000001"
+                        .to_string(),
+                    ty: veri_ir::Type::BitVector(Some(64)),
+                },
+                ConcreteInput {
+                    literal: "#b1111111111111111111111111111111111111111111111111111111111111111"
+                        .to_string(),
+                    ty: veri_ir::Type::BitVector(Some(64)),
+                },
+            ],
+            output: ConcreteInput {
+                literal: "#b0000000000000000000000000000000000000000000000000000000000000010"
+                    .to_string(),
+                ty: veri_ir::Type::BitVector(Some(64)),
+            },
+        },
+    )
+}
+
+#[test]
+fn test_named_isub_extend() {
+    test_aarch64_rule_with_lhs_termname_simple(
+        "isub_extend",
+        "isub",
+        vec![
+            (Bitwidth::I8, VerificationResult::Success),
+            (Bitwidth::I16, VerificationResult::Success),
+            (Bitwidth::I32, VerificationResult::Success),
+            (Bitwidth::I64, VerificationResult::InapplicableRule),
+        ],
+    )
+}
+
+#[test]
+fn test_named_isub_ishl() {
+    test_aarch64_rule_with_lhs_termname_simple("isub_ishl", "isub", all_success_result())
+}
+
+#[test]
+fn test_broken_isub_base_case() {
+    test_from_file_with_lhs_termname_simple(
+        "./examples/broken/isub/broken_base_case.isle",
+        "isub".to_string(),
+        vec![
+            (Bitwidth::I8, VerificationResult::Failure(Counterexample {})),
+            (
+                Bitwidth::I16,
+                VerificationResult::Failure(Counterexample {}),
+            ),
+            (
+                Bitwidth::I32,
+                VerificationResult::Failure(Counterexample {}),
+            ),
+            (
+                Bitwidth::I64,
+                VerificationResult::Failure(Counterexample {}),
+            ),
+        ],
+    );
+}
+
+#[test]
+fn test_broken_isub_imm12() {
+    test_from_file_with_lhs_termname_simple(
+        "./examples/broken/isub/broken_imm12.isle",
+        "isub".to_string(),
+        vec![
+            (Bitwidth::I8, VerificationResult::Success),
+            (
Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ); +} + +#[test] +fn test_broken_isub_imm12neg_not_distinct() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/isub/broken_imm12neg.isle", + "isub".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::NoDistinctModels), + (Bitwidth::I16, VerificationResult::NoDistinctModels), + (Bitwidth::I32, VerificationResult::NoDistinctModels), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ); +} + +#[test] +fn test_broken_isub_shift() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/isub/broken_shift.isle", + "isub".to_string(), + all_failure_result(), + ); +} + +#[test] +fn test_named_ineg_base_case() { + test_aarch64_rule_with_lhs_termname_simple("ineg_base_case", "ineg", all_success_result()) +} + +#[test] +fn test_named_imul_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "imul_base_case", + "imul", + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + vec![ + (Bitwidth::I8, VerificationResult::Success), + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +#[ignore] +fn test_named_slow_imul_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "imul_base_case", + "imul", + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + vec![ + // (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Unknown), + (Bitwidth::I32, VerificationResult::Unknown), + (Bitwidth::I64, VerificationResult::Unknown), + ], + ) +} + +// TODO traps https://github.com/avanhatt/wasmtime/issues/31 +#[test] +fn test_named_udiv() { + test_aarch64_rule_with_lhs_termname_simple( + "udiv", + "udiv", + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + vec![ + (Bitwidth::I8, VerificationResult::Success), + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +#[ignore] +fn test_named_slow_udiv() { + test_aarch64_rule_with_lhs_termname_simple( + "udiv", + "udiv", + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + vec![ + // (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Unknown), + (Bitwidth::I32, VerificationResult::Unknown), + (Bitwidth::I64, VerificationResult::Unknown), + ], + ) +} + +#[test] +fn test_broken_udiv() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/udiv/broken_udiv.isle", + "udiv".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_sdiv_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "sdiv_base_case", + "sdiv", + vec![ + (Bitwidth::I8, VerificationResult::Success), + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] 
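+// Ignored by default because the solver query is currently too slow
+// (https://github.com/avanhatt/wasmtime/issues/36); run with
+// `cargo test -- --ignored` to include the slow variants.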
+#[ignore] +fn test_named_slow_sdiv_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "sdiv_base_case", + "sdiv", + vec![ + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + (Bitwidth::I16, VerificationResult::Unknown), + (Bitwidth::I32, VerificationResult::Unknown), + (Bitwidth::I64, VerificationResult::Unknown), + ], + ) +} + +#[test] +fn test_named_sdiv_safe_divisor() { + test_aarch64_rule_with_lhs_termname_simple( + "sdiv_safe_divisor", + "sdiv", + vec![ + (Bitwidth::I8, VerificationResult::Success), + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +#[ignore] +fn test_named_slow_sdiv_safe_divisor() { + test_aarch64_rule_with_lhs_termname_simple( + "sdiv_safe_divisor", + "sdiv", + vec![ + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + (Bitwidth::I16, VerificationResult::Unknown), + (Bitwidth::I32, VerificationResult::Unknown), + (Bitwidth::I64, VerificationResult::Unknown), + ], + ) +} + +#[test] +fn test_broken_sdiv_safe_const() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/sdiv/broken_sdiv_safe_const.isle", + "sdiv".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +#[test] +fn test_broken_sdiv() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/sdiv/broken_sdiv.isle", + "sdiv".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_srem() { + test_aarch64_rule_with_lhs_termname_simple( + "srem", + "srem", + vec![ + (Bitwidth::I8, VerificationResult::Success), + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +#[ignore] +fn test_named_slow_srem() { + test_aarch64_rule_with_lhs_termname_simple( + "srem", + "srem", + vec![ + (Bitwidth::I16, VerificationResult::Unknown), + (Bitwidth::I32, VerificationResult::Unknown), + (Bitwidth::I64, VerificationResult::Unknown), + ], + ) +} + +#[test] +fn test_named_urem() { + test_aarch64_rule_with_lhs_termname_simple( + "urem", + "urem", + vec![ + (Bitwidth::I8, VerificationResult::Success), + // Too slow right now: https://github.com/avanhatt/wasmtime/issues/36 + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +#[ignore] +fn test_named_slow_urem() { + test_aarch64_rule_with_lhs_termname_simple( + "urem", + "urem", + vec![ + (Bitwidth::I16, VerificationResult::Unknown), + (Bitwidth::I32, VerificationResult::Unknown), + (Bitwidth::I64, VerificationResult::Unknown), + ], + ) +} + +#[test] +fn test_named_urem_concrete() { + test_concrete_aarch64_rule_with_lhs_termname( + "urem", + "urem", + ConcreteTest { + 
termname: "urem".to_string(), + args: vec![ + ConcreteInput { + literal: "#b11111110".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00110001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ], + output: ConcreteInput { + literal: "#b00001001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + }, + ) +} + +#[test] +fn test_named_uextend() { + test_aarch64_rule_with_lhs_termname_simple("uextend", "uextend", all_success_result()) +} + +#[test] +fn test_named_sextend() { + test_aarch64_rule_with_lhs_termname_simple("sextend", "sextend", all_success_result()) +} + +#[test] +fn test_broken_uextend() { + test_from_file_with_lhs_termname( + "./examples/broken/broken_uextend.isle", + "uextend".to_string(), + TestResult::Expect(|sig| { + // In the spec for extend, zero_extend and sign_extend are swapped. + // However, this should still succeed if the input and output + // widths are the same + if sig.args[0] == sig.ret { + VerificationResult::Success + } else { + VerificationResult::Failure(Counterexample {}) + } + }), + ); +} + +// AVH TODO: this rule requires priorities to be correct for narrow cases +// https://github.com/avanhatt/wasmtime/issues/32 +#[test] +fn test_named_clz_32_64() { + test_aarch64_rule_with_lhs_termname_simple( + "clz_32_64", + "clz", + vec![ + // (Bitwidth::I8, VerificationResult::InapplicableRule), + // (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_clz_8() { + test_aarch64_rule_with_lhs_termname_simple( + "clz_8", + "clz", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_clz_16() { + test_aarch64_rule_with_lhs_termname_simple( + "clz_16", + "clz", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_broken_clz() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/clz/broken_clz.isle", + "clz".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_broken_clz8() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/clz/broken_clz8.isle", + "clz".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_broken_clz_n6() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/clz/broken_clz16.isle", + "clz".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +// AVH TODO: this rule requires priorities to be 
correct for narrow cases +// https://github.com/avanhatt/wasmtime/issues/32 +#[test] +fn test_named_cls_32_64() { + test_aarch64_rule_with_lhs_termname_simple( + "cls_32_64", + "cls", + vec![ + // (Bitwidth::I8, VerificationResult::InapplicableRule), + // (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_cls_8() { + test_aarch64_rule_with_lhs_termname_simple( + "cls_8", + "cls", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_cls_16() { + test_aarch64_rule_with_lhs_termname_simple( + "cls_16", + "cls", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_broken_cls_32_64() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/cls/broken_cls.isle", + "cls".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_broken_cls_8() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/cls/broken_cls8.isle", + "cls".to_string(), + vec![(Bitwidth::I8, VerificationResult::Failure(Counterexample {}))], + ) +} + +#[test] +fn test_broken_cls_16() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/cls/broken_cls16.isle", + "cls".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_ctz_32_64() { + test_aarch64_rule_with_lhs_termname_simple( + "ctz_32_64", + "ctz", + vec![ + // (Bitwidth::I8, VerificationResult::InapplicableRule), + // (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_ctz_8() { + test_aarch64_rule_with_lhs_termname_simple( + "ctz_8", + "ctz", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_ctz_16() { + test_aarch64_rule_with_lhs_termname_simple( + "ctz_16", + "ctz", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_broken_ctz_32_64() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/ctz/broken_ctz.isle", + "clz".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + 
Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +#[test] +fn test_broken_ctz_8() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/ctz/broken_ctz8.isle", + "ctz".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_broken_ctz_16() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/ctz/broken_ctz16.isle", + "ctz".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_small_rotr() { + let config = Config { + term: "small_rotr".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, args, lhs, rhs| { + let ty_arg = *args.first().unwrap(); + let lower_8_bits_eq = { + let mask = smt.atom("#x00000000000000FF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + let lower_16_bits_eq = { + let mask = smt.atom("#x000000000000FFFF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + smt.ite( + smt.eq(ty_arg, smt.atom("8")), + lower_8_bits_eq, + lower_16_bits_eq, + ) + })), + names: Some(vec!["small_rotr".to_string()]), + }; + test_aarch64_with_config_simple(config, vec![(Bitwidth::I64, VerificationResult::Success)]); +} + +#[test] +fn test_broken_small_rotr_to_shifts() { + let config = Config { + term: "small_rotr".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, args, lhs, rhs| { + let ty_arg = *args.first().unwrap(); + let lower_8_bits_eq = { + let mask = smt.atom("#x00000000000000FF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + let lower_16_bits_eq = { + let mask = smt.atom("#x000000000000FFFF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + smt.ite( + smt.eq(ty_arg, smt.atom("8")), + lower_8_bits_eq, + lower_16_bits_eq, + ) + })), + names: None, + }; + test_from_file_with_config_simple( + "./examples/broken/broken_mask_small_rotr.isle", + config, + vec![( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + )], + ); +} + +#[test] +fn test_broken_small_rotr_to_shifts_2() { + let config = Config { + term: "small_rotr".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, args, lhs, rhs| { + let ty_arg = *args.first().unwrap(); + let lower_8_bits_eq = { + let mask = smt.atom("#x00000000000000FF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + let lower_16_bits_eq = { + let mask = smt.atom("#x000000000000FFFF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + smt.ite( + smt.eq(ty_arg, smt.atom("8")), + lower_8_bits_eq, + lower_16_bits_eq, + ) + })), + names: None, + }; + test_from_file_with_config_simple( + "./examples/broken/broken_rule_or_small_rotr.isle", + config, + vec![( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + )], + ); +} + +#[test] +fn test_named_small_rotr_imm() { + let config = Config { + term: "small_rotr_imm".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, args, lhs, 
rhs| { + let ty_arg = *args.first().unwrap(); + let lower_8_bits_eq = { + let mask = smt.atom("#x00000000000000FF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + let lower_16_bits_eq = { + let mask = smt.atom("#x000000000000FFFF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + smt.ite( + smt.eq(ty_arg, smt.atom("8")), + lower_8_bits_eq, + lower_16_bits_eq, + ) + })), + names: Some(vec!["small_rotr_imm".to_string()]), + }; + test_aarch64_with_config_simple(config, vec![(Bitwidth::I64, VerificationResult::Success)]); +} + +#[test] +fn test_named_rotl_fits_in_16() { + test_aarch64_rule_with_lhs_termname_simple( + "rotl_fits_in_16", + "rotl", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_rotl_32_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "rotl_32_base_case", + "rotl", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_broken_32_general_rotl_to_rotr() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/broken_32_general_rotl_to_rotr.isle", + "rotl".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_rotl_64_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "rotl_64_base_case", + "rotl", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_broken_fits_in_16_rotl_to_rotr() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/broken_fits_in_16_rotl_to_rotr.isle", + "rotl".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_rotl_fits_in_16_imm() { + test_aarch64_rule_with_lhs_termname_simple( + "rotl_fits_in_16_imm", + "rotl", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_rotl_64_imm() { + test_aarch64_rule_with_lhs_termname_simple( + "rotl_64_imm", + "rotl", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_rotl_32_imm() { + test_aarch64_rule_with_lhs_termname_simple( + "rotl_32_imm", + "rotl", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, 
VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_broken_fits_in_16_with_imm_rotl_to_rotr() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/broken_fits_in_16_with_imm_rotl_to_rotr.isle", + "rotl".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_rotr_fits_in_16() { + test_aarch64_rule_with_lhs_termname_simple( + "rotr_fits_in_16", + "rotr", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_rotr_fits_in_16_imm() { + test_aarch64_rule_with_lhs_termname_simple( + "rotr_fits_in_16_imm", + "rotr", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_rotr_32_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "rotr_32_base_case", + "rotr", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_rotr_32_imm() { + test_aarch64_rule_with_lhs_termname_simple( + "rotr_32_imm", + "rotr", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_rotr_64_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "rotr_64_base_case", + "rotr", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_rotr_64_imm() { + test_aarch64_rule_with_lhs_termname_simple( + "rotr_64_imm", + "rotr", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_band_fits_in_64() { + test_aarch64_rule_with_lhs_termname_simple( + "band_fits_in_64", + "band", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_broken_band_fits_in_32() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/broken_fits_in_32_band.isle", + "band".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_bor_fits_in_64() { + test_aarch64_rule_with_lhs_termname_simple( 
+ "bor_fits_in_64", + "bor", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_broken_bor_fits_in_32() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/broken_fits_in_32_bor.isle", + "bor".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_bxor_fits_in_64() { + test_aarch64_rule_with_lhs_termname_simple( + "bxor_fits_in_64", + "bxor", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_band_not_right() { + test_aarch64_rule_with_lhs_termname_simple( + "band_not_right", + "band", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_band_not_left() { + test_aarch64_rule_with_lhs_termname_simple( + "band_not_left", + "band", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_bor_not_right() { + test_aarch64_rule_with_lhs_termname_simple( + "bor_not_right", + "bor", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_bor_not_left() { + test_aarch64_rule_with_lhs_termname_simple( + "bor_not_left", + "bor", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_bxor_not_right() { + test_aarch64_rule_with_lhs_termname_simple( + "bxor_not_right", + "bxor", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_bxor_not_left() { + test_aarch64_rule_with_lhs_termname_simple( + "bxor_not_left", + "bxor", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_bnot() { + test_aarch64_rule_with_lhs_termname_simple("bnot_base_case", "bnot", all_success_result()) +} + +#[test] +fn test_named_bnot_ishl() { + test_aarch64_rule_with_lhs_termname_simple("bnot_ishl", "bnot", all_success_result()) +} + +#[test] +fn test_named_ishl_64() { + test_aarch64_rule_with_lhs_termname_simple( + "ishl_64", + "ishl", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, 
VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_ishl_64_concrete() { + test_concrete_aarch64_rule_with_lhs_termname( + "ishl_64", + "ishl", + ConcreteTest { + termname: "ishl".to_string(), + args: vec![ + ConcreteInput { + literal: "#b0000000000000000000000000000000000000000000000000000000000000001" + .to_string(), + ty: veri_ir::Type::BitVector(Some(64)), + }, + ConcreteInput { + literal: "#b0000000000000000000000000000000000000000000000000000000000000010" + .to_string(), + ty: veri_ir::Type::BitVector(Some(64)), + }, + ], + output: ConcreteInput { + literal: "#b0000000000000000000000000000000000000000000000000000000000000100" + .to_string(), + ty: veri_ir::Type::BitVector(Some(64)), + }, + }, + ) +} + +#[test] +fn test_named_ishl_fits_in_32() { + test_aarch64_rule_with_lhs_termname_simple( + "ishl_fits_in_32", + "ishl", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_ishl_fits_in_32_concrete() { + test_concrete_aarch64_rule_with_lhs_termname( + "ishl_fits_in_32", + "ishl", + ConcreteTest { + termname: "ishl".to_string(), + args: vec![ + ConcreteInput { + literal: "#b00000001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00000010".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ], + output: ConcreteInput { + literal: "#b00000100".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + }, + ) +} + +#[test] +fn test_named_sshr_64() { + test_aarch64_rule_with_lhs_termname_simple( + "sshr_64", + "sshr", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_sshr_fits_in_32() { + test_aarch64_rule_with_lhs_termname_simple( + "sshr_fits_in_32", + "sshr", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_sshr_fits_in_32_concrete() { + test_concrete_aarch64_rule_with_lhs_termname( + "sshr_fits_in_32", + "sshr", + ConcreteTest { + termname: "sshr".to_string(), + args: vec![ + ConcreteInput { + literal: "#b10100000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00000001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ], + output: ConcreteInput { + literal: "#b11010000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + }, + ) +} + +#[test] +fn test_named_ushr_64() { + test_aarch64_rule_with_lhs_termname_simple( + "ushr_64", + "ushr", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_ushr_fits_in_32() { + test_aarch64_rule_with_lhs_termname_simple( + "ushr_fits_in_32", + "ushr", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) 
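+    // For the concrete check below: ushr is a logical (zero-filling) right
+    // shift, so #b10100000 >> #b00000001 = #b01010000, while the arithmetic
+    // sshr case above replicates the sign bit and yields #b11010000.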
+} + +#[test] +fn test_named_ushr_fits_in_32_concrete() { + test_concrete_aarch64_rule_with_lhs_termname( + "ushr_fits_in_32", + "ushr", + ConcreteTest { + termname: "ushr".to_string(), + args: vec![ + ConcreteInput { + literal: "#b10100000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00000001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ], + output: ConcreteInput { + literal: "#b01010000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + }, + ) +} + +#[test] +fn test_named_do_shift_64_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "do_shift_64_base_case", + "do_shift", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_do_shift_imm() { + let config = Config { + term: "do_shift".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, _args, lhs, rhs| { + let mask = smt.atom("#x00000000000000FF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + })), + names: Some(vec!["do_shift_imm".to_string()]), + }; + test_aarch64_with_config_simple(config, vec![(Bitwidth::I8, VerificationResult::Success)]); + let config = Config { + term: "do_shift".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, _args, lhs, rhs| { + let mask = smt.atom("#x000000000000FFFF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + })), + names: Some(vec!["do_shift_imm".to_string()]), + }; + test_aarch64_with_config_simple(config, vec![(Bitwidth::I16, VerificationResult::Success)]); + let config = Config { + term: "do_shift".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, _args, lhs, rhs| { + let mask = smt.atom("#x00000000FFFFFFFF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + })), + names: Some(vec!["do_shift_imm".to_string()]), + }; + test_aarch64_with_config_simple(config, vec![(Bitwidth::I32, VerificationResult::Success)]); + test_aarch64_rule_with_lhs_termname_simple( + "do_shift_imm", + "do_shift", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_do_shift_fits_in_16() { + let config = Config { + term: "do_shift".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, args, lhs, rhs| { + let ty_arg = args[1]; + let lower_8_bits_eq = { + let mask = smt.atom("#x00000000000000FF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + let lower_16_bits_eq = { + let mask = smt.atom("#x000000000000FFFF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + }; + smt.ite( + smt.eq(ty_arg, smt.atom("8")), + lower_8_bits_eq, + lower_16_bits_eq, + ) + })), + names: Some(vec!["do_shift_fits_in_16".to_string()]), + }; + test_aarch64_with_config_simple( + config, + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + ], + ); + + test_aarch64_rule_with_lhs_termname_simple( + "do_shift_fits_in_16", + "do_shift", + vec![ + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_do_shift_fits_in_16_concrete() { + // (decl do_shift (ALUOp Type Reg Value) Reg) + + 
test_concrete_aarch64_rule_with_lhs_termname( + "do_shift_fits_in_16", + "do_shift", + ConcreteTest { + termname: "do_shift".to_string(), + args: vec![ + ConcreteInput { + literal: "#x0e".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "16".to_string(), + ty: veri_ir::Type::Int, + }, + ConcreteInput { + literal: "#b0000000000000000000000000000000000000000000000000000000000000001" + .to_string(), + ty: veri_ir::Type::BitVector(Some(64)), + }, + ConcreteInput { + literal: "#b0000000000000001".to_string(), + ty: veri_ir::Type::BitVector(Some(16)), + }, + ], + output: ConcreteInput { + literal: "#b0000000000000000000000000000000000000000000000000000000000000010" + .to_string(), + ty: veri_ir::Type::BitVector(Some(64)), + }, + }, + ) +} + +#[test] +fn test_named_do_shift_32_base_case() { + test_aarch64_rule_with_lhs_termname_simple( + "do_shift_32_base_case", + "do_shift", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ); + let config = Config { + term: "do_shift".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, _args, lhs, rhs| { + let mask = smt.atom("#x00000000FFFFFFFF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + })), + names: Some(vec!["do_shift_32_base_case".to_string()]), + }; + test_aarch64_with_config_simple(config, vec![(Bitwidth::I32, VerificationResult::Success)]); +} + +#[test] +fn test_broken_do_shift_32() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/shifts/broken_do_shift_32.isle", + "do_shift".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ); + let config = Config { + term: "do_shift".to_string(), + distinct_check: true, + custom_assumptions: None, + custom_verification_condition: Some(Box::new(|smt, _args, lhs, rhs| { + let mask = smt.atom("#x00000000FFFFFFFF"); + smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) + })), + names: None, + }; + test_from_file_with_config_simple( + "./examples/broken/shifts/broken_do_shift_32.isle", + config, + vec![( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + )], + ); +} + +#[test] +fn test_broken_ishl_to_do_shift_64() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/shifts/broken_ishl_to_do_shift_64.isle", + "ishl".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +#[test] +fn test_broken_sshr_to_do_shift_fits_in_32() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/shifts/broken_sshr_to_do_shift_fits_in_32.isle", + "sshr".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_broken_sshr_to_do_shift_fits_in_32_concrete() { + test_concrete_input_from_file_with_lhs_termname( + "./examples/broken/shifts/broken_sshr_to_do_shift_fits_in_32.isle", + "sshr".to_string(), + 
ConcreteTest { + termname: "sshr".to_string(), + args: vec![ + ConcreteInput { + literal: "#b10100000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00000001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ], + // Wrong output: + output: ConcreteInput { + literal: "#b01010000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + }, + ) +} + +#[test] +fn test_broken_ushr_to_do_shift_fits_in_32() { + test_from_file_with_lhs_termname_simple( + "./examples/broken/shifts/broken_ushr_to_do_shift_fits_in_32.isle", + "ushr".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_if_let() { + test_from_file_with_lhs_termname_simple( + "./examples/constructs/if-let.isle", + "iadd".to_string(), + all_success_result(), + ); +} + +#[test] +fn test_named_icmp_8_16_32_64() { + test_aarch64_rule_with_lhs_termname_simple( + "icmp_8_16_32_64", + "icmp", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_lower_icmp_into_reg_8_16_32_64() { + test_aarch64_rule_with_lhs_termname_simple( + "lower_icmp_into_reg_8_16_32_64", + "lower_icmp_into_reg", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_lower_icmp_into_reg_8_16_32_64_concrete_1() { + test_concrete_aarch64_rule_with_lhs_termname( + "lower_icmp_into_reg_8_16_32_64", + "lower_icmp_into_reg", + ConcreteTest { + termname: "lower_icmp_into_reg".to_string(), + args: vec![ + ConcreteInput { + literal: "#b00000000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00000000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00000001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "8".to_string(), + ty: veri_ir::Type::Int, + }, + ConcreteInput { + literal: "8".to_string(), + ty: veri_ir::Type::Int, + }, + ], + output: ConcreteInput { + literal: "#b00000000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + }, + ) +} + +#[test] +fn test_named_lower_icmp_into_reg_8_16_32_64_concrete_2() { + test_concrete_aarch64_rule_with_lhs_termname( + "lower_icmp_into_reg_8_16_32_64", + "lower_icmp_into_reg", + ConcreteTest { + termname: "lower_icmp_into_reg".to_string(), + args: vec![ + ConcreteInput { + literal: "#b00000000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00000000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "#b00000000".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + ConcreteInput { + literal: "8".to_string(), + ty: veri_ir::Type::Int, + }, + ConcreteInput { + literal: "8".to_string(), + ty: veri_ir::Type::Int, + }, + ], + output: ConcreteInput { + literal: "#b00000001".to_string(), + ty: veri_ir::Type::BitVector(Some(8)), + }, + }, + ) +} + +// Narrow types fail because of rule priorities +// 
https://github.com/avanhatt/wasmtime/issues/32 +#[test] +fn test_named_lower_icmp_32_64() { + test_aarch64_rule_with_lhs_termname_simple( + "lower_icmp_32_64", + "lower_icmp", + vec![ + // (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + // ( + // Bitwidth::I16, + // VerificationResult::Failure(Counterexample {}), + // ), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_lower_icmp_8_16_signed() { + test_aarch64_rule_with_lhs_termname_simple( + "lower_icmp_8_16_signed", + "lower_icmp", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +// TODO AVH: Currently fails because needs priorities to show this +// only applies to unsigned cond codes +// https://github.com/avanhatt/wasmtime/issues/32 +#[test] +fn test_named_lower_icmp_8_16_unsigned_imm() { + test_aarch64_rule_with_lhs_termname_simple( + "lower_icmp_8_16_unsigned_imm", + "lower_icmp", + vec![ + // (Bitwidth::I8, VerificationResult::Success), + // (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +// TODO AVH: Currently fails because needs priorities to show this +// only applies to unsigned cond codes +// https://github.com/avanhatt/wasmtime/issues/32 +#[test] +fn test_named_lower_icmp_8_16_unsigned() { + test_aarch64_rule_with_lhs_termname_simple( + "lower_icmp_8_16_unsigned", + "lower_icmp", + vec![ + // (Bitwidth::I8, VerificationResult::Success), + // (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +// AVH TODO: this rule requires priorities to be correct for narrow cases +// https://github.com/avanhatt/wasmtime/issues/32 +#[test] +fn test_named_lower_icmp_32_64_const() { + test_aarch64_rule_with_lhs_termname_simple( + "lower_icmp_32_64_const", + "lower_icmp", + vec![ + // (Bitwidth::I8, VerificationResult::InapplicableRule), + // (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_lower_icmp_const_32_64_imm() { + test_aarch64_rule_with_lhs_termname_simple( + "lower_icmp_const_32_64_imm", + "lower_icmp_const", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +// AVH TODO: this rule requires priorities and a custom verification condition +// https://github.com/avanhatt/wasmtime/issues/32 +#[test] +fn test_named_lower_icmp_const_32_64_sgte() { + // Note: only one distinct condition code is matched on, so need to disable + // distinctness check + + let config = Config { + term: "lower_icmp_const".to_string(), + distinct_check: false, + custom_verification_condition: None, + custom_assumptions: None, + names: Some(vec!["lower_icmp_const_32_64_sgte".to_string()]), + }; + test_aarch64_with_config_simple( + config, + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + // Currently fails! 
The rewrite is not semantics-preserving + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +// AVH TODO: this rule requires priorities and a custom verification condition +// https://github.com/avanhatt/wasmtime/issues/32 +#[test] +fn test_named_lower_icmp_const_32_64_ugte() { + // Note: only one distinct condition code is matched on, so need to disable + // distinctness check + + let config = Config { + term: "lower_icmp_const".to_string(), + distinct_check: false, + custom_verification_condition: None, + custom_assumptions: None, + names: Some(vec!["lower_icmp_const_32_64_ugte".to_string()]), + }; + test_aarch64_with_config_simple( + config, + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + // Currently fails! The rewrite is not semantics-preserving + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ) +} + +#[test] +fn test_named_lower_icmp_const_32_64() { + test_aarch64_rule_with_lhs_termname_simple( + "lower_icmp_const_32_64", + "lower_icmp_const", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_umax() { + test_aarch64_rule_with_lhs_termname_simple( + "umax", + "umax", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_smax() { + test_aarch64_rule_with_lhs_termname_simple( + "smax", + "smax", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_umin() { + test_aarch64_rule_with_lhs_termname_simple( + "umin", + "umin", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_smin() { + test_aarch64_rule_with_lhs_termname_simple( + "smin", + "smin", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_iabs_64() { + test_aarch64_rule_with_lhs_termname_simple( + "iabs_64", + "iabs", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_iabs_8_16_32() { + test_aarch64_rule_with_lhs_termname_simple( + "iabs_8_16_32", + "iabs", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_bitselect() { + test_aarch64_rule_with_lhs_termname_simple("bitselect", "bitselect", all_success_result()) 
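+    // bitselect(c, x, y) takes each result bit from x where the corresponding
+    // bit of the mask c is 1 and from y where it is 0, i.e. (x & c) | (y & !c).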
+} + +#[test] +fn test_named_iconst() { + test_aarch64_rule_with_lhs_termname_simple("iconst", "iconst", all_success_result()) +} + +// Can't currently verify because ConsumesFlags requires a non-functional +// interpretation +// #[test] +// fn test_named_cmp_and_choose_8_16() { +// +// let config = Config { +// dyn_width: false, +// term: "cmp_and_choose".to_string(), +// distinct_check: true, +// custom_verification_condition: Some(Box::new(|smt, args, lhs, rhs| { +// let ty_arg = *args.first().unwrap(); +// let lower_8_bits_eq = { +// let mask = smt.atom("#x00000000000000FF"); +// smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) +// }; +// let lower_16_bits_eq = { +// let mask = smt.atom("#x000000000000FFFF"); +// smt.eq(smt.bvand(mask, lhs), smt.bvand(mask, rhs)) +// }; +// smt.ite( +// smt.eq(ty_arg, smt.atom("8")), +// lower_8_bits_eq, +// lower_16_bits_eq, +// ) +// })), +// names: Some(vec!["cmp_and_choose_8_16".to_string()]), +// }; +// test_aarch64_with_config_simple( +// config, +// vec![ +// (Bitwidth::I8, VerificationResult::Failure(Counterexample { })), +// (Bitwidth::I16, VerificationResult::Failure(Counterexample { })), +// (Bitwidth::I32, VerificationResult::InapplicableRule), +// (Bitwidth::I64, VerificationResult::InapplicableRule), +// ], +// ); +// }) +// } + +#[test] +fn test_named_popcnt_8() { + test_aarch64_rule_with_lhs_termname_simple( + "popcnt_8", + "popcnt", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_popcnt_16() { + test_aarch64_rule_with_lhs_termname_simple( + "popcnt_16", + "popcnt", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_popcnt_32() { + test_aarch64_rule_with_lhs_termname_simple( + "popcnt_32", + "popcnt", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +// Currently too slow +// https://github.com/avanhatt/wasmtime/issues/36 +#[test] +fn test_named_popcnt_64() { + test_aarch64_rule_with_lhs_termname_simple( + "popcnt_64", + "popcnt", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + // (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +// Currently too slow +// https://github.com/avanhatt/wasmtime/issues/36 +#[test] +#[ignore] +fn test_named_slow_popcnt_64() { + test_aarch64_rule_with_lhs_termname_simple( + "popcnt_64", + "popcnt", + vec![(Bitwidth::I64, VerificationResult::Unknown)], + ) +} + +#[test] +fn test_named_operand_size_32() { + // Since there are no bitvectors in the signature, need a custom assumption + // hook to pass through the value of the type argument + + static EXPECTED: [(Bitwidth, VerificationResult); 4] = [ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ]; + for (ty, result) in &EXPECTED { + let config = Config { + 
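+            // `term` names the ISLE term under verification; `names` restricts the run to the single named rule. +            // `distinct_check` additionally asks the solver for a model with distinct inputs (cf. `VerificationResult::NoDistinctModels` in veri_ir). +            // `custom_assumptions` passes the concrete type argument through to the solver, since no bitvector in the signature pins down a width.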
term: "operand_size".to_string(), + distinct_check: true, + custom_verification_condition: None, + custom_assumptions: Some(Box::new(|smt, args| { + let ty_arg = *args.first().unwrap(); + smt.eq(ty_arg, smt.numeral(*ty as usize)) + })), + names: Some(vec!["operand_size_32".to_string()]), + }; + test_aarch64_with_config_simple(config, vec![(*ty, result.clone())]); + } +} + +#[test] +fn test_named_operand_size_64() { + // Since there are no bitvectors in the signature, need a custom assumption + // hook to pass through the value of the type argument + + // Lower types precluded by priorities + static EXPECTED: [(Bitwidth, VerificationResult); 1] = [ + // (Bitwidth::I8, VerificationResult::Success), + // (Bitwidth::I16, VerificationResult::Success), + // (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ]; + for (ty, result) in &EXPECTED { + let config = Config { + term: "operand_size".to_string(), + distinct_check: true, + custom_verification_condition: None, + custom_assumptions: Some(Box::new(|smt, args| { + let ty_arg = *args.first().unwrap(); + smt.eq(ty_arg, smt.numeral(*ty as usize)) + })), + names: Some(vec!["operand_size_64".to_string()]), + }; + test_aarch64_with_config_simple(config, vec![(*ty, result.clone())]); + } +} + +#[test] +fn test_named_output_reg() { + test_aarch64_rule_with_lhs_termname_simple( + "output_reg", + "output_reg", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_broken_imm_udiv_cve_underlying() { + // Since there are no bitvectors in the signature, need a custom assumption + // hook to pass through the value of the type argument + + static EXPECTED: [(Bitwidth, VerificationResult); 4] = [ + (Bitwidth::I8, VerificationResult::Failure(Counterexample {})), + ( + Bitwidth::I16, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + (Bitwidth::I64, VerificationResult::Success), + ]; + for (ty, result) in &EXPECTED { + let config = Config { + term: "imm".to_string(), + distinct_check: true, + custom_verification_condition: None, + custom_assumptions: Some(Box::new(|smt, args| { + let ty_arg = *args.first().unwrap(); + smt.eq(ty_arg, smt.numeral(*ty as usize)) + })), + names: None, + }; + test_from_file_with_config_simple( + "./examples/broken/udiv/udiv_cve_underlying.isle", + config, + vec![(*ty, result.clone())], + ); + } +} + +#[test] +fn test_broken_imm_udiv_cve_underlying_32() { + // Since there are no bitvectors in the signature, need a custom assumption + // hook to pass through the value of the type argument + + static EXPECTED: [(Bitwidth, VerificationResult); 1] = [( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + )]; + for (ty, result) in &EXPECTED { + let config = Config { + term: "imm".to_string(), + distinct_check: true, + custom_verification_condition: None, + custom_assumptions: Some(Box::new(|smt, args| { + let ty_arg = *args.first().unwrap(); + smt.eq(ty_arg, smt.numeral(*ty as usize)) + })), + names: None, + }; + test_from_file_with_config_simple( + "./examples/broken/udiv/udiv_cve_underlying.isle", + config, + vec![(*ty, result.clone())], + ); + } +} + +// x64 + +#[test] +fn test_named_x64_iadd_base_case_32_or_64_lea() { + test_x64_rule_with_lhs_termname_simple( + "iadd_base_case_32_or_64_lea", + "iadd", + vec![ + 
(Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_x64_to_amode_add_base_case() { + test_x64_rule_with_lhs_termname_simple( + "to_amode_add_base_case", + "to_amode_add", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_to_amode_add_const_rhs() { + test_x64_rule_with_lhs_termname_simple( + "to_amode_add_const_rhs", + "to_amode_add", + vec![ + // TODO: make this work for I32 + // (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_x64_to_amode_add_const_lhs() { + test_x64_rule_with_lhs_termname_simple( + "to_amode_add_const_lhs", + "to_amode_add", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_to_amode_add_const_fold_iadd_lhs_rhs() { + test_x64_rule_with_lhs_termname_simple( + "to_amode_add_const_fold_iadd_lhs_rhs", + "to_amode_add", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_to_amode_add_const_fold_iadd_lhs_lhs() { + test_x64_rule_with_lhs_termname_simple( + "to_amode_add_const_fold_iadd_lhs_lhs", + "to_amode_add", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_to_amode_add_const_fold_iadd_rhs_rhs() { + test_x64_rule_with_lhs_termname_simple( + "to_amode_add_const_fold_iadd_rhs_rhs", + "to_amode_add", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_to_amode_add_const_fold_iadd_rhs_lhs() { + test_x64_rule_with_lhs_termname_simple( + "to_amode_add_const_fold_iadd_rhs_lhs", + "to_amode_add", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_amode_imm_reg_base() { + test_x64_rule_with_lhs_termname_simple( + "amode_imm_reg_base", + "amode_imm_reg", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_amode_imm_reg_iadd() { + test_x64_rule_with_lhs_termname_simple( + "amode_imm_reg_iadd", + "amode_imm_reg", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_amode_imm_reg_reg_shift_no_shift() { + test_x64_rule_with_lhs_termname_simple( + "amode_imm_reg_reg_shift_no_shift", + "amode_imm_reg_reg_shift", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_amode_imm_reg_reg_shift_shl_rhs() { + test_x64_rule_with_lhs_termname_simple( + "amode_imm_reg_reg_shift_shl_rhs", + "amode_imm_reg_reg_shift", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_x64_amode_imm_reg_reg_shift_shl_lhs() { + test_x64_rule_with_lhs_termname_simple( + "amode_imm_reg_reg_shift_shl_lhs", + "amode_imm_reg_reg_shift", + vec![(Bitwidth::I64, VerificationResult::Success)], + ) +} + +#[test] +fn test_named_load_i8_aarch64_uload8() { + test_aarch64_rule_with_lhs_termname_simple( + "load_i8_aarch64_uload8", + "load", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_load_i16_aarch64_uload16() { + test_aarch64_rule_with_lhs_termname_simple( + "load_i16_aarch64_uload16", + "load", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + 
(Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_load_i32_aarch64_uload32() { + test_aarch64_rule_with_lhs_termname_simple( + "load_i32_aarch64_uload32", + "load", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_load_i64_aarch64_uload64() { + test_aarch64_rule_with_lhs_termname_simple( + "load_i64_aarch64_uload64", + "load", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_store_i8_aarch64_store8() { + test_aarch64_rule_with_lhs_termname_simple( + "store_i8_aarch64_store8", + "store", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_store_i16_aarch64_store16() { + test_aarch64_rule_with_lhs_termname_simple( + "store_i16_aarch64_store16", + "store", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_store_i32_aarch64_store32() { + test_aarch64_rule_with_lhs_termname_simple( + "store_i32_aarch64_store32", + "store", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_store_i64_aarch64_store64() { + test_aarch64_rule_with_lhs_termname_simple( + "store_i64_aarch64_store64", + "store", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::InapplicableRule), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_named_load_sub64_x64_movzx() { + test_x64_rule_with_lhs_termname_simple( + "load_sub64_x64_movzx", + "load", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::InapplicableRule), + ], + ) +} + +#[test] +fn test_named_store_x64_add_mem() { + test_x64_rule_with_lhs_termname_simple( + "store_x64_add_mem", + "store", + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} +#[test] +fn test_named_store_x64_movrm() { + test_x64_rule_with_lhs_termname_simple( + "store_x64_movrm", + "store", + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ) +} + +#[test] +fn test_load_conditional() { + 
test_from_file_with_lhs_termname_simple( + "./examples/load/load_conditional.isle", + "lhs".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ); +} + +#[test] +fn test_store_switch() { + test_from_file_with_lhs_termname_simple( + "./examples/store/store_switch.isle", + "lhs".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::Success), + (Bitwidth::I16, VerificationResult::Success), + (Bitwidth::I32, VerificationResult::Success), + (Bitwidth::I64, VerificationResult::Success), + ], + ); +} + +#[test] +#[should_panic] +fn test_load_add_panic() { + test_from_file_with_lhs_termname_simple( + "./examples/load/load_add_panic.isle", + "lhs".to_string(), + all_failure_result(), + ); +} + +#[test] +fn test_broken_isub_store_with_load() { + test_from_file_with_lhs_termname_simple( + "./examples/store/broken_isub_store_with_load.isle", + "store".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ); +} + +#[test] +fn test_broken_bvsub_store_with_load() { + test_from_file_with_lhs_termname_simple( + "./examples/store/broken_bvsub_store_with_load.isle", + "store".to_string(), + vec![ + (Bitwidth::I8, VerificationResult::InapplicableRule), + (Bitwidth::I16, VerificationResult::InapplicableRule), + ( + Bitwidth::I32, + VerificationResult::Failure(Counterexample {}), + ), + ( + Bitwidth::I64, + VerificationResult::Failure(Counterexample {}), + ), + ], + ); +} diff --git a/cranelift/isle/veri/veri_ir/Cargo.toml b/cranelift/isle/veri/veri_ir/Cargo.toml new file mode 100644 index 000000000000..95d216763e3d --- /dev/null +++ b/cranelift/isle/veri/veri_ir/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "veri_ir" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0 WITH LLVM-exception" +authors = ["Alexa VanHattum", "Monica Pardeshi", "Michael McLoughlin", "Wellesley Programming Systems Lab"] +publish = false + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/cranelift/isle/veri/veri_ir/README.md b/cranelift/isle/veri/veri_ir/README.md new file mode 100644 index 000000000000..9fc23dd8cb97 --- /dev/null +++ b/cranelift/isle/veri/veri_ir/README.md @@ -0,0 +1,6 @@ +# Verification Intermediate Representation + +This crate defines two intermediate representations for verifying ISLE rules. + +The core, lower-level Verification IR defines typed expressions for bitvectors, booleans, and integers. +The higher-level Annotation IR (`src/annotation_ir.rs`) requires types only on some expressions (currently, constants and function definitions) and has some syntactic sugar for easier bitvector conversions (currently, `VIRExpr::BVConvTo` and `VIRExpr::BVConvFrom`). \ No newline at end of file diff --git a/cranelift/isle/veri/veri_ir/src/annotation_ir.rs b/cranelift/isle/veri/veri_ir/src/annotation_ir.rs new file mode 100644 index 000000000000..e3362a1fcdd6 --- /dev/null +++ b/cranelift/isle/veri/veri_ir/src/annotation_ir.rs @@ -0,0 +1,251 @@ +/// A higher-level annotation IR that does not specify bitvector widths. +/// This allows annotations to be generic over possible types, which +/// corresponds to how ISLE rewrites are written.
+use std::fmt; +/// A bound variable, including the VIR type +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct BoundVar { + pub name: String, + pub ty: Option<Type>, +} + +impl BoundVar { + /// Construct a new bound variable with a known type, cloning from references + pub fn new_with_ty(name: &str, ty: &Type) -> Self { + BoundVar { + name: name.to_string(), + ty: Some(ty.clone()), + } + } + + /// Construct a new bound variable with no type yet + pub fn new(name: &str) -> Self { + BoundVar { + name: name.to_string(), + ty: None, + } + } + + /// An expression with the bound variable's name + pub fn as_expr(&self) -> Expr { + Expr::Var(self.name.clone()) + } +} + +/// A function signature annotation, including the bound variable names for all +/// arguments and the return value. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TermSignature { + pub args: Vec<BoundVar>, + pub ret: BoundVar, +} + +/// Verification IR annotations for an ISLE term consist of the function +/// signature and a list of assertions. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TermAnnotation { + pub sig: TermSignature, + // Note: extra Box for now for ease of parsing + #[allow(clippy::vec_box)] + pub assumptions: Vec<Box<Expr>>, + + #[allow(clippy::vec_box)] + pub assertions: Vec<Box<Expr>>, +} + +impl TermAnnotation { + /// New annotation + pub fn new(sig: TermSignature, assumptions: Vec<Expr>, assertions: Vec<Expr>) -> Self { + TermAnnotation { + sig, + assumptions: assumptions.iter().map(|x| Box::new(x.clone())).collect(), + assertions: assertions.iter().map(|x| Box::new(x.clone())).collect(), + } + } + + pub fn sig(&self) -> &TermSignature { + &self.sig + } + + pub fn assertions(&self) -> Vec<Expr> { + self.assertions.iter().map(|x| *x.clone()).collect() + } +} + +/// Higher-level type, not including bitwidths. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub enum Type { + /// Internal type used solely for type inference + Poly(u32), + + /// The expression is a bitvector, currently modeled in the + /// logic QF_BV https://SMT-LIB.cs.uiowa.edu/version1/logics/QF_BV.smt + /// This corresponds to Cranelift's Isle type: + /// (type Value (primitive Value)) + BitVector, + + /// Use if the width is known + BitVectorWithWidth(usize), + + /// Use if the width is unknown after inference, indexed by a + /// canonical type variable + BitVectorUnknown(u32), + + /// The expression is an integer (currently used for ISLE type, + /// representing bitwidth) + Int, + + /// The expression is a boolean.
+ Bool, + + /// Unit, removed before SMT-Lib + Unit, +} + +impl fmt::Display for Type { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Type::Poly(_) => write!(f, "poly"), + Type::BitVector => write!(f, "bv"), + Type::BitVectorWithWidth(w) => write!(f, "bv{}", *w), + Type::BitVectorUnknown(_) => write!(f, "bv"), + Type::Int => write!(f, "Int"), + Type::Bool => write!(f, "Bool"), + Type::Unit => write!(f, "Unit"), + } + } +} + +impl Type { + pub fn is_poly(&self) -> bool { + matches!(self, Type::Poly(_)) + } +} + +/// Type-specified constants +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Const { + pub ty: Type, + pub value: i128, + pub width: usize, +} + +/// Width arguments +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Width { + Const(usize), + RegWidth, +} + +/// Typed expressions (u32 is the type var) +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Expr { + // Terminal nodes + Var(String), + Const(Const), + True, + False, + + // Get the width of a bitvector + WidthOf(Box<Expr>), + + // Boolean operations + Not(Box<Expr>), + And(Box<Expr>, Box<Expr>), + Or(Box<Expr>, Box<Expr>), + Imp(Box<Expr>, Box<Expr>), + Eq(Box<Expr>, Box<Expr>), + Lte(Box<Expr>, Box<Expr>), + Lt(Box<Expr>, Box<Expr>), + + BVSgt(Box<Expr>, Box<Expr>), + BVSgte(Box<Expr>, Box<Expr>), + BVSlt(Box<Expr>, Box<Expr>), + BVSlte(Box<Expr>, Box<Expr>), + BVUgt(Box<Expr>, Box<Expr>), + BVUgte(Box<Expr>, Box<Expr>), + BVUlt(Box<Expr>, Box<Expr>), + BVUlte(Box<Expr>, Box<Expr>), + + BVSaddo(Box<Expr>, Box<Expr>), + + // Bitvector operations + // Note: these follow the naming conventions of the SMT theory of bitvectors: + // https://SMT-LIB.cs.uiowa.edu/version1/logics/QF_BV.smt + // Unary operators + BVNeg(Box<Expr>), + BVNot(Box<Expr>), + CLZ(Box<Expr>), + CLS(Box<Expr>), + Rev(Box<Expr>), + BVPopcnt(Box<Expr>), + + // Binary operators + BVMul(Box<Expr>, Box<Expr>), + BVUDiv(Box<Expr>, Box<Expr>), + BVSDiv(Box<Expr>, Box<Expr>), + BVAdd(Box<Expr>, Box<Expr>), + BVSub(Box<Expr>, Box<Expr>), + BVUrem(Box<Expr>, Box<Expr>), + BVSrem(Box<Expr>, Box<Expr>), + BVAnd(Box<Expr>, Box<Expr>), + BVOr(Box<Expr>, Box<Expr>), + BVXor(Box<Expr>, Box<Expr>), + BVRotl(Box<Expr>, Box<Expr>), + BVRotr(Box<Expr>, Box<Expr>), + BVShl(Box<Expr>, Box<Expr>), + BVShr(Box<Expr>, Box<Expr>), + BVAShr(Box<Expr>, Box<Expr>), + + // Includes type + BVSubs(Box<Expr>, Box<Expr>, Box<Expr>), + + // Conversions + // Zero extend, static and dynamic width + BVZeroExtTo(Box<Width>, Box<Expr>), + BVZeroExtToVarWidth(Box<Expr>, Box<Expr>), + + // Sign extend, static and dynamic width + BVSignExtTo(Box<Width>, Box<Expr>), + BVSignExtToVarWidth(Box<Expr>, Box<Expr>), + + // Extract specified bits + BVExtract(usize, usize, Box<Expr>), + + // Concat two bitvectors + BVConcat(Vec<Expr>), + + // Convert integer to bitvector + BVIntToBv(usize, Box<Expr>), + + // Convert bitvector to integer + BVToInt(Box<Expr>), + + // Conversion to wider/narrower bits, without an explicit extend + // Allow the destination width to be symbolic. + BVConvTo(Box<Width>, Box<Expr>), + + // Conditional if-then-else + Conditional(Box<Expr>, Box<Expr>, Box<Expr>), + + // Switch + Switch(Box<Expr>, Vec<(Expr, Expr)>), + + LoadEffect(Box<Expr>, Box<Expr>, Box<Expr>), + + StoreEffect(Box<Expr>, Box<Expr>, Box<Expr>, Box<Expr>), +} + +impl Expr { + pub fn var(s: &str) -> Expr { + Expr::Var(s.to_string()) + } + + pub fn unary<F: Fn(Box<Expr>) -> Expr>(f: F, x: Expr) -> Expr { + f(Box::new(x)) + } + + pub fn binary<F: Fn(Box<Expr>, Box<Expr>) -> Expr>(f: F, x: Expr, y: Expr) -> Expr { + f(Box::new(x), Box::new(y)) + } +} diff --git a/cranelift/isle/veri/veri_ir/src/lib.rs b/cranelift/isle/veri/veri_ir/src/lib.rs new file mode 100644 index 000000000000..4f2dc129af07 --- /dev/null +++ b/cranelift/isle/veri/veri_ir/src/lib.rs @@ -0,0 +1,318 @@ +//! Verification Intermediate Representation for relevant types, eventually to +//! be lowered to SMT. The goal is to leave some freedom to change term +//! encodings or the specific solver backend. +//! +//! Note: annotations use the higher-level IR in annotation_ir.rs.
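+// Hypothetical usage sketch (illustration only, not part of this file; it +// assumes the `BoundVar`, `TermSignature`, `TermAnnotation`, and `Expr` +// constructors defined in annotation_ir.rs): an annotation for a simple +// identity term could be assembled roughly as follows: +// +// fn identity_annotation() -> annotation_ir::TermAnnotation { +//     use annotation_ir::*; +//     let a = BoundVar::new("a"); +//     let r = BoundVar::new("r"); +//     let sig = TermSignature { args: vec![a.clone()], ret: r.clone() }; +//     // One assertion: the result equals the argument. +//     let eq = Expr::binary(Expr::Eq, a.as_expr(), r.as_expr()); +//     TermAnnotation::new(sig, vec![], vec![eq]) +// }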
+pub mod annotation_ir; +use core::fmt; +use std::collections::HashMap; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TypeContext { + pub tyvars: HashMap<Expr, u32>, + pub tymap: HashMap<u32, Type>, + pub tyvals: HashMap<u32, i128>, + // map of type var to set index + pub bv_unknown_width_sets: HashMap<u32, u32>, +} + +// Used for providing concrete inputs to test rule semantics +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ConcreteInput { + // SMT-LIB-formatted bitvector literal + pub literal: String, + pub ty: Type, +} +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ConcreteTest { + pub termname: String, + // List of name, bitvector literal, widths + pub args: Vec<ConcreteInput>, + pub output: ConcreteInput, +} + +/// A bound variable, including the VIR type +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct BoundVar { + pub name: String, + pub tyvar: u32, +} + +/// Verification type +#[derive(Clone, Debug, PartialEq, Eq, Hash, Copy)] +pub enum Type { + /// The expression is a bitvector, currently modeled in the + /// logic QF_BV https://SMT-LIB.cs.uiowa.edu/version1/logics/QF_BV.smt + /// This corresponds to Cranelift's Isle type: + /// (type Value (primitive Value)) + BitVector(Option<usize>), + + /// The expression is a boolean. This does not directly correspond + /// to a specific Cranelift Isle type, rather, we use it for the + /// language of assertions. + Bool, + + /// The expression is an Isle type. This is separate from BitVector + /// because it allows us to use a different solver type (e.g., Int) + /// for assertions (e.g., fits_in_64). + /// This corresponds to Cranelift's Isle type: + /// (type Type (primitive Type)) + Int, + + Unit, +} + +impl fmt::Display for Type { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Type::BitVector(None) => write!(f, "bv"), + Type::BitVector(Some(s)) => write!(f, "(bv {})", *s), + Type::Bool => write!(f, "Bool"), + Type::Int => write!(f, "Int"), + Type::Unit => write!(f, "Unit"), + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct TermSignature { + pub args: Vec<Type>, + pub ret: Type, + + // Which type varies for different bitwidth Values, that is, the type that + // is used as a key for testing for that type. + pub canonical_type: Option<Type>, +} + +impl fmt::Display for TermSignature { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let args = self + .args + .iter() + .map(|a| a.to_string()) + .collect::<Vec<_>>() + .join(" "); + let canon = self + .canonical_type + .map(|c| format!("(canon {})", c)) + .unwrap_or_default(); + write!(f, "((args {}) (ret {}) {})", args, self.ret, canon) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum Terminal { + Var(String), + + // Literal SMT value, for testing (plus type variable) + Literal(String, u32), + + // Value, type variable + Const(i128, u32), + True, + False, + Wildcard(u32), +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum UnaryOp { + // Boolean operations + Not, + + // Bitvector operations + BVNeg, + BVNot, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum BinaryOp { + // Boolean operations + And, + Or, + Imp, + Eq, + Lte, + Lt, + + // Bitvector operations + BVSgt, + BVSgte, + BVSlt, + BVSlte, + BVUgt, + BVUgte, + BVUlt, + BVUlte, + + BVMul, + BVUDiv, + BVSDiv, + BVAdd, + BVSub, + BVUrem, + BVSrem, + BVAnd, + BVOr, + BVXor, + BVRotl, + BVRotr, + BVShl, + BVShr, + BVAShr, + + BVSaddo, +} + +/// Expressions (combined across all types).
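+// Hypothetical usage sketch (illustration only, not part of this file; it +// assumes the `Expr` variants and `Display` impl below): the S-expression +// `(bvadd x y)` could be built and printed as follows: +// +// let x = Expr::Terminal(Terminal::Var("x".to_string())); +// let y = Expr::Terminal(Terminal::Var("y".to_string())); +// let sum = Expr::Binary(BinaryOp::BVAdd, Box::new(x), Box::new(y)); +// assert_eq!(sum.to_string(), "(bvadd x y)");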
+#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum Expr { + // Terminal nodes + Terminal(Terminal), + + // Opcode nodes + Unary(UnaryOp, Box<Expr>), + Binary(BinaryOp, Box<Expr>, Box<Expr>), + + // Count leading zeros + CLZ(Box<Expr>), + CLS(Box<Expr>), + Rev(Box<Expr>), + + BVPopcnt(Box<Expr>), + + BVSubs(Box<Expr>, Box<Expr>, Box<Expr>), + + // ITE + Conditional(Box<Expr>, Box<Expr>, Box<Expr>), + + // Switch + Switch(Box<Expr>, Vec<(Expr, Expr)>), + + // Conversions + // Extract specified bits + BVExtract(usize, usize, Box<Expr>), + + // Concat bitvectors + BVConcat(Vec<Expr>), + + // Convert integer to bitvector with that value + BVIntToBV(usize, Box<Expr>), + + // Convert bitvector to integer with that value + BVToInt(Box<Expr>), + + // Zero extend, with static or dynamic width + BVZeroExtTo(usize, Box<Expr>), + BVZeroExtToVarWidth(Box<Expr>, Box<Expr>), + + // Sign extend, with static or dynamic width + BVSignExtTo(usize, Box<Expr>), + BVSignExtToVarWidth(Box<Expr>, Box<Expr>), + + // Conversion to wider/narrower bits, without an explicit extend + BVConvTo(Box<Expr>, Box<Expr>), + + WidthOf(Box<Expr>), + + LoadEffect(Box<Expr>, Box<Expr>, Box<Expr>), + StoreEffect(Box<Expr>, Box<Expr>, Box<Expr>, Box<Expr>), +} + +impl fmt::Display for Expr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Expr::Terminal(t) => match t { + Terminal::Var(v) => write!(f, "{}", v), + Terminal::Literal(v, _) => write!(f, "{}", v), + Terminal::Const(c, _) => write!(f, "{}", c), + Terminal::True => write!(f, "true"), + Terminal::False => write!(f, "false"), + Terminal::Wildcard(_) => write!(f, "_"), + }, + Expr::Unary(o, e) => { + let op = match o { + UnaryOp::Not => "not", + UnaryOp::BVNeg => "bvneg", + UnaryOp::BVNot => "bvnot", + }; + write!(f, "({} {})", op, e) + } + Expr::Binary(o, x, y) => { + let op = match o { + BinaryOp::And => "and", + BinaryOp::Or => "or", + BinaryOp::Imp => "=>", + BinaryOp::Eq => "=", + BinaryOp::Lte => "<=", + BinaryOp::Lt => "<", + BinaryOp::BVSgt => "bvsgt", + BinaryOp::BVSgte => "bvsgte", + BinaryOp::BVSlt => "bvslt", + BinaryOp::BVSlte => "bvslte", + BinaryOp::BVUgt => "bvugt", + BinaryOp::BVUgte => "bvugte", + BinaryOp::BVUlt => "bvult", + BinaryOp::BVUlte => "bvulte", + BinaryOp::BVMul => "bvmul", + BinaryOp::BVUDiv => "bvudiv", + BinaryOp::BVSDiv => "bvsdiv", + BinaryOp::BVAdd => "bvadd", + BinaryOp::BVSub => "bvsub", + BinaryOp::BVUrem => "bvurem", + BinaryOp::BVSrem => "bvsrem", + BinaryOp::BVAnd => "bvand", + BinaryOp::BVOr => "bvor", + BinaryOp::BVXor => "bvxor", + BinaryOp::BVRotl => "rotl", + BinaryOp::BVRotr => "rotr", + BinaryOp::BVShl => "bvshl", + BinaryOp::BVShr => "bvshr", + BinaryOp::BVAShr => "bvashr", + BinaryOp::BVSaddo => "bvsaddo", + }; + write!(f, "({} {} {})", op, x, y) + } + Expr::CLZ(e) => write!(f, "(clz {})", e), + Expr::CLS(e) => write!(f, "(cls {})", e), + Expr::Rev(e) => write!(f, "(rev {})", e), + Expr::BVPopcnt(e) => write!(f, "(popcnt {})", e), + Expr::BVSubs(t, x, y) => write!(f, "(subs {} {} {})", t, x, y), + Expr::Conditional(c, t, e) => write!(f, "(if {} {} {})", c, t, e), + Expr::Switch(m, cs) => { + let cases: Vec<String> = cs.iter().map(|(c, m)| format!("({} {})", c, m)).collect(); + write!(f, "(switch {} {})", m, cases.join("")) + } + Expr::BVExtract(h, l, e) => write!(f, "(extract {} {} {})", *h, *l, e), + Expr::BVConcat(es) => { + let vs: Vec<String> = es.iter().map(|v| format!("{}", v)).collect(); + write!(f, "(concat {})", vs.join("")) + } + Expr::BVIntToBV(t, e) => write!(f, "(int2bv {} {})", t, e), + Expr::BVToInt(b) => write!(f, "(bv2int {})", b), + Expr::BVZeroExtTo(d, e) => write!(f, "(zero_ext {} {})", *d, e), + Expr::BVZeroExtToVarWidth(d, e) => write!(f, "(zero_ext {} {})", d, e),
Expr::BVSignExtTo(d, e) => write!(f, "(sign_ext {} {})", *d, e), + Expr::BVSignExtToVarWidth(d, e) => write!(f, "(sign_ext {} {})", *d, e), + Expr::BVConvTo(x, y) => write!(f, "(conv_to {} {})", x, y), + Expr::WidthOf(e) => write!(f, "(widthof {})", e), + Expr::LoadEffect(x, y, z) => write!(f, "(load_effect {} {} {})", x, y, z), + Expr::StoreEffect(w, x, y, z) => write!(f, "(store_effect {} {} {} {})", w, x, y, z), + } + } +} + +/// To-be-fleshed-out verification counterexample for failures +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Counterexample {} + +/// To-be-fleshed-out verification result +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum VerificationResult { + InapplicableRule, + Success, + Failure(Counterexample), + Unknown, + // Optional: heuristic that a rule is bad if there is only + // a single model with distinct bitvector inputs + NoDistinctModels, +} diff --git a/cranelift/reader/Cargo.toml b/cranelift/reader/Cargo.toml index a0ea0935b70b..deb3644ae442 100644 --- a/cranelift/reader/Cargo.toml +++ b/cranelift/reader/Cargo.toml @@ -18,3 +18,7 @@ anyhow = { workspace = true, features = ['std'] } cranelift-codegen = { workspace = true } smallvec = { workspace = true } target-lexicon = { workspace = true, features = ['std'] } + +[dev-dependencies] +# Some tests require that the x86_64 target parses for the target specification. +cranelift-codegen = { workspace = true, features = ['x86'] } diff --git a/crates/fuzzing/src/oracles/diff_spec.rs b/crates/fuzzing/src/oracles/diff_spec.rs index 90b53a3656b5..df4b4f3143da 100644 --- a/crates/fuzzing/src/oracles/diff_spec.rs +++ b/crates/fuzzing/src/oracles/diff_spec.rs @@ -23,6 +23,8 @@ impl SpecInterpreter { config.threads_enabled = false; config.bulk_memory_enabled = false; config.reference_types_enabled = false; + config.tail_call_enabled = false; + config.relaxed_simd_enabled = false; Self } diff --git a/crates/wasi-preview1-component-adapter/provider/README.md b/crates/wasi-preview1-component-adapter/provider/README.md index aed6afb35fc7..0adb37f35e9b 100644 --- a/crates/wasi-preview1-component-adapter/provider/README.md +++ b/crates/wasi-preview1-component-adapter/provider/README.md @@ -36,10 +36,10 @@ use wasi_preview1_component_adapter_provider::WASI_SNAPSHOT_PREVIEW1_REACTOR_ADA use wit_component::ComponentEncoder; fn main() -> Result<(), Box<dyn std::error::Error>> { - let wasm_p1_bytes = std::fs::read("path/to/your/your-component.p1.wasm"); + let wasm_p1_bytes = std::fs::read("path/to/your/your-component.p1.wasm")?; let wasm_p2_bytes = ComponentEncoder::default() - .module(&wasm_module_bytes)? + .module(&wasm_p1_bytes)?
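        // Splice the preview1 adapter into the core module so the encoded component targets WASI 0.2 interfaces rather than `wasi_snapshot_preview1`.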
.adapter( "wasi_snapshot_preview1", WASI_SNAPSHOT_PREVIEW1_REACTOR_ADAPTER, diff --git a/crates/wasmtime/src/runtime/gc/enabled/arrayref.rs b/crates/wasmtime/src/runtime/gc/enabled/arrayref.rs index ae10d239b1eb..cad25fe994d3 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/arrayref.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/arrayref.rs @@ -694,7 +694,7 @@ impl ArrayRef { store: &mut AutoAssertNoGc<'_>, gc_ref: VMGcRef, ) -> Rooted<ArrayRef> { - debug_assert!(!gc_ref.is_i31()); + debug_assert!(gc_ref.is_arrayref(&*store.unwrap_gc_store().gc_heap)); Rooted::new(store, gc_ref) } } diff --git a/crates/wasmtime/src/runtime/gc/enabled/structref.rs b/crates/wasmtime/src/runtime/gc/enabled/structref.rs index 9989133a309f..a138c13f081d 100644 --- a/crates/wasmtime/src/runtime/gc/enabled/structref.rs +++ b/crates/wasmtime/src/runtime/gc/enabled/structref.rs @@ -561,7 +561,7 @@ impl StructRef { store: &mut AutoAssertNoGc<'_>, gc_ref: VMGcRef, ) -> Rooted<StructRef> { - debug_assert!(!gc_ref.is_i31()); + debug_assert!(gc_ref.is_structref(&*store.unwrap_gc_store().gc_heap)); Rooted::new(store, gc_ref) } } diff --git a/crates/wasmtime/src/runtime/vm/libcalls.rs b/crates/wasmtime/src/runtime/vm/libcalls.rs index ab225ae83dd8..0e08dcd9aff8 100644 --- a/crates/wasmtime/src/runtime/vm/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/libcalls.rs @@ -800,6 +800,7 @@ unsafe fn array_new_elem( }; use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements}; + // Convert indices to their typed forms. let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index); let elem_index = ElemIndex::from_u32(elem_index); @@ -881,15 +882,113 @@ unsafe fn array_new_elem( #[cfg(feature = "gc")] unsafe fn array_init_elem( - _instance: &mut Instance, - _array_type_index: u32, - _array: u32, - _dst_index: u32, - _elem_index: u32, - _src: u32, - _len: u32, + instance: &mut Instance, + array_type_index: u32, + array: u32, + dst: u32, + elem_index: u32, + src: u32, + len: u32, ) -> Result<()> { - bail!("the `array.init_elem` instruction is not yet implemented") + use crate::{ + store::AutoAssertNoGc, + vm::const_expr::{ConstEvalContext, ConstExprEvaluator}, + ArrayRef, Func, Val, + }; + use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements}; + + // NB: Don't use `OpaqueRootScope` here because we need to borrow the store + // through `instance` during const evaluation, which is within the same + // region that the `OpaqueRootScope` would otherwise span while borrowing + // the same store, resulting in double borrows. + instance.with_gc_lifo_scope(|instance| { + // Convert the indices into their typed forms. + let _array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index); + let elem_index = ElemIndex::from_u32(elem_index); + + log::trace!( + "array.init_elem(array={array:#x}, dst={dst}, elem_index={elem_index:?}, src={src}, len={len})", + ); + + // Convert the raw GC ref into a `Rooted`. + let array = + VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference.into_anyhow())?; + let array = { + let mut no_gc = AutoAssertNoGc::new((*instance.store()).store_opaque_mut()); + ArrayRef::from_cloned_gc_ref(&mut no_gc, array) + }; + + // Bounds check the destination within the array. + let array_len = array._len((*instance.store()).store_opaque())?; + log::trace!("array_len = {array_len}"); + if dst + .checked_add(len) + .ok_or_else(|| Trap::ArrayOutOfBounds.into_anyhow())? + > array_len + { + return Err(Trap::ArrayOutOfBounds.into_anyhow()); + } + + // Get the passive element segment.
+ let mut storage = None; + let elements = instance.passive_element_segment(&mut storage, elem_index); + + // Convert array offsets into `usize`s. + let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds.into_anyhow())?; + let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds.into_anyhow())?; + + // Turn the elements into `Val`s. + let vals = match elements { + TableSegmentElements::Functions(fs) => fs + .get(src..) + .and_then(|s| s.get(..len)) + .ok_or_else(|| Trap::TableOutOfBounds.into_anyhow())? + .iter() + .map(|f| { + let raw_func_ref = instance.get_func_ref(*f).unwrap_or(core::ptr::null_mut()); + let func = Func::from_vm_func_ref( + (*instance.store()).store_opaque_mut(), + raw_func_ref, + ); + Val::FuncRef(func) + }) + .collect::<Vec<_>>(), + TableSegmentElements::Expressions(xs) => { + let elem_ty = array + ._ty((*instance.store()).store_opaque())? + .element_type(); + let elem_ty = elem_ty.unwrap_val_type(); + + let mut const_context = ConstEvalContext::new(instance); + let mut const_evaluator = ConstExprEvaluator::default(); + + xs.get(src..) + .and_then(|s| s.get(..len)) + .ok_or_else(|| Trap::TableOutOfBounds.into_anyhow())? + .iter() + .map(|x| unsafe { + let raw = const_evaluator + .eval(&mut const_context, x) + .expect("const expr should be valid"); + let mut store = AutoAssertNoGc::new( + (*const_context.instance.store()).store_opaque_mut(), + ); + Val::_from_raw(&mut store, raw, elem_ty) + }) + .collect::<Vec<_>>() + } + }; + + // Copy the values into the array. + let store = (*instance.store()).store_opaque_mut(); + for (i, val) in vals.into_iter().enumerate() { + let i = u32::try_from(i).unwrap(); + let j = dst.checked_add(i).unwrap(); + array._set(store, j, val)?; + } + + Ok(()) + }) } #[cfg(feature = "gc")] diff --git a/fuzz/fuzz_targets/cranelift-fuzzgen.rs b/fuzz/fuzz_targets/cranelift-fuzzgen.rs index 0582b035cc1a..af82db560ac0 100644 --- a/fuzz/fuzz_targets/cranelift-fuzzgen.rs +++ b/fuzz/fuzz_targets/cranelift-fuzzgen.rs @@ -107,6 +107,9 @@ impl Default for Statistics { // Pre-Register all trap codes since we can't modify this hashmap atomically.
let mut run_result_trap = HashMap::new(); run_result_trap.insert(CraneliftTrap::Debug, AtomicU64::new(0)); + run_result_trap.insert(CraneliftTrap::BadSignature, AtomicU64::new(0)); + run_result_trap.insert(CraneliftTrap::UnreachableCodeReached, AtomicU64::new(0)); + run_result_trap.insert(CraneliftTrap::HeapMisaligned, AtomicU64::new(0)); for trapcode in TrapCode::non_user_traps() { run_result_trap.insert(CraneliftTrap::User(*trapcode), AtomicU64::new(0)); } diff --git a/pulley/Cargo.toml b/pulley/Cargo.toml index 0c3677641a1b..9d95056fda15 100644 --- a/pulley/Cargo.toml +++ b/pulley/Cargo.toml @@ -7,14 +7,14 @@ license = "Apache-2.0 WITH LLVM-exception" name = "pulley-interpreter" readme = "./README.md" repository = "https://github.com/bytecodealliance/wasmtime/tree/main/pulley" -version = "0.2.0" +version.workspace = true [lints] workspace = true [dependencies] arbitrary = { workspace = true, optional = true } -cranelift-bitset = { workspace = true } +cranelift-bitset = { workspace = true } log = { workspace = true } sptr = { workspace = true } diff --git a/supply-chain/audits.toml b/supply-chain/audits.toml index 8528fea25727..cf8417bd6b60 100644 --- a/supply-chain/audits.toml +++ b/supply-chain/audits.toml @@ -1470,6 +1470,12 @@ who = "Benjamin Bouvier <public@benj.me>" criteria = "safe-to-deploy" delta = "0.9.0 -> 0.10.3" +[[audits.easy-smt]] +who = "Alex Crichton <alex@alexcrichton.com>" +criteria = "safe-to-deploy" +version = "0.2.2" +notes = "This crate is authored by trusted Bytecode Alliance members." + [[audits.ed25519]] who = "Alex Crichton <alex@alexcrichton.com>" criteria = "safe-to-deploy" diff --git a/supply-chain/imports.lock b/supply-chain/imports.lock index eb3344e53f06..a3d879a51f54 100644 --- a/supply-chain/imports.lock +++ b/supply-chain/imports.lock @@ -221,6 +221,10 @@ audited_as = "0.111.0" version = "0.2.0" audited_as = "0.1.1" +[[unpublished.pulley-interpreter]] +version = "26.0.0" +audited_as = "0.1.1" + [[unpublished.wasi-common]] version = "24.0.0" audited_as = "23.0.1" @@ -577,6 +581,10 @@ audited_as = "0.21.1" version = "0.24.0" audited_as = "0.22.0" +[[unpublished.winch-codegen]] +version = "26.0.0" +audited_as = "0.23.1" + [[publisher.aho-corasick]] version = "1.0.2" when = "2023-06-04" @@ -1500,6 +1508,13 @@ user-id = 6743 user-login = "epage" user-name = "Ed Page" +[[publisher.unicode-segmentation]] +version = "1.11.0" +when = "2024-02-07" +user-id = 1139 +user-login = "Manishearth" +user-name = "Manish Goregaokar" + [[publisher.unicode-width]] version = "0.1.9" when = "2021-09-16" @@ -2086,6 +2101,12 @@ when = "2024-08-20" user-id = 73222 user-login = "wasmtime-publish" +[[publisher.winch-codegen]] +version = "0.23.1" +when = "2024-09-24" +user-id = 73222 +user-login = "wasmtime-publish" + [[publisher.windows]] version = "0.52.0" when = "2023-11-15" @@ -2590,6 +2611,18 @@ criteria = "safe-to-deploy" version = "1.0.0" notes = "No unsafe usage or ambient capabilities" +[[audits.embark-studios.audits.strum]] +who = "Johan Andersson <opensource@embark-studios.com>" +criteria = "safe-to-deploy" +version = "0.24.1" +notes = "Tiny layer on top of the proc macro crate, found no unsafe or system usage" + +[[audits.embark-studios.audits.strum_macros]] +who = "Johan Andersson <opensource@embark-studios.com>" +criteria = "safe-to-deploy" +version = "0.24.3" +notes = "Proc macro. No unsafe or added ambient capabilities" + [[audits.embark-studios.audits.utf8parse]] who = "Johan Andersson <opensource@embark-studios.com>" criteria = "safe-to-deploy" @@ -2767,6 +2800,15 @@ renew = false notes = "I've reviewed every source contribution that was neither authored nor reviewed by Mozilla."
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml" +[[audits.mozilla.wildcard-audits.unicode-segmentation]] +who = "Manish Goregaokar <manishsmail@gmail.com>" +criteria = "safe-to-deploy" +user-id = 1139 # Manish Goregaokar (Manishearth) +start = "2019-05-15" +end = "2024-05-03" +notes = "All code written or reviewed by Manish" +aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml" + [[audits.mozilla.wildcard-audits.unicode-width]] who = "Manish Goregaokar <manishsmail@gmail.com>" criteria = "safe-to-deploy" diff --git a/tests/wast.rs b/tests/wast.rs index 5c0c7749d55a..d358f04dc95d 100644 --- a/tests/wast.rs +++ b/tests/wast.rs @@ -204,7 +204,6 @@ fn should_fail(test: &Path, strategy: Strategy) -> bool { } let unsupported_gc_tests = [ "array_copy.wast", - "array_init_elem.wast", "binary_gc.wast", "br_on_cast_fail.wast", "br_on_cast.wast", diff --git a/winch/codegen/Cargo.toml b/winch/codegen/Cargo.toml index 724671a58b31..44841e55ed53 100644 --- a/winch/codegen/Cargo.toml +++ b/winch/codegen/Cargo.toml @@ -4,7 +4,7 @@ name = "winch-codegen" description = "Winch code generation library" license = "Apache-2.0 WITH LLVM-exception" repository = "https://github.com/bytecodealliance/wasmtime" -version = "0.24.0" +version.workspace = true edition.workspace = true rust-version.workspace = true