1use crate::address::{Address, AddressSize};
4use crate::frame::Frame;
5use crate::instruction::InstructionContext;
6use crate::state::{InterpreterFunctionRef, MemoryError, State};
7use crate::value::{DataValueExt, ValueConversionKind, ValueError, ValueResult};
8use cranelift_codegen::data_value::DataValue;
9use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
10use cranelift_codegen::ir::{
11 types, AbiParam, AtomicRmwOp, Block, BlockArg, BlockCall, Endianness, ExternalName, FuncRef,
12 Function, InstructionData, MemFlags, Opcode, TrapCode, Type, Value as ValueRef,
13};
14use log::trace;
15use smallvec::{smallvec, SmallVec};
16use std::fmt::Debug;
17use std::ops::RangeFrom;
18use thiserror::Error;
19
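/// Check that interpreted argument values match the parameter types of a signature. Any pair of
/// vector types is accepted as-is, mirroring the `is_vector()` arm below, since interpreter
/// vector values do not track their exact lane shape.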
20fn validate_signature_params(sig: &[AbiParam], args: &[DataValue]) -> bool {
22 args.iter()
23 .map(|r| r.ty())
24 .zip(sig.iter().map(|r| r.value_type))
25 .all(|(a, b)| match (a, b) {
26 (a, b) if a.is_vector() && b.is_vector() => true,
33 (a, b) => a == b,
34 })
35}
36
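/// Add `head` to every value in `tail` and return the sum as an unsigned integer.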
37fn sum_unsigned(head: DataValue, tail: SmallVec<[DataValue; 1]>) -> ValueResult<u128> {
39 let mut acc = head;
40 for t in tail {
41 acc = DataValueExt::add(acc, t)?;
42 }
43 acc.into_int_unsigned()
44}
45
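/// Collect the values for a list of block arguments from the current frame. Exception arguments
/// are not supported and cause a panic.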
46fn collect_block_args(
48 frame: &Frame,
49 args: impl Iterator<Item = BlockArg>,
50) -> SmallVec<[DataValue; 1]> {
51 args.into_iter()
52 .map(|n| match n {
53 BlockArg::Value(n) => frame.get(n).clone(),
54 _ => panic!("exceptions not supported"),
55 })
56 .collect()
57}
58
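/// Interpret a single Cranelift instruction given its [InstructionContext] and the interpreter
/// [State], and describe how control flow should continue via [ControlFlow]. Guest-visible traps
/// are reported as [ControlFlow::Trap]; [StepError] is reserved for failures of the interpreter
/// itself (unknown functions, invalid values, memory errors).
///
/// A driver loop might look roughly like the following sketch (the surrounding bookkeeping is
/// hypothetical and shown only for illustration):
///
/// ```text
/// loop {
///     match step(&mut state, ctx)? {
///         ControlFlow::Assign(values) => { /* write results, advance to the next instruction */ }
///         ControlFlow::ContinueAt(block, args) => { /* bind block params, jump to `block` */ }
///         ControlFlow::Return(values) => { /* pop the frame, hand values to the caller */ }
///         ControlFlow::Trap(trap) => { /* surface the trap to the embedder */ }
///         _ => { /* Continue, Call, ReturnCall, ... */ }
///     }
/// }
/// ```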
59pub fn step<'a, I>(state: &mut dyn State<'a>, inst_context: I) -> Result<ControlFlow<'a>, StepError>
63where
64 I: InstructionContext,
65{
66 let inst = inst_context.data();
67 let ctrl_ty = inst_context.controlling_type().unwrap();
68 trace!(
69 "Step: {}{}",
70 inst.opcode(),
71 if ctrl_ty.is_invalid() {
72 String::new()
73 } else {
74 format!(".{ctrl_ty}")
75 }
76 );
77
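    // Retrieve the value of a single instruction argument from the current frame.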
78 let arg = |index: usize| -> DataValue {
83 let value_ref = inst_context.args()[index];
84 state.current_frame().get(value_ref).clone()
85 };
86
87 let args = || -> SmallVec<[DataValue; 1]> { state.collect_values(inst_context.args()) };
89
90 let args_range = |indexes: RangeFrom<usize>| -> Result<SmallVec<[DataValue; 1]>, StepError> {
92 Ok(SmallVec::<[DataValue; 1]>::from(&args()[indexes]))
93 };
94
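    // Decode the instruction's immediate operand into a `DataValue`, based on the instruction
    // format (constants, shuffle masks, float/integer immediates, or memory offsets).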
95 let imm = || -> DataValue {
97 DataValue::from(match inst {
98 InstructionData::UnaryConst {
99 constant_handle,
100 opcode,
101 } => {
102 let buffer = state
103 .get_current_function()
104 .dfg
105 .constants
106 .get(constant_handle);
107 match (ctrl_ty.bytes(), opcode) {
108 (_, Opcode::F128const) => DataValue::F128(buffer.try_into().expect("a 16-byte data buffer")),
109 (16, Opcode::Vconst) => DataValue::V128(buffer.as_slice().try_into().expect("a 16-byte data buffer")),
110 (8, Opcode::Vconst) => DataValue::V64(buffer.as_slice().try_into().expect("an 8-byte data buffer")),
111 (4, Opcode::Vconst) => DataValue::V32(buffer.as_slice().try_into().expect("a 4-byte data buffer")),
112 (2, Opcode::Vconst) => DataValue::V16(buffer.as_slice().try_into().expect("a 2-byte data buffer")),
113 (length, opcode) => panic!("unexpected UnaryConst controlling type size {length} for opcode {opcode:?}"),
114 }
115 }
116 InstructionData::Shuffle { imm, .. } => {
117 let mask = state
118 .get_current_function()
119 .dfg
120 .immediates
121 .get(imm)
122 .unwrap()
123 .as_slice();
124 match mask.len() {
125 16 => DataValue::V128(mask.try_into().expect("a 16-byte vector mask")),
126 8 => DataValue::V64(mask.try_into().expect("an 8-byte vector mask")),
127 4 => DataValue::V32(mask.try_into().expect("a 4-byte vector mask")),
128 2 => DataValue::V16(mask.try_into().expect("a 2-byte vector mask")),
129 length => panic!("unexpected Shuffle mask length {length}"),
130 }
131 }
            InstructionData::BinaryImm8 { imm, .. } | InstructionData::TernaryImm8 { imm, .. } => {
                DataValue::from(imm as i8)
            }
136 InstructionData::UnaryIeee16 { imm, .. } => DataValue::from(imm),
138 InstructionData::UnaryIeee32 { imm, .. } => DataValue::from(imm),
140 InstructionData::Load { offset, .. }
141 | InstructionData::Store { offset, .. }
142 | InstructionData::StackLoad { offset, .. }
143 | InstructionData::StackStore { offset, .. } => DataValue::from(offset),
144 InstructionData::UnaryImm { imm, .. }
146 | InstructionData::BinaryImm64 { imm, .. }
147 | InstructionData::IntCompareImm { imm, .. } => DataValue::from(imm.bits()),
148 InstructionData::UnaryIeee64 { imm, .. } => DataValue::from(imm),
149 _ => unreachable!(),
150 })
151 };
152
153 let imm_as_ctrl_ty = || -> Result<DataValue, ValueError> {
157 DataValue::convert(imm(), ValueConversionKind::Exact(ctrl_ty))
158 };
159
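    // Assignment helpers: wrap result values in `ControlFlow::Assign`; `assign_or_trap` turns
    // integer division-by-zero and overflow errors into the corresponding traps.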
160 let assign = |value: DataValue| ControlFlow::Assign(smallvec![value]);
162
163 let assign_multiple = |values: &[DataValue]| ControlFlow::Assign(SmallVec::from(values));
165
166 let assign_or_trap = |value: ValueResult<DataValue>| match value {
168 Ok(v) => Ok(assign(v)),
169 Err(ValueError::IntegerDivisionByZero) => Ok(ControlFlow::Trap(CraneliftTrap::User(
170 TrapCode::INTEGER_DIVISION_BY_ZERO,
171 ))),
172 Err(ValueError::IntegerOverflow) => Ok(ControlFlow::Trap(CraneliftTrap::User(
173 TrapCode::INTEGER_OVERFLOW,
174 ))),
175 Err(e) => Err(e),
176 };
177
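    // Map an interpreter memory error onto the corresponding Cranelift trap.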
178 let memerror_to_trap = |e: MemoryError| match e {
179 MemoryError::InvalidAddress(_)
180 | MemoryError::InvalidAddressType(_)
181 | MemoryError::InvalidOffset { .. }
182 | MemoryError::InvalidEntry { .. } => CraneliftTrap::User(TrapCode::HEAP_OUT_OF_BOUNDS),
183 MemoryError::OutOfBoundsStore { mem_flags, .. }
184 | MemoryError::OutOfBoundsLoad { mem_flags, .. } => CraneliftTrap::User(
185 mem_flags
186 .trap_code()
187 .expect("op with notrap flag should not trap"),
188 ),
189 MemoryError::MisalignedLoad { .. } => CraneliftTrap::HeapMisaligned,
190 MemoryError::MisalignedStore { .. } => CraneliftTrap::HeapMisaligned,
191 };
192
193 let assign_or_memtrap = |res| match res {
195 Ok(v) => assign(v),
196 Err(e) => ControlFlow::Trap(memerror_to_trap(e)),
197 };
198
199 let continue_or_memtrap = |res| match res {
201 Ok(_) => ControlFlow::Continue,
202 Err(e) => ControlFlow::Trap(memerror_to_trap(e)),
203 };
204
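    // Compute an effective address: zero-extend the immediate and all address arguments to
    // `addr_ty` and sum them as unsigned integers.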
205 let calculate_addr =
206 |addr_ty: Type, imm: DataValue, args: SmallVec<[DataValue; 1]>| -> ValueResult<u64> {
207 let imm = imm.convert(ValueConversionKind::ZeroExtend(addr_ty))?;
208 let args = args
209 .into_iter()
210 .map(|v| v.convert(ValueConversionKind::ZeroExtend(addr_ty)))
211 .collect::<ValueResult<SmallVec<[DataValue; 1]>>>()?;
212
213 Ok(sum_unsigned(imm, args)? as u64)
214 };
215
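    // Lane-wise arithmetic helpers: apply `op` across the lanes of the controlling type and assign
    // the result; `binary_can_trap` additionally converts division errors into traps.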
216 let unary =
219 |op: fn(DataValue) -> ValueResult<DataValue>, arg: DataValue| -> ValueResult<ControlFlow> {
220 let ctrl_ty = inst_context.controlling_type().unwrap();
221 let res = unary_arith(arg, ctrl_ty, op)?;
222 Ok(assign(res))
223 };
224
225 let binary = |op: fn(DataValue, DataValue) -> ValueResult<DataValue>,
228 left: DataValue,
229 right: DataValue|
230 -> ValueResult<ControlFlow> {
231 let ctrl_ty = inst_context.controlling_type().unwrap();
232 let res = binary_arith(left, right, ctrl_ty, op)?;
233 Ok(assign(res))
234 };
235
236 let binary_can_trap = |op: fn(DataValue, DataValue) -> ValueResult<DataValue>,
238 left: DataValue,
239 right: DataValue|
240 -> ValueResult<ControlFlow> {
241 let ctrl_ty = inst_context.controlling_type().unwrap();
242 let res = binary_arith(left, right, ctrl_ty, op);
243 assign_or_trap(res)
244 };
245
246 let choose = |condition: bool, left: DataValue, right: DataValue| -> ControlFlow {
248 assign(if condition { left } else { right })
249 };
250
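    // Build the control flow for a taken branch: resolve the target block and collect its branch
    // arguments from the current frame.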
251 let continue_at = |block: BlockCall| {
254 let branch_args = collect_block_args(
255 state.current_frame(),
256 block.args(&state.get_current_function().dfg.value_lists),
257 );
258 Ok(ControlFlow::ContinueAt(
259 block.block(&state.get_current_function().dfg.value_lists),
260 branch_args,
261 ))
262 };
263
264 #[expect(unused_variables, reason = "here in case it's needed in the future")]
266 let branch_when = |condition: bool, block| -> Result<ControlFlow, StepError> {
267 if condition {
268 continue_at(block)
269 } else {
270 Ok(ControlFlow::Continue)
271 }
272 };
273
274 let trap_code = || -> TrapCode { inst.trap_code().unwrap() };
276
277 let trap_when = |condition: bool, trap: CraneliftTrap| -> ControlFlow {
279 if condition {
280 ControlFlow::Trap(trap)
281 } else {
282 ControlFlow::Continue
283 }
284 };
285
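    // Invoke a function or libcall after checking the argument types against its signature; a
    // mismatch in arguments or returns yields a `BadSignature` trap.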
286 let call_func =
288 |func_ref: InterpreterFunctionRef<'a>,
289 args: SmallVec<[DataValue; 1]>,
290 make_ctrl_flow: fn(&'a Function, SmallVec<[DataValue; 1]>) -> ControlFlow<'a>|
291 -> Result<ControlFlow<'a>, StepError> {
292 let signature = func_ref.signature();
293
294 let args_match = validate_signature_params(&signature.params[..], &args[..]);
297 if !args_match {
298 return Ok(ControlFlow::Trap(CraneliftTrap::BadSignature));
299 }
300
301 Ok(match func_ref {
302 InterpreterFunctionRef::Function(func) => make_ctrl_flow(func, args),
303 InterpreterFunctionRef::LibCall(libcall) => {
304 debug_assert!(
305 !matches!(
306 inst.opcode(),
307 Opcode::ReturnCall | Opcode::ReturnCallIndirect,
308 ),
309 "Cannot tail call to libcalls"
310 );
311 let libcall_handler = state.get_libcall_handler();
312
313 let res = libcall_handler(libcall, args);
315 let res = match res {
316 Err(trap) => return Ok(ControlFlow::Trap(trap)),
317 Ok(rets) => rets,
318 };
319
320 if validate_signature_params(&signature.returns[..], &res[..]) {
322 ControlFlow::Assign(res)
323 } else {
324 ControlFlow::Trap(CraneliftTrap::BadSignature)
325 }
326 }
327 })
328 };
329
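    // Interpret the instruction itself, one opcode at a time.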
330 Ok(match inst.opcode() {
332 Opcode::Jump => {
333 if let InstructionData::Jump { destination, .. } = inst {
334 continue_at(destination)?
335 } else {
336 unreachable!()
337 }
338 }
339 Opcode::Brif => {
340 if let InstructionData::Brif {
341 arg,
342 blocks: [block_then, block_else],
343 ..
344 } = inst
345 {
346 let arg = state.current_frame().get(arg).clone();
347
348 let condition = arg.convert(ValueConversionKind::ToBoolean)?.into_bool()?;
349
350 if condition {
351 continue_at(block_then)?
352 } else {
353 continue_at(block_else)?
354 }
355 } else {
356 unreachable!()
357 }
358 }
359 Opcode::BrTable => {
360 if let InstructionData::BranchTable { table, .. } = inst {
361 let jt_data = &state.get_current_function().stencil.dfg.jump_tables[table];
362
363 let jump_target = usize::try_from(arg(0).into_int_unsigned()?)
365 .ok()
366 .and_then(|i| jt_data.as_slice().get(i))
367 .copied()
368 .unwrap_or(jt_data.default_block());
369
370 continue_at(jump_target)?
371 } else {
372 unreachable!()
373 }
374 }
375 Opcode::Trap => ControlFlow::Trap(CraneliftTrap::User(trap_code())),
376 Opcode::Debugtrap => ControlFlow::Trap(CraneliftTrap::Debug),
377 Opcode::Trapz => trap_when(!arg(0).into_bool()?, CraneliftTrap::User(trap_code())),
378 Opcode::Trapnz => trap_when(arg(0).into_bool()?, CraneliftTrap::User(trap_code())),
379 Opcode::Return => ControlFlow::Return(args()),
380 Opcode::Call | Opcode::ReturnCall => {
381 let func_ref = if let InstructionData::Call { func_ref, .. } = inst {
382 func_ref
383 } else {
384 unreachable!()
385 };
386
387 let curr_func = state.get_current_function();
388 let ext_data = curr_func
389 .dfg
390 .ext_funcs
391 .get(func_ref)
392 .ok_or(StepError::UnknownFunction(func_ref))?;
393
394 let args = args();
395 let func = match ext_data.name {
396 ExternalName::User(_) | ExternalName::TestCase(_) => {
398 let function = state
399 .get_function(func_ref)
400 .ok_or(StepError::UnknownFunction(func_ref))?;
401 InterpreterFunctionRef::Function(function)
402 }
403 ExternalName::LibCall(libcall) => InterpreterFunctionRef::LibCall(libcall),
404 ExternalName::KnownSymbol(_) => unimplemented!(),
405 };
406
407 let make_control_flow = match inst.opcode() {
408 Opcode::Call => ControlFlow::Call,
409 Opcode::ReturnCall => ControlFlow::ReturnCall,
410 _ => unreachable!(),
411 };
412
413 call_func(func, args, make_control_flow)?
414 }
415 Opcode::CallIndirect | Opcode::ReturnCallIndirect => {
416 let args = args();
417 let addr_dv = DataValue::I64(arg(0).into_int_unsigned()? as i64);
418 let addr = Address::try_from(addr_dv.clone()).map_err(StepError::MemoryError)?;
419
420 let func = state
421 .get_function_from_address(addr)
422 .ok_or_else(|| StepError::MemoryError(MemoryError::InvalidAddress(addr_dv)))?;
423
424 let call_args: SmallVec<[DataValue; 1]> = SmallVec::from(&args[1..]);
425
426 let make_control_flow = match inst.opcode() {
427 Opcode::CallIndirect => ControlFlow::Call,
428 Opcode::ReturnCallIndirect => ControlFlow::ReturnCall,
429 _ => unreachable!(),
430 };
431
432 call_func(func, call_args, make_control_flow)?
433 }
434 Opcode::FuncAddr => {
435 let func_ref = if let InstructionData::FuncAddr { func_ref, .. } = inst {
436 func_ref
437 } else {
438 unreachable!()
439 };
440
441 let ext_data = state
442 .get_current_function()
443 .dfg
444 .ext_funcs
445 .get(func_ref)
446 .ok_or(StepError::UnknownFunction(func_ref))?;
447
448 let addr_ty = inst_context.controlling_type().unwrap();
449 assign_or_memtrap({
450 AddressSize::try_from(addr_ty).and_then(|addr_size| {
451 let addr = state.function_address(addr_size, &ext_data.name)?;
452 let dv = DataValue::try_from(addr)?;
453 Ok(dv.into())
454 })
455 })
456 }
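        // Loads: compute the effective address, perform a checked load of `load_ty`, then extend
        // the loaded value to the controlling type where the opcode requires it.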
457 Opcode::Load
458 | Opcode::Uload8
459 | Opcode::Sload8
460 | Opcode::Uload16
461 | Opcode::Sload16
462 | Opcode::Uload32
463 | Opcode::Sload32
464 | Opcode::Uload8x8
465 | Opcode::Sload8x8
466 | Opcode::Uload16x4
467 | Opcode::Sload16x4
468 | Opcode::Uload32x2
469 | Opcode::Sload32x2 => {
470 let ctrl_ty = inst_context.controlling_type().unwrap();
471 let (load_ty, kind) = match inst.opcode() {
472 Opcode::Load => (ctrl_ty, None),
473 Opcode::Uload8 => (types::I8, Some(ValueConversionKind::ZeroExtend(ctrl_ty))),
474 Opcode::Sload8 => (types::I8, Some(ValueConversionKind::SignExtend(ctrl_ty))),
475 Opcode::Uload16 => (types::I16, Some(ValueConversionKind::ZeroExtend(ctrl_ty))),
476 Opcode::Sload16 => (types::I16, Some(ValueConversionKind::SignExtend(ctrl_ty))),
477 Opcode::Uload32 => (types::I32, Some(ValueConversionKind::ZeroExtend(ctrl_ty))),
478 Opcode::Sload32 => (types::I32, Some(ValueConversionKind::SignExtend(ctrl_ty))),
479 Opcode::Uload8x8
480 | Opcode::Sload8x8
481 | Opcode::Uload16x4
482 | Opcode::Sload16x4
483 | Opcode::Uload32x2
484 | Opcode::Sload32x2 => unimplemented!(),
485 _ => unreachable!(),
486 };
487
488 let addr_value = calculate_addr(types::I64, imm(), args())?;
489 let mem_flags = inst.memflags().expect("instruction to have memory flags");
490 let loaded = assign_or_memtrap(
491 Address::try_from(addr_value)
492 .and_then(|addr| state.checked_load(addr, load_ty, mem_flags)),
493 );
494
495 match (loaded, kind) {
496 (ControlFlow::Assign(ret), Some(c)) => ControlFlow::Assign(
497 ret.into_iter()
498 .map(|loaded| loaded.convert(c.clone()))
499 .collect::<ValueResult<SmallVec<[DataValue; 1]>>>()?,
500 ),
501 (cf, _) => cf,
502 }
503 }
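        // Stores: optionally truncate the stored value, then perform a checked store at the
        // computed effective address.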
504 Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 => {
505 let kind = match inst.opcode() {
506 Opcode::Store => None,
507 Opcode::Istore8 => Some(ValueConversionKind::Truncate(types::I8)),
508 Opcode::Istore16 => Some(ValueConversionKind::Truncate(types::I16)),
509 Opcode::Istore32 => Some(ValueConversionKind::Truncate(types::I32)),
510 _ => unreachable!(),
511 };
512
513 let addr_value = calculate_addr(types::I64, imm(), args_range(1..)?)?;
514 let mem_flags = inst.memflags().expect("instruction to have memory flags");
515 let reduced = if let Some(c) = kind {
516 arg(0).convert(c)?
517 } else {
518 arg(0)
519 };
520 continue_or_memtrap(
521 Address::try_from(addr_value)
522 .and_then(|addr| state.checked_store(addr, reduced, mem_flags)),
523 )
524 }
525 Opcode::StackLoad => {
526 let load_ty = inst_context.controlling_type().unwrap();
527 let slot = inst.stack_slot().unwrap();
528 let offset = sum_unsigned(imm(), args())? as u64;
529 let mem_flags = MemFlags::new();
530 assign_or_memtrap({
531 state
532 .stack_address(AddressSize::_64, slot, offset)
533 .and_then(|addr| state.checked_load(addr, load_ty, mem_flags))
534 })
535 }
536 Opcode::StackStore => {
537 let arg = arg(0);
538 let slot = inst.stack_slot().unwrap();
539 let offset = sum_unsigned(imm(), args_range(1..)?)? as u64;
540 let mem_flags = MemFlags::new();
541 continue_or_memtrap({
542 state
543 .stack_address(AddressSize::_64, slot, offset)
544 .and_then(|addr| state.checked_store(addr, arg, mem_flags))
545 })
546 }
547 Opcode::StackAddr => {
548 let load_ty = inst_context.controlling_type().unwrap();
549 let slot = inst.stack_slot().unwrap();
550 let offset = sum_unsigned(imm(), args())? as u64;
551 assign_or_memtrap({
552 AddressSize::try_from(load_ty).and_then(|addr_size| {
553 let addr = state.stack_address(addr_size, slot, offset)?;
554 let dv = DataValue::try_from(addr)?;
555 Ok(dv.into())
556 })
557 })
558 }
559 Opcode::DynamicStackAddr => unimplemented!("DynamicStackSlot"),
560 Opcode::DynamicStackLoad => unimplemented!("DynamicStackLoad"),
561 Opcode::DynamicStackStore => unimplemented!("DynamicStackStore"),
562 Opcode::GlobalValue | Opcode::SymbolValue | Opcode::TlsValue => {
563 if let InstructionData::UnaryGlobalValue { global_value, .. } = inst {
564 assign_or_memtrap(state.resolve_global_value(global_value))
565 } else {
566 unreachable!()
567 }
568 }
569 Opcode::GetPinnedReg => assign(state.get_pinned_reg()),
570 Opcode::SetPinnedReg => {
571 let arg0 = arg(0);
572 state.set_pinned_reg(arg0);
573 ControlFlow::Continue
574 }
575 Opcode::Iconst => assign(DataValueExt::int(imm().into_int_signed()?, ctrl_ty)?),
576 Opcode::F16const => assign(imm()),
577 Opcode::F32const => assign(imm()),
578 Opcode::F64const => assign(imm()),
579 Opcode::F128const => assign(imm()),
580 Opcode::Vconst => assign(imm()),
581 Opcode::Nop => ControlFlow::Continue,
582 Opcode::Select | Opcode::SelectSpectreGuard => choose(arg(0).into_bool()?, arg(1), arg(2)),
583 Opcode::Bitselect => assign(bitselect(arg(0), arg(1), arg(2))?),
584 Opcode::Icmp => assign(icmp(ctrl_ty, inst.cond_code().unwrap(), &arg(0), &arg(1))?),
585 Opcode::IcmpImm => assign(icmp(
586 ctrl_ty,
587 inst.cond_code().unwrap(),
588 &arg(0),
589 &imm_as_ctrl_ty()?,
590 )?),
591 Opcode::Smin => {
592 if ctrl_ty.is_vector() {
593 let icmp = icmp(ctrl_ty, IntCC::SignedGreaterThan, &arg(1), &arg(0))?;
594 assign(bitselect(icmp, arg(0), arg(1))?)
595 } else {
596 assign(arg(0).smin(arg(1))?)
597 }
598 }
599 Opcode::Umin => {
600 if ctrl_ty.is_vector() {
601 let icmp = icmp(ctrl_ty, IntCC::UnsignedGreaterThan, &arg(1), &arg(0))?;
602 assign(bitselect(icmp, arg(0), arg(1))?)
603 } else {
604 assign(arg(0).umin(arg(1))?)
605 }
606 }
607 Opcode::Smax => {
608 if ctrl_ty.is_vector() {
609 let icmp = icmp(ctrl_ty, IntCC::SignedGreaterThan, &arg(0), &arg(1))?;
610 assign(bitselect(icmp, arg(0), arg(1))?)
611 } else {
612 assign(arg(0).smax(arg(1))?)
613 }
614 }
615 Opcode::Umax => {
616 if ctrl_ty.is_vector() {
617 let icmp = icmp(ctrl_ty, IntCC::UnsignedGreaterThan, &arg(0), &arg(1))?;
618 assign(bitselect(icmp, arg(0), arg(1))?)
619 } else {
620 assign(arg(0).umax(arg(1))?)
621 }
622 }
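        // Rounding average: (x + y + 1) / 2, evaluated lane-wise with unsigned division.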
623 Opcode::AvgRound => {
624 let sum = DataValueExt::add(arg(0), arg(1))?;
625 let one = DataValueExt::int(1, arg(0).ty())?;
626 let inc = DataValueExt::add(sum, one)?;
627 let two = DataValueExt::int(2, arg(0).ty())?;
628 binary(DataValueExt::udiv, inc, two)?
629 }
630 Opcode::Iadd => binary(DataValueExt::add, arg(0), arg(1))?,
631 Opcode::UaddSat => assign(binary_arith(
632 arg(0),
633 arg(1),
634 ctrl_ty,
635 DataValueExt::uadd_sat,
636 )?),
637 Opcode::SaddSat => assign(binary_arith(
638 arg(0),
639 arg(1),
640 ctrl_ty,
641 DataValueExt::sadd_sat,
642 )?),
643 Opcode::Isub => binary(DataValueExt::sub, arg(0), arg(1))?,
644 Opcode::UsubSat => assign(binary_arith(
645 arg(0),
646 arg(1),
647 ctrl_ty,
648 DataValueExt::usub_sat,
649 )?),
650 Opcode::SsubSat => assign(binary_arith(
651 arg(0),
652 arg(1),
653 ctrl_ty,
654 DataValueExt::ssub_sat,
655 )?),
656 Opcode::Ineg => binary(DataValueExt::sub, DataValueExt::int(0, ctrl_ty)?, arg(0))?,
657 Opcode::Iabs => {
658 let (min_val, _) = ctrl_ty.lane_type().bounds(true);
659 let min_val: DataValue = DataValueExt::int(min_val as i128, ctrl_ty.lane_type())?;
660 let arg0 = extractlanes(&arg(0), ctrl_ty)?;
661 let new_vec = arg0
662 .into_iter()
663 .map(|lane| {
664 if lane == min_val {
665 Ok(min_val.clone())
666 } else {
667 DataValueExt::int(lane.into_int_signed()?.abs(), ctrl_ty.lane_type())
668 }
669 })
670 .collect::<ValueResult<SimdVec<DataValue>>>()?;
671 assign(vectorizelanes(&new_vec, ctrl_ty)?)
672 }
673 Opcode::Imul => binary(DataValueExt::mul, arg(0), arg(1))?,
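        // High-half multiply: extend each lane to double width, multiply, and keep the upper half.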
674 Opcode::Umulhi | Opcode::Smulhi => {
675 let double_length = match ctrl_ty.lane_bits() {
676 8 => types::I16,
677 16 => types::I32,
678 32 => types::I64,
679 64 => types::I128,
680 _ => unimplemented!("Unsupported integer length {}", ctrl_ty.bits()),
681 };
682 let conv_type = if inst.opcode() == Opcode::Umulhi {
683 ValueConversionKind::ZeroExtend(double_length)
684 } else {
685 ValueConversionKind::SignExtend(double_length)
686 };
687 let arg0 = extractlanes(&arg(0), ctrl_ty)?;
688 let arg1 = extractlanes(&arg(1), ctrl_ty)?;
689
690 let res = arg0
691 .into_iter()
692 .zip(arg1)
693 .map(|(x, y)| {
694 let x = x.convert(conv_type.clone())?;
695 let y = y.convert(conv_type.clone())?;
696
697 Ok(DataValueExt::mul(x, y)?
698 .convert(ValueConversionKind::ExtractUpper(ctrl_ty.lane_type()))?)
699 })
700 .collect::<ValueResult<SimdVec<DataValue>>>()?;
701
702 assign(vectorizelanes(&res, ctrl_ty)?)
703 }
704 Opcode::Udiv => binary_can_trap(DataValueExt::udiv, arg(0), arg(1))?,
705 Opcode::Sdiv => binary_can_trap(DataValueExt::sdiv, arg(0), arg(1))?,
706 Opcode::Urem => binary_can_trap(DataValueExt::urem, arg(0), arg(1))?,
707 Opcode::Srem => binary_can_trap(DataValueExt::srem, arg(0), arg(1))?,
708 Opcode::IaddImm => binary(DataValueExt::add, arg(0), imm_as_ctrl_ty()?)?,
709 Opcode::ImulImm => binary(DataValueExt::mul, arg(0), imm_as_ctrl_ty()?)?,
710 Opcode::UdivImm => binary_can_trap(DataValueExt::udiv, arg(0), imm_as_ctrl_ty()?)?,
711 Opcode::SdivImm => binary_can_trap(DataValueExt::sdiv, arg(0), imm_as_ctrl_ty()?)?,
712 Opcode::UremImm => binary_can_trap(DataValueExt::urem, arg(0), imm_as_ctrl_ty()?)?,
713 Opcode::SremImm => binary_can_trap(DataValueExt::srem, arg(0), imm_as_ctrl_ty()?)?,
714 Opcode::IrsubImm => binary(DataValueExt::sub, imm_as_ctrl_ty()?, arg(0))?,
715 Opcode::UaddOverflow => {
716 let (sum, carry) = arg(0).uadd_overflow(arg(1))?;
717 assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
718 }
719 Opcode::SaddOverflow => {
720 let (sum, carry) = arg(0).sadd_overflow(arg(1))?;
721 assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
722 }
723 Opcode::UsubOverflow => {
724 let (sum, carry) = arg(0).usub_overflow(arg(1))?;
725 assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
726 }
727 Opcode::SsubOverflow => {
728 let (sum, carry) = arg(0).ssub_overflow(arg(1))?;
729 assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
730 }
731 Opcode::UmulOverflow => {
732 let (sum, carry) = arg(0).umul_overflow(arg(1))?;
733 assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
734 }
735 Opcode::SmulOverflow => {
736 let (sum, carry) = arg(0).smul_overflow(arg(1))?;
737 assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
738 }
739 Opcode::SaddOverflowCin => {
740 let (mut sum, mut carry) = arg(0).sadd_overflow(arg(1))?;
741
742 if DataValueExt::into_bool(arg(2))? {
743 let (sum2, carry2) = sum.sadd_overflow(DataValueExt::int(1, ctrl_ty)?)?;
744 carry |= carry2;
745 sum = sum2;
746 }
747
748 assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
749 }
750 Opcode::UaddOverflowCin => {
751 let (mut sum, mut carry) = arg(0).uadd_overflow(arg(1))?;
752
753 if DataValueExt::into_bool(arg(2))? {
754 let (sum2, carry2) = sum.uadd_overflow(DataValueExt::int(1, ctrl_ty)?)?;
755 carry |= carry2;
756 sum = sum2;
757 }
758
759 assign_multiple(&[sum, DataValueExt::bool(carry, false, types::I8)?])
760 }
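        // Trapping unsigned add: overflow is detected by the wrapped sum comparing less than both
        // operands.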
761 Opcode::UaddOverflowTrap => {
762 let sum = DataValueExt::add(arg(0), arg(1))?;
763 let carry = sum < arg(0) && sum < arg(1);
764 if carry {
765 ControlFlow::Trap(CraneliftTrap::User(trap_code()))
766 } else {
767 assign(sum)
768 }
769 }
770 Opcode::SsubOverflowBin => {
771 let (mut sub, mut carry) = arg(0).ssub_overflow(arg(1))?;
772
773 if DataValueExt::into_bool(arg(2))? {
774 let (sub2, carry2) = sub.ssub_overflow(DataValueExt::int(1, ctrl_ty)?)?;
775 carry |= carry2;
776 sub = sub2;
777 }
778
779 assign_multiple(&[sub, DataValueExt::bool(carry, false, types::I8)?])
780 }
781 Opcode::UsubOverflowBin => {
782 let (mut sub, mut carry) = arg(0).usub_overflow(arg(1))?;
783
784 if DataValueExt::into_bool(arg(2))? {
785 let (sub2, carry2) = sub.usub_overflow(DataValueExt::int(1, ctrl_ty)?)?;
786 carry |= carry2;
787 sub = sub2;
788 }
789
790 assign_multiple(&[sub, DataValueExt::bool(carry, false, types::I8)?])
791 }
792 Opcode::Band => binary(DataValueExt::and, arg(0), arg(1))?,
793 Opcode::Bor => binary(DataValueExt::or, arg(0), arg(1))?,
794 Opcode::Bxor => binary(DataValueExt::xor, arg(0), arg(1))?,
795 Opcode::Bnot => unary(DataValueExt::not, arg(0))?,
796 Opcode::BandNot => binary(DataValueExt::and, arg(0), DataValueExt::not(arg(1))?)?,
797 Opcode::BorNot => binary(DataValueExt::or, arg(0), DataValueExt::not(arg(1))?)?,
798 Opcode::BxorNot => binary(DataValueExt::xor, arg(0), DataValueExt::not(arg(1))?)?,
799 Opcode::BandImm => binary(DataValueExt::and, arg(0), imm_as_ctrl_ty()?)?,
800 Opcode::BorImm => binary(DataValueExt::or, arg(0), imm_as_ctrl_ty()?)?,
801 Opcode::BxorImm => binary(DataValueExt::xor, arg(0), imm_as_ctrl_ty()?)?,
802 Opcode::Rotl => binary(DataValueExt::rotl, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
803 Opcode::Rotr => binary(DataValueExt::rotr, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
804 Opcode::RotlImm => binary(DataValueExt::rotl, arg(0), shift_amt(ctrl_ty, imm())?)?,
805 Opcode::RotrImm => binary(DataValueExt::rotr, arg(0), shift_amt(ctrl_ty, imm())?)?,
806 Opcode::Ishl => binary(DataValueExt::shl, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
807 Opcode::Ushr => binary(DataValueExt::ushr, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
808 Opcode::Sshr => binary(DataValueExt::sshr, arg(0), shift_amt(ctrl_ty, arg(1))?)?,
809 Opcode::IshlImm => binary(DataValueExt::shl, arg(0), shift_amt(ctrl_ty, imm())?)?,
810 Opcode::UshrImm => binary(DataValueExt::ushr, arg(0), shift_amt(ctrl_ty, imm())?)?,
811 Opcode::SshrImm => binary(DataValueExt::sshr, arg(0), shift_amt(ctrl_ty, imm())?)?,
812 Opcode::Bitrev => unary(DataValueExt::reverse_bits, arg(0))?,
813 Opcode::Bswap => unary(DataValueExt::swap_bytes, arg(0))?,
814 Opcode::Clz => unary(DataValueExt::leading_zeros, arg(0))?,
815 Opcode::Cls => {
816 let count = if arg(0) < DataValueExt::int(0, ctrl_ty)? {
817 arg(0).leading_ones()?
818 } else {
819 arg(0).leading_zeros()?
820 };
821 assign(DataValueExt::sub(count, DataValueExt::int(1, ctrl_ty)?)?)
822 }
823 Opcode::Ctz => unary(DataValueExt::trailing_zeros, arg(0))?,
824 Opcode::Popcnt => {
825 let count = if arg(0).ty().is_int() {
826 arg(0).count_ones()?
827 } else {
828 let lanes = extractlanes(&arg(0), ctrl_ty)?
829 .into_iter()
830 .map(|lane| lane.count_ones())
831 .collect::<ValueResult<SimdVec<DataValue>>>()?;
832 vectorizelanes(&lanes, ctrl_ty)?
833 };
834 assign(count)
835 }
836
837 Opcode::Fcmp => {
838 let arg0 = extractlanes(&arg(0), ctrl_ty)?;
839 let arg1 = extractlanes(&arg(1), ctrl_ty)?;
840
841 assign(vectorizelanes(
842 &(arg0
843 .into_iter()
844 .zip(arg1.into_iter())
845 .map(|(x, y)| {
846 DataValue::bool(
847 fcmp(inst.fp_cond_code().unwrap(), &x, &y).unwrap(),
848 ctrl_ty.is_vector(),
849 ctrl_ty.lane_type().as_truthy(),
850 )
851 })
852 .collect::<ValueResult<SimdVec<DataValue>>>()?),
853 ctrl_ty,
854 )?)
855 }
856 Opcode::Fadd => binary(DataValueExt::add, arg(0), arg(1))?,
857 Opcode::Fsub => binary(DataValueExt::sub, arg(0), arg(1))?,
858 Opcode::Fmul => binary(DataValueExt::mul, arg(0), arg(1))?,
859 Opcode::Fdiv => binary(DataValueExt::sdiv, arg(0), arg(1))?,
860 Opcode::Sqrt => unary(DataValueExt::sqrt, arg(0))?,
861 Opcode::Fma => {
862 let arg0 = extractlanes(&arg(0), ctrl_ty)?;
863 let arg1 = extractlanes(&arg(1), ctrl_ty)?;
864 let arg2 = extractlanes(&arg(2), ctrl_ty)?;
865
866 assign(vectorizelanes(
867 &(arg0
868 .into_iter()
869 .zip(arg1.into_iter())
870 .zip(arg2.into_iter())
871 .map(|((x, y), z)| DataValueExt::fma(x, y, z))
872 .collect::<ValueResult<SimdVec<DataValue>>>()?),
873 ctrl_ty,
874 )?)
875 }
876 Opcode::Fneg => unary(DataValueExt::neg, arg(0))?,
877 Opcode::Fabs => unary(DataValueExt::abs, arg(0))?,
878 Opcode::Fcopysign => binary(DataValueExt::copysign, arg(0), arg(1))?,
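        // Fmin/Fmax propagate NaN inputs and order signed zeros so that -0.0 is treated as less
        // than +0.0.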
879 Opcode::Fmin => assign(match (arg(0), arg(1)) {
880 (a, _) if a.is_nan()? => a,
881 (_, b) if b.is_nan()? => b,
882 (a, b) if a.is_zero()? && b.is_zero()? && a.is_negative()? => a,
883 (a, b) if a.is_zero()? && b.is_zero()? && b.is_negative()? => b,
884 (a, b) => a.smin(b)?,
885 }),
886 Opcode::Fmax => assign(match (arg(0), arg(1)) {
887 (a, _) if a.is_nan()? => a,
888 (_, b) if b.is_nan()? => b,
889 (a, b) if a.is_zero()? && b.is_zero()? && a.is_negative()? => b,
890 (a, b) if a.is_zero()? && b.is_zero()? && b.is_negative()? => a,
891 (a, b) => a.smax(b)?,
892 }),
893 Opcode::Ceil => unary(DataValueExt::ceil, arg(0))?,
894 Opcode::Floor => unary(DataValueExt::floor, arg(0))?,
895 Opcode::Trunc => unary(DataValueExt::trunc, arg(0))?,
896 Opcode::Nearest => unary(DataValueExt::nearest, arg(0))?,
897 Opcode::Bitcast | Opcode::ScalarToVector => {
898 let input_ty = inst_context.type_of(inst_context.args()[0]).unwrap();
899 let lanes = &if input_ty.is_vector() {
900 assert_eq!(
901 inst.memflags()
902 .expect("byte order flag to be set")
903 .endianness(Endianness::Little),
904 Endianness::Little,
                "Only little-endian bitcasts on vectors are supported"
906 );
907 extractlanes(&arg(0), ctrl_ty)?
908 } else {
909 extractlanes(&arg(0), input_ty)?
910 .into_iter()
911 .map(|x| DataValue::convert(x, ValueConversionKind::Exact(ctrl_ty.lane_type())))
912 .collect::<ValueResult<SimdVec<DataValue>>>()?
913 };
914 assign(match inst.opcode() {
915 Opcode::Bitcast => vectorizelanes(lanes, ctrl_ty)?,
916 Opcode::ScalarToVector => vectorizelanes_all(lanes, ctrl_ty)?,
917 _ => unreachable!(),
918 })
919 }
920 Opcode::Ireduce => assign(DataValueExt::convert(
921 arg(0),
922 ValueConversionKind::Truncate(ctrl_ty),
923 )?),
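        // Narrowing: clamp each lane of the concatenated inputs to the bounds of the half-width
        // lane type, then truncate to that type.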
924 Opcode::Snarrow | Opcode::Unarrow | Opcode::Uunarrow => {
925 let arg0 = extractlanes(&arg(0), ctrl_ty)?;
926 let arg1 = extractlanes(&arg(1), ctrl_ty)?;
927 let new_type = ctrl_ty.split_lanes().unwrap();
928 let (min, max) = new_type.bounds(inst.opcode() == Opcode::Snarrow);
929 let min: DataValue = DataValueExt::int(min as i128, ctrl_ty.lane_type())?;
930 let max: DataValue = DataValueExt::int(max as i128, ctrl_ty.lane_type())?;
931 let narrow = |mut lane: DataValue| -> ValueResult<DataValue> {
932 if inst.opcode() == Opcode::Uunarrow {
933 lane = DataValueExt::umax(lane, min.clone())?;
934 lane = DataValueExt::umin(lane, max.clone())?;
935 } else {
936 lane = DataValueExt::smax(lane, min.clone())?;
937 lane = DataValueExt::smin(lane, max.clone())?;
938 }
939 lane = lane.convert(ValueConversionKind::Truncate(new_type.lane_type()))?;
940 Ok(lane)
941 };
942 let new_vec = arg0
943 .into_iter()
944 .chain(arg1)
945 .map(|lane| narrow(lane))
946 .collect::<ValueResult<Vec<_>>>()?;
947 assign(vectorizelanes(&new_vec, new_type)?)
948 }
949 Opcode::Bmask => assign({
950 let bool = arg(0);
951 let bool_ty = ctrl_ty.as_truthy_pedantic();
952 let lanes = extractlanes(&bool, bool_ty)?
953 .into_iter()
954 .map(|lane| lane.convert(ValueConversionKind::Mask(ctrl_ty.lane_type())))
955 .collect::<ValueResult<SimdVec<DataValue>>>()?;
956 vectorizelanes(&lanes, ctrl_ty)?
957 }),
958 Opcode::Sextend => assign(DataValueExt::convert(
959 arg(0),
960 ValueConversionKind::SignExtend(ctrl_ty),
961 )?),
962 Opcode::Uextend => assign(DataValueExt::convert(
963 arg(0),
964 ValueConversionKind::ZeroExtend(ctrl_ty),
965 )?),
966 Opcode::Fpromote => assign(DataValueExt::convert(
967 arg(0),
968 ValueConversionKind::Exact(ctrl_ty),
969 )?),
970 Opcode::Fdemote => assign(DataValueExt::convert(
971 arg(0),
972 ValueConversionKind::RoundNearestEven(ctrl_ty),
973 )?),
974 Opcode::Shuffle => {
975 let mask = imm().into_array()?;
976 let a = DataValueExt::into_array(&arg(0))?;
977 let b = DataValueExt::into_array(&arg(1))?;
978 let mut new = [0u8; 16];
979 for i in 0..mask.len() {
980 if (mask[i] as usize) < a.len() {
981 new[i] = a[mask[i] as usize];
982 } else if (mask[i] as usize - a.len()) < b.len() {
983 new[i] = b[mask[i] as usize - a.len()];
                }
            }
986 assign(DataValueExt::vector(new, types::I8X16)?)
987 }
988 Opcode::Swizzle => {
989 let x = DataValueExt::into_array(&arg(0))?;
990 let s = DataValueExt::into_array(&arg(1))?;
991 let mut new = [0u8; 16];
992 for i in 0..new.len() {
993 if (s[i] as usize) < new.len() {
994 new[i] = x[s[i] as usize];
                }
            }
997 assign(DataValueExt::vector(new, types::I8X16)?)
998 }
999 Opcode::Splat => assign(splat(ctrl_ty, arg(0))?),
1000 Opcode::Insertlane => {
1001 let idx = imm().into_int_unsigned()? as usize;
1002 let mut vector = extractlanes(&arg(0), ctrl_ty)?;
1003 vector[idx] = arg(1);
1004 assign(vectorizelanes(&vector, ctrl_ty)?)
1005 }
1006 Opcode::Extractlane => {
1007 let idx = imm().into_int_unsigned()? as usize;
1008 let lanes = extractlanes(&arg(0), ctrl_ty)?;
1009 assign(lanes[idx].clone())
1010 }
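        // VhighBits: gather the most significant bit of every lane into an integer bitmask, with
        // bit i of the result taken from lane i.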
1011 Opcode::VhighBits => {
1012 let vector_type = inst_context
1015 .type_of(inst_context.args()[0])
1016 .unwrap()
1017 .as_int();
1018 let a = extractlanes(&arg(0), vector_type)?;
1019 let mut result: u128 = 0;
1020 for (i, val) in a.into_iter().enumerate() {
                let val = val.reverse_bits()?.into_int_unsigned()?;
                result |= (val & 1) << i;
1023 }
1024 assign(DataValueExt::int(result as i128, ctrl_ty)?)
1025 }
1026 Opcode::VanyTrue => {
1027 let simd_ty = ctrl_ty.as_int();
1028 let lane_ty = simd_ty.lane_type();
1029 let init = DataValue::bool(false, true, lane_ty)?;
1030 let any = fold_vector(arg(0), simd_ty, init.clone(), |acc, lane| acc.or(lane))?;
1031 assign(DataValue::bool(any != init, false, types::I8)?)
1032 }
1033 Opcode::VallTrue => assign(DataValue::bool(
1034 !(arg(0)
1035 .iter_lanes(ctrl_ty.as_int())?
1036 .try_fold(false, |acc, lane| {
1037 Ok::<bool, ValueError>(acc | lane.is_zero()?)
1038 })?),
1039 false,
1040 types::I8,
1041 )?),
1042 Opcode::SwidenLow | Opcode::SwidenHigh | Opcode::UwidenLow | Opcode::UwidenHigh => {
1043 let new_type = ctrl_ty.merge_lanes().unwrap();
1044 let conv_type = match inst.opcode() {
1045 Opcode::SwidenLow | Opcode::SwidenHigh => {
1046 ValueConversionKind::SignExtend(new_type.lane_type())
1047 }
1048 Opcode::UwidenLow | Opcode::UwidenHigh => {
1049 ValueConversionKind::ZeroExtend(new_type.lane_type())
1050 }
1051 _ => unreachable!(),
1052 };
1053 let vec_iter = extractlanes(&arg(0), ctrl_ty)?.into_iter();
1054 let new_vec = match inst.opcode() {
1055 Opcode::SwidenLow | Opcode::UwidenLow => vec_iter
1056 .take(new_type.lane_count() as usize)
1057 .map(|lane| lane.convert(conv_type.clone()))
1058 .collect::<ValueResult<Vec<_>>>()?,
1059 Opcode::SwidenHigh | Opcode::UwidenHigh => vec_iter
1060 .skip(new_type.lane_count() as usize)
1061 .map(|lane| lane.convert(conv_type.clone()))
1062 .collect::<ValueResult<Vec<_>>>()?,
1063 _ => unreachable!(),
1064 };
1065 assign(vectorizelanes(&new_vec, new_type)?)
1066 }
1067 Opcode::FcvtToUint | Opcode::FcvtToSint => {
1068 if arg(0).is_nan()? {
1070 return Ok(ControlFlow::Trap(CraneliftTrap::User(
1071 TrapCode::BAD_CONVERSION_TO_INTEGER,
1072 )));
1073 }
1074 let x = arg(0).into_float()? as i128;
1075 let is_signed = inst.opcode() == Opcode::FcvtToSint;
1076 let (min, max) = ctrl_ty.bounds(is_signed);
1077 let overflow = if is_signed {
1078 x < (min as i128) || x > (max as i128)
1079 } else {
1080 x < 0 || (x as u128) > max
1081 };
1082 if overflow {
1084 return Ok(ControlFlow::Trap(CraneliftTrap::User(
1085 TrapCode::INTEGER_OVERFLOW,
1086 )));
1087 }
1088 assign(DataValueExt::int(x, ctrl_ty)?)
1090 }
1091 Opcode::FcvtToUintSat | Opcode::FcvtToSintSat => {
1092 let in_ty = inst_context.type_of(inst_context.args()[0]).unwrap();
1093 let cvt = |x: DataValue| -> ValueResult<DataValue> {
1094 if x.is_nan()? {
1096 DataValue::int(0, ctrl_ty.lane_type())
1097 } else {
1098 let is_signed = inst.opcode() == Opcode::FcvtToSintSat;
1099 let (min, max) = ctrl_ty.bounds(is_signed);
1100 let x = x.into_float()? as i128;
1101 let x = if is_signed {
1102 let x = i128::max(x, min as i128);
1103 let x = i128::min(x, max as i128);
1104 x
1105 } else {
1106 let x = if x < 0 { 0 } else { x };
1107 let x = u128::min(x as u128, max);
1108 x as i128
1109 };
1110
1111 DataValue::int(x, ctrl_ty.lane_type())
1112 }
1113 };
1114
1115 let x = extractlanes(&arg(0), in_ty)?;
1116
1117 assign(vectorizelanes(
1118 &x.into_iter()
1119 .map(cvt)
1120 .collect::<ValueResult<SimdVec<DataValue>>>()?,
1121 ctrl_ty,
1122 )?)
1123 }
1124 Opcode::FcvtFromUint | Opcode::FcvtFromSint => {
1125 let x = extractlanes(
1126 &arg(0),
1127 inst_context.type_of(inst_context.args()[0]).unwrap(),
1128 )?;
1129 let bits = |x: DataValue| -> ValueResult<u64> {
1130 Ok(match ctrl_ty.lane_type() {
1131 types::F32 => (if inst.opcode() == Opcode::FcvtFromUint {
1132 x.into_int_unsigned()? as f32
1133 } else {
1134 x.into_int_signed()? as f32
1135 })
1136 .to_bits() as u64,
1137 types::F64 => (if inst.opcode() == Opcode::FcvtFromUint {
1138 x.into_int_unsigned()? as f64
1139 } else {
1140 x.into_int_signed()? as f64
1141 })
1142 .to_bits(),
1143 _ => unimplemented!("unexpected conversion to {:?}", ctrl_ty.lane_type()),
1144 })
1145 };
1146 assign(vectorizelanes(
1147 &x.into_iter()
1148 .map(|x| DataValue::float(bits(x)?, ctrl_ty.lane_type()))
1149 .collect::<ValueResult<SimdVec<DataValue>>>()?,
1150 ctrl_ty,
1151 )?)
1152 }
1153 Opcode::FvpromoteLow => {
1154 let in_ty = inst_context.type_of(inst_context.args()[0]).unwrap();
1155 assert_eq!(in_ty, types::F32X4);
1156 let out_ty = types::F64X2;
1157 let x = extractlanes(&arg(0), in_ty)?;
1158 assign(vectorizelanes(
1159 &x[..(out_ty.lane_count() as usize)]
1160 .into_iter()
1161 .map(|x| {
1162 DataValue::convert(
1163 x.to_owned(),
1164 ValueConversionKind::Exact(out_ty.lane_type()),
1165 )
1166 })
1167 .collect::<ValueResult<SimdVec<DataValue>>>()?,
1168 out_ty,
1169 )?)
1170 }
1171 Opcode::Fvdemote => {
1172 let in_ty = inst_context.type_of(inst_context.args()[0]).unwrap();
1173 assert_eq!(in_ty, types::F64X2);
1174 let out_ty = types::F32X4;
1175 let x = extractlanes(&arg(0), in_ty)?;
1176 let x = &mut x
1177 .into_iter()
1178 .map(|x| {
1179 DataValue::convert(x, ValueConversionKind::RoundNearestEven(out_ty.lane_type()))
1180 })
1181 .collect::<ValueResult<SimdVec<DataValue>>>()?;
1182 for _ in 0..(out_ty.lane_count() as usize - x.len()) {
1184 x.push(DataValue::float(0, out_ty.lane_type())?);
1185 }
1186 assign(vectorizelanes(x, out_ty)?)
1187 }
1188 Opcode::Isplit => assign_multiple(&[
1189 DataValueExt::convert(arg(0), ValueConversionKind::Truncate(types::I64))?,
1190 DataValueExt::convert(arg(0), ValueConversionKind::ExtractUpper(types::I64))?,
1191 ]),
1192 Opcode::Iconcat => assign(DataValueExt::concat(arg(0), arg(1))?),
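        // Atomic read-modify-write: load the previous value, apply the operation, store the new
        // value, and assign the previous value. The interpreter performs these steps sequentially.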
1193 Opcode::AtomicRmw => {
1194 let op = inst.atomic_rmw_op().unwrap();
1195 let val = arg(1);
1196 let addr = arg(0).into_int_unsigned()? as u64;
1197 let mem_flags = inst.memflags().expect("instruction to have memory flags");
1198 let loaded = Address::try_from(addr)
1199 .and_then(|addr| state.checked_load(addr, ctrl_ty, mem_flags));
1200 let prev_val = match loaded {
1201 Ok(v) => v,
1202 Err(e) => return Ok(ControlFlow::Trap(memerror_to_trap(e))),
1203 };
1204 let prev_val_to_assign = prev_val.clone();
1205 let replace = match op {
1206 AtomicRmwOp::Xchg => Ok(val),
1207 AtomicRmwOp::Add => DataValueExt::add(prev_val, val),
1208 AtomicRmwOp::Sub => DataValueExt::sub(prev_val, val),
1209 AtomicRmwOp::And => DataValueExt::and(prev_val, val),
1210 AtomicRmwOp::Or => DataValueExt::or(prev_val, val),
1211 AtomicRmwOp::Xor => DataValueExt::xor(prev_val, val),
1212 AtomicRmwOp::Nand => DataValueExt::and(prev_val, val).and_then(DataValue::not),
1213 AtomicRmwOp::Smax => DataValueExt::smax(prev_val, val),
1214 AtomicRmwOp::Smin => DataValueExt::smin(prev_val, val),
1215 AtomicRmwOp::Umax => DataValueExt::umax(val, prev_val),
1216 AtomicRmwOp::Umin => DataValueExt::umin(val, prev_val),
1217 }?;
1218 let stored = Address::try_from(addr)
1219 .and_then(|addr| state.checked_store(addr, replace, mem_flags));
1220 assign_or_memtrap(stored.map(|_| prev_val_to_assign))
1221 }
1222 Opcode::AtomicCas => {
1223 let addr = arg(0).into_int_unsigned()? as u64;
1224 let mem_flags = inst.memflags().expect("instruction to have memory flags");
1225 let loaded = Address::try_from(addr)
1226 .and_then(|addr| state.checked_load(addr, ctrl_ty, mem_flags));
1227 let loaded_val = match loaded {
1228 Ok(v) => v,
1229 Err(e) => return Ok(ControlFlow::Trap(memerror_to_trap(e))),
1230 };
1231 let expected_val = arg(1);
1232 let val_to_assign = if loaded_val == expected_val {
1233 let val_to_store = arg(2);
1234 Address::try_from(addr)
1235 .and_then(|addr| state.checked_store(addr, val_to_store, mem_flags))
1236 .map(|_| loaded_val)
1237 } else {
1238 Ok(loaded_val)
1239 };
1240 assign_or_memtrap(val_to_assign)
1241 }
1242 Opcode::AtomicLoad => {
1243 let load_ty = inst_context.controlling_type().unwrap();
1244 let addr = arg(0).into_int_unsigned()? as u64;
1245 let mem_flags = inst.memflags().expect("instruction to have memory flags");
1246 assign_or_memtrap(
1248 Address::try_from(addr)
1249 .and_then(|addr| state.checked_load(addr, load_ty, mem_flags)),
1250 )
1251 }
1252 Opcode::AtomicStore => {
1253 let val = arg(0);
1254 let addr = arg(1).into_int_unsigned()? as u64;
1255 let mem_flags = inst.memflags().expect("instruction to have memory flags");
1256 continue_or_memtrap(
1258 Address::try_from(addr).and_then(|addr| state.checked_store(addr, val, mem_flags)),
1259 )
1260 }
1261 Opcode::Fence => {
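            // The interpreter executes instructions sequentially, so a fence has no additional
            // effect here.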
1262 ControlFlow::Continue
1265 }
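        // Fixed-point saturating multiply (Q-format): multiply signed lanes at double width, round
        // by adding 1 << (bits - 2) before shifting right by bits - 1, then saturate back to the
        // lane type.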
1266 Opcode::SqmulRoundSat => {
1267 let lane_type = ctrl_ty.lane_type();
1268 let double_width = ctrl_ty.double_width().unwrap().lane_type();
1269 let arg0 = extractlanes(&arg(0), ctrl_ty)?;
1270 let arg1 = extractlanes(&arg(1), ctrl_ty)?;
1271 let (min, max) = lane_type.bounds(true);
1272 let min: DataValue = DataValueExt::int(min as i128, double_width)?;
1273 let max: DataValue = DataValueExt::int(max as i128, double_width)?;
1274 let new_vec = arg0
1275 .into_iter()
1276 .zip(arg1.into_iter())
1277 .map(|(x, y)| {
1278 let x = x.into_int_signed()?;
1279 let y = y.into_int_signed()?;
1280 let z: DataValue = DataValueExt::int(
1282 (x * y + (1 << (lane_type.bits() - 2))) >> (lane_type.bits() - 1),
1283 double_width,
1284 )?;
1285 let z = DataValueExt::smin(z, max.clone())?;
1287 let z = DataValueExt::smax(z, min.clone())?;
1288 let z = z.convert(ValueConversionKind::Truncate(lane_type))?;
1289 Ok(z)
1290 })
1291 .collect::<ValueResult<SimdVec<_>>>()?;
1292 assign(vectorizelanes(&new_vec, ctrl_ty)?)
1293 }
1294 Opcode::IaddPairwise => {
1295 assign(binary_pairwise(arg(0), arg(1), ctrl_ty, DataValueExt::add)?)
1296 }
1297 Opcode::ExtractVector => {
1298 unimplemented!("ExtractVector not supported");
1299 }
1300 Opcode::GetFramePointer => unimplemented!("GetFramePointer"),
1301 Opcode::GetStackPointer => unimplemented!("GetStackPointer"),
1302 Opcode::GetReturnAddress => unimplemented!("GetReturnAddress"),
1303 Opcode::X86Pshufb => unimplemented!("X86Pshufb"),
1304 Opcode::X86Blendv => unimplemented!("X86Blendv"),
1305 Opcode::X86Pmulhrsw => unimplemented!("X86Pmulhrsw"),
1306 Opcode::X86Pmaddubsw => unimplemented!("X86Pmaddubsw"),
1307 Opcode::X86Cvtt2dq => unimplemented!("X86Cvtt2dq"),
1308 Opcode::StackSwitch => unimplemented!("StackSwitch"),
1309
1310 Opcode::TryCall => unimplemented!("TryCall"),
1311 Opcode::TryCallIndirect => unimplemented!("TryCallIndirect"),
1312 })
1313}
1314
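/// Errors that prevent the interpreter itself from stepping, as opposed to guest traps, which are
/// reported through [ControlFlow::Trap].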
1315#[derive(Error, Debug)]
1316pub enum StepError {
1317 #[error("unable to retrieve value from SSA reference: {0}")]
1318 UnknownValue(ValueRef),
1319 #[error("unable to find the following function: {0}")]
1320 UnknownFunction(FuncRef),
1321 #[error("cannot step with these values")]
1322 ValueError(#[from] ValueError),
1323 #[error("failed to access memory")]
1324 MemoryError(#[from] MemoryError),
1325}
1326
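/// The control-flow effect of interpreting a single instruction; the caller of [step] decides how
/// to proceed based on this value.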
1327#[derive(Debug, PartialEq)]
1330pub enum ControlFlow<'a> {
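    /// Assign these values to the instruction's results and continue with the next instruction.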
1331 Assign(SmallVec<[DataValue; 1]>),
1334 Continue,
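    /// Transfer control to the given block, passing these values as its block arguments.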
1337 ContinueAt(Block, SmallVec<[DataValue; 1]>),
1342 Call(&'a Function, SmallVec<[DataValue; 1]>),
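    /// Tail-call the given function with these arguments, replacing the current frame.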
1344 ReturnCall(&'a Function, SmallVec<[DataValue; 1]>),
1346 Return(SmallVec<[DataValue; 1]>),
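    /// The instruction trapped.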
1348 Trap(CraneliftTrap),
1351}
1352
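/// Traps that the interpreter can raise while executing guest code.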
1353#[derive(Error, Debug, PartialEq, Eq, Hash)]
1354pub enum CraneliftTrap {
1355 #[error("user code: {0}")]
1356 User(TrapCode),
1357 #[error("bad signature")]
1358 BadSignature,
1359 #[error("unreachable code has been reached")]
1360 UnreachableCodeReached,
1361 #[error("heap is misaligned")]
1362 HeapMisaligned,
1363 #[error("user debug")]
1364 Debug,
1365}
1366
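/// Compare two values (lane-wise for vectors) using the given integer condition code, producing a
/// truthy result of the comparison's destination type.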
1367fn icmp(
1369 ctrl_ty: types::Type,
1370 code: IntCC,
1371 left: &DataValue,
1372 right: &DataValue,
1373) -> ValueResult<DataValue> {
1374 let cmp = |bool_ty: types::Type,
1375 code: IntCC,
1376 left: &DataValue,
1377 right: &DataValue|
1378 -> ValueResult<DataValue> {
1379 Ok(DataValueExt::bool(
1380 match code {
1381 IntCC::Equal => left == right,
1382 IntCC::NotEqual => left != right,
1383 IntCC::SignedGreaterThan => left > right,
1384 IntCC::SignedGreaterThanOrEqual => left >= right,
1385 IntCC::SignedLessThan => left < right,
1386 IntCC::SignedLessThanOrEqual => left <= right,
1387 IntCC::UnsignedGreaterThan => {
1388 left.clone().into_int_unsigned()? > right.clone().into_int_unsigned()?
1389 }
1390 IntCC::UnsignedGreaterThanOrEqual => {
1391 left.clone().into_int_unsigned()? >= right.clone().into_int_unsigned()?
1392 }
1393 IntCC::UnsignedLessThan => {
1394 left.clone().into_int_unsigned()? < right.clone().into_int_unsigned()?
1395 }
1396 IntCC::UnsignedLessThanOrEqual => {
1397 left.clone().into_int_unsigned()? <= right.clone().into_int_unsigned()?
1398 }
1399 },
1400 ctrl_ty.is_vector(),
1401 bool_ty,
1402 )?)
1403 };
1404
1405 let dst_ty = ctrl_ty.as_truthy();
1406 let left = extractlanes(left, ctrl_ty)?;
1407 let right = extractlanes(right, ctrl_ty)?;
1408
1409 let res = left
1410 .into_iter()
1411 .zip(right.into_iter())
1412 .map(|(l, r)| cmp(dst_ty.lane_type(), code, &l, &r))
1413 .collect::<ValueResult<SimdVec<DataValue>>>()?;
1414
1415 Ok(vectorizelanes(&res, dst_ty)?)
1416}
1417
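/// Evaluate a floating-point condition code on two scalar values; the unordered cases rely on
/// `DataValueExt::uno` to detect NaN operands.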
1418fn fcmp(code: FloatCC, left: &DataValue, right: &DataValue) -> ValueResult<bool> {
1420 Ok(match code {
1421 FloatCC::Ordered => left == right || left < right || left > right,
1422 FloatCC::Unordered => DataValueExt::uno(left, right)?,
1423 FloatCC::Equal => left == right,
1424 FloatCC::NotEqual => left < right || left > right || DataValueExt::uno(left, right)?,
1425 FloatCC::OrderedNotEqual => left < right || left > right,
1426 FloatCC::UnorderedOrEqual => left == right || DataValueExt::uno(left, right)?,
1427 FloatCC::LessThan => left < right,
1428 FloatCC::LessThanOrEqual => left <= right,
1429 FloatCC::GreaterThan => left > right,
1430 FloatCC::GreaterThanOrEqual => left >= right,
1431 FloatCC::UnorderedOrLessThan => DataValueExt::uno(left, right)? || left < right,
1432 FloatCC::UnorderedOrLessThanOrEqual => DataValueExt::uno(left, right)? || left <= right,
1433 FloatCC::UnorderedOrGreaterThan => DataValueExt::uno(left, right)? || left > right,
1434 FloatCC::UnorderedOrGreaterThanOrEqual => DataValueExt::uno(left, right)? || left >= right,
1435 })
1436}
1437
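/// Lane storage used when a vector value is split apart for lane-wise interpretation.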
1438pub type SimdVec<DataValue> = SmallVec<[DataValue; 4]>;
1439
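/// Split a vector `DataValue` into its lanes, assembling each lane from little-endian bytes; a
/// scalar value is returned as a single-element vector.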
1440pub(crate) fn extractlanes(
1443 x: &DataValue,
1444 vector_type: types::Type,
1445) -> ValueResult<SimdVec<DataValue>> {
1446 let lane_type = vector_type.lane_type();
1447 let mut lanes = SimdVec::new();
1448 if !x.ty().is_vector() {
1450 lanes.push(x.clone());
1451 return Ok(lanes);
1452 }
1453
1454 let iterations = match lane_type {
1455 types::I8 => 1,
1456 types::I16 | types::F16 => 2,
1457 types::I32 | types::F32 => 4,
1458 types::I64 | types::F64 => 8,
        _ => unimplemented!("vectors with lanes wider than 64 bits are currently unsupported."),
1460 };
1461
1462 let x = x.into_array()?;
1463 for i in 0..vector_type.lane_count() {
1464 let mut lane: i128 = 0;
1465 for j in 0..iterations {
1466 lane += (x[((i * iterations) + j) as usize] as i128) << (8 * j);
1467 }
1468
1469 let lane_val: DataValue = if lane_type.is_float() {
1470 DataValueExt::float(lane as u64, lane_type)?
1471 } else {
1472 DataValueExt::int(lane, lane_type)?
1473 };
1474 lanes.push(lane_val);
1475 }
    Ok(lanes)
1477}
1478
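/// Rebuild a `DataValue` from lanes; a single lane is returned as a scalar rather than a vector.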
1479fn vectorizelanes(x: &[DataValue], vector_type: types::Type) -> ValueResult<DataValue> {
1482 if x.len() == 1 {
1484 Ok(x[0].clone())
1485 } else {
1486 vectorizelanes_all(x, vector_type)
1487 }
1488}
1489
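/// Rebuild a vector `DataValue` from lanes, even when only a single lane is present.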
1490fn vectorizelanes_all(x: &[DataValue], vector_type: types::Type) -> ValueResult<DataValue> {
1492 let lane_type = vector_type.lane_type();
1493 let iterations = match lane_type {
1494 types::I8 => 1,
1495 types::I16 | types::F16 => 2,
1496 types::I32 | types::F32 => 4,
1497 types::I64 | types::F64 => 8,
        _ => unimplemented!("vectors with lanes wider than 64 bits are currently unsupported."),
1499 };
1500 let mut result: [u8; 16] = [0; 16];
1501 for (i, val) in x.iter().enumerate() {
1502 let lane_val: i128 = val
1503 .clone()
1504 .convert(ValueConversionKind::Exact(lane_type.as_int()))?
1505 .into_int_unsigned()? as i128;
1506
1507 for j in 0..iterations {
1508 result[(i * iterations) + j] = (lane_val >> (8 * j)) as u8;
1509 }
1510 }
1511 DataValueExt::vector(result, vector_type)
1512}
1513
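/// Fold `op` over the lanes of a vector value, starting from `init`.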
1514fn fold_vector<F>(v: DataValue, ty: types::Type, init: DataValue, op: F) -> ValueResult<DataValue>
1516where
1517 F: FnMut(DataValue, DataValue) -> ValueResult<DataValue>,
1518{
1519 extractlanes(&v, ty)?.into_iter().try_fold(init, op)
1520}
1521
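/// Apply a unary operation lane-wise and reassemble the result.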
1522fn unary_arith<F>(x: DataValue, vector_type: types::Type, op: F) -> ValueResult<DataValue>
1524where
1525 F: Fn(DataValue) -> ValueResult<DataValue>,
1526{
1527 let arg = extractlanes(&x, vector_type)?;
1528
1529 let result = arg
1530 .into_iter()
1531 .map(|arg| Ok(op(arg)?))
1532 .collect::<ValueResult<SimdVec<DataValue>>>()?;
1533
1534 vectorizelanes(&result, vector_type)
1535}
1536
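/// Apply a binary operation lane-wise over two values of `vector_type` and reassemble the result.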
1537fn binary_arith<F>(
1539 x: DataValue,
1540 y: DataValue,
1541 vector_type: types::Type,
1542 op: F,
1543) -> ValueResult<DataValue>
1544where
1545 F: Fn(DataValue, DataValue) -> ValueResult<DataValue>,
1546{
1547 let arg0 = extractlanes(&x, vector_type)?;
1548 let arg1 = extractlanes(&y, vector_type)?;
1549
1550 let result = arg0
1551 .into_iter()
1552 .zip(arg1)
1553 .map(|(lhs, rhs)| Ok(op(lhs, rhs)?))
1554 .collect::<ValueResult<SimdVec<DataValue>>>()?;
1555
1556 vectorizelanes(&result, vector_type)
1557}
1558
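/// Apply a binary operation to adjacent pairs of lanes: the low half of the result comes from
/// pairs of `x`, the high half from pairs of `y` (as used by `iadd_pairwise`).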
1559fn binary_pairwise<F>(
1563 x: DataValue,
1564 y: DataValue,
1565 vector_type: types::Type,
1566 op: F,
1567) -> ValueResult<DataValue>
1568where
1569 F: Fn(DataValue, DataValue) -> ValueResult<DataValue>,
1570{
1571 let arg0 = extractlanes(&x, vector_type)?;
1572 let arg1 = extractlanes(&y, vector_type)?;
1573
1574 let result = arg0
1575 .chunks(2)
1576 .chain(arg1.chunks(2))
1577 .map(|pair| op(pair[0].clone(), pair[1].clone()))
1578 .collect::<ValueResult<SimdVec<DataValue>>>()?;
1579
1580 vectorizelanes(&result, vector_type)
1581}
1582
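/// Bitwise select: for each bit, take the bit from `x` where `c` is 1 and from `y` where `c` is 0,
/// i.e. `(c & x) | (!c & y)`. For example, `c = 0b1100`, `x = 0b1010`, `y = 0b0110` yields `0b1010`.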
1583fn bitselect(c: DataValue, x: DataValue, y: DataValue) -> ValueResult<DataValue> {
1584 let mask_x = DataValueExt::and(c.clone(), x)?;
1585 let mask_y = DataValueExt::and(DataValueExt::not(c)?, y)?;
1586 DataValueExt::or(mask_x, mask_y)
1587}
1588
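/// Replicate `val` across every lane of the vector type `ty`.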
1589fn splat(ty: Type, val: DataValue) -> ValueResult<DataValue> {
1590 let mut new_vector = SimdVec::new();
1591 for _ in 0..ty.lane_count() {
1592 new_vector.push(val.clone());
1593 }
1594 vectorizelanes(&new_vector, ty)
1595}
1596
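/// Convert a shift amount to the lane type of `ty` and splat it across the lanes so shifts and
/// rotates can be applied lane-wise.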
1597fn shift_amt(ty: Type, val: DataValue) -> ValueResult<DataValue> {
1600 splat(ty, val.convert(ValueConversionKind::Exact(ty.lane_type()))?)
1601}