Trait InstBuilder
pub trait InstBuilder<'f>: InstBuilderBase<'f> {
// Provided methods
fn jump(self, block_call_label: Block, block_call_args: &[Value]) -> Inst { ... }
fn brif(
self,
c: Value,
block_then_label: Block,
block_then_args: &[Value],
block_else_label: Block,
block_else_args: &[Value],
) -> Inst { ... }
fn br_table(self, x: Value, JT: JumpTable) -> Inst { ... }
fn debugtrap(self) -> Inst { ... }
fn trap<T1>(self, code: T1) -> Inst
where T1: Into<TrapCode> { ... }
fn trapz<T1>(self, c: Value, code: T1) -> Inst
where T1: Into<TrapCode> { ... }
fn trapnz<T1>(self, c: Value, code: T1) -> Inst
where T1: Into<TrapCode> { ... }
fn return_(self, rvals: &[Value]) -> Inst { ... }
fn call(self, FN: FuncRef, args: &[Value]) -> Inst { ... }
fn call_indirect(self, SIG: SigRef, callee: Value, args: &[Value]) -> Inst { ... }
fn return_call(self, FN: FuncRef, args: &[Value]) -> Inst { ... }
fn return_call_indirect(
self,
SIG: SigRef,
callee: Value,
args: &[Value],
) -> Inst { ... }
fn func_addr(self, iAddr: Type, FN: FuncRef) -> Value { ... }
fn splat(self, TxN: Type, x: Value) -> Value { ... }
fn swizzle(self, x: Value, y: Value) -> Value { ... }
fn x86_pshufb(self, x: Value, y: Value) -> Value { ... }
fn insertlane<T1>(self, x: Value, y: Value, Idx: T1) -> Value
where T1: Into<u8> { ... }
fn extractlane<T1>(self, x: Value, Idx: T1) -> Value
where T1: Into<u8> { ... }
fn smin(self, x: Value, y: Value) -> Value { ... }
fn umin(self, x: Value, y: Value) -> Value { ... }
fn smax(self, x: Value, y: Value) -> Value { ... }
fn umax(self, x: Value, y: Value) -> Value { ... }
fn avg_round(self, x: Value, y: Value) -> Value { ... }
fn uadd_sat(self, x: Value, y: Value) -> Value { ... }
fn sadd_sat(self, x: Value, y: Value) -> Value { ... }
fn usub_sat(self, x: Value, y: Value) -> Value { ... }
fn ssub_sat(self, x: Value, y: Value) -> Value { ... }
fn load<T1, T2>(
self,
Mem: Type,
MemFlags: T1,
p: Value,
Offset: T2,
) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn store<T1, T2>(self, MemFlags: T1, x: Value, p: Value, Offset: T2) -> Inst
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn uload8<T1, T2>(
self,
iExt8: Type,
MemFlags: T1,
p: Value,
Offset: T2,
) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn sload8<T1, T2>(
self,
iExt8: Type,
MemFlags: T1,
p: Value,
Offset: T2,
) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn istore8<T1, T2>(
self,
MemFlags: T1,
x: Value,
p: Value,
Offset: T2,
) -> Inst
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn uload16<T1, T2>(
self,
iExt16: Type,
MemFlags: T1,
p: Value,
Offset: T2,
) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn sload16<T1, T2>(
self,
iExt16: Type,
MemFlags: T1,
p: Value,
Offset: T2,
) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn istore16<T1, T2>(
self,
MemFlags: T1,
x: Value,
p: Value,
Offset: T2,
) -> Inst
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn uload32<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn sload32<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn istore32<T1, T2>(
self,
MemFlags: T1,
x: Value,
p: Value,
Offset: T2,
) -> Inst
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn stack_switch(
self,
store_context_ptr: Value,
load_context_ptr: Value,
in_payload0: Value,
) -> Value { ... }
fn uload8x8<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn sload8x8<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn uload16x4<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn sload16x4<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn uload32x2<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn sload32x2<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
where T1: Into<MemFlags>,
T2: Into<Offset32> { ... }
fn stack_load<T1>(self, Mem: Type, SS: StackSlot, Offset: T1) -> Value
where T1: Into<Offset32> { ... }
fn stack_store<T1>(self, x: Value, SS: StackSlot, Offset: T1) -> Inst
where T1: Into<Offset32> { ... }
fn stack_addr<T1>(self, iAddr: Type, SS: StackSlot, Offset: T1) -> Value
where T1: Into<Offset32> { ... }
fn dynamic_stack_load(self, Mem: Type, DSS: DynamicStackSlot) -> Value { ... }
fn dynamic_stack_store(self, x: Value, DSS: DynamicStackSlot) -> Inst { ... }
fn dynamic_stack_addr(self, iAddr: Type, DSS: DynamicStackSlot) -> Value { ... }
fn global_value(self, Mem: Type, GV: GlobalValue) -> Value { ... }
fn symbol_value(self, Mem: Type, GV: GlobalValue) -> Value { ... }
fn tls_value(self, Mem: Type, GV: GlobalValue) -> Value { ... }
fn get_pinned_reg(self, iAddr: Type) -> Value { ... }
fn set_pinned_reg(self, addr: Value) -> Inst { ... }
fn get_frame_pointer(self, iAddr: Type) -> Value { ... }
fn get_stack_pointer(self, iAddr: Type) -> Value { ... }
fn get_return_address(self, iAddr: Type) -> Value { ... }
fn iconst<T1>(self, NarrowInt: Type, N: T1) -> Value
where T1: Into<Imm64> { ... }
fn f16const<T1>(self, N: T1) -> Value
where T1: Into<Ieee16> { ... }
fn f32const<T1>(self, N: T1) -> Value
where T1: Into<Ieee32> { ... }
fn f64const<T1>(self, N: T1) -> Value
where T1: Into<Ieee64> { ... }
fn f128const<T1>(self, N: T1) -> Value
where T1: Into<Constant> { ... }
fn vconst<T1>(self, TxN: Type, N: T1) -> Value
where T1: Into<Constant> { ... }
fn shuffle<T1>(self, a: Value, b: Value, mask: T1) -> Value
where T1: Into<Immediate> { ... }
fn nop(self) -> Inst { ... }
fn select(self, c: Value, x: Value, y: Value) -> Value { ... }
fn select_spectre_guard(self, c: Value, x: Value, y: Value) -> Value { ... }
fn bitselect(self, c: Value, x: Value, y: Value) -> Value { ... }
fn x86_blendv(self, c: Value, x: Value, y: Value) -> Value { ... }
fn vany_true(self, a: Value) -> Value { ... }
fn vall_true(self, a: Value) -> Value { ... }
fn vhigh_bits(self, NarrowInt: Type, a: Value) -> Value { ... }
fn icmp<T1>(self, Cond: T1, x: Value, y: Value) -> Value
where T1: Into<IntCC> { ... }
fn icmp_imm<T1, T2>(self, Cond: T1, x: Value, Y: T2) -> Value
where T1: Into<IntCC>,
T2: Into<Imm64> { ... }
fn iadd(self, x: Value, y: Value) -> Value { ... }
fn isub(self, x: Value, y: Value) -> Value { ... }
fn ineg(self, x: Value) -> Value { ... }
fn iabs(self, x: Value) -> Value { ... }
fn imul(self, x: Value, y: Value) -> Value { ... }
fn umulhi(self, x: Value, y: Value) -> Value { ... }
fn smulhi(self, x: Value, y: Value) -> Value { ... }
fn sqmul_round_sat(self, x: Value, y: Value) -> Value { ... }
fn x86_pmulhrsw(self, x: Value, y: Value) -> Value { ... }
fn udiv(self, x: Value, y: Value) -> Value { ... }
fn sdiv(self, x: Value, y: Value) -> Value { ... }
fn urem(self, x: Value, y: Value) -> Value { ... }
fn srem(self, x: Value, y: Value) -> Value { ... }
fn iadd_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn imul_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn udiv_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn sdiv_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn urem_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn srem_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn irsub_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn sadd_overflow_cin(
self,
x: Value,
y: Value,
c_in: Value,
) -> (Value, Value) { ... }
fn uadd_overflow_cin(
self,
x: Value,
y: Value,
c_in: Value,
) -> (Value, Value) { ... }
fn uadd_overflow(self, x: Value, y: Value) -> (Value, Value) { ... }
fn sadd_overflow(self, x: Value, y: Value) -> (Value, Value) { ... }
fn usub_overflow(self, x: Value, y: Value) -> (Value, Value) { ... }
fn ssub_overflow(self, x: Value, y: Value) -> (Value, Value) { ... }
fn umul_overflow(self, x: Value, y: Value) -> (Value, Value) { ... }
fn smul_overflow(self, x: Value, y: Value) -> (Value, Value) { ... }
fn uadd_overflow_trap<T1>(self, x: Value, y: Value, code: T1) -> Value
where T1: Into<TrapCode> { ... }
fn ssub_overflow_bin(
self,
x: Value,
y: Value,
b_in: Value,
) -> (Value, Value) { ... }
fn usub_overflow_bin(
self,
x: Value,
y: Value,
b_in: Value,
) -> (Value, Value) { ... }
fn band(self, x: Value, y: Value) -> Value { ... }
fn bor(self, x: Value, y: Value) -> Value { ... }
fn bxor(self, x: Value, y: Value) -> Value { ... }
fn bnot(self, x: Value) -> Value { ... }
fn band_not(self, x: Value, y: Value) -> Value { ... }
fn bor_not(self, x: Value, y: Value) -> Value { ... }
fn bxor_not(self, x: Value, y: Value) -> Value { ... }
fn band_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn bor_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn bxor_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn rotl(self, x: Value, y: Value) -> Value { ... }
fn rotr(self, x: Value, y: Value) -> Value { ... }
fn rotl_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn rotr_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn ishl(self, x: Value, y: Value) -> Value { ... }
fn ushr(self, x: Value, y: Value) -> Value { ... }
fn sshr(self, x: Value, y: Value) -> Value { ... }
fn ishl_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn ushr_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn sshr_imm<T1>(self, x: Value, Y: T1) -> Value
where T1: Into<Imm64> { ... }
fn bitrev(self, x: Value) -> Value { ... }
fn clz(self, x: Value) -> Value { ... }
fn cls(self, x: Value) -> Value { ... }
fn ctz(self, x: Value) -> Value { ... }
fn bswap(self, x: Value) -> Value { ... }
fn popcnt(self, x: Value) -> Value { ... }
fn fcmp<T1>(self, Cond: T1, x: Value, y: Value) -> Value
where T1: Into<FloatCC> { ... }
fn fadd(self, x: Value, y: Value) -> Value { ... }
fn fsub(self, x: Value, y: Value) -> Value { ... }
fn fmul(self, x: Value, y: Value) -> Value { ... }
fn fdiv(self, x: Value, y: Value) -> Value { ... }
fn sqrt(self, x: Value) -> Value { ... }
fn fma(self, x: Value, y: Value, z: Value) -> Value { ... }
fn fneg(self, x: Value) -> Value { ... }
fn fabs(self, x: Value) -> Value { ... }
fn fcopysign(self, x: Value, y: Value) -> Value { ... }
fn fmin(self, x: Value, y: Value) -> Value { ... }
fn fmax(self, x: Value, y: Value) -> Value { ... }
fn ceil(self, x: Value) -> Value { ... }
fn floor(self, x: Value) -> Value { ... }
fn trunc(self, x: Value) -> Value { ... }
fn nearest(self, x: Value) -> Value { ... }
fn bitcast<T1>(self, MemTo: Type, MemFlags: T1, x: Value) -> Value
where T1: Into<MemFlags> { ... }
fn scalar_to_vector(self, TxN: Type, s: Value) -> Value { ... }
fn bmask(self, IntTo: Type, x: Value) -> Value { ... }
fn ireduce(self, Int: Type, x: Value) -> Value { ... }
fn snarrow(self, x: Value, y: Value) -> Value { ... }
fn unarrow(self, x: Value, y: Value) -> Value { ... }
fn uunarrow(self, x: Value, y: Value) -> Value { ... }
fn swiden_low(self, x: Value) -> Value { ... }
fn swiden_high(self, x: Value) -> Value { ... }
fn uwiden_low(self, x: Value) -> Value { ... }
fn uwiden_high(self, x: Value) -> Value { ... }
fn iadd_pairwise(self, x: Value, y: Value) -> Value { ... }
fn x86_pmaddubsw(self, x: Value, y: Value) -> Value { ... }
fn uextend(self, Int: Type, x: Value) -> Value { ... }
fn sextend(self, Int: Type, x: Value) -> Value { ... }
fn fpromote(self, FloatScalar: Type, x: Value) -> Value { ... }
fn fdemote(self, FloatScalar: Type, x: Value) -> Value { ... }
fn fvdemote(self, x: Value) -> Value { ... }
fn fvpromote_low(self, a: Value) -> Value { ... }
fn fcvt_to_uint(self, IntTo: Type, x: Value) -> Value { ... }
fn fcvt_to_sint(self, IntTo: Type, x: Value) -> Value { ... }
fn fcvt_to_uint_sat(self, IntTo: Type, x: Value) -> Value { ... }
fn fcvt_to_sint_sat(self, IntTo: Type, x: Value) -> Value { ... }
fn x86_cvtt2dq(self, IntTo: Type, x: Value) -> Value { ... }
fn fcvt_from_uint(self, FloatTo: Type, x: Value) -> Value { ... }
fn fcvt_from_sint(self, FloatTo: Type, x: Value) -> Value { ... }
fn isplit(self, x: Value) -> (Value, Value) { ... }
fn iconcat(self, lo: Value, hi: Value) -> Value { ... }
fn atomic_rmw<T1, T2>(
self,
AtomicMem: Type,
MemFlags: T1,
AtomicRmwOp: T2,
p: Value,
x: Value,
) -> Value
where T1: Into<MemFlags>,
T2: Into<AtomicRmwOp> { ... }
fn atomic_cas<T1>(self, MemFlags: T1, p: Value, e: Value, x: Value) -> Value
where T1: Into<MemFlags> { ... }
fn atomic_load<T1>(self, AtomicMem: Type, MemFlags: T1, p: Value) -> Value
where T1: Into<MemFlags> { ... }
fn atomic_store<T1>(self, MemFlags: T1, x: Value, p: Value) -> Inst
where T1: Into<MemFlags> { ... }
fn fence(self) -> Inst { ... }
fn extract_vector<T1>(self, x: Value, y: T1) -> Value
where T1: Into<u8> { ... }
fn AtomicCas(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
arg0: Value,
arg1: Value,
arg2: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn AtomicRmw(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
op: AtomicRmwOp,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Binary(
self,
opcode: Opcode,
ctrl_typevar: Type,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn BinaryImm64(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Imm64,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn BinaryImm8(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: u8,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn BranchTable(
self,
opcode: Opcode,
ctrl_typevar: Type,
table: JumpTable,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Brif(
self,
opcode: Opcode,
ctrl_typevar: Type,
block0: BlockCall,
block1: BlockCall,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Call(
self,
opcode: Opcode,
ctrl_typevar: Type,
func_ref: FuncRef,
args: EntityList<Value>,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn CallIndirect(
self,
opcode: Opcode,
ctrl_typevar: Type,
sig_ref: SigRef,
args: EntityList<Value>,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn CondTrap(
self,
opcode: Opcode,
ctrl_typevar: Type,
code: TrapCode,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn DynamicStackLoad(
self,
opcode: Opcode,
ctrl_typevar: Type,
dynamic_stack_slot: DynamicStackSlot,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn DynamicStackStore(
self,
opcode: Opcode,
ctrl_typevar: Type,
dynamic_stack_slot: DynamicStackSlot,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn FloatCompare(
self,
opcode: Opcode,
ctrl_typevar: Type,
cond: FloatCC,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn FuncAddr(
self,
opcode: Opcode,
ctrl_typevar: Type,
func_ref: FuncRef,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn IntAddTrap(
self,
opcode: Opcode,
ctrl_typevar: Type,
code: TrapCode,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn IntCompare(
self,
opcode: Opcode,
ctrl_typevar: Type,
cond: IntCC,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn IntCompareImm(
self,
opcode: Opcode,
ctrl_typevar: Type,
cond: IntCC,
imm: Imm64,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Jump(
self,
opcode: Opcode,
ctrl_typevar: Type,
block0: BlockCall,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Load(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
offset: Offset32,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn LoadNoOffset(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn MultiAry(
self,
opcode: Opcode,
ctrl_typevar: Type,
args: EntityList<Value>,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn NullAry(
self,
opcode: Opcode,
ctrl_typevar: Type,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Shuffle(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Immediate,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn StackLoad(
self,
opcode: Opcode,
ctrl_typevar: Type,
stack_slot: StackSlot,
offset: Offset32,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn StackStore(
self,
opcode: Opcode,
ctrl_typevar: Type,
stack_slot: StackSlot,
offset: Offset32,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Store(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
offset: Offset32,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn StoreNoOffset(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Ternary(
self,
opcode: Opcode,
ctrl_typevar: Type,
arg0: Value,
arg1: Value,
arg2: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn TernaryImm8(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: u8,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Trap(
self,
opcode: Opcode,
ctrl_typevar: Type,
code: TrapCode,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn Unary(
self,
opcode: Opcode,
ctrl_typevar: Type,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn UnaryConst(
self,
opcode: Opcode,
ctrl_typevar: Type,
constant_handle: Constant,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn UnaryGlobalValue(
self,
opcode: Opcode,
ctrl_typevar: Type,
global_value: GlobalValue,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn UnaryIeee16(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Ieee16,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn UnaryIeee32(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Ieee32,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn UnaryIeee64(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Ieee64,
) -> (Inst, &'f mut DataFlowGraph) { ... }
fn UnaryImm(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Imm64,
) -> (Inst, &'f mut DataFlowGraph) { ... }
}
Convenience methods for building instructions.
The InstBuilder trait has one method per instruction opcode for conveniently constructing the instruction with minimum arguments. Polymorphic instructions infer their result types from the input arguments when possible. In some cases, an explicit ctrl_typevar argument is required.
The opcode methods return the new instruction’s result values, or the Inst itself for instructions that don’t have any results.
There is also a method per instruction format. These methods all return an Inst.
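In practice this trait is usually reached through cranelift-frontend’s FunctionBuilder, whose ins() method returns an InstBuilder positioned at the current block. The following is a minimal sketch (not part of this trait’s documentation; it assumes the cranelift-frontend and cranelift-codegen crates, and exact constructor names can vary slightly between Cranelift releases) that builds a function returning its i32 argument times two:

```rust
use cranelift_codegen::ir::{types, AbiParam, Function, InstBuilder, Signature, UserFuncName};
use cranelift_codegen::isa::CallConv;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};

fn build_double() -> Function {
    // Signature: fn(i32) -> i32
    let mut sig = Signature::new(CallConv::SystemV);
    sig.params.push(AbiParam::new(types::I32));
    sig.returns.push(AbiParam::new(types::I32));

    let mut func = Function::with_name_signature(UserFuncName::user(0, 0), sig);
    let mut fb_ctx = FunctionBuilderContext::new();
    let mut builder = FunctionBuilder::new(&mut func, &mut fb_ctx);

    let entry = builder.create_block();
    builder.append_block_params_for_function_params(entry);
    builder.switch_to_block(entry);
    builder.seal_block(entry);

    // builder.ins() yields an implementation of InstBuilder.
    let x = builder.block_params(entry)[0];
    let two = builder.ins().iconst(types::I32, 2i64); // UnaryImm format, returns a Value
    let doubled = builder.ins().imul(x, two);          // Binary format, returns a Value
    builder.ins().return_(&[doubled]);                 // MultiAry format, returns an Inst

    builder.finalize();
    func
}
```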
Provided Methods
fn jump(self, block_call_label: Block, block_call_args: &[Value]) -> Inst
Jump.
Unconditionally jump to a basic block, passing the specified block arguments. The number and types of arguments must match the destination block.
Inputs:
- block_call_label: Destination basic block
- block_call_args: Block arguments
fn brif(
self,
c: Value,
block_then_label: Block,
block_then_args: &[Value],
block_else_label: Block,
block_else_args: &[Value],
) -> Inst
Conditional branch when cond is non-zero.
Take the then branch when c != 0, and the else branch otherwise.
Inputs:
- c: Controlling value to test
- block_then_label: Destination basic block
- block_then_args: Block arguments
- block_else_label: Destination basic block
- block_else_args: Block arguments
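A sketch of the usual jump/brif pattern (hypothetical helper, assuming cranelift-frontend’s FunctionBuilder and i32 operands): branch on a comparison, then merge the two arms through a block parameter, which is how block arguments are typically used.

```rust
use cranelift_codegen::ir::condcodes::IntCC;
use cranelift_codegen::ir::{types, InstBuilder, Value};
use cranelift_frontend::FunctionBuilder;

/// Emit max(a, b) for two i32 values. Assumes the builder is currently
/// positioned in a block that has not been terminated yet.
fn emit_max(builder: &mut FunctionBuilder, a: Value, b: Value) -> Value {
    let then_block = builder.create_block();
    let else_block = builder.create_block();
    let merge_block = builder.create_block();
    builder.append_block_param(merge_block, types::I32);

    let cond = builder.ins().icmp(IntCC::SignedGreaterThan, a, b);
    builder.ins().brif(cond, then_block, &[], else_block, &[]);

    builder.switch_to_block(then_block);
    builder.seal_block(then_block);
    builder.ins().jump(merge_block, &[a]); // pass a as the block argument

    builder.switch_to_block(else_block);
    builder.seal_block(else_block);
    builder.ins().jump(merge_block, &[b]); // pass b as the block argument

    builder.switch_to_block(merge_block);
    builder.seal_block(merge_block);
    builder.block_params(merge_block)[0]
}
```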
fn br_table(self, x: Value, JT: JumpTable) -> Inst
Indirect branch via jump table.
Use x as an unsigned index into the jump table JT. If a jump table entry is found, branch to the corresponding block. If no entry was found or the index is out-of-bounds, branch to the default block of the table.
Note that this branch instruction can’t pass arguments to the targeted blocks. Split critical edges as needed to work around this.
Do not confuse this with “tables” in WebAssembly. br_table is for jump tables with destinations within the current function only; think of a match in Rust or a switch in C. If you want to call a function in a dynamic library, that will typically use call_indirect.
Inputs:
- x: i32 index into jump table
- JT: A jump table.
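A sketch of building the jump table itself (hypothetical helper; JumpTableData and BlockCall construction are as found in recent Cranelift releases and may differ in yours, so treat this as an assumption to check against your version):

```rust
use cranelift_codegen::ir::{Block, InstBuilder, JumpTableData, Value};
use cranelift_frontend::FunctionBuilder;

/// Branch on `index` to one of `targets`, falling back to `default_block`
/// for out-of-range indices. None of the targets receive block arguments.
fn emit_dispatch(
    builder: &mut FunctionBuilder,
    index: Value,
    default_block: Block,
    targets: &[Block],
) {
    let default_call = builder.func.dfg.block_call(default_block, &[]);
    let table_calls: Vec<_> = targets
        .iter()
        .map(|&b| builder.func.dfg.block_call(b, &[]))
        .collect();
    let jt = builder.create_jump_table(JumpTableData::new(default_call, &table_calls));
    builder.ins().br_table(index, jt);
}
```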
fn debugtrap(self) -> Inst
Encodes an assembly debug trap.
fn trap<T1>(self, code: T1) -> Inst
Terminate execution unconditionally.
Inputs:
- code: A trap reason code.
fn trapz<T1>(self, c: Value, code: T1) -> Inst
Trap when zero.
If c is non-zero, execution continues at the following instruction.
Inputs:
- c: Controlling value to test
- code: A trap reason code.
fn trapnz<T1>(self, c: Value, code: T1) -> Inst
Trap when non-zero.
If c is zero, execution continues at the following instruction.
Inputs:
- c: Controlling value to test
- code: A trap reason code.
fn return_(self, rvals: &[Value]) -> Inst
Return from the function.
Unconditionally transfer control to the calling function, passing the provided return values. The list of return values must match the function signature’s return types.
Inputs:
- rvals: return values
fn call(self, FN: FuncRef, args: &[Value]) -> Inst
Direct function call.
Call a function which has been declared in the preamble. The argument types must match the function’s signature.
Inputs:
- FN: function to call, declared by function
- args: call arguments
Outputs:
- rvals: return values
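Because call has results, the opcode method returns the Inst rather than a Value; the result values are looked up afterwards. A sketch (hypothetical helper; it assumes the callee was already declared in the function preamble, for example via cranelift-module’s declare_func_in_func, and that it returns exactly one value):

```rust
use cranelift_codegen::ir::{FuncRef, InstBuilder, Value};
use cranelift_frontend::FunctionBuilder;

/// Call `callee` with `args` and return its single result value.
fn emit_call1(builder: &mut FunctionBuilder, callee: FuncRef, args: &[Value]) -> Value {
    let inst = builder.ins().call(callee, args);
    builder.inst_results(inst)[0]
}
```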
fn call_indirect(self, SIG: SigRef, callee: Value, args: &[Value]) -> Inst
Indirect function call.
Call the function pointed to by callee with the given arguments. The called function must match the specified signature.
Note that this is different from WebAssembly’s call_indirect; the callee is a native address, rather than a table index. For WebAssembly, table_addr and load are used to obtain a native address from a table.
Inputs:
- SIG: function signature
- callee: address of function to call
- args: call arguments
Outputs:
- rvals: return values
fn return_call(self, FN: FuncRef, args: &[Value]) -> Inst
Direct tail call.
Tail call a function which has been declared in the preamble. The argument types must match the function’s signature, the caller and callee calling conventions must be the same, and must be a calling convention that supports tail calls.
This instruction is a block terminator.
Inputs:
- FN: function to call, declared by function
- args: call arguments
fn return_call_indirect(
self,
SIG: SigRef,
callee: Value,
args: &[Value],
) -> Inst
Indirect tail call.
Call the function pointed to by callee with the given arguments. The argument types must match the function’s signature, the caller and callee calling conventions must be the same, and must be a calling convention that supports tail calls.
This instruction is a block terminator.
Note that this is different from WebAssembly’s tail_call_indirect; the callee is a native address, rather than a table index. For WebAssembly, table_addr and load are used to obtain a native address from a table.
Inputs:
- SIG: function signature
- callee: address of function to call
- args: call arguments
fn func_addr(self, iAddr: Type, FN: FuncRef) -> Value
Get the address of a function.
Compute the absolute address of a function declared in the preamble.
The returned address can be used as a callee argument to call_indirect. This is also a method for calling functions that are too far away to be addressable by a direct call instruction.
Inputs:
- iAddr (controlling type variable): An integer address type
- FN: function to call, declared by function
Outputs:
- addr: An integer address type
fn splat(self, TxN: Type, x: Value) -> Value
Vector splat.
Return a vector whose lanes are all x.
Inputs:
- TxN (controlling type variable): A SIMD vector type
- x: Value to splat to all lanes
Outputs:
- a: A SIMD vector type
fn swizzle(self, x: Value, y: Value) -> Value
Vector swizzle.
Returns a new vector with byte-width lanes selected from the lanes of the first input vector x, as specified by the second input vector y. The indices i in range [0, 15] select the i-th element of x. For indices outside of that range the resulting lane is 0. Note that this operates on byte-width lanes.
Inputs:
- x: Vector to modify by re-arranging lanes
- y: Mask for re-arranging lanes
Outputs:
- a: A SIMD vector type consisting of 16 lanes of 8-bit integers
fn x86_pshufb(self, x: Value, y: Value) -> Value
A vector swizzle lookalike which has the semantics of pshufb on x64.
This instruction will permute the 8-bit lanes of x with the indices specified in y. Each lane in the mask, y, uses the bottom four bits for selecting the lane from x unless the most significant bit is set, in which case the lane is zeroed. The output vector will have the following contents when the element of y is in these ranges:
- [0, 127] -> x[y[i] % 16]
- [128, 255] -> 0
Inputs:
- x: Vector to modify by re-arranging lanes
- y: Mask for re-arranging lanes
Outputs:
- a: A SIMD vector type consisting of 16 lanes of 8-bit integers
fn insertlane<T1>(self, x: Value, y: Value, Idx: T1) -> Value
Insert y as lane Idx in x.
The lane index, Idx, is an immediate value, not an SSA value. It must indicate a valid lane index for the type of x.
Inputs:
- x: The vector to modify
- y: New lane value
- Idx: Lane index
Outputs:
- a: A SIMD vector type
fn extractlane<T1>(self, x: Value, Idx: T1) -> Value
Extract lane Idx from x.
The lane index, Idx, is an immediate value, not an SSA value. It must indicate a valid lane index for the type of x. Note that the upper bits of a may or may not be zeroed depending on the ISA, but the type system should prevent using a as anything other than the extracted value.
Inputs:
- x: A SIMD vector type
- Idx: Lane index
Outputs:
- a:
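A short sketch of the lane operations above (hypothetical helper; assumes a FunctionBuilder and i32 scalar inputs). Note that the lane index is a plain Rust integer, not an SSA Value:

```rust
use cranelift_codegen::ir::{types, InstBuilder, Value};
use cranelift_frontend::FunctionBuilder;

/// Broadcast x into all four i32x4 lanes, overwrite lane 3 with y,
/// then read lane 0 back out as a scalar.
fn splat_and_poke(builder: &mut FunctionBuilder, x: Value, y: Value) -> Value {
    let v = builder.ins().splat(types::I32X4, x);
    let v = builder.ins().insertlane(v, y, 3u8);
    builder.ins().extractlane(v, 0u8)
}
```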
fn smin(self, x: Value, y: Value) -> Value
Signed integer minimum.
Inputs:
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn umin(self, x: Value, y: Value) -> Value
Unsigned integer minimum.
Inputs:
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn smax(self, x: Value, y: Value) -> Value
Signed integer maximum.
Inputs:
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn umax(self, x: Value, y: Value) -> Value
Unsigned integer maximum.
Inputs:
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn avg_round(self, x: Value, y: Value) -> Value
Unsigned average with rounding: a := (x + y + 1) // 2
The addition does not lose any information (such as from overflow).
Inputs:
- x: A SIMD vector type containing integers
- y: A SIMD vector type containing integers
Outputs:
- a: A SIMD vector type containing integers
fn uadd_sat(self, x: Value, y: Value) -> Value
Add with unsigned saturation.
This is similar to iadd but the operands are interpreted as unsigned integers and their summed result, instead of wrapping, will be saturated to the highest unsigned integer for the controlling type (e.g. 0xFF for i8).
Inputs:
- x: A SIMD vector type containing integers
- y: A SIMD vector type containing integers
Outputs:
- a: A SIMD vector type containing integers
fn sadd_sat(self, x: Value, y: Value) -> Value
Add with signed saturation.
This is similar to iadd but the operands are interpreted as signed integers and their summed result, instead of wrapping, will be saturated to the lowest or highest signed integer for the controlling type (e.g. 0x80 or 0x7F for i8). For example, since an sadd_sat.i8 of 0x70 and 0x70 is greater than 0x7F, the result will be clamped to 0x7F.
Inputs:
- x: A SIMD vector type containing integers
- y: A SIMD vector type containing integers
Outputs:
- a: A SIMD vector type containing integers
fn usub_sat(self, x: Value, y: Value) -> Value
Subtract with unsigned saturation.
This is similar to isub but the operands are interpreted as unsigned integers and their difference, instead of wrapping, will be saturated to the lowest unsigned integer for the controlling type (e.g. 0x00 for i8).
Inputs:
- x: A SIMD vector type containing integers
- y: A SIMD vector type containing integers
Outputs:
- a: A SIMD vector type containing integers
fn ssub_sat(self, x: Value, y: Value) -> Value
Subtract with signed saturation.
This is similar to isub but the operands are interpreted as signed integers and their difference, instead of wrapping, will be saturated to the lowest or highest signed integer for the controlling type (e.g. 0x80 or 0x7F for i8).
Inputs:
- x: A SIMD vector type containing integers
- y: A SIMD vector type containing integers
Outputs:
- a: A SIMD vector type containing integers
fn load<T1, T2>(self, Mem: Type, MemFlags: T1, p: Value, Offset: T2) -> Value
Load from memory at p + Offset.
This is a polymorphic instruction that can load any value type which has a memory representation.
Inputs:
- Mem (controlling type variable): Any type that can be stored in memory
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: Value loaded
fn store<T1, T2>(self, MemFlags: T1, x: Value, p: Value, Offset: T2) -> Inst
Store x to memory at p + Offset.
This is a polymorphic instruction that can store any value type with a memory representation.
Inputs:
- MemFlags: Memory operation flags
- x: Value to be stored
- p: An integer address type
- Offset: Byte offset from base address
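A sketch of a typical load/modify/store sequence (hypothetical helper; MemFlags::trusted() asserts the access is aligned and cannot trap, so substitute flags appropriate to your memory model):

```rust
use cranelift_codegen::ir::{types, InstBuilder, MemFlags, Value};
use cranelift_frontend::FunctionBuilder;

/// Read an i32 field at byte offset 8 from `base`, add one, write it back.
fn bump_field(builder: &mut FunctionBuilder, base: Value) {
    let flags = MemFlags::trusted();
    let old = builder.ins().load(types::I32, flags, base, 8);
    let new = builder.ins().iadd_imm(old, 1i64);
    builder.ins().store(flags, new, base, 8);
}
```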
fn uload8<T1, T2>(
self,
iExt8: Type,
MemFlags: T1,
p: Value,
Offset: T2,
) -> Value
Load 8 bits from memory at p + Offset and zero-extend.
This is equivalent to load.i8 followed by uextend.
Inputs:
- iExt8 (controlling type variable): An integer type with more than 8 bits
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: An integer type with more than 8 bits
fn sload8<T1, T2>(
self,
iExt8: Type,
MemFlags: T1,
p: Value,
Offset: T2,
) -> Value
Load 8 bits from memory at p + Offset and sign-extend.
This is equivalent to load.i8 followed by sextend.
Inputs:
- iExt8 (controlling type variable): An integer type with more than 8 bits
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: An integer type with more than 8 bits
fn istore8<T1, T2>(self, MemFlags: T1, x: Value, p: Value, Offset: T2) -> Inst
Store the low 8 bits of x to memory at p + Offset.
This is equivalent to ireduce.i8 followed by store.i8.
Inputs:
- MemFlags: Memory operation flags
- x: An integer type with more than 8 bits
- p: An integer address type
- Offset: Byte offset from base address
fn uload16<T1, T2>(
self,
iExt16: Type,
MemFlags: T1,
p: Value,
Offset: T2,
) -> Value
Load 16 bits from memory at p + Offset and zero-extend.
This is equivalent to load.i16 followed by uextend.
Inputs:
- iExt16 (controlling type variable): An integer type with more than 16 bits
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: An integer type with more than 16 bits
fn sload16<T1, T2>(
self,
iExt16: Type,
MemFlags: T1,
p: Value,
Offset: T2,
) -> Value
Load 16 bits from memory at p + Offset and sign-extend.
This is equivalent to load.i16 followed by sextend.
Inputs:
- iExt16 (controlling type variable): An integer type with more than 16 bits
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: An integer type with more than 16 bits
fn istore16<T1, T2>(self, MemFlags: T1, x: Value, p: Value, Offset: T2) -> Inst
Store the low 16 bits of x to memory at p + Offset.
This is equivalent to ireduce.i16 followed by store.i16.
Inputs:
- MemFlags: Memory operation flags
- x: An integer type with more than 16 bits
- p: An integer address type
- Offset: Byte offset from base address
fn uload32<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
Load 32 bits from memory at p + Offset and zero-extend.
This is equivalent to load.i32 followed by uextend.
Inputs:
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: An integer type with more than 32 bits
fn sload32<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
Load 32 bits from memory at p + Offset and sign-extend.
This is equivalent to load.i32 followed by sextend.
Inputs:
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: An integer type with more than 32 bits
fn istore32<T1, T2>(self, MemFlags: T1, x: Value, p: Value, Offset: T2) -> Inst
Store the low 32 bits of x to memory at p + Offset.
This is equivalent to ireduce.i32 followed by store.i32.
Inputs:
- MemFlags: Memory operation flags
- x: An integer type with more than 32 bits
- p: An integer address type
- Offset: Byte offset from base address
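A sketch of the narrow load/store pair (hypothetical helper): the extension is baked into the load, and the truncation is baked into the store, so the SSA value in between is an ordinary wide integer.

```rust
use cranelift_codegen::ir::{types, InstBuilder, MemFlags, Value};
use cranelift_frontend::FunctionBuilder;

/// Read a byte at `base` as an i32, increment it, and store the low 8 bits back.
fn bump_byte(builder: &mut FunctionBuilder, base: Value) {
    let flags = MemFlags::new();
    let b = builder.ins().uload8(types::I32, flags, base, 0); // load.i8 + uextend
    let b = builder.ins().iadd_imm(b, 1i64);
    builder.ins().istore8(flags, b, base, 0);                 // ireduce.i8 + store.i8
}
```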
fn stack_switch(
self,
store_context_ptr: Value,
load_context_ptr: Value,
in_payload0: Value,
) -> Value
Suspends execution of the current stack and resumes execution of another one.
The target stack to switch to is identified by the data stored at load_context_ptr. Before switching, this instruction stores analogous information about the current (i.e., original) stack at store_context_ptr, to enable switching back to the original stack at a later point.
The size, alignment and layout of the information stored at load_context_ptr and store_context_ptr is platform-dependent. The instruction assumes that load_context_ptr and store_context_ptr are valid pointers to memory with said layout and alignment, and does not perform any checks on these pointers or the data stored there.
The instruction is experimental and only supported on x64 Linux at the moment.
When switching from a stack A to a stack B, one of the following cases must apply:
- Stack B was previously suspended using a stack_switch instruction.
- Stack B is a newly initialized stack. The necessary initialization is platform-dependent and will generally involve running some kind of trampoline to start execution of a function on the new stack.
In both cases, the in_payload argument of the stack_switch instruction executed on A is passed to stack B. In the first case above, it will be the result value of the earlier stack_switch instruction executed on stack B. In the second case, the value will be accessible to the trampoline in a platform-dependent register.
The pointers load_context_ptr and store_context_ptr are allowed to be equal; the instruction ensures that all data is loaded from the former before writing to the latter.
Stack switching is one-shot in the sense that each stack_switch operation effectively consumes the context identified by load_context_ptr. In other words, performing two stack_switches using the same load_context_ptr causes undefined behavior, unless the context at load_context_ptr is overwritten by another stack_switch in between.
Inputs:
- store_context_ptr: An integer address type
- load_context_ptr: An integer address type
- in_payload0: An integer address type
Outputs:
- out_payload0: An integer address type
fn uload8x8<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
Load an 8x8 vector (64 bits) from memory at p + Offset and zero-extend into an i16x8 vector.
Inputs:
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: Value loaded
fn sload8x8<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
Load an 8x8 vector (64 bits) from memory at p + Offset and sign-extend into an i16x8 vector.
Inputs:
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: Value loaded
fn uload16x4<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
Load a 16x4 vector (64 bits) from memory at p + Offset and zero-extend into an i32x4 vector.
Inputs:
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: Value loaded
fn sload16x4<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
Load a 16x4 vector (64 bits) from memory at p + Offset and sign-extend into an i32x4 vector.
Inputs:
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: Value loaded
fn uload32x2<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
Load a 32x2 vector (64 bits) from memory at p + Offset and zero-extend into an i64x2 vector.
Inputs:
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: Value loaded
fn sload32x2<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
Load a 32x2 vector (64 bits) from memory at p + Offset and sign-extend into an i64x2 vector.
Inputs:
- MemFlags: Memory operation flags
- p: An integer address type
- Offset: Byte offset from base address
Outputs:
- a: Value loaded
fn stack_load<T1>(self, Mem: Type, SS: StackSlot, Offset: T1) -> Value where T1: Into<Offset32>
Load a value from a stack slot at the constant offset.
This is a polymorphic instruction that can load any value type which has a memory representation.
The offset is an immediate constant, not an SSA value. The memory access cannot go out of bounds, i.e. sizeof(a) + Offset <= sizeof(SS).
Inputs:
- Mem (controlling type variable): Any type that can be stored in memory
- SS: A stack slot
- Offset: In-bounds offset into stack slot
Outputs:
- a: Value loaded
fn stack_store<T1>(self, x: Value, SS: StackSlot, Offset: T1) -> Inst where T1: Into<Offset32>
Store a value to a stack slot at a constant offset.
This is a polymorphic instruction that can store any value type with a memory representation.
The offset is an immediate constant, not an SSA value. The memory access cannot go out of bounds, i.e. sizeof(a) + Offset <= sizeof(SS).
Inputs:
- x: Value to be stored
- SS: A stack slot
- Offset: In-bounds offset into stack slot
fn stack_addr<T1>(self, iAddr: Type, SS: StackSlot, Offset: T1) -> Value where T1: Into<Offset32>
Get the address of a stack slot.
Compute the absolute address of a byte in a stack slot. The offset must refer to a byte inside the stack slot: 0 <= Offset < sizeof(SS).
Inputs:
- iAddr (controlling type variable): An integer address type
- SS: A stack slot
- Offset: In-bounds offset into stack slot
Outputs:
- addr: An integer address type
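A sketch of spilling to and reloading from an explicit stack slot (hypothetical helper; the slot is assumed to have been created up front, for example with FunctionBuilder::create_sized_stack_slot, whose StackSlotData constructor differs between Cranelift versions and is therefore not shown):

```rust
use cranelift_codegen::ir::{types, InstBuilder, StackSlot, Value};
use cranelift_frontend::FunctionBuilder;

/// Store an i64 value at offset 0 of `slot`, then load it back.
fn spill_reload(builder: &mut FunctionBuilder, slot: StackSlot, v: Value) -> Value {
    builder.ins().stack_store(v, slot, 0);
    builder.ins().stack_load(types::I64, slot, 0)
}
```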
fn dynamic_stack_load(self, Mem: Type, DSS: DynamicStackSlot) -> Value
Load a value from a dynamic stack slot.
This is a polymorphic instruction that can load any value type which has a memory representation.
Inputs:
- Mem (controlling type variable): Any type that can be stored in memory
- DSS: A dynamic stack slot
Outputs:
- a: Value loaded
fn dynamic_stack_store(self, x: Value, DSS: DynamicStackSlot) -> Inst
Store a value to a dynamic stack slot.
This is a polymorphic instruction that can store any dynamic value type with a memory representation.
Inputs:
- x: Value to be stored
- DSS: A dynamic stack slot
fn dynamic_stack_addr(self, iAddr: Type, DSS: DynamicStackSlot) -> Value
Get the address of a dynamic stack slot.
Compute the absolute address of the first byte of a dynamic stack slot.
Inputs:
- iAddr (controlling type variable): An integer address type
- DSS: A dynamic stack slot
Outputs:
- addr: An integer address type
fn global_value(self, Mem: Type, GV: GlobalValue) -> Value
Compute the value of global GV.
Inputs:
- Mem (controlling type variable): Any type that can be stored in memory
- GV: A global value.
Outputs:
- a: Value loaded
fn symbol_value(self, Mem: Type, GV: GlobalValue) -> Value
Compute the value of global GV, which is a symbolic value.
Inputs:
- Mem (controlling type variable): Any type that can be stored in memory
- GV: A global value.
Outputs:
- a: Value loaded
fn tls_value(self, Mem: Type, GV: GlobalValue) -> Value
Compute the value of global GV, which is a TLS (thread local storage) value.
Inputs:
- Mem (controlling type variable): Any type that can be stored in memory
- GV: A global value.
Outputs:
- a: Value loaded
fn get_pinned_reg(self, iAddr: Type) -> Value
Gets the content of the pinned register, when it’s enabled.
Inputs:
- iAddr (controlling type variable): An integer address type
Outputs:
- addr: An integer address type
fn set_pinned_reg(self, addr: Value) -> Inst
Sets the content of the pinned register, when it’s enabled.
Inputs:
- addr: An integer address type
fn get_frame_pointer(self, iAddr: Type) -> Value
Get the address in the frame pointer register.
Usage of this instruction requires setting preserve_frame_pointers to true.
Inputs:
- iAddr (controlling type variable): An integer address type
Outputs:
- addr: An integer address type
fn get_stack_pointer(self, iAddr: Type) -> Value
Get the address in the stack pointer register.
Inputs:
- iAddr (controlling type variable): An integer address type
Outputs:
- addr: An integer address type
fn get_return_address(self, iAddr: Type) -> Value
Get the PC where this function will transfer control to when it returns.
Usage of this instruction requires setting preserve_frame_pointers to true.
Inputs:
- iAddr (controlling type variable): An integer address type
Outputs:
- addr: An integer address type
fn iconst<T1>(self, NarrowInt: Type, N: T1) -> Value
Integer constant.
Create a scalar integer SSA value with an immediate constant value, or an integer vector where all the lanes have the same value.
Inputs:
- NarrowInt (controlling type variable): An integer type of width up to i64
- N: A 64-bit immediate integer.
Outputs:
- a: A constant integer scalar or vector value
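A sketch of materializing constants (hypothetical helper): iconst takes an integer controlling type no wider than i64 plus an Imm64, while the float constant methods take Ieee32/Ieee64 values, into which plain Rust floats convert.

```rust
use cranelift_codegen::ir::{types, InstBuilder};
use cranelift_frontend::FunctionBuilder;

fn emit_constants(builder: &mut FunctionBuilder) {
    let zero = builder.ins().iconst(types::I64, 0i64);
    let ten = builder.ins().iconst(types::I32, 10i64);
    let half = builder.ins().f32const(0.5f32);
    let tau = builder.ins().f64const(6.283185307179586f64);
    let _ = (zero, ten, half, tau);
}
```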
fn f16const<T1>(self, N: T1) -> Value where T1: Into<Ieee16>
Floating point constant.
Create an f16 SSA value with an immediate constant value.
Inputs:
- N: A 16-bit immediate floating point number.
Outputs:
- a: A constant f16 scalar value
fn f32const<T1>(self, N: T1) -> Value
Floating point constant.
Create an f32 SSA value with an immediate constant value.
Inputs:
- N: A 32-bit immediate floating point number.
Outputs:
- a: A constant f32 scalar value
fn f64const<T1>(self, N: T1) -> Value
Floating point constant.
Create an f64 SSA value with an immediate constant value.
Inputs:
- N: A 64-bit immediate floating point number.
Outputs:
- a: A constant f64 scalar value
fn f128const<T1>(self, N: T1) -> Value where T1: Into<Constant>
Floating point constant.
Create an f128 SSA value with an immediate constant value.
Inputs:
- N: A constant stored in the constant pool.
Outputs:
- a: A constant f128 scalar value
fn vconst<T1>(self, TxN: Type, N: T1) -> Value where T1: Into<Constant>
SIMD vector constant.
Construct a vector with the given immediate bytes.
Inputs:
- TxN (controlling type variable): A SIMD vector type
- N: The 16 immediate bytes of a 128-bit vector
Outputs:
- a: A constant vector value
fn shuffle<T1>(self, a: Value, b: Value, mask: T1) -> Value where T1: Into<Immediate>
SIMD vector shuffle.
Shuffle two vectors using the given immediate bytes. For each of the 16 bytes of the immediate, a value i of 0-15 selects the i-th element of the first vector and a value i of 16-31 selects the (i-16)th element of the second vector. Immediate values outside of the 0-31 range are not valid.
Inputs:
- a: A vector value
- b: A vector value
- mask: The 16 immediate bytes used for selecting the elements to shuffle
Outputs:
- a: A vector value
fn nop(self) -> Inst
Just a dummy instruction.
Note: this doesn’t compile to a machine code nop.
fn select(self, c: Value, x: Value, y: Value) -> Value
Conditional select.
This instruction selects whole values. Use bitselect to choose each bit according to a mask.
Inputs:
- c: Controlling value to test
- x: Value to use when c is true
- y: Value to use when c is false
Outputs:
- a: Any integer, float, or reference scalar or vector type
fn select_spectre_guard(self, c: Value, x: Value, y: Value) -> Value
Conditional select intended for Spectre guards.
This operation is semantically equivalent to a select instruction. However, this instruction prohibits all speculation on the controlling value when determining which input to use as the result. As such, it is suitable for use in Spectre guards.
For example, on a target which may speculatively execute branches, the lowering of this instruction is guaranteed to not conditionally branch. Instead it will typically lower to a conditional move instruction. (No Spectre-vulnerable processors are known to perform value speculation on conditional move instructions.)
Ensure that the instruction you’re trying to protect from Spectre attacks has a data dependency on the result of this instruction. That prevents an out-of-order CPU from evaluating that instruction until the result of this one is known, which in turn will be blocked until the controlling value is known.
Typical usage is to use a bounds-check as the controlling value, and select between either a null pointer if the bounds-check fails, or an in-bounds address otherwise, so that dereferencing the resulting address with a load or store instruction will trap if the bounds-check failed. When this instruction is used in this way, any microarchitectural side effects of the memory access will only occur after the bounds-check finishes, which ensures that no Spectre vulnerability will exist.
Optimization opportunities for this instruction are limited compared to a normal select instruction, but it is allowed to be replaced by other values which are functionally equivalent as long as doing so does not introduce any new opportunities to speculate on the controlling value.
Inputs:
- c: Controlling value to test
- x: Value to use when c is true
- y: Value to use when c is false
Outputs:
- a: Any integer, float, or reference scalar or vector type
fn bitselect(self, c: Value, x: Value, y: Value) -> Value
Conditional select of bits.
For each bit in c, this instruction selects the corresponding bit from x if the bit in c is 1 and the corresponding bit from y if the bit in c is 0. See also: select.
Inputs:
- c: Controlling value to test
- x: Value to use when c is true
- y: Value to use when c is false
Outputs:
- a: Any integer, float, or reference scalar or vector type
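A small sketch contrasting the two (hypothetical helper, assuming i32 operands): select picks one whole value based on c, while bitselect merges the operands bit by bit under a mask.

```rust
use cranelift_codegen::ir::{types, InstBuilder, Value};
use cranelift_frontend::FunctionBuilder;

fn select_demo(builder: &mut FunctionBuilder, c: Value, x: Value, y: Value) -> (Value, Value) {
    // Whole-value choice: result is either x or y.
    let whole = builder.ins().select(c, x, y);
    // Bitwise merge: mask bits set to 1 come from x, bits set to 0 come from y.
    let mask = builder.ins().iconst(types::I32, 0x00ff_00ffi64);
    let mixed = builder.ins().bitselect(mask, x, y);
    (whole, mixed)
}
```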
fn x86_blendv(self, c: Value, x: Value, y: Value) -> Value
A bitselect-lookalike instruction except with the semantics of blendv-related instructions on x86.
This instruction will use the top bit of each lane in c, the condition mask. If the bit is 1 then the corresponding lane from x is chosen. Otherwise the corresponding lane from y is chosen.
Inputs:
- c: Controlling value to test
- x: Value to use when c is true
- y: Value to use when c is false
Outputs:
- a: Any integer, float, or reference scalar or vector type
fn vany_true(self, a: Value) -> Value
Reduce a vector to a scalar boolean.
Return a scalar boolean true if any lane in a is non-zero, false otherwise.
Inputs:
- a: A SIMD vector type
Outputs:
- s: An integer type with 8 bits. WARNING: arithmetic on 8bit integers is incomplete
fn vall_true(self, a: Value) -> Value
Reduce a vector to a scalar boolean.
Return a scalar boolean true if all lanes in a are non-zero, false otherwise.
Inputs:
- a: A SIMD vector type
Outputs:
- s: An integer type with 8 bits. WARNING: arithmetic on 8bit integers is incomplete
fn vhigh_bits(self, NarrowInt: Type, a: Value) -> Value
Reduce a vector to a scalar integer.
Return a scalar integer, consisting of the concatenation of the most significant bit of each lane of a.
Inputs:
- NarrowInt (controlling type variable): An integer type of width up to i64
- a: A SIMD vector type
Outputs:
- x: An integer type of width up to i64
fn icmp<T1>(self, Cond: T1, x: Value, y: Value) -> Value
Integer comparison.
The condition code determines if the operands are interpreted as signed or unsigned integers.
| Signed | Unsigned | Condition |
|---|---|---|
| eq | eq | Equal |
| ne | ne | Not equal |
| slt | ult | Less than |
| sge | uge | Greater than or equal |
| sgt | ugt | Greater than |
| sle | ule | Less than or equal |
When this instruction compares integer vectors, it returns a vector of lane-wise comparisons.
When comparing scalars, the result is:
- 1 if the condition holds.
- 0 if the condition does not hold.
When comparing vectors, the result is:
- -1 (i.e. all ones) in each lane where the condition holds.
- 0 in each lane where the condition does not hold.
Inputs:
- Cond: An integer comparison condition code.
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a:
fn icmp_imm<T1, T2>(self, Cond: T1, x: Value, Y: T2) -> Value
Compare scalar integer to a constant.
This is the same as the icmp instruction, except one operand is a sign extended 64 bit immediate constant.
This instruction can only compare scalars. Use icmp for lane-wise vector comparisons.
Inputs:
- Cond: An integer comparison condition code.
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: An integer type with 8 bits. WARNING: arithmetic on 8bit integers is incomplete
fn iadd(self, x: Value, y: Value) -> Value
Wrapping integer addition: a := x + y \pmod{2^B}.
This instruction does not depend on the signed/unsigned interpretation of the operands.
Inputs:
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn isub(self, x: Value, y: Value) -> Value
Wrapping integer subtraction: a := x - y \pmod{2^B}.
This instruction does not depend on the signed/unsigned interpretation of the operands.
Inputs:
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn ineg(self, x: Value) -> Value
Integer negation: a := -x \pmod{2^B}.
Inputs:
- x: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn iabs(self, x: Value) -> Value
Integer absolute value with wrapping: a := |x|.
Inputs:
- x: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn imul(self, x: Value, y: Value) -> Value
Wrapping integer multiplication: a := x y \pmod{2^B}.
This instruction does not depend on the signed/unsigned interpretation of the operands.
Polymorphic over all integer types (vector and scalar).
Inputs:
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn umulhi(self, x: Value, y: Value) -> Value
Unsigned integer multiplication, producing the high half of a double-length result.
Polymorphic over all integer types (vector and scalar).
Inputs:
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn smulhi(self, x: Value, y: Value) -> Value
Signed integer multiplication, producing the high half of a double-length result.
Polymorphic over all integer types (vector and scalar).
Inputs:
- x: A scalar or vector integer type
- y: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn sqmul_round_sat(self, x: Value, y: Value) -> Value
Fixed-point multiplication of numbers in the QN format, where N + 1 is the number bitwidth:
a := signed_saturate((x * y + 1 << (Q - 1)) >> Q)
Polymorphic over all integer vector types with 16- or 32-bit numbers.
Inputs:
- x: A vector integer type with 16- or 32-bit numbers
- y: A vector integer type with 16- or 32-bit numbers
Outputs:
- a: A vector integer type with 16- or 32-bit numbers
fn x86_pmulhrsw(self, x: Value, y: Value) -> Value
A similar instruction to sqmul_round_sat except with the semantics of x86’s pmulhrsw instruction.
This is the same as sqmul_round_sat except when both input lanes are i16::MIN.
Inputs:
- x: A vector integer type with 16- or 32-bit numbers
- y: A vector integer type with 16- or 32-bit numbers
Outputs:
- a: A vector integer type with 16- or 32-bit numbers
fn udiv(self, x: Value, y: Value) -> Value
Unsigned integer division: a := \lfloor {x \over y} \rfloor.
This operation traps if the divisor is zero.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
Outputs:
- a: A scalar integer type
fn sdiv(self, x: Value, y: Value) -> Value
Signed integer division rounded toward zero: a := sign(xy) \lfloor {|x| \over |y|} \rfloor.
This operation traps if the divisor is zero, or if the result is not representable in B bits two's complement. This only happens when x = -2^{B-1}, y = -1.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
Outputs:
- a: A scalar integer type
fn urem(self, x: Value, y: Value) -> Value
Unsigned integer remainder.
This operation traps if the divisor is zero.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
Outputs:
- a: A scalar integer type
fn srem(self, x: Value, y: Value) -> Value
Signed integer remainder. The result has the sign of the dividend.
This operation traps if the divisor is zero.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
Outputs:
- a: A scalar integer type
fn iadd_imm<T1>(self, x: Value, Y: T1) -> Value
Add immediate integer.
Same as iadd, but one operand is a sign extended 64 bit immediate constant.
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
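A hedged sketch of the immediate forms: the Y operand is anything convertible into Imm64, so plain Rust integer literals work. The helper name is hypothetical, and builder is assumed to be a cranelift-frontend FunctionBuilder positioned inside a block:

    use cranelift_codegen::ir::{InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    // Computes x * 10 - 3 using the immediate variants; both immediates are
    // sign extended 64 bit constants as described above.
    fn scale_and_bias(builder: &mut FunctionBuilder, x: Value) -> Value {
        let scaled = builder.ins().imul_imm(x, 10i64);
        builder.ins().iadd_imm(scaled, -3i64)
    }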
fn imul_imm<T1>(self, x: Value, Y: T1) -> Value
Integer multiplication by immediate constant.
Same as imul, but one operand is a sign extended 64 bit immediate constant.
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
fn udiv_imm<T1>(self, x: Value, Y: T1) -> Value
Unsigned integer division by an immediate constant.
Same as udiv, but one operand is a zero extended 64 bit immediate constant.
This operation traps if the divisor is zero.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
fn sdiv_imm<T1>(self, x: Value, Y: T1) -> Value
Signed integer division by an immediate constant.
Same as sdiv, but one operand is a sign extended 64 bit immediate constant.
This operation traps if the divisor is zero, or if the result is not representable in B bits two's complement. This only happens when x = -2^{B-1}, Y = -1.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
fn urem_imm<T1>(self, x: Value, Y: T1) -> Value
Unsigned integer remainder with immediate divisor.
Same as urem, but one operand is a zero extended 64 bit immediate constant.
This operation traps if the divisor is zero.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
fn srem_imm<T1>(self, x: Value, Y: T1) -> Value
Signed integer remainder with immediate divisor.
Same as srem, but one operand is a sign extended 64 bit immediate constant.
This operation traps if the divisor is zero.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
fn irsub_imm<T1>(self, x: Value, Y: T1) -> Value
Immediate reverse wrapping subtraction: a := Y - x \pmod{2^B}.
The immediate operand is a sign extended 64 bit constant.
Also works as integer negation when Y = 0. Use iadd_imm with a negative immediate operand for the reverse immediate subtraction.
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
fn sadd_overflow_cin(self, x: Value, y: Value, c_in: Value) -> (Value, Value)
Add signed integers with carry in and overflow out.
Same as sadd_overflow with an additional carry input. The c_in value is interpreted as 1 if it's nonzero or 0 if it's zero.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
- c_in: Input carry flag
Outputs:
- a: A scalar integer type
- c_out: Output carry flag
fn uadd_overflow_cin(self, x: Value, y: Value, c_in: Value) -> (Value, Value)
Add unsigned integers with carry in and overflow out.
Same as uadd_overflow with an additional carry input. The c_in value is interpreted as 1 if it's nonzero or 0 if it's zero.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
- c_in: Input carry flag
Outputs:
- a: A scalar integer type
- c_out: Output carry flag
fn uadd_overflow(self, x: Value, y: Value) -> (Value, Value)
Add integers unsigned with overflow out.
of is set when the addition overflowed.
a := x + y \pmod{2^B}
of := x + y \geq 2^B
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
Outputs:
- a: A scalar integer type
- of: Overflow flag
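The *_overflow and *_overflow_cin instructions return two SSA values. As an illustrative sketch (the helper name and the FunctionBuilder frontend are assumptions, not part of this API), a 128-bit addition can be assembled from two 64-bit halves by threading the carry through uadd_overflow_cin:

    use cranelift_codegen::ir::{InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    // Adds (hi_x:lo_x) + (hi_y:lo_y) and returns (lo, hi) of the 128-bit sum.
    fn add128(
        builder: &mut FunctionBuilder,
        lo_x: Value, hi_x: Value,
        lo_y: Value, hi_y: Value,
    ) -> (Value, Value) {
        // Low halves: wrapping add plus a carry-out flag.
        let (lo, carry) = builder.ins().uadd_overflow(lo_x, lo_y);
        // High halves: feed the carry back in; the final carry-out is ignored here.
        let (hi, _carry_out) = builder.ins().uadd_overflow_cin(hi_x, hi_y, carry);
        (lo, hi)
    }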
fn sadd_overflow(self, x: Value, y: Value) -> (Value, Value)
Add integers signed with overflow out.
of is set when the addition over- or underflowed.
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
Outputs:
- a: A scalar integer type
- of: Overflow flag
fn usub_overflow(self, x: Value, y: Value) -> (Value, Value)
Subtract integers unsigned with overflow out.
of is set when the subtraction underflowed.
a := x - y \pmod{2^B}
of := x - y < 0
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
Outputs:
- a: A scalar integer type
- of: Overflow flag
fn ssub_overflow(self, x: Value, y: Value) -> (Value, Value)
Subtract integers signed with overflow out.
of is set when the subtraction over- or underflowed.
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- y: A scalar integer type
Outputs:
- a: A scalar integer type
- of: Overflow flag
fn umul_overflow(self, x: Value, y: Value) -> (Value, Value)
Multiply integers unsigned with overflow out.
of is set when the multiplication overflowed.
a := x \cdot y \pmod{2^B}
of := x \cdot y \geq 2^B
Polymorphic over all scalar integer types except i128, but does not support vector types.
Inputs:
- x: A scalar integer type up to 64 bits
- y: A scalar integer type up to 64 bits
Outputs:
- a: A scalar integer type up to 64 bits
- of: Overflow flag
fn smul_overflow(self, x: Value, y: Value) -> (Value, Value)
Multiply integers signed with overflow out.
of is set when the multiplication over- or underflowed.
Polymorphic over all scalar integer types except i128, but does not support vector types.
Inputs:
- x: A scalar integer type up to 64 bits
- y: A scalar integer type up to 64 bits
Outputs:
- a: A scalar integer type up to 64 bits
- of: Overflow flag
fn uadd_overflow_trap<T1>(self, x: Value, y: Value, code: T1) -> Value
Unsigned addition of x and y, trapping if the result overflows.
Accepts 32 or 64-bit integers, and does not support vector types.
Inputs:
- x: A 32 or 64-bit scalar integer type
- y: A 32 or 64-bit scalar integer type
- code: A trap reason code.
Outputs:
- a: A 32 or 64-bit scalar integer type
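A small sketch of the trapping form; the TrapCode to report is left to the caller so no particular code is assumed, and the helper name and FunctionBuilder frontend are illustrative only:

    use cranelift_codegen::ir::{InstBuilder, TrapCode, Value};
    use cranelift_frontend::FunctionBuilder;

    // Emits an unsigned add of two 32- or 64-bit values that traps with `code`
    // instead of wrapping when the result overflows.
    fn checked_add(builder: &mut FunctionBuilder, x: Value, y: Value, code: TrapCode) -> Value {
        builder.ins().uadd_overflow_trap(x, y, code)
    }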
fn ssub_overflow_bin(self, x: Value, y: Value, b_in: Value) -> (Value, Value)
Subtract signed integers with borrow in and overflow out.
Same as ssub_overflow with an additional borrow input. The b_in value is interpreted as 1 if it's nonzero or 0 if it's zero. The computation performed here is x - (y + (b_in != 0)).
Inputs:
- x: A scalar integer type
- y: A scalar integer type
- b_in: Input borrow flag
Outputs:
- a: A scalar integer type
- b_out: Output borrow flag
fn usub_overflow_bin(self, x: Value, y: Value, b_in: Value) -> (Value, Value)
Subtract unsigned integers with borrow in and overflow out.
Same as usub_overflow with an additional borrow input. The b_in value is interpreted as 1 if it's nonzero or 0 if it's zero. The computation performed here is x - (y + (b_in != 0)).
Inputs:
- x: A scalar integer type
- y: A scalar integer type
- b_in: Input borrow flag
Outputs:
- a: A scalar integer type
- b_out: Output borrow flag
fn band(self, x: Value, y: Value) -> Value
Bitwise and.
Inputs:
- x: Any integer, float, or vector type
- y: Any integer, float, or vector type
Outputs:
- a: Any integer, float, or vector type
fn bor(self, x: Value, y: Value) -> Value
Bitwise or.
Inputs:
- x: Any integer, float, or vector type
- y: Any integer, float, or vector type
Outputs:
- a: Any integer, float, or vector type
fn bxor(self, x: Value, y: Value) -> Value
Bitwise xor.
Inputs:
- x: Any integer, float, or vector type
- y: Any integer, float, or vector type
Outputs:
- a: Any integer, float, or vector type
fn bnot(self, x: Value) -> Value
Bitwise not.
Inputs:
- x: Any integer, float, or vector type
Outputs:
- a: Any integer, float, or vector type
fn band_not(self, x: Value, y: Value) -> Value
Bitwise and not.
Computes x & ~y.
Inputs:
- x: Any integer, float, or vector type
- y: Any integer, float, or vector type
Outputs:
- a: Any integer, float, or vector type
fn bor_not(self, x: Value, y: Value) -> Value
Bitwise or not.
Computes x | ~y.
Inputs:
- x: Any integer, float, or vector type
- y: Any integer, float, or vector type
Outputs:
- a: Any integer, float, or vector type
fn bxor_not(self, x: Value, y: Value) -> Value
Bitwise xor not.
Computes x ^ ~y.
Inputs:
- x: Any integer, float, or vector type
- y: Any integer, float, or vector type
Outputs:
- a: Any integer, float, or vector type
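As an illustration of combining the bitwise instructions (a sketch only; the helper name and FunctionBuilder frontend are assumptions), a bit-level select (x & mask) | (y & !mask) can be built from band, band_not and bor:

    use cranelift_codegen::ir::{InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    // Picks bits from `x` where `mask` is 1 and from `y` where `mask` is 0.
    fn bit_select(builder: &mut FunctionBuilder, mask: Value, x: Value, y: Value) -> Value {
        let from_x = builder.ins().band(x, mask);
        let from_y = builder.ins().band_not(y, mask); // y & !mask
        builder.ins().bor(from_x, from_y)
    }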
fn band_imm<T1>(self, x: Value, Y: T1) -> Value
Bitwise and with immediate.
Same as band, but one operand is a zero extended 64 bit immediate constant.
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
fn bor_imm<T1>(self, x: Value, Y: T1) -> Value
Bitwise or with immediate.
Same as bor, but one operand is a zero extended 64 bit immediate constant.
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
fn bxor_imm<T1>(self, x: Value, Y: T1) -> Value
Bitwise xor with immediate.
Same as bxor, but one operand is a zero extended 64 bit immediate constant.
Polymorphic over all scalar integer types, but does not support vector types.
Inputs:
- x: A scalar integer type
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar integer type
fn rotl(self, x: Value, y: Value) -> Value
Rotate left.
Rotate the bits in x by y places.
Inputs:
- x: Scalar or vector value to shift
- y: Number of bits to shift
Outputs:
- a: A scalar or vector integer type
fn rotr(self, x: Value, y: Value) -> Value
Rotate right.
Rotate the bits in x by y places.
Inputs:
- x: Scalar or vector value to shift
- y: Number of bits to shift
Outputs:
- a: A scalar or vector integer type
fn rotl_imm<T1>(self, x: Value, Y: T1) -> Value
Rotate left by immediate.
Same as rotl, but one operand is a zero extended 64 bit immediate constant.
Inputs:
- x: Scalar or vector value to shift
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar or vector integer type
fn rotr_imm<T1>(self, x: Value, Y: T1) -> Value
Rotate right by immediate.
Same as rotr, but one operand is a zero extended 64 bit immediate constant.
Inputs:
- x: Scalar or vector value to shift
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar or vector integer type
fn ishl(self, x: Value, y: Value) -> Value
Integer shift left. Shift the bits in x towards the MSB by y places. Shift in zero bits to the LSB.
The shift amount is masked to the size of x.
When shifting a B-bit integer type, this instruction computes:
s := y \pmod{B}
a := x \cdot 2^s \pmod{2^B}
Inputs:
- x: Scalar or vector value to shift
- y: Number of bits to shift
Outputs:
- a: A scalar or vector integer type
fn ushr(self, x: Value, y: Value) -> Value
Unsigned shift right. Shift bits in x towards the LSB by y places, shifting in zero bits to the MSB. Also called a logical shift.
The shift amount is masked to the size of x.
When shifting a B-bit integer type, this instruction computes:
s := y \pmod{B}
a := \lfloor x \cdot 2^{-s} \rfloor
Inputs:
- x: Scalar or vector value to shift
- y: Number of bits to shift
Outputs:
- a: A scalar or vector integer type
fn sshr(self, x: Value, y: Value) -> Value
Signed shift right. Shift bits in x towards the LSB by y places, shifting in sign bits to the MSB. Also called an arithmetic shift.
The shift amount is masked to the size of x.
Inputs:
- x: Scalar or vector value to shift
- y: Number of bits to shift
Outputs:
- a: A scalar or vector integer type
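A sketch of the masking behaviour described above (the helper name and FunctionBuilder frontend are assumptions; iconst, defined elsewhere in this trait, supplies the constant amount): for an i32 operand the shift amount is reduced modulo 32, so a constant amount of 40 behaves like a shift by 8.

    use cranelift_codegen::ir::{types, InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    fn shift_examples(builder: &mut FunctionBuilder, x: Value) -> (Value, Value, Value) {
        // 40 is masked to the size of `x` (i32), i.e. 40 % 32 = 8.
        let amt = builder.ins().iconst(types::I32, 40i64);
        let left = builder.ins().ishl(x, amt);    // shift in zeros at the LSB
        let logical = builder.ins().ushr(x, amt); // shift in zeros at the MSB
        let arith = builder.ins().sshr(x, amt);   // shift in copies of the sign bit
        (left, logical, arith)
    }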
fn ishl_imm<T1>(self, x: Value, Y: T1) -> Value
Integer shift left by immediate.
The shift amount is masked to the size of x.
Inputs:
- x: Scalar or vector value to shift
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar or vector integer type
fn ushr_imm<T1>(self, x: Value, Y: T1) -> Value
Unsigned shift right by immediate.
The shift amount is masked to the size of x.
Inputs:
- x: Scalar or vector value to shift
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar or vector integer type
fn sshr_imm<T1>(self, x: Value, Y: T1) -> Value
Signed shift right by immediate.
The shift amount is masked to the size of x.
Inputs:
- x: Scalar or vector value to shift
- Y: A 64-bit immediate integer.
Outputs:
- a: A scalar or vector integer type
fn bitrev(self, x: Value) -> Value
Reverse the bits of an integer.
Reverses the bits in x.
Inputs:
- x: A scalar integer type
Outputs:
- a: A scalar integer type
fn clz(self, x: Value) -> Value
Count leading zero bits.
Starting from the MSB in x, count the number of zero bits before reaching the first one bit. When x is zero, returns the size of x in bits.
Inputs:
- x: A scalar integer type
Outputs:
- a: A scalar integer type
fn cls(self, x: Value) -> Value
Count leading sign bits.
Starting from the MSB after the sign bit in x, count the number of consecutive bits identical to the sign bit. When x is 0 or -1, returns one less than the size of x in bits.
Inputs:
- x: A scalar integer type
Outputs:
- a: A scalar integer type
fn ctz(self, x: Value) -> Value
Count trailing zeros.
Starting from the LSB in x, count the number of zero bits before reaching the first one bit. When x is zero, returns the size of x in bits.
Inputs:
- x: A scalar integer type
Outputs:
- a: A scalar integer type
fn bswap(self, x: Value) -> Value
Reverse the byte order of an integer.
Reverses the bytes in x.
Inputs:
- x: A multi byte scalar integer type
Outputs:
- a: A multi byte scalar integer type
fn popcnt(self, x: Value) -> Value
Population count.
Count the number of one bits in x.
Inputs:
- x: A scalar or vector integer type
Outputs:
- a: A scalar or vector integer type
fn fcmp<T1>(self, Cond: T1, x: Value, y: Value) -> Value
Floating point comparison.
Two IEEE 754-2008 floating point numbers, x and y, relate to each other in exactly one of four ways:
== ==========================================
UN Unordered when one or both numbers is NaN.
EQ When `x = y`. (And `0.0 = -0.0`).
LT When `x < y`.
GT When `x > y`.
== ==========================================
The 14 floatcc condition codes each correspond to a subset of the four relations, except for the empty set which would always be false, and the full set which would always be true.
The condition codes are divided into 7 ‘ordered’ conditions which don’t include UN, and 7 unordered conditions which all include UN.
======= ============ ========= ================ =======================
Ordered              Unordered                  Condition
======= ============ ========= ================ =======================
ord     EQ | LT | GT uno       UN               NaNs absent / present.
eq      EQ           ueq       UN | EQ          Equal
one     LT | GT      ne        UN | LT | GT     Not equal
lt      LT           ult       UN | LT          Less than
le      LT | EQ      ule       UN | LT | EQ     Less than or equal
gt      GT           ugt       UN | GT          Greater than
ge      GT | EQ      uge       UN | GT | EQ     Greater than or equal
======= ============ ========= ================ =======================
The standard C comparison operators, <, <=, >, >=, are all ordered, so they are false if either operand is NaN. The C equality operator, ==, is ordered, and since inequality is defined as the logical inverse it is unordered. They map to the floatcc condition codes as follows:
==== ====== ============
C `Cond` Subset
==== ====== ============
`==` eq EQ
`!=` ne UN | LT | GT
`<` lt LT
`<=` le LT | EQ
`>` gt GT
`>=` ge GT | EQ
==== ====== ============
This subset of condition codes also corresponds to the WebAssembly floating point comparisons of the same name.
When this instruction compares floating point vectors, it returns a vector with the results of lane-wise comparisons.
When comparing scalars, the result is:
- 1 if the condition holds.
- 0 if the condition does not hold.
When comparing vectors, the result is:
- -1 (i.e. all ones) in each lane where the condition holds.
- 0 in each lane where the condition does not hold.
Inputs:
- Cond: A floating point comparison condition code
- x: A scalar or vector floating point number
- y: A scalar or vector floating point number
Outputs:
- a:
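A brief sketch of a scalar comparison (the helper name and FunctionBuilder frontend are assumptions); the same call on vector operands yields a lane mask of all ones / all zeros instead of 1 / 0:

    use cranelift_codegen::ir::condcodes::FloatCC;
    use cranelift_codegen::ir::{InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    // Ordered "less than": produces 0 if either operand is NaN.
    fn is_less_than(builder: &mut FunctionBuilder, x: Value, y: Value) -> Value {
        builder.ins().fcmp(FloatCC::LessThan, x, y)
    }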
fn fadd(self, x: Value, y: Value) -> Value
Floating point addition.
Inputs:
- x: A scalar or vector floating point number
- y: A scalar or vector floating point number
Outputs:
- a: Result of applying operator to each lane
fn fsub(self, x: Value, y: Value) -> Value
Floating point subtraction.
Inputs:
- x: A scalar or vector floating point number
- y: A scalar or vector floating point number
Outputs:
- a: Result of applying operator to each lane
fn fmul(self, x: Value, y: Value) -> Value
Floating point multiplication.
Inputs:
- x: A scalar or vector floating point number
- y: A scalar or vector floating point number
Outputs:
- a: Result of applying operator to each lane
fn fdiv(self, x: Value, y: Value) -> Value
Floating point division.
Unlike the integer division instructions sdiv and udiv, this can't trap. Division by zero is infinity or NaN, depending on the dividend.
Inputs:
- x: A scalar or vector floating point number
- y: A scalar or vector floating point number
Outputs:
- a: Result of applying operator to each lane
fn sqrt(self, x: Value) -> Value
Floating point square root.
Inputs:
- x: A scalar or vector floating point number
Outputs:
- a: Result of applying operator to each lane
fn fma(self, x: Value, y: Value, z: Value) -> Value
Floating point fused multiply-and-add.
Computes a := x \cdot y + z without any intermediate rounding of the product.
Inputs:
- x: A scalar or vector floating point number
- y: A scalar or vector floating point number
- z: A scalar or vector floating point number
Outputs:
- a: Result of applying operator to each lane
fn fneg(self, x: Value) -> Value
Floating point negation.
Note that this is a pure bitwise operation.
Inputs:
- x: A scalar or vector floating point number
Outputs:
- a: x with its sign bit inverted
fn fabs(self, x: Value) -> Value
Floating point absolute value.
Note that this is a pure bitwise operation.
Inputs:
- x: A scalar or vector floating point number
Outputs:
- a: x with its sign bit cleared
fn fcopysign(self, x: Value, y: Value) -> Value
Floating point copy sign.
Note that this is a pure bitwise operation. The sign bit from y is copied to the sign bit of x.
Inputs:
- x: A scalar or vector floating point number
- y: A scalar or vector floating point number
Outputs:
- a: x with its sign bit changed to that of y
fn fmin(self, x: Value, y: Value) -> Value
Floating point minimum, propagating NaNs using the WebAssembly rules.
If either operand is NaN, this returns NaN with an unspecified sign. Furthermore, if each input NaN consists of a mantissa whose most significant bit is 1 and the rest is 0, then the output has the same form. Otherwise, the output mantissa’s most significant bit is 1 and the rest is unspecified.
Inputs:
- x: A scalar or vector floating point number
- y: A scalar or vector floating point number
Outputs:
- a: The smaller of x and y
fn fmax(self, x: Value, y: Value) -> Value
Floating point maximum, propagating NaNs using the WebAssembly rules.
If either operand is NaN, this returns NaN with an unspecified sign. Furthermore, if each input NaN consists of a mantissa whose most significant bit is 1 and the rest is 0, then the output has the same form. Otherwise, the output mantissa’s most significant bit is 1 and the rest is unspecified.
Inputs:
- x: A scalar or vector floating point number
- y: A scalar or vector floating point number
Outputs:
- a: The larger of x and y
fn ceil(self, x: Value) -> Value
Round floating point to an integral value, towards positive infinity.
Inputs:
- x: A scalar or vector floating point number
Outputs:
- a: x rounded to integral value
fn floor(self, x: Value) -> Value
Round floating point to an integral value, towards negative infinity.
Inputs:
- x: A scalar or vector floating point number
Outputs:
- a: x rounded to integral value
fn trunc(self, x: Value) -> Value
Round floating point to an integral value, towards zero.
Inputs:
- x: A scalar or vector floating point number
Outputs:
- a: x rounded to integral value
fn nearest(self, x: Value) -> Value
Round floating point to an integral value, towards nearest with ties to even.
Inputs:
- x: A scalar or vector floating point number
Outputs:
- a: x rounded to integral value
fn bitcast<T1>(self, MemTo: Type, MemFlags: T1, x: Value) -> Value
Reinterpret the bits in x as a different type.
The input and output types must be storable to memory and of the same size. A bitcast is equivalent to storing one type and loading the other type from the same address, both using the specified MemFlags.
Note that this operation only supports the big or little MemFlags. The specified byte order only affects the result in the case where input and output types differ in lane count/size. In this case, the operation is only valid if a byte order specifier is provided.
Inputs:
- MemTo (controlling type variable):
- MemFlags: Memory operation flags
- x: Any type that can be stored in memory
Outputs:
- a: Bits of x reinterpreted
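A sketch of the common same-size case (helper name and FunctionBuilder frontend are assumptions): reinterpreting an f32 as an i32. Because the input and output have the same lane count and size, no byte-order flag needs to be set and the default MemFlags suffice:

    use cranelift_codegen::ir::{types, InstBuilder, MemFlags, Value};
    use cranelift_frontend::FunctionBuilder;

    // Returns the raw bit pattern of an f32 value as an i32.
    fn f32_bits(builder: &mut FunctionBuilder, x: Value) -> Value {
        builder.ins().bitcast(types::I32, MemFlags::new(), x)
    }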
fn scalar_to_vector(self, TxN: Type, s: Value) -> Value
Copies a scalar value to a vector value. The scalar is copied into the least significant lane of the vector, and all other lanes will be zero.
Inputs:
- TxN (controlling type variable): A SIMD vector type
- s: A scalar value
Outputs:
- a: A vector value
fn bmask(self, IntTo: Type, x: Value) -> Value
Convert x to an integer mask.
Non-zero maps to all 1s and zero maps to all 0s.
Inputs:
- IntTo (controlling type variable): An integer type
- x: A scalar whose values are truthy
Outputs:
- a: An integer type
fn ireduce(self, Int: Type, x: Value) -> Value
Convert x to a smaller integer type by discarding the most significant bits.
This is the same as reducing modulo 2^n.
Inputs:
- Int (controlling type variable): A scalar integer type
- x: A scalar integer type, wider than the controlling type
Outputs:
- a: A scalar integer type
fn snarrow(self, x: Value, y: Value) -> Value
Combine x and y into a vector with twice the lanes but half the integer width while saturating overflowing values to the signed maximum and minimum.
The lanes will be concatenated after narrowing. For example, when x and y are i32x4 and x = [x3, x2, x1, x0] and y = [y3, y2, y1, y0], then after narrowing the value returned is an i16x8: a = [y3', y2', y1', y0', x3', x2', x1', x0'].
Inputs:
- x: A SIMD vector type containing integer lanes 16, 32, or 64 bits wide
- y: A SIMD vector type containing integer lanes 16, 32, or 64 bits wide
Outputs:
- a:
fn unarrow(self, x: Value, y: Value) -> Value
Combine x and y into a vector with twice the lanes but half the integer width while saturating overflowing values to the unsigned maximum and minimum.
Note that all input lanes are considered signed: any negative lanes will overflow and be replaced with the unsigned minimum, 0x00.
The lanes will be concatenated after narrowing. For example, when x and y are i32x4 and x = [x3, x2, x1, x0] and y = [y3, y2, y1, y0], then after narrowing the value returned is an i16x8: a = [y3', y2', y1', y0', x3', x2', x1', x0'].
Inputs:
- x: A SIMD vector type containing integer lanes 16, 32, or 64 bits wide
- y: A SIMD vector type containing integer lanes 16, 32, or 64 bits wide
Outputs:
- a:
fn uunarrow(self, x: Value, y: Value) -> Value
Combine x and y into a vector with twice the lanes but half the integer width while saturating overflowing values to the unsigned maximum and minimum.
Note that all input lanes are considered unsigned: any negative values will be interpreted as unsigned, overflowing and being replaced with the unsigned maximum.
The lanes will be concatenated after narrowing. For example, when x and y are i32x4 and x = [x3, x2, x1, x0] and y = [y3, y2, y1, y0], then after narrowing the value returned is an i16x8: a = [y3', y2', y1', y0', x3', x2', x1', x0'].
Inputs:
- x: A SIMD vector type containing integer lanes 16, 32, or 64 bits wide
- y: A SIMD vector type containing integer lanes 16, 32, or 64 bits wide
Outputs:
- a:
fn swiden_low(self, x: Value) -> Value
Widen the low lanes of x using signed extension.
This will double the lane width and halve the number of lanes.
Inputs:
- x: A SIMD vector type containing integer lanes 8, 16, or 32 bits wide.
Outputs:
- a:
fn swiden_high(self, x: Value) -> Value
Widen the high lanes of x using signed extension.
This will double the lane width and halve the number of lanes.
Inputs:
- x: A SIMD vector type containing integer lanes 8, 16, or 32 bits wide.
Outputs:
- a:
fn uwiden_low(self, x: Value) -> Value
Widen the low lanes of x using unsigned extension.
This will double the lane width and halve the number of lanes.
Inputs:
- x: A SIMD vector type containing integer lanes 8, 16, or 32 bits wide.
Outputs:
- a:
fn uwiden_high(self, x: Value) -> Value
Widen the high lanes of x using unsigned extension.
This will double the lane width and halve the number of lanes.
Inputs:
- x: A SIMD vector type containing integer lanes 8, 16, or 32 bits wide.
Outputs:
- a:
fn iadd_pairwise(self, x: Value, y: Value) -> Value
Does lane-wise integer pairwise addition on two operands, putting the combined results into a single vector result. Here a pair refers to adjacent lanes in a vector, i.e. i*2 + (i*2+1) for i == num_lanes/2. The first operand pairwise add results will make up the low half of the resulting vector while the second operand pairwise add results will make up the upper half of the resulting vector.
Inputs:
- x: A SIMD vector type containing integer lanes 8, 16, or 32 bits wide.
- y: A SIMD vector type containing integer lanes 8, 16, or 32 bits wide.
Outputs:
- a: A SIMD vector type containing integer lanes 8, 16, or 32 bits wide.
fn x86_pmaddubsw(self, x: Value, y: Value) -> Value
An instruction with equivalent semantics to pmaddubsw on x86.
This instruction will take signed bytes from the first argument and multiply them against unsigned bytes in the second argument. Adjacent pairs are then added, with saturation, to a 16-bit value and are packed into the result.
Inputs:
- x: A SIMD vector type consisting of 16 lanes of 8-bit integers
- y: A SIMD vector type consisting of 16 lanes of 8-bit integers
Outputs:
- a: A SIMD vector with exactly 8 lanes of 16-bit values
fn uextend(self, Int: Type, x: Value) -> Value
Convert x to a larger integer type by zero-extending.
Each lane in x is converted to a larger integer type by adding zeroes. The result has the same numerical value as x when both are interpreted as unsigned integers.
The result type must have the same number of vector lanes as the input, and each lane must not have fewer bits than the input lanes. If the input and output types are the same, this is a no-op.
Inputs:
- Int (controlling type variable): A scalar integer type
- x: A scalar integer type, narrower than the controlling type
Outputs:
- a: A scalar integer type
fn sextend(self, Int: Type, x: Value) -> Value
Convert x to a larger integer type by sign-extending.
Each lane in x is converted to a larger integer type by replicating the sign bit. The result has the same numerical value as x when both are interpreted as signed integers.
The result type must have the same number of vector lanes as the input, and each lane must not have fewer bits than the input lanes. If the input and output types are the same, this is a no-op.
Inputs:
- Int (controlling type variable): A scalar integer type
- x: A scalar integer type, narrower than the controlling type
Outputs:
- a: A scalar integer type
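A sketch round-tripping widths with ireduce, uextend and sextend (the helper name and FunctionBuilder frontend are assumptions):

    use cranelift_codegen::ir::{types, InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    // Keeps only the low 8 bits of an i64, then widens back to i64 twice:
    // once zero-filled and once with the sign bit replicated.
    fn low_byte_both_ways(builder: &mut FunctionBuilder, x64: Value) -> (Value, Value) {
        let low = builder.ins().ireduce(types::I8, x64);
        let zero_ext = builder.ins().uextend(types::I64, low);
        let sign_ext = builder.ins().sextend(types::I64, low);
        (zero_ext, sign_ext)
    }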
fn fpromote(self, FloatScalar: Type, x: Value) -> Value
Convert x to a larger floating point format.
Each lane in x is converted to the destination floating point format. This is an exact operation.
Cranelift currently only supports two floating point formats, f32 and f64. This may change in the future.
The result type must have the same number of vector lanes as the input, and the result lanes must not have fewer bits than the input lanes.
Inputs:
- FloatScalar (controlling type variable): A scalar only floating point number
- x: A scalar only floating point number, narrower than the controlling type
Outputs:
- a: A scalar only floating point number
fn fdemote(self, FloatScalar: Type, x: Value) -> Value
Convert x to a smaller floating point format.
Each lane in x is converted to the destination floating point format by rounding to nearest, ties to even.
Cranelift currently only supports two floating point formats, f32 and f64. This may change in the future.
The result type must have the same number of vector lanes as the input, and the result lanes must not have more bits than the input lanes.
Inputs:
- FloatScalar (controlling type variable): A scalar only floating point number
- x: A scalar only floating point number, wider than the controlling type
Outputs:
- a: A scalar only floating point number
fn fvdemote(self, x: Value) -> Value
Convert x to a smaller floating point format.
Each lane in x is converted to the destination floating point format by rounding to nearest, ties to even.
Cranelift currently only supports two floating point formats, f32 and f64. This may change in the future.
Fvdemote differs from fdemote in that it targets vectors. Fvdemote is constrained to having the input type being F64x2 and the result type being F32x4. The result lane that was the upper half of the input lane is initialized to zero.
Inputs:
- x: A SIMD vector type consisting of 2 lanes of 64-bit floats
Outputs:
- a: A SIMD vector type consisting of 4 lanes of 32-bit floats
fn fvpromote_low(self, a: Value) -> Value
Converts packed single precision floating point to packed double precision floating point.
Considering only the lower half of the register, the low lanes in x are interpreted as single precision floats that are then converted to double precision floats.
The result type will have half the number of vector lanes as the input. Fvpromote_low is constrained to input F32x4 with a result type of F64x2.
Inputs:
- a: A SIMD vector type consisting of 4 lanes of 32-bit floats
Outputs:
- x: A SIMD vector type consisting of 2 lanes of 64-bit floats
fn fcvt_to_uint(self, IntTo: Type, x: Value) -> Value
Converts floating point scalars to unsigned integer.
Only operates on x if it is a scalar. If x is NaN or if the unsigned integral value cannot be represented in the result type, this instruction traps.
Inputs:
- IntTo (controlling type variable): A scalar only integer type
- x: A scalar only floating point number
Outputs:
- a: A scalar only integer type
fn fcvt_to_sint(self, IntTo: Type, x: Value) -> Value
Converts floating point scalars to signed integer.
Only operates on x if it is a scalar. If x is NaN or if the signed integral value cannot be represented in the result type, this instruction traps.
Inputs:
- IntTo (controlling type variable): A scalar only integer type
- x: A scalar only floating point number
Outputs:
- a: A scalar only integer type
fn fcvt_to_uint_sat(self, IntTo: Type, x: Value) -> Value
Convert floating point to unsigned integer as fcvt_to_uint does, but saturates the input instead of trapping. NaN and negative values are converted to 0.
Inputs:
- IntTo (controlling type variable): A larger integer type with the same number of lanes
- x: A scalar or vector floating point number
Outputs:
- a: A larger integer type with the same number of lanes
fn fcvt_to_sint_sat(self, IntTo: Type, x: Value) -> Value
Convert floating point to signed integer as fcvt_to_sint does, but saturates the input instead of trapping. NaN values are converted to 0.
Inputs:
- IntTo (controlling type variable): A larger integer type with the same number of lanes
- x: A scalar or vector floating point number
Outputs:
- a: A larger integer type with the same number of lanes
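A sketch contrasting the saturating conversion with the trapping one above (the helper name and FunctionBuilder frontend are assumptions): the _sat form never traps, mapping NaN to 0 and clamping out-of-range inputs to the destination type's limits.

    use cranelift_codegen::ir::{types, InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    fn f64_to_i32_saturating(builder: &mut FunctionBuilder, x: Value) -> Value {
        // Never traps: NaN becomes 0, out-of-range values clamp to the i32 limits.
        builder.ins().fcvt_to_sint_sat(types::I32, x)
    }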
fn x86_cvtt2dq(self, IntTo: Type, x: Value) -> Value
A float-to-integer conversion instruction for vectors-of-floats which has the same semantics as cvttp{s,d}2dq on x86. This specifically returns INT_MIN for NaN or out-of-bounds lanes.
Inputs:
- IntTo (controlling type variable): A larger integer type with the same number of lanes
- x: A scalar or vector floating point number
Outputs:
- a: A larger integer type with the same number of lanes
fn fcvt_from_uint(self, FloatTo: Type, x: Value) -> Value
Convert unsigned integer to floating point.
Each lane in x is interpreted as an unsigned integer and converted to floating point using round to nearest, ties to even.
The result type must have the same number of vector lanes as the input.
Inputs:
- FloatTo (controlling type variable): A scalar or vector floating point number
- x: A scalar or vector integer type
Outputs:
- a: A scalar or vector floating point number
fn fcvt_from_sint(self, FloatTo: Type, x: Value) -> Value
Convert signed integer to floating point.
Each lane in x is interpreted as a signed integer and converted to floating point using round to nearest, ties to even.
The result type must have the same number of vector lanes as the input.
Inputs:
- FloatTo (controlling type variable): A scalar or vector floating point number
- x: A scalar or vector integer type
Outputs:
- a: A scalar or vector floating point number
fn isplit(self, x: Value) -> (Value, Value)
Split an integer into low and high parts.
Vectors of integers are split lane-wise, so the results have the same number of lanes as the input, but the lanes are half the size.
Returns the low half of x and the high half of x as two independent values.
Inputs:
- x: An integer type of width i16 upwards
Outputs:
- lo: The low bits of x
- hi: The high bits of x
fn iconcat(self, lo: Value, hi: Value) -> Value
Concatenate low and high bits to form a larger integer type.
Vectors of integers are concatenated lane-wise such that the result has the same number of lanes as the inputs, but the lanes are twice the size.
Inputs:
- lo: An integer type of width up to i64
- hi: An integer type of width up to i64
Outputs:
- a: The concatenation of lo and hi
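As a short sketch of the pair (the helper name and FunctionBuilder frontend are assumptions), isplit and iconcat can swap the 64-bit halves of an i128 value:

    use cranelift_codegen::ir::{InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    fn swap_halves(builder: &mut FunctionBuilder, x128: Value) -> Value {
        let (lo, hi) = builder.ins().isplit(x128);
        // Reassemble with the halves exchanged.
        builder.ins().iconcat(hi, lo)
    }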
fn atomic_rmw<T1, T2>(
self,
AtomicMem: Type,
MemFlags: T1,
AtomicRmwOp: T2,
p: Value,
x: Value,
) -> Value
Atomically read-modify-write memory at p, with second operand x. The old value is returned. p has the type of the target word size, and x may be any integer type; note that some targets require specific target features to be enabled in order to support 128-bit integer atomics. The type of the returned value is the same as the type of x. This operation is sequentially consistent and creates happens-before edges that order normal (non-atomic) loads and stores.
Inputs:
- AtomicMem (controlling type variable): Any type that can be stored in memory, which can be used in an atomic operation
- MemFlags: Memory operation flags
- AtomicRmwOp: Atomic Read-Modify-Write Ops
- p: An integer address type
- x: Value to be atomically stored
Outputs:
- a: Value atomically loaded
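A sketch of a sequentially consistent fetch-and-add (the helper name and FunctionBuilder frontend are assumptions; MemFlags::trusted() is used only to mark the access as aligned and non-trapping):

    use cranelift_codegen::ir::{types, AtomicRmwOp, InstBuilder, MemFlags, Value};
    use cranelift_frontend::FunctionBuilder;

    // Atomically adds `x` to the i64 counter at address `p` and returns the
    // counter's previous value.
    fn fetch_add(builder: &mut FunctionBuilder, p: Value, x: Value) -> Value {
        builder.ins().atomic_rmw(types::I64, MemFlags::trusted(), AtomicRmwOp::Add, p, x)
    }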
fn atomic_cas<T1>(self, MemFlags: T1, p: Value, e: Value, x: Value) -> Value
Perform an atomic compare-and-swap operation on memory at p, with expected value e, storing x if the value at p equals e. The old value at p is returned, regardless of whether the operation succeeds or fails. p has the type of the target word size, and x and e must have the same type and the same size, which may be any integer type; note that some targets require specific target features to be enabled in order to support 128-bit integer atomics. The type of the returned value is the same as the type of x and e. This operation is sequentially consistent and creates happens-before edges that order normal (non-atomic) loads and stores.
Inputs:
- MemFlags: Memory operation flags
- p: An integer address type
- e: Expected value in CAS
- x: Value to be atomically stored
Outputs:
- a: Value atomically loaded
fn atomic_load<T1>(self, AtomicMem: Type, MemFlags: T1, p: Value) -> Value
Atomically load from memory at p.
This is a polymorphic instruction that can load any value type which has a memory representation. It can only be used for integer types; note that some targets require specific target features to be enabled in order to support 128-bit integer atomics. This operation is sequentially consistent and creates happens-before edges that order normal (non-atomic) loads and stores.
Inputs:
- AtomicMem (controlling type variable): Any type that can be stored in memory, which can be used in an atomic operation
- MemFlags: Memory operation flags
- p: An integer address type
Outputs:
- a: Value atomically loaded
fn atomic_store<T1>(self, MemFlags: T1, x: Value, p: Value) -> Inst
Atomically store x to memory at p.
This is a polymorphic instruction that can store any value type with a memory representation. It can only be used for integer types; note that some targets require specific target features to be enabled in order to support 128-bit integer atomics. This operation is sequentially consistent and creates happens-before edges that order normal (non-atomic) loads and stores.
Inputs:
- MemFlags: Memory operation flags
- x: Value to be atomically stored
- p: An integer address type
fn fence(self) -> Inst
A memory fence. This must provide ordering to ensure that, at a minimum, neither loads nor stores of any kind may move forwards or backwards across the fence. This operation is sequentially consistent.
fn extract_vector<T1>(self, x: Value, y: T1) -> Value
Return a fixed length sub vector, extracted from a dynamic vector.
Inputs:
- x: The dynamic vector to extract from
- y: 128-bit vector index
Outputs:
- a: New fixed vector
fn AtomicCas(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
arg0: Value,
arg1: Value,
arg2: Value,
) -> (Inst, &'f mut DataFlowGraph)
AtomicCas(imms=(flags: ir::MemFlags), vals=3, blocks=0)
fn AtomicRmw(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
op: AtomicRmwOp,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph)
AtomicRmw(imms=(flags: ir::MemFlags, op: ir::AtomicRmwOp), vals=2, blocks=0)
fn Binary(
self,
opcode: Opcode,
ctrl_typevar: Type,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph)
Binary(imms=(), vals=2, blocks=0)
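The capitalized methods in this group (AtomicCas, AtomicRmw, Binary, and so on) are the raw per-format constructors that the convenience methods above are generated from. As a hedged sketch (assuming x and y are i32 values and a cranelift-frontend FunctionBuilder; the helper name is hypothetical), an iadd could be emitted through the Binary format directly, reading the result back out of the returned DataFlowGraph:

    use cranelift_codegen::ir::{types, InstBuilder, Opcode, Value};
    use cranelift_frontend::FunctionBuilder;

    fn raw_iadd(builder: &mut FunctionBuilder, x: Value, y: Value) -> Value {
        // The controlling type variable is passed explicitly here (i32 assumed);
        // the `iadd` convenience method infers it from its operands instead.
        let (inst, dfg) = builder.ins().Binary(Opcode::Iadd, types::I32, x, y);
        dfg.first_result(inst)
    }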
fn BinaryImm64(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Imm64,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
BinaryImm64(imms=(imm: ir::immediates::Imm64), vals=1, blocks=0)
fn BinaryImm8(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: u8,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
BinaryImm8(imms=(imm: ir::immediates::Uimm8), vals=1, blocks=0)
fn BranchTable(
self,
opcode: Opcode,
ctrl_typevar: Type,
table: JumpTable,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
BranchTable(imms=(table: ir::JumpTable), vals=1, blocks=0)
fn Brif(
self,
opcode: Opcode,
ctrl_typevar: Type,
block0: BlockCall,
block1: BlockCall,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
Brif(imms=(), vals=1, blocks=2)
fn Call(
self,
opcode: Opcode,
ctrl_typevar: Type,
func_ref: FuncRef,
args: EntityList<Value>,
) -> (Inst, &'f mut DataFlowGraph)
Call(imms=(func_ref: ir::FuncRef), vals=0, blocks=0)
fn CallIndirect(
self,
opcode: Opcode,
ctrl_typevar: Type,
sig_ref: SigRef,
args: EntityList<Value>,
) -> (Inst, &'f mut DataFlowGraph)
CallIndirect(imms=(sig_ref: ir::SigRef), vals=1, blocks=0)
fn CondTrap(
self,
opcode: Opcode,
ctrl_typevar: Type,
code: TrapCode,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
CondTrap(imms=(code: ir::TrapCode), vals=1, blocks=0)
fn DynamicStackLoad(
self,
opcode: Opcode,
ctrl_typevar: Type,
dynamic_stack_slot: DynamicStackSlot,
) -> (Inst, &'f mut DataFlowGraph)
DynamicStackLoad(imms=(dynamic_stack_slot: ir::DynamicStackSlot), vals=0, blocks=0)
fn DynamicStackStore(
self,
opcode: Opcode,
ctrl_typevar: Type,
dynamic_stack_slot: DynamicStackSlot,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
DynamicStackStore(imms=(dynamic_stack_slot: ir::DynamicStackSlot), vals=1, blocks=0)
fn FloatCompare(
self,
opcode: Opcode,
ctrl_typevar: Type,
cond: FloatCC,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph)
FloatCompare(imms=(cond: ir::condcodes::FloatCC), vals=2, blocks=0)
fn FuncAddr(
self,
opcode: Opcode,
ctrl_typevar: Type,
func_ref: FuncRef,
) -> (Inst, &'f mut DataFlowGraph)
FuncAddr(imms=(func_ref: ir::FuncRef), vals=0, blocks=0)
fn IntAddTrap(
self,
opcode: Opcode,
ctrl_typevar: Type,
code: TrapCode,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph)
IntAddTrap(imms=(code: ir::TrapCode), vals=2, blocks=0)
fn IntCompare(
self,
opcode: Opcode,
ctrl_typevar: Type,
cond: IntCC,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph)
IntCompare(imms=(cond: ir::condcodes::IntCC), vals=2, blocks=0)
fn IntCompareImm(
self,
opcode: Opcode,
ctrl_typevar: Type,
cond: IntCC,
imm: Imm64,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
IntCompareImm(imms=(cond: ir::condcodes::IntCC, imm: ir::immediates::Imm64), vals=1, blocks=0)
fn Jump(
self,
opcode: Opcode,
ctrl_typevar: Type,
block0: BlockCall,
) -> (Inst, &'f mut DataFlowGraph)
Jump(imms=(), vals=0, blocks=1)
fn Load(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
offset: Offset32,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
Load(imms=(flags: ir::MemFlags, offset: ir::immediates::Offset32), vals=1, blocks=0)
fn LoadNoOffset(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
LoadNoOffset(imms=(flags: ir::MemFlags), vals=1, blocks=0)
fn MultiAry(
self,
opcode: Opcode,
ctrl_typevar: Type,
args: EntityList<Value>,
) -> (Inst, &'f mut DataFlowGraph)
MultiAry(imms=(), vals=0, blocks=0)
fn NullAry(
self,
opcode: Opcode,
ctrl_typevar: Type,
) -> (Inst, &'f mut DataFlowGraph)
NullAry(imms=(), vals=0, blocks=0)
fn Shuffle(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Immediate,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph)
Shuffle(imms=(imm: ir::Immediate), vals=2, blocks=0)
fn StackLoad(
self,
opcode: Opcode,
ctrl_typevar: Type,
stack_slot: StackSlot,
offset: Offset32,
) -> (Inst, &'f mut DataFlowGraph)
StackLoad(imms=(stack_slot: ir::StackSlot, offset: ir::immediates::Offset32), vals=0, blocks=0)
fn StackStore(
self,
opcode: Opcode,
ctrl_typevar: Type,
stack_slot: StackSlot,
offset: Offset32,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
StackStore(imms=(stack_slot: ir::StackSlot, offset: ir::immediates::Offset32), vals=1, blocks=0)
fn Store(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
offset: Offset32,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph)
Store(imms=(flags: ir::MemFlags, offset: ir::immediates::Offset32), vals=2, blocks=0)
fn StoreNoOffset(
self,
opcode: Opcode,
ctrl_typevar: Type,
flags: MemFlags,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph)
StoreNoOffset(imms=(flags: ir::MemFlags), vals=2, blocks=0)
fn Ternary(
self,
opcode: Opcode,
ctrl_typevar: Type,
arg0: Value,
arg1: Value,
arg2: Value,
) -> (Inst, &'f mut DataFlowGraph)
Ternary(imms=(), vals=3, blocks=0)
fn TernaryImm8(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: u8,
arg0: Value,
arg1: Value,
) -> (Inst, &'f mut DataFlowGraph)
TernaryImm8(imms=(imm: ir::immediates::Uimm8), vals=2, blocks=0)
fn Trap(
self,
opcode: Opcode,
ctrl_typevar: Type,
code: TrapCode,
) -> (Inst, &'f mut DataFlowGraph)
Trap(imms=(code: ir::TrapCode), vals=0, blocks=0)
fn Unary(
self,
opcode: Opcode,
ctrl_typevar: Type,
arg0: Value,
) -> (Inst, &'f mut DataFlowGraph)
Unary(imms=(), vals=1, blocks=0)
fn UnaryConst(
self,
opcode: Opcode,
ctrl_typevar: Type,
constant_handle: Constant,
) -> (Inst, &'f mut DataFlowGraph)
UnaryConst(imms=(constant_handle: ir::Constant), vals=0, blocks=0)
fn UnaryGlobalValue(
self,
opcode: Opcode,
ctrl_typevar: Type,
global_value: GlobalValue,
) -> (Inst, &'f mut DataFlowGraph)
UnaryGlobalValue(imms=(global_value: ir::GlobalValue), vals=0, blocks=0)
fn UnaryIeee16(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Ieee16,
) -> (Inst, &'f mut DataFlowGraph)
UnaryIeee16(imms=(imm: ir::immediates::Ieee16), vals=0, blocks=0)
fn UnaryIeee32(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Ieee32,
) -> (Inst, &'f mut DataFlowGraph)
UnaryIeee32(imms=(imm: ir::immediates::Ieee32), vals=0, blocks=0)
fn UnaryIeee64(
self,
opcode: Opcode,
ctrl_typevar: Type,
imm: Ieee64,
) -> (Inst, &'f mut DataFlowGraph)
UnaryIeee64(imms=(imm: ir::immediates::Ieee64), vals=0, blocks=0)
Dyn Compatibility
This trait is not dyn compatible.
In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.
Implementors
impl<'f, T> InstBuilder<'f> for T where T: InstBuilderBase<'f>
Any type implementing InstBuilderBase gets all the InstBuilder methods for free.