pub struct MaterializeOpsVisitor<B> { /* private fields */ }
Available on crate feature decode only.
A visitor that materializes whole Ops as it decodes the bytecode stream.
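As a quick orientation, here is a minimal, self-contained sketch of the pattern this type implements: a visitor whose callbacks build and return whole Op values instead of executing them. The Op enum, trait, and method set below are illustrative stand-ins, not this crate's exact definitions.

// Sketch of a materializing visitor: every callback returns a
// fully-built `Op` as data rather than interpreting it.
#[derive(Debug, PartialEq)]
enum Op {
    Xadd32 { dst: u8, src1: u8, src2: u8 },
    Jump { offset: i32 },
}

trait OpVisitor {
    type Return;
    fn xadd32(&mut self, dst: u8, src1: u8, src2: u8) -> Self::Return;
    fn jump(&mut self, offset: i32) -> Self::Return;
}

struct Materialize;

impl OpVisitor for Materialize {
    // "Materializing" means Return = Op: decoding an instruction
    // yields the instruction itself as a value.
    type Return = Op;
    fn xadd32(&mut self, dst: u8, src1: u8, src2: u8) -> Op {
        Op::Xadd32 { dst, src1, src2 }
    }
    fn jump(&mut self, offset: i32) -> Op {
        Op::Jump { offset }
    }
}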
Implementations
impl<B> MaterializeOpsVisitor<B>
Trait Implementations
impl<B: Default> Default for MaterializeOpsVisitor<B>
fn default() -> MaterializeOpsVisitor<B>
impl<B: BytecodeStream> ExtendedOpVisitor for MaterializeOpsVisitor<B>
fn call_indirect_host(&mut self, id: u8) -> Self::Return
A special opcode to halt interpreter execution and yield control back to the host.
This opcode results in DoneReason::CallIndirectHost where the
id here is shepherded along to the embedder. It’s up to the
embedder to determine what to do with the id and the current
state of registers and the stack.
In Wasmtime this is used to implement interpreter-to-host calls.
This is modeled as a call instruction where the first
parameter is the native function pointer to invoke and all
remaining parameters for the native function are in following
parameter positions (e.g. x1, x2, …). The results of the
host call are then stored in x0.
Handling this in Wasmtime is done through a “relocation” which is resolved at link-time when raw bytecode from Cranelift is assembled into the final object that Wasmtime will interpret.
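As an illustration of the embedder side, a hedged sketch; the DoneReason shape and the host-function table here are simplified stand-ins for Wasmtime's actual types:

enum DoneReason {
    CallIndirectHost { id: u8 },
    ReturnToHost,
}

fn handle(reason: DoneReason, host_funcs: &[fn()]) {
    match reason {
        // The id shepherded out of the bytecode picks the host function;
        // by the convention above its arguments live in x1, x2, ... and
        // its result is written to x0 before the interpreter resumes.
        DoneReason::CallIndirectHost { id } => host_funcs[usize::from(id)](),
        DoneReason::ReturnToHost => {}
    }
}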
fn xpcadd(&mut self, dst: XReg, offset: PcRelOffset) -> Self::Return
Adds offset to the pc of this instruction and stores it in dst.
fn xmov_fp(&mut self, dst: XReg) -> Self::Return
Gets the special “fp” register and moves it into dst.
fn xmov_lr(&mut self, dst: XReg) -> Self::Return
Gets the special “lr” register and moves it into dst.
fn xadd32_uoverflow_trap(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2).
The upper 32-bits of dst are unmodified. Traps if the addition
overflows.
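As a scalar model of these semantics (Rust's checked arithmetic, with the trap modeled as an Err):

// xadd32_uoverflow_trap on plain values: either the 32-bit sum or a trap.
fn xadd32_uoverflow_trap(src1: u32, src2: u32) -> Result<u32, &'static str> {
    src1.checked_add(src2).ok_or("trap: unsigned overflow")
}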
fn xadd64_uoverflow_trap(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
64-bit checked unsigned addition: dst = src1 + src2.
fn xmulhi64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = high64(src1 * src2) (signed)
fn xmulhi64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = high64(src1 * src2) (unsigned)
fn xbmask32(&mut self, dst: XReg, src: XReg) -> Self::Return
low32(dst) = if low32(src) == 0 { 0 } else { -1 }
fn xbmask64(&mut self, dst: XReg, src: XReg) -> Self::Return
dst = if src == 0 { 0 } else { -1 }
fn xload16be_u32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
low32(dst) = zext(*addr)
fn xload16be_s32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
low32(dst) = sext(*addr)
fn xload32be_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
low32(dst) = zext(*addr)
fn fload32be_o32(&mut self, dst: FReg, addr: AddrO32) -> Self::Return
low32(dst) = zext(*addr)
fn fload32le_o32(&mut self, dst: FReg, addr: AddrO32) -> Self::Return
low32(dst) = zext(*addr)
fn fload32le_g32(&mut self, dst: FReg, addr: AddrG32) -> Self::Return
low32(dst) = zext(*addr)
fn bitcast_int_from_float_32(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = bitcast low32(src) as i32
fn bitcast_int_from_float_64(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = bitcast src as i64
fn bitcast_float_from_int_32(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = bitcast low32(src) as f32
fn bitcast_float_from_int_64(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = bitcast src as f64
fn feq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 == src2)
fn fneq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 != src2)
fn flt32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 < src2)
fn flteq32(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 <= src2)
fn feq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 == src2)
fn fneq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 != src2)
fn flt64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 < src2)
fn flteq64(&mut self, dst: XReg, src1: FReg, src2: FReg) -> Self::Return
low32(dst) = zext(src1 <= src2)
fn fselect32(&mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg) -> Self::Return
low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
fn fselect64(&mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg) -> Self::Return
dst = low32(cond) ? if_nonzero : if_zero
fn f32_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = checked_f32_from_signed(low32(src))
fn f32_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = checked_f32_from_unsigned(low32(src))
fn f32_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = checked_f32_from_signed(src)
fn f32_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return
low32(dst) = checked_f32_from_unsigned(src)
fn f64_from_x32_s(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = checked_f64_from_signed(low32(src))
fn f64_from_x32_u(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = checked_f64_from_unsigned(low32(src))
fn f64_from_x64_s(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = checked_f64_from_signed(src)
fn f64_from_x64_u(&mut self, dst: FReg, src: XReg) -> Self::Return
dst = checked_f64_from_unsigned(src)
fn x32_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = checked_signed_from_f32(low32(src))
fn x32_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = checked_unsigned_from_f32(low32(src))
fn x32_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = checked_signed_from_f64(src)
fn x32_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = checked_unsigned_from_f64(src)
fn x64_from_f32_s(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = checked_signed_from_f32(low32(src))
fn x64_from_f32_u(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = checked_unsigned_from_f32(low32(src))
fn x64_from_f64_s(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = checked_signed_from_f64(src)
fn x64_from_f64_u(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = checked_unsigned_from_f64(src)
fn x32_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = saturating_signed_from_f32(low32(src))
fn x32_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = saturating_unsigned_from_f32(low32(src))
fn x32_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = saturating_signed_from_f64(src)
fn x32_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
low32(dst) = saturating_unsigned_from_f64(src)
fn x64_from_f32_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = saturating_signed_from_f32(low32(src))
fn x64_from_f32_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = saturating_unsigned_from_f32(low32(src))
fn x64_from_f64_s_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = saturating_signed_from_f64(src)
fn x64_from_f64_u_sat(&mut self, dst: XReg, src: FReg) -> Self::Return
dst = saturating_unsigned_from_f64(src)
fn fcopysign32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = copysign(low32(src1), low32(src2))
fn fcopysign64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = copysign(src1, src2)
fn fadd32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = low32(src1) + low32(src2)
fn fsub32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = low32(src1) - low32(src2)
fn vsubf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
low128(dst) = low128(src1) - low128(src2)
fn fmul32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = low32(src1) * low32(src2)
fn vmulf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
low128(dst) = low128(src1) * low128(src2)
fn fdiv32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = low32(src1) / low32(src2)
fn vdivf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
low128(dst) = low128(src1) / low128(src2)
fn fmaximum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = ieee_maximum(low32(src1), low32(src2))
fn fminimum32(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
low32(dst) = ieee_minimum(low32(src1), low32(src2))
fn ftrunc32(&mut self, dst: FReg, src: FReg) -> Self::Return
low32(dst) = ieee_trunc(low32(src))
fn vtrunc32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_trunc(low128(src))
fn vtrunc64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_trunc(low128(src))
fn ffloor32(&mut self, dst: FReg, src: FReg) -> Self::Return
low32(dst) = ieee_floor(low32(src))
fn vfloor32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_floor(low128(src))
fn vfloor64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_floor(low128(src))
fn fceil32(&mut self, dst: FReg, src: FReg) -> Self::Return
low32(dst) = ieee_ceil(low32(src))
fn vceil32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_ceil(low128(src))
fn vceil64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_ceil(low128(src))
fn fnearest32(&mut self, dst: FReg, src: FReg) -> Self::Return
low32(dst) = ieee_nearest(low32(src))
fn fsqrt32(&mut self, dst: FReg, src: FReg) -> Self::Return
low32(dst) = ieee_sqrt(low32(src))
fn vsqrt32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_sqrt(low128(src))
fn vsqrt64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_sqrt(low128(src))
fn fmaximum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = ieee_maximum(src1, src2)
fn fminimum64(&mut self, operands: BinaryOperands<FReg>) -> Self::Return
dst = ieee_minimum(src1, src2)
fn vnearest32x4(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_nearest(low128(src))
fn vnearest64x2(&mut self, dst: VReg, src: VReg) -> Self::Return
low128(dst) = ieee_nearest(low128(src))
fn vaddi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_add(src1, src2)
fn vaddu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_add(src1, src2)
fn vaddi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_add(src1, src2)
fn vaddu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_add(src1, src2)
fn vaddpairwisei16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]
fn vaddpairwisei32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]
fn vshli8x16(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 << src2
fn vshli16x8(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 << src2
fn vshli32x4(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 << src2
fn vshli64x2(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 << src2
fn vshri8x16_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 >> src2 (signed)
fn vshri16x8_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 >> src2 (signed)
fn vshri32x4_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 >> src2 (signed)
fn vshri64x2_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 >> src2 (signed)
fn vshri8x16_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 >> src2 (unsigned)
fn vshri16x8_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 >> src2 (unsigned)
fn vshri32x4_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 >> src2 (unsigned)
fn vshri64x2_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>) -> Self::Return
dst = src1 >> src2 (unsigned)
fn vload8x8_s_z(&mut self, dst: VReg, addr: AddrZ) -> Self::Return
Load the 64-bit source as i8x8 and sign-extend to i16x8.
fn vload8x8_u_z(&mut self, dst: VReg, addr: AddrZ) -> Self::Return
Load the 64-bit source as u8x8 and zero-extend to i16x8.
fn vload16x4le_s_z(&mut self, dst: VReg, addr: AddrZ) -> Self::Return
Load the 64-bit source as i16x4 and sign-extend to i32x4.
fn vload16x4le_u_z(&mut self, dst: VReg, addr: AddrZ) -> Self::Return
Load the 64-bit source as u16x4 and zero-extend to i32x4.
fn vload32x2le_s_z(&mut self, dst: VReg, addr: AddrZ) -> Self::Return
Load the 64-bit source as i32x2 and sign-extend to i64x2.
fn vload32x2le_u_z(&mut self, dst: VReg, addr: AddrZ) -> Self::Return
Load the 64-bit source as u32x2 and zero-extend to i64x2.
fn vbitselect128(&mut self, dst: VReg, c: VReg, x: VReg, y: VReg) -> Self::Return
dst = (c & x) | (!c & y)
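A scalar model of the formula:

// vbitselect128: each result bit comes from x where the corresponding
// bit of c is set, and from y where it is clear.
fn bitselect(c: u128, x: u128, y: u128) -> u128 {
    (c & x) | (!c & y)
}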
fn vbitmask8x16(&mut self, dst: XReg, src: VReg) -> Self::Return
Collect high bits of each lane into the low 32-bits of the destination.
fn vbitmask16x8(&mut self, dst: XReg, src: VReg) -> Self::Return
Collect high bits of each lane into the low 32-bits of the destination.
fn vbitmask32x4(&mut self, dst: XReg, src: VReg) -> Self::Return
Collect high bits of each lane into the low 32-bits of the destination.
fn vbitmask64x2(&mut self, dst: XReg, src: VReg) -> Self::Return
Collect high bits of each lane into the low 32-bits of the destination.
fn valltrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether all lanes are nonzero in dst.
fn valltrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether all lanes are nonzero in dst.
fn valltrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether all lanes are nonzero in dst.
fn valltrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether all lanes are nonzero in dst.
fn vanytrue8x16(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether any lanes are nonzero in dst.
fn vanytrue16x8(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether any lanes are nonzero in dst.
fn vanytrue32x4(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether any lanes are nonzero in dst.
fn vanytrue64x2(&mut self, dst: XReg, src: VReg) -> Self::Return
Store whether any lanes are nonzero in dst.
fn vf32x4_from_i32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Int-to-float conversion (same as f32_from_x32_s)
fn vf32x4_from_i32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Int-to-float conversion (same as f32_from_x32_u)
fn vf64x2_from_i64x2_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Int-to-float conversion (same as f64_from_x64_s)
fn vf64x2_from_i64x2_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Int-to-float conversion (same as f64_from_x64_u)
fn vi32x4_from_f32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Float-to-int conversion (same as x32_from_f32_s)
fn vi32x4_from_f32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Float-to-int conversion (same as x32_from_f32_u)
fn vi64x2_from_f64x2_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Float-to-int conversion (same as x64_from_f64_s)
fn vi64x2_from_f64x2_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Float-to-int conversion (same as x64_from_f64_u)
fn vwidenlow8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as signed, to twice the width.
fn vwidenlow8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as unsigned, to twice the width.
fn vwidenlow16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as signed, to twice the width.
fn vwidenlow16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as unsigned, to twice the width.
fn vwidenlow32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as signed, to twice the width.
fn vwidenlow32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the low lanes of the input vector, as unsigned, to twice the width.
fn vwidenhigh8x16_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as signed, to twice the width.
fn vwidenhigh8x16_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as unsigned, to twice the width.
fn vwidenhigh16x8_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as signed, to twice the width.
fn vwidenhigh16x8_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as unsigned, to twice the width.
fn vwidenhigh32x4_s(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as signed, to twice the width.
fn vwidenhigh32x4_u(&mut self, dst: VReg, src: VReg) -> Self::Return
Widens the high lanes of the input vector, as unsigned, to twice the width.
fn vnarrow16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
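Per lane, signed saturating narrowing clamps to the smaller type's range before truncating; a scalar sketch of one i16-to-i8 lane:

// One lane of vnarrow16x8_s: clamp into i8 range, then truncate.
fn narrow16_s(x: i16) -> i8 {
    x.clamp(i16::from(i8::MIN), i16::from(i8::MAX)) as i8
}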
fn vnarrow16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
fn vnarrow32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
fn vnarrow32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
fn vnarrow64x2_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
fn vnarrow64x2_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
fn vunarrow64x2_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
Narrows the two 64x2 vectors, assuming all input lanes are unsigned, to half the width. Narrowing is unsigned and saturating.
fn vfpromotelow(&mut self, dst: VReg, src: VReg) -> Self::Return
Promotes the low two lanes of the f32x4 input to f64x2.
fn vfdemote(&mut self, dst: VReg, src: VReg) -> Self::Return
Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.
fn vsubi8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_sub(src1, src2)
fn vsubu8x16_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_sub(src1, src2)
fn vsubi16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_sub(src1, src2)
fn vsubu16x8_sat(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = saturating_sub(src1, src2)
fn vqmulrsi16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = signed_saturate(src1 * src2 + (1 << (Q - 1)) >> Q)
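Here Q is 15 (Q15 fixed-point). A scalar model of one lane, assuming the usual rounding constant 1 << (Q - 1):

// One i16 lane of vqmulrsi16x8: widen, multiply, add the rounding
// constant, shift right by Q = 15, then saturate back to i16. Only
// i16::MIN * i16::MIN can exceed the i16 range after the shift.
fn q15_mulr_sat(a: i16, b: i16) -> i16 {
    let rounded = (i32::from(a) * i32::from(b) + (1 << 14)) >> 15;
    rounded.clamp(i32::from(i16::MIN), i32::from(i16::MAX)) as i16
}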
fn xextractv8x16(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return
low32(dst) = zext(src[lane])
fn xextractv16x8(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return
low32(dst) = zext(src[lane])
fn xextractv32x4(&mut self, dst: XReg, src: VReg, lane: u8) -> Self::Return
low32(dst) = src[lane]
fn fextractv32x4(&mut self, dst: FReg, src: VReg, lane: u8) -> Self::Return
low32(dst) = src[lane]
fn vinsertx8(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8) -> Self::Return
dst = src1; dst[lane] = src2
fn vinsertx16(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8) -> Self::Return
dst = src1; dst[lane] = src2
fn vinsertx32(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8) -> Self::Return
dst = src1; dst[lane] = src2
fn vinsertx64(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8) -> Self::Return
dst = src1; dst[lane] = src2
fn vinsertf32(&mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8) -> Self::Return
dst = src1; dst[lane] = src2
fn vinsertf64(&mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8) -> Self::Return
dst = src1; dst[lane] = src2
fn vslt8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst (signed)
fn vslteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst (signed)
fn vult8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst (unsigned)
fn vulteq8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst (unsigned)
fn vslt16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst (signed)
fn vslteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst (signed)
fn vult16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst (unsigned)
fn vulteq16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst (unsigned)
fn vslt32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst (signed)
fn vslteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst (signed)
fn vult32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst (unsigned)
fn vulteq32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst (unsigned)
fn vslt64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst (signed)
fn vslteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst (signed)
fn vult64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src < dst (unsigned)
fn vulteq64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst (unsigned)
fn vmin8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = min(src1, src2) (signed)
fn vmin8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = min(src1, src2) (unsigned)
fn vmin16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = min(src1, src2) (signed)
fn vmin16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = min(src1, src2) (unsigned)
fn vmax8x16_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = max(src1, src2) (signed)
fn vmax8x16_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = max(src1, src2) (unsigned)
fn vmax16x8_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = max(src1, src2) (signed)
fn vmax16x8_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = max(src1, src2) (unsigned)
fn vmin32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = min(src1, src2) (signed)
fn vmin32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = min(src1, src2) (unsigned)
fn vmax32x4_s(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = max(src1, src2) (signed)
fn vmax32x4_u(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = max(src1, src2) (unsigned)
fn vmaximumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = ieee_maximum(src1, src2)
fn vmaximumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = ieee_maximum(src1, src2)
fn vminimumf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = ieee_minimum(src1, src2)
fn vminimumf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = ieee_minimum(src1, src2)
fn vshuffle(&mut self, dst: VReg, src1: VReg, src2: VReg, mask: u128) -> Self::Return
dst = shuffle(src1, src2, mask)
fn vswizzlei8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = swizzle(src1, src2)
fn vavground8x16(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = (src1 + src2 + 1) // 2
fn vavground16x8(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = (src1 + src2 + 1) // 2
fn vlteqf32x4(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
fn vlteqf64x2(&mut self, operands: BinaryOperands<VReg>) -> Self::Return
dst = src <= dst
fn vfma32x4(&mut self, dst: VReg, a: VReg, b: VReg, c: VReg) -> Self::Return
dst = ieee_fma(a, b, c)
fn vfma64x2(&mut self, dst: VReg, a: VReg, b: VReg, c: VReg) -> Self::Return
dst = ieee_fma(a, b, c)
fn vselect(&mut self, dst: VReg, cond: XReg, if_nonzero: VReg, if_zero: VReg) -> Self::Return
dst = low32(cond) ? if_nonzero : if_zero
fn xadd128(&mut self, dst_lo: XReg, dst_hi: XReg, lhs_lo: XReg, lhs_hi: XReg, rhs_lo: XReg, rhs_hi: XReg) -> Self::Return
dst_hi:dst_lo = lhs_hi:lhs_lo + rhs_hi:rhs_lo
fn xsub128(&mut self, dst_lo: XReg, dst_hi: XReg, lhs_lo: XReg, lhs_hi: XReg, rhs_lo: XReg, rhs_hi: XReg) -> Self::Return
dst_hi:dst_lo = lhs_hi:lhs_lo - rhs_hi:rhs_lo
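A scalar model of the 128-bit pair arithmetic, carried out on 64-bit halves:

// xadd128: add the low halves, then fold the carry into the high
// halves. xsub128 has the same shape with borrows instead of carries.
fn xadd128(lhs_lo: u64, lhs_hi: u64, rhs_lo: u64, rhs_hi: u64) -> (u64, u64) {
    let (dst_lo, carry) = lhs_lo.overflowing_add(rhs_lo);
    let dst_hi = lhs_hi.wrapping_add(rhs_hi).wrapping_add(u64::from(carry));
    (dst_lo, dst_hi)
}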
impl<B: BytecodeStream> OpVisitor for MaterializeOpsVisitor<B>
fn call(&mut self, offset: PcRelOffset) -> Self::Return
Transfer control to the PC at the given offset and set the lr
register to the PC just after this instruction.
This instruction generally assumes that the Pulley ABI is being respected, where arguments are in argument registers (starting at x0 for integer arguments) and results are in result registers. This instruction itself assumes that all arguments are already in their registers. The subsequent instructions below enable moving arguments into the correct registers as part of the same call instruction.
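A sketch of that fusion for call2 (the register file is modeled as a plain array; the control transfer itself is elided):

// call2 behaves like two register moves followed by a plain call:
// x0 and x1 receive the values of the two argument registers, then
// control transfers and lr is set exactly as call does.
fn call2(regs: &mut [u64; 32], arg1: usize, arg2: usize) {
    let (a, b) = (regs[arg1], regs[arg2]);
    regs[0] = a; // x0 = arg1
    regs[1] = b; // x1 = arg2
    // ...then jump to the pc-relative target and set lr.
}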
fn call1(&mut self, arg1: XReg, offset: PcRelOffset) -> Self::Return
Like call, but also x0 = arg1
fn call2(&mut self, arg1: XReg, arg2: XReg, offset: PcRelOffset) -> Self::Return
Like call, but also x0, x1 = arg1, arg2
fn call3(&mut self, arg1: XReg, arg2: XReg, arg3: XReg, offset: PcRelOffset) -> Self::Return
Like call, but also x0, x1, x2 = arg1, arg2, arg3
fn call4(&mut self, arg1: XReg, arg2: XReg, arg3: XReg, arg4: XReg, offset: PcRelOffset) -> Self::Return
Like call, but also x0, x1, x2, x3 = arg1, arg2, arg3, arg4
fn call_indirect(&mut self, reg: XReg) -> Self::Return
Transfer control to the PC in reg and set lr to the PC just
after this instruction.
fn jump(&mut self, offset: PcRelOffset) -> Self::Return
Unconditionally transfer control to the PC at the given offset.
fn xjump(&mut self, reg: XReg) -> Self::Return
Unconditionally transfer control to the PC in the specified register.
fn br_if32(&mut self, cond: XReg, offset: PcRelOffset) -> Self::Return
Conditionally transfer control to the given PC offset if
low32(cond) contains a non-zero value.
fn br_if_not32(&mut self, cond: XReg, offset: PcRelOffset) -> Self::Return
Conditionally transfer control to the given PC offset if
low32(cond) contains a zero value.
fn br_if_xeq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if a == b.
fn br_if_xneq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if a != b.
fn br_if_xslt32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if signed a < b.
fn br_if_xslteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if signed a <= b.
fn br_if_xult32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if unsigned a < b.
fn br_if_xulteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if unsigned a <= b.
fn br_if_xeq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if a == b.
fn br_if_xneq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if a != b.
fn br_if_xslt64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if signed a < b.
fn br_if_xslteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if signed a <= b.
fn br_if_xult64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if unsigned a < b.
fn br_if_xulteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return
Branch if unsigned a <= b.
fn br_if_xeq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if a == b.
fn br_if_xeq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if a == b.
fn br_if_xneq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if a != b.
fn br_if_xneq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if a != b.
fn br_if_xslt32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if signed a < b.
fn br_if_xslt32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if signed a < b.
fn br_if_xsgt32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if signed a > b.
fn br_if_xsgt32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if signed a > b.
fn br_if_xslteq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if signed a <= b.
fn br_if_xslteq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if signed a <= b.
fn br_if_xsgteq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if signed a >= b.
fn br_if_xsgteq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if signed a >= b.
fn br_if_xult32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset) -> Self::Return
Branch if unsigned a < b.
fn br_if_xult32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset) -> Self::Return
Branch if unsigned a < b.
fn br_if_xulteq32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset) -> Self::Return
Branch if unsigned a <= b.
fn br_if_xulteq32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset) -> Self::Return
Branch if unsigned a <= b.
fn br_if_xugt32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset) -> Self::Return
Branch if unsigned a > b.
fn br_if_xugt32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset) -> Self::Return
Branch if unsigned a > b.
fn br_if_xugteq32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset) -> Self::Return
Branch if unsigned a >= b.
fn br_if_xugteq32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset) -> Self::Return
Branch if unsigned a >= b.
fn br_if_xeq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if a == b.
fn br_if_xeq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if a == b.
fn br_if_xneq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if a != b.
fn br_if_xneq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if a != b.
fn br_if_xslt64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if signed a < b.
fn br_if_xslt64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if signed a < b.
fn br_if_xsgt64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if signed a > b.
fn br_if_xsgt64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if signed a > b.
fn br_if_xslteq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if signed a <= b.
fn br_if_xslteq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if signed a <= b.
fn br_if_xsgteq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset) -> Self::Return
Branch if signed a >= b.
fn br_if_xsgteq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset) -> Self::Return
Branch if signed a >= b.
fn br_if_xult64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset) -> Self::Return
Branch if unsigned a < b.
fn br_if_xult64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset) -> Self::Return
Branch if unsigned a < b.
fn br_if_xulteq64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset) -> Self::Return
Branch if unsigned a <= b.
fn br_if_xulteq64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset) -> Self::Return
Branch if unsigned a <= b.
fn br_if_xugt64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset) -> Self::Return
Branch if unsigned a > b.
fn br_if_xugt64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset) -> Self::Return
Branch if unsigned a > b.
fn br_if_xugteq64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset) -> Self::Return
Branch if unsigned a >= b.
fn br_if_xugteq64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset) -> Self::Return
Branch if unsigned a >= b.
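An illustrative reading of this compare-and-branch family, as a plain-Rust sketch: the immediate is presumably widened to 64 bits with the signedness implied by its type (sign-extended for i8/i32, zero-extended for u8/u32) before the comparison decides whether the PcRelOffset branch is taken. The helper name below is hypothetical.

fn br_if_xslt64_i8_taken(a: u64, b: i8) -> bool {
    // Sign-extend the 8-bit immediate, then compare as signed 64-bit.
    (a as i64) < (b as i64)
}

fn main() {
    assert!(br_if_xslt64_i8_taken((-5i64) as u64, -1)); // -5 < -1
    assert!(!br_if_xslt64_i8_taken(0, -1));             // 0 >= -1
}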
Source§fn br_table32(&mut self, idx: XReg, amt: u32) -> Self::Return
 
fn br_table32(&mut self, idx: XReg, amt: u32) -> Self::Return
Branch to the label indicated by low32(idx).
After this instruction are amt instances of PcRelOffset
and the idx selects which one will be branched to. The value
of idx is clamped to amt - 1 (i.e. the last offset is the
“default” one).
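As a hedged sketch (not Wasmtime's implementation), the target selection reduces to a clamped index into the trailing offset table:

fn br_table_target(offsets: &[i32], idx: u32) -> i32 {
    // `offsets` stands in for the `amt` PcRelOffset entries that
    // follow the instruction; the last entry acts as the default.
    debug_assert!(!offsets.is_empty());
    let clamped = (idx as usize).min(offsets.len() - 1);
    offsets[clamped]
}

fn main() {
    let offsets = [8, 16, 24];
    assert_eq!(br_table_target(&offsets, 1), 16);
    assert_eq!(br_table_target(&offsets, 99), 24); // clamped to the default
}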
Source§fn xadd32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xadd32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
32-bit wrapping addition: low32(dst) = low32(src1) + low32(src2).
The upper 32-bits of dst are unmodified.
Source§fn xadd32_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return
 
fn xadd32_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return
Same as xadd32 but src2 is a zero-extended 8-bit immediate.
Source§fn xadd32_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return
 
fn xadd32_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return
Same as xadd32 but src2 is a 32-bit immediate.
Source§fn xadd64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xadd64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
64-bit wrapping addition: dst = src1 + src2.
Source§fn xadd64_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return
 
fn xadd64_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return
Same as xadd64 but src2 is a zero-extended 8-bit immediate.
Source§fn xadd64_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return
 
fn xadd64_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return
Same as xadd64 but src2 is a zero-extended 32-bit immediate.
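To make the wrapping and immediate-extension rules concrete, a minimal sketch in plain Rust (register plumbing elided; the function shapes are illustrative, not the visitor API):

fn xadd32(dst: u64, src1: u64, src2: u64) -> u64 {
    let sum = (src1 as u32).wrapping_add(src2 as u32);
    // The upper 32 bits of dst are unmodified.
    (dst & 0xffff_ffff_0000_0000) | sum as u64
}

fn xadd64_u8(src1: u64, imm: u8) -> u64 {
    src1.wrapping_add(imm as u64) // the 8-bit immediate is zero-extended
}

fn main() {
    let dst = xadd32(0xdead_beef_0000_0000, u32::MAX as u64, 1);
    assert_eq!(dst, 0xdead_beef_0000_0000); // low32 wrapped to 0, high bits kept
    assert_eq!(xadd64_u8(u64::MAX, 1), 0);  // 64-bit wrap
}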
Source§fn xmadd32(
    &mut self,
    dst: XReg,
    src1: XReg,
    src2: XReg,
    src3: XReg,
) -> Self::Return
 
fn xmadd32( &mut self, dst: XReg, src1: XReg, src2: XReg, src3: XReg, ) -> Self::Return
low32(dst) = low32(src1) * low32(src2) + low32(src3)
Source§fn xmadd64(
    &mut self,
    dst: XReg,
    src1: XReg,
    src2: XReg,
    src3: XReg,
) -> Self::Return
 
fn xmadd64( &mut self, dst: XReg, src1: XReg, src2: XReg, src3: XReg, ) -> Self::Return
dst = src1 * src2 + src3
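A minimal sketch of the multiply-add semantics, assuming the multiply and add wrap on overflow like the other arithmetic ops here:

fn xmadd32(src1: u32, src2: u32, src3: u32) -> u32 {
    src1.wrapping_mul(src2).wrapping_add(src3)
}

fn xmadd64(src1: u64, src2: u64, src3: u64) -> u64 {
    src1.wrapping_mul(src2).wrapping_add(src3)
}

fn main() {
    assert_eq!(xmadd32(3, 4, 5), 17);
    assert_eq!(xmadd64(u64::MAX, 2, 3), 1); // (-1) * 2 + 3 modulo 2^64
}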
Source§fn xsub32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xsub32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
32-bit wrapping subtraction: low32(dst) = low32(src1) - low32(src2).
The upper 32-bits of dst are unmodified.
Source§fn xsub32_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return
 
fn xsub32_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return
Same as xsub32 but src2 is a zero-extended 8-bit immediate.
Source§fn xsub32_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return
 
fn xsub32_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return
Same as xsub32 but src2 is a 32-bit immediate.
Source§fn xsub64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xsub64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
64-bit wrapping subtraction: dst = src1 - src2.
Source§fn xsub64_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return
 
fn xsub64_u8(&mut self, dst: XReg, src1: XReg, src2: u8) -> Self::Return
Same as xsub64 but src2 is a zero-extended 8-bit immediate.
Source§fn xsub64_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return
 
fn xsub64_u32(&mut self, dst: XReg, src1: XReg, src2: u32) -> Self::Return
Same as xsub64 but src2 is a zero-extended 32-bit immediate.
Source§fn xmul32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xmul32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) * low32(src2)
Source§fn xmul32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
 
fn xmul32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
Same as xmul32 but src2 is a sign-extended 8-bit immediate.
Source§fn xmul32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
 
fn xmul32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
Same as xmul32 but src2 is a sign-extended 32-bit immediate.
Source§fn xmul64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
 
fn xmul64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
Same as xmul64 but src2 is a sign-extended 8-bit immediate.
Source§fn xmul64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
 
fn xmul64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
Same as xmul64 but src2 is a sign-extended 32-bit immediate.
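The _s8 forms sign-extend their immediate, so small negative constants encode compactly; an illustrative sketch:

fn xmul64_s8(src1: u64, imm: i8) -> u64 {
    // Sign-extend the immediate to 64 bits, then wrapping-multiply.
    src1.wrapping_mul(imm as i64 as u64)
}

fn main() {
    assert_eq!(xmul64_s8(3, -2), (-6i64) as u64);
}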
Source§fn xctz32(&mut self, dst: XReg, src: XReg) -> Self::Return
 
fn xctz32(&mut self, dst: XReg, src: XReg) -> Self::Return
low32(dst) = trailing_zeros(low32(src))
Source§fn xclz32(&mut self, dst: XReg, src: XReg) -> Self::Return
 
fn xclz32(&mut self, dst: XReg, src: XReg) -> Self::Return
low32(dst) = leading_zeros(low32(src))
Source§fn xpopcnt32(&mut self, dst: XReg, src: XReg) -> Self::Return
 
fn xpopcnt32(&mut self, dst: XReg, src: XReg) -> Self::Return
low32(dst) = count_ones(low32(src))
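These pseudo-ops use Rust's own integer-method names; a direct mapping for reference (upper-bit handling of dst omitted):

fn main() {
    let src: u32 = 0b0001_0100;
    assert_eq!(src.trailing_zeros(), 2);  // xctz32
    assert_eq!(src.leading_zeros(), 27);  // xclz32
    assert_eq!(src.count_ones(), 2);      // xpopcnt32
}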
Source§fn xrotl32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xrotl32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = rotate_left(low32(src1), low32(src2))
Source§fn xrotl64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xrotl64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = rotate_left(src1, src2)
Source§fn xrotr32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xrotr32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = rotate_right(low32(src1), low32(src2))
Source§fn xrotr64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xrotr64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = rotate_right(src1, src2)
Source§fn xshl32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xshl32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) << low5(src2)
Source§fn xshr32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xshr32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) >> low5(src2) (signed)
Source§fn xshr32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xshr32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) >> low5(src2) (unsigned)
Source§fn xshr64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xshr64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = src1 >> low6(src2) (signed)
Source§fn xshr64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xshr64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = src1 >> low6(src2) (unsigned)
Source§fn xshl32_u6(
    &mut self,
    operands: BinaryOperands<XReg, XReg, U6>,
) -> Self::Return
 
fn xshl32_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return
low32(dst) = low32(src1) << low5(src2)
Source§fn xshr32_s_u6(
    &mut self,
    operands: BinaryOperands<XReg, XReg, U6>,
) -> Self::Return
 
fn xshr32_s_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return
low32(dst) = low32(src1) >> low5(src2) (signed)
Source§fn xshr32_u_u6(
    &mut self,
    operands: BinaryOperands<XReg, XReg, U6>,
) -> Self::Return
 
fn xshr32_u_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return
low32(dst) = low32(src1) >> low5(src2) (unsigned)
Source§fn xshl64_u6(
    &mut self,
    operands: BinaryOperands<XReg, XReg, U6>,
) -> Self::Return
 
fn xshl64_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return
dst = src1 << low6(src2)
Source§fn xshr64_s_u6(
    &mut self,
    operands: BinaryOperands<XReg, XReg, U6>,
) -> Self::Return
 
fn xshr64_s_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return
dst = src1 >> low6(src2) (signed)
Source§fn xshr64_u_u6(
    &mut self,
    operands: BinaryOperands<XReg, XReg, U6>,
) -> Self::Return
 
fn xshr64_u_u6( &mut self, operands: BinaryOperands<XReg, XReg, U6>, ) -> Self::Return
dst = src1 >> low6(src2) (unsigned)
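The low5/low6 notation means 32-bit shifts consume only the low 5 bits of the shift amount and 64-bit shifts the low 6 bits, so out-of-range amounts wrap rather than being undefined; a quick illustrative sketch:

fn xshl32(src1: u32, src2: u32) -> u32 {
    src1 << (src2 & 0x1f) // low5
}

fn xshr64_s(src1: i64, src2: u64) -> i64 {
    src1 >> (src2 & 0x3f) // low6; arithmetic shift for the _s form
}

fn main() {
    assert_eq!(xshl32(1, 33), 2);     // 33 & 31 == 1
    assert_eq!(xshr64_s(-8, 66), -2); // 66 & 63 == 2, sign preserved
}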
Source§fn xneq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xneq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = src1 != src2
Source§fn xslt64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xslt64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = src1 < src2 (signed)
Source§fn xslteq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xslteq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = src1 <= src2 (signed)
Source§fn xult64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xult64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = src1 < src2 (unsigned)
Source§fn xulteq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xulteq64(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = src1 <= src2 (unsigned)
Source§fn xeq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xeq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) == low32(src2)
Source§fn xneq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xneq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) != low32(src2)
Source§fn xslt32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xslt32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) < low32(src2) (signed)
Source§fn xslteq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xslteq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) <= low32(src2) (signed)
Source§fn xult32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xult32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) < low32(src2) (unsigned)
Source§fn xulteq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xulteq32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) <= low32(src2) (unsigned)
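A hedged sketch of these comparisons, assuming the value written to low32(dst) is a boolean 0 or 1; signedness is chosen by reinterpreting the same register bits:

fn xslt32(src1: u32, src2: u32) -> u32 {
    ((src1 as i32) < (src2 as i32)) as u32 // signed
}

fn xult32(src1: u32, src2: u32) -> u32 {
    (src1 < src2) as u32 // unsigned
}

fn main() {
    assert_eq!(xslt32(u32::MAX, 0), 1); // as i32, MAX is -1
    assert_eq!(xult32(u32::MAX, 0), 0);
}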
Source§fn xload8_u32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
 
fn xload8_u32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
low32(dst) = zext_8_32(*addr)
Source§fn xload8_s32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
 
fn xload8_s32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
low32(dst) = sext_8_32(*addr)
Source§fn xload16le_u32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
 
fn xload16le_u32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
low32(dst) = zext_16_32(*addr)
Source§fn xload16le_s32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
 
fn xload16le_s32_o32(&mut self, dst: XReg, addr: AddrO32) -> Self::Return
low32(dst) = sext_16_32(*addr)
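The zext/sext notation in these loads corresponds directly to Rust casts; for reference:

fn zext_8_32(byte: u8) -> u32 {
    byte as u32
}

fn sext_8_32(byte: u8) -> u32 {
    byte as i8 as i32 as u32 // reinterpret, then sign-extend
}

fn sext_16_32(half: u16) -> u32 {
    half as i16 as i32 as u32
}

fn main() {
    assert_eq!(zext_8_32(0xff), 0x0000_00ff);
    assert_eq!(sext_8_32(0xff), 0xffff_ffff);
    assert_eq!(sext_16_32(0x8000), 0xffff_8000);
}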
Source§fn xload8_u32_z(&mut self, dst: XReg, addr: AddrZ) -> Self::Return
 
fn xload8_u32_z(&mut self, dst: XReg, addr: AddrZ) -> Self::Return
low32(dst) = zext_8_32(*addr)
Source§fn xload8_s32_z(&mut self, dst: XReg, addr: AddrZ) -> Self::Return
 
fn xload8_s32_z(&mut self, dst: XReg, addr: AddrZ) -> Self::Return
low32(dst) = sext_8_32(*addr)
Source§fn xload16le_u32_z(&mut self, dst: XReg, addr: AddrZ) -> Self::Return
 
fn xload16le_u32_z(&mut self, dst: XReg, addr: AddrZ) -> Self::Return
low32(dst) = zext_16_32(*addr)
Source§fn xload16le_s32_z(&mut self, dst: XReg, addr: AddrZ) -> Self::Return
 
fn xload16le_s32_z(&mut self, dst: XReg, addr: AddrZ) -> Self::Return
low32(dst) = sext_16_32(*addr)
Source§fn xload8_u32_g32(&mut self, dst: XReg, addr: AddrG32) -> Self::Return
 
fn xload8_u32_g32(&mut self, dst: XReg, addr: AddrG32) -> Self::Return
low32(dst) = zext_8_32(*addr)
Source§fn xload8_s32_g32(&mut self, dst: XReg, addr: AddrG32) -> Self::Return
 
fn xload8_s32_g32(&mut self, dst: XReg, addr: AddrG32) -> Self::Return
low32(dst) = sext_8_32(*addr)
Source§fn xload16le_u32_g32(&mut self, dst: XReg, addr: AddrG32) -> Self::Return
 
fn xload16le_u32_g32(&mut self, dst: XReg, addr: AddrG32) -> Self::Return
low32(dst) = zext_16_32(*addr)
Source§fn xload16le_s32_g32(&mut self, dst: XReg, addr: AddrG32) -> Self::Return
 
fn xload16le_s32_g32(&mut self, dst: XReg, addr: AddrG32) -> Self::Return
low32(dst) = sext_16_32(*addr)
Source§fn xload8_u32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
 
fn xload8_u32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
low32(dst) = zext_8_32(*addr)
Source§fn xload8_s32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
 
fn xload8_s32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
low32(dst) = sext_8_32(*addr)
Source§fn xload16le_u32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
 
fn xload16le_u32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
low32(dst) = zext_16_32(*addr)
Source§fn xload16le_s32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
 
fn xload16le_s32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
low32(dst) = sext_16_32(*addr)
Source§fn xload32le_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
 
fn xload32le_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
low32(dst) = *addr
Source§fn xload64le_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
 
fn xload64le_g32bne(&mut self, dst: XReg, addr: AddrG32Bne) -> Self::Return
dst = *addr
Source§fn xstore8_g32bne(&mut self, addr: AddrG32Bne, src: XReg) -> Self::Return
 
fn xstore8_g32bne(&mut self, addr: AddrG32Bne, src: XReg) -> Self::Return
*addr = low8(src)
Source§fn xstore16le_g32bne(&mut self, addr: AddrG32Bne, src: XReg) -> Self::Return
 
fn xstore16le_g32bne(&mut self, addr: AddrG32Bne, src: XReg) -> Self::Return
*addr = low16(src)
Source§fn xstore32le_g32bne(&mut self, addr: AddrG32Bne, src: XReg) -> Self::Return
 
fn xstore32le_g32bne(&mut self, addr: AddrG32Bne, src: XReg) -> Self::Return
*addr = low32(src)
Source§fn xstore64le_g32bne(&mut self, addr: AddrG32Bne, src: XReg) -> Self::Return
 
fn xstore64le_g32bne(&mut self, addr: AddrG32Bne, src: XReg) -> Self::Return
*addr = src
Source§fn push_frame(&mut self) -> Self::Return
 
fn push_frame(&mut self) -> Self::Return
push lr; push fp; fp = sp
Source§fn push_frame_save(&mut self, amt: u16, regs: UpperRegSet<XReg>) -> Self::Return
 
fn push_frame_save(&mut self, amt: u16, regs: UpperRegSet<XReg>) -> Self::Return
Macro-instruction to enter a function, allocate some stack, and then save some registers.
This is equivalent to push_frame, stack_alloc32 amt, then
saving all of regs to the top of the stack just allocated.
Source§fn pop_frame_restore(
    &mut self,
    amt: u16,
    regs: UpperRegSet<XReg>,
) -> Self::Return
 
fn pop_frame_restore( &mut self, amt: u16, regs: UpperRegSet<XReg>, ) -> Self::Return
Inverse of push_frame_save. Restores regs from the top of
the stack, then runs stack_free32 amt, then runs pop_frame.
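A schematic model of how the two macro-ops pair up, using a Vec of 64-bit stack slots in place of the interpreter's real stack (the layout and growth direction here are illustrative assumptions, not Wasmtime's actual representation):

struct Machine {
    stack: Vec<u64>, // the end of the Vec plays the role of sp
    fp: u64,
    lr: u64,
}

impl Machine {
    fn push_frame(&mut self) {
        self.stack.push(self.lr); // push lr
        self.stack.push(self.fp); // push fp
        self.fp = self.stack.len() as u64; // fp = sp
    }

    fn push_frame_save(&mut self, amt: usize, regs: &[u64]) {
        self.push_frame();
        self.stack.resize(self.stack.len() + amt, 0); // stack_alloc32 amt
        let top = self.stack.len() - regs.len();
        self.stack[top..].copy_from_slice(regs); // save regs at the top
    }

    fn pop_frame_restore(&mut self, amt: usize, regs: &mut [u64]) {
        let top = self.stack.len() - regs.len();
        regs.copy_from_slice(&self.stack[top..]); // restore regs
        self.stack.truncate(self.stack.len() - amt); // stack_free32 amt
        self.fp = self.stack.pop().unwrap(); // pop_frame: pop fp
        self.lr = self.stack.pop().unwrap(); //            pop lr
    }
}

fn main() {
    let mut m = Machine { stack: Vec::new(), fp: 0, lr: 0x1234 };
    let saved = [7, 8];
    m.push_frame_save(2, &saved);
    let mut regs = [0u64; 2];
    m.pop_frame_restore(2, &mut regs);
    assert_eq!(regs, saved);
    assert_eq!(m.lr, 0x1234);
}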
Source§fn stack_alloc32(&mut self, amt: u32) -> Self::Return
 
fn stack_alloc32(&mut self, amt: u32) -> Self::Return
sp = sp.checked_sub(amt)
Source§fn stack_free32(&mut self, amt: u32) -> Self::Return
 
fn stack_free32(&mut self, amt: u32) -> Self::Return
sp = sp + amt
Source§fn xdiv32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xdiv32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) / low32(src2) (signed)
Source§fn xdiv64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xdiv64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = src1 / src2 (signed)
Source§fn xdiv32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xdiv32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) / low32(src2) (unsigned)
Source§fn xdiv64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xdiv64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = src1 / src2 (unsigned)
Source§fn xrem32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xrem32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) % low32(src2) (signed)
Source§fn xrem64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xrem64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = src1 % src2 (signed)
Source§fn xrem32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xrem32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) % low32(src2) (unsigned)
Source§fn xrem64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xrem64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = src1 % src2 (unsigned)
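The signed/unsigned distinction for division and remainder, read as plain Rust over the same register bits (divide-by-zero and i64::MIN / -1 behavior are not specified by the pseudo-code above and are not modeled here):

fn xdiv64_s(src1: u64, src2: u64) -> u64 {
    ((src1 as i64) / (src2 as i64)) as u64 // truncates toward zero
}

fn xrem64_u(src1: u64, src2: u64) -> u64 {
    src1 % src2
}

fn main() {
    assert_eq!(xdiv64_s((-9i64) as u64, 2), (-4i64) as u64);
    assert_eq!(xrem64_u(9, 2), 1);
}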
Source§fn xband32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xband32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) & low32(src2)
Source§fn xband32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
 
fn xband32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
Same as xband32 but src2 is a sign-extended 8-bit immediate.
Source§fn xband32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
 
fn xband32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
Same as xband32 but src2 is a sign-extended 32-bit immediate.
Source§fn xband64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
 
fn xband64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
Same as xband64 but src2 is a sign-extended 8-bit immediate.
Source§fn xband64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
 
fn xband64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
Same as xband64 but src2 is a sign-extended 32-bit immediate.
Source§fn xbor32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xbor32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) | low32(src2)
Source§fn xbor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
 
fn xbor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
Same as xbor32 but src2 is a sign-extended 8-bit immediate.
Source§fn xbor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
 
fn xbor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
Same as xbor32 but src2 is a sign-extended 32-bit immediate.
Source§fn xbor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
 
fn xbor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
Same as xbor64 but src2 is a sign-extended 8-bit immediate.
Source§fn xbor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
 
fn xbor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
Same as xbor64 but src2 is a sign-extended 32-bit immediate.
Source§fn xbxor32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xbxor32(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = low32(src1) ^ low32(src2)
Source§fn xbxor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
 
fn xbxor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
Same as xbxor32 but src2 is a sign-extended 8-bit immediate.
Source§fn xbxor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
 
fn xbxor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
Same as xbxor32 but src2 is a sign-extended 32-bit immediate.
Source§fn xbxor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
 
fn xbxor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8) -> Self::Return
Same as xbxor64 but src2 is a sign-extended 8-bit immediate.
Source§fn xbxor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
 
fn xbxor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32) -> Self::Return
Same as xbxor64 but src2 is a sign-extended 32-bit immediate.
Source§fn xmin32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xmin32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = min(low32(src1), low32(src2)) (unsigned)
Source§fn xmin32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xmin32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = min(low32(src1), low32(src2)) (signed)
Source§fn xmax32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xmax32_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = max(low32(src1), low32(src2)) (unsigned)
Source§fn xmax32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xmax32_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
low32(dst) = max(low32(src1), low32(src2)) (signed)
Source§fn xmin64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xmin64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = min(src1, src2) (unsigned)
Source§fn xmin64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xmin64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = min(src1, src2) (signed)
Source§fn xmax64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xmax64_u(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = max(src1, src2) (unsigned)
Source§fn xmax64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
 
fn xmax64_s(&mut self, operands: BinaryOperands<XReg>) -> Self::Return
dst = max(src1, src2) (signed)
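Illustrative contrast of the signed and unsigned 64-bit variants over the same register bits:

fn xmin64_u(src1: u64, src2: u64) -> u64 {
    src1.min(src2)
}

fn xmin64_s(src1: u64, src2: u64) -> u64 {
    (src1 as i64).min(src2 as i64) as u64
}

fn main() {
    assert_eq!(xmin64_u(u64::MAX, 1), 1);        // unsigned: MAX is largest
    assert_eq!(xmin64_s(u64::MAX, 1), u64::MAX); // signed: MAX is -1
}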
Source§fn xselect32(
    &mut self,
    dst: XReg,
    cond: XReg,
    if_nonzero: XReg,
    if_zero: XReg,
) -> Self::Return
 
fn xselect32( &mut self, dst: XReg, cond: XReg, if_nonzero: XReg, if_zero: XReg, ) -> Self::Return
low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
Source§fn xselect64(
    &mut self,
    dst: XReg,
    cond: XReg,
    if_nonzero: XReg,
    if_zero: XReg,
) -> Self::Return
 
fn xselect64( &mut self, dst: XReg, cond: XReg, if_nonzero: XReg, if_zero: XReg, ) -> Self::Return
dst = low32(cond) ? if_nonzero : if_zero
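Note that both selects test only the low 32 bits of cond against zero; an illustrative sketch of the 64-bit form:

fn xselect64(cond: u64, if_nonzero: u64, if_zero: u64) -> u64 {
    if cond as u32 != 0 { if_nonzero } else { if_zero }
}

fn main() {
    assert_eq!(xselect64(0x1_0000_0000, 10, 20), 20); // low32 is zero
    assert_eq!(xselect64(1, 10, 20), 10);
}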