Available on crate feature `encode` only.
Expand description
Encoding support for pulley bytecode.
Traits§
- Encode
 - Helper trait to encode instructions into a “sink”.
 
Functions§
- bitcast_
float_ from_ int_ 32  low32(dst) = bitcast low32(src) as f32- bitcast_
float_ from_ int_ 64  dst = bitcast src as f64- bitcast_
int_ from_ float_ 32  low32(dst) = bitcast low32(src) as i32- bitcast_
int_ from_ float_ 64  dst = bitcast src as i64- br_if32
 - Conditionally transfer control to the given PC offset if
low32(cond)contains a non-zero value. - br_
if_ not32  - Conditionally transfer control to the given PC offset if
low32(cond)contains a zero value. - br_
if_ xeq32  - Branch if 
a == b. - br_
if_ xeq64  - Branch if 
a == b. - br_
if_ xeq32_ i8  - Branch if 
a == b. - br_
if_ xeq32_ i32  - Branch if 
a == b. - br_
if_ xeq64_ i8  - Branch if 
a == b. - br_
if_ xeq64_ i32  - Branch if 
a == b. - br_
if_ xneq32  - Branch if 
a != b. - br_
if_ xneq64  - Branch if 
a != b. - br_
if_ xneq32_ i8  - Branch if 
a != b. - br_
if_ xneq32_ i32  - Branch if 
a != b. - br_
if_ xneq64_ i8  - Branch if 
a != b. - br_
if_ xneq64_ i32  - Branch if 
a != b. - br_
if_ xsgt32_ i8  - Branch if signed 
a > b. - br_
if_ xsgt32_ i32  - Branch if signed 
a > b. - br_
if_ xsgt64_ i8  - Branch if signed 
a > b. - br_
if_ xsgt64_ i32  - Branch if signed 
a > b. - br_
if_ xsgteq32_ i8  - Branch if signed 
a >= b. - br_
if_ xsgteq32_ i32  - Branch if signed 
a >= b. - br_
if_ xsgteq64_ i8  - Branch if signed 
a >= b. - br_
if_ xsgteq64_ i32  - Branch if signed 
a >= b. - br_
if_ xslt32  - Branch if signed 
a < b. - br_
if_ xslt64  - Branch if signed 
a < b. - br_
if_ xslt32_ i8  - Branch if signed 
a < b. - br_
if_ xslt32_ i32  - Branch if signed 
a < b. - br_
if_ xslt64_ i8  - Branch if signed 
a < b. - br_
if_ xslt64_ i32  - Branch if signed 
a < b. - br_
if_ xslteq32  - Branch if signed 
a <= b. - br_
if_ xslteq64  - Branch if signed 
a <= b. - br_
if_ xslteq32_ i8  - Branch if signed 
a <= b. - br_
if_ xslteq32_ i32  - Branch if signed 
a <= b. - br_
if_ xslteq64_ i8  - Branch if signed 
a <= b. - br_
if_ xslteq64_ i32  - Branch if signed 
a <= b. - br_
if_ xugt32_ u8  - Branch if unsigned 
a > b. - br_
if_ xugt32_ u32  - Branch if unsigned 
a > b. - br_
if_ xugt64_ u8  - Branch if unsigned 
a > b. - br_
if_ xugt64_ u32  - Branch if unsigned 
a > b. - br_
if_ xugteq32_ u8  - Branch if unsigned 
a >= b. - br_
if_ xugteq32_ u32  - Branch if unsigned 
a >= b. - br_
if_ xugteq64_ u8  - Branch if unsigned 
a >= b. - br_
if_ xugteq64_ u32  - Branch if unsigned 
a >= b. - br_
if_ xult32  - Branch if unsigned 
a < b. - br_
if_ xult64  - Branch if unsigned 
a < b. - br_
if_ xult32_ u8  - Branch if unsigned 
a < b. - br_
if_ xult32_ u32  - Branch if unsigned 
a < b. - br_
if_ xult64_ u8  - Branch if unsigned 
a < b. - br_
if_ xult64_ u32  - Branch if unsigned 
a < b. - br_
if_ xulteq32  - Branch if unsigned 
a <= b. - br_
if_ xulteq64  - Branch if unsigned 
a <= b. - br_
if_ xulteq32_ u8  - Branch if unsigned 
a <= b. - br_
if_ xulteq32_ u32  - Branch if unsigned 
a <= b. - br_
if_ xulteq64_ u8  - Branch if unsigned 
a <= b. - br_
if_ xulteq64_ u32  - Branch if unsigned 
a <= b. - br_
table32  - Branch to the label indicated by 
low32(idx). - bswap32
 dst = byteswap(low32(src))- bswap64
 dst = byteswap(src)- call
 - Transfer control to the PC at the given offset and set the 
lrregister to the PC just after this instruction. - call1
 - Like 
call, but alsox0 = arg1 - call2
 - Like 
call, but alsox0, x1 = arg1, arg2 - call3
 - Like 
call, but alsox0, x1, x2 = arg1, arg2, arg3 - call4
 - Like 
call, but alsox0, x1, x2, x3 = arg1, arg2, arg3, arg4 - call_
indirect  - Transfer control to the PC in 
regand setlrto the PC just after this instruction. - call_
indirect_ host  - A special opcode to halt interpreter execution and yield control back to the host.
 - f32_
from_ f64  low32(dst) = demote(src)- f32_
from_ x32_ s  low32(dst) = checked_f32_from_signed(low32(src))- f32_
from_ x32_ u  low32(dst) = checked_f32_from_unsigned(low32(src))- f32_
from_ x64_ s  low32(dst) = checked_f32_from_signed(src)- f32_
from_ x64_ u  low32(dst) = checked_f32_from_unsigned(src)- f64_
from_ f32  dst = promote(low32(src))- f64_
from_ x32_ s  dst = checked_f64_from_signed(low32(src))- f64_
from_ x32_ u  dst = checked_f64_from_unsigned(low32(src))- f64_
from_ x64_ s  dst = checked_f64_from_signed(src)- f64_
from_ x64_ u  dst = checked_f64_from_unsigned(src)- fabs32
 low32(dst) = |low32(src)|- fabs64
 dst = |src|- fadd32
 low32(dst) = low32(src1) + low32(src2)- fadd64
 dst = src1 + src2- fceil32
 low32(dst) = ieee_ceil(low32(src))- fceil64
 dst = ieee_ceil(src)- fconst32
 low32(dst) = bits- fconst64
 dst = bits- fcopysign32
 low32(dst) = copysign(low32(src1), low32(src2))- fcopysign64
 dst = copysign(src1, src2)- fdiv32
 low32(dst) = low32(src1) / low32(src2)- fdiv64
 dst = src1 / src2- feq32
 low32(dst) = zext(src1 == src2)- feq64
 low32(dst) = zext(src1 == src2)- fextractv32x4
 low32(dst) = src[lane]- fextractv64x2
 dst = src[lane]- ffloor32
 low32(dst) = ieee_floor(low32(src))- ffloor64
 dst = ieee_floor(src)- fload32be_
o32  low32(dst) = zext(*addr)- fload32le_
g32  low32(dst) = zext(*addr)- fload32le_
o32  low32(dst) = zext(*addr)- fload32le_
z  low32(dst) = zext(*addr)- fload64be_
o32  dst = *addr- fload64le_
g32  dst = *addr- fload64le_
o32  dst = *addr- fload64le_
z  dst = *addr- flt32
 low32(dst) = zext(src1 < src2)- flt64
 low32(dst) = zext(src1 < src2)- flteq32
 low32(dst) = zext(src1 <= src2)- flteq64
 low32(dst) = zext(src1 <= src2)- fmaximum32
 low32(dst) = ieee_maximum(low32(src1), low32(src2))- fmaximum64
 dst = ieee_maximum(src1, src2)- fminimum32
 low32(dst) = ieee_minimum(low32(src1), low32(src2))- fminimum64
 dst = ieee_minimum(src1, src2)- fmov
 - Move between 
fregisters. - fmul32
 low32(dst) = low32(src1) * low32(src2)- fmul64
 dst = src1 * src2- fnearest32
 low32(dst) = ieee_nearest(low32(src))- fnearest64
 dst = ieee_nearest(src)- fneg32
 low32(dst) = -low32(src)- fneg64
 dst = -src- fneq32
 low32(dst) = zext(src1 != src2)- fneq64
 low32(dst) = zext(src1 != src2)- fselect32
 low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)- fselect64
 dst = low32(cond) ? if_nonzero : if_zero- fsqrt32
 low32(dst) = ieee_sqrt(low32(src))- fsqrt64
 dst = ieee_sqrt(src)- fstore32be_
o32  *addr = low32(src)- fstore32le_
g32  *addr = low32(src)- fstore32le_
o32  *addr = low32(src)- fstore32le_
z  *addr = low32(src)- fstore64be_
o32  *addr = src- fstore64le_
g32  *addr = src- fstore64le_
o32  *addr = src- fstore64le_
z  *addr = src- fsub32
 low32(dst) = low32(src1) - low32(src2)- fsub64
 dst = src1 - src2- ftrunc32
 low32(dst) = ieee_trunc(low32(src))- ftrunc64
 dst = ieee_trunc(src)- jump
 - Unconditionally transfer control to the PC at the given offset.
 - nop
 - Do nothing.
 - pop_
frame  sp = fp; pop fp; pop lr- pop_
frame_ restore  - Inverse of 
push_frame_save. Restoresregsfrom the top of the stack, then runsstack_free32 amt, then runspop_frame. - push_
frame  push lr; push fp; fp = sp- push_
frame_ save  - Macro-instruction to enter a function, allocate some stack, and then save some registers.
 - ret
 - Transfer control the address in the 
lrregister. - sext8
 dst = sext(low8(src))- sext16
 dst = sext(low16(src))- sext32
 dst = sext(low32(src))- stack_
alloc32  sp = sp.checked_sub(amt)- stack_
free32  sp = sp + amt- trap
 - Raise a trap.
 - vabs8x16
 dst = |src|- vabs16x8
 dst = |src|- vabs32x4
 dst = |src|- vabs64x2
 dst = |src|- vabsf32x4
 dst = |src|- vabsf64x2
 dst = |src|- vaddf32x4
 dst = src1 + src2- vaddf64x2
 dst = src1 + src2- vaddi8x16
 dst = src1 + src2- vaddi8x16_
sat  dst = saturating_add(src1, src2)- vaddi16x8
 dst = src1 + src2- vaddi16x8_
sat  dst = saturating_add(src1, src2)- vaddi32x4
 dst = src1 + src2- vaddi64x2
 dst = src1 + src2- vaddpairwisei16x8_
s  dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]- vaddpairwisei32x4_
s  dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]- vaddu8x16_
sat  dst = saturating_add(src1, src2)- vaddu16x8_
sat  dst = saturating_add(src1, src2)- valltrue8x16
 - Store whether all lanes are nonzero in 
dst. - valltrue16x8
 - Store whether all lanes are nonzero in 
dst. - valltrue32x4
 - Store whether all lanes are nonzero in 
dst. - valltrue64x2
 - Store whether all lanes are nonzero in 
dst. - vanytrue8x16
 - Store whether any lanes are nonzero in 
dst. - vanytrue16x8
 - Store whether any lanes are nonzero in 
dst. - vanytrue32x4
 - Store whether any lanes are nonzero in 
dst. - vanytrue64x2
 - Store whether any lanes are nonzero in 
dst. - vavground8x16
 dst = (src1 + src2 + 1) // 2- vavground16x8
 dst = (src1 + src2 + 1) // 2- vband128
 dst = src1 & src2- vbitmask8x16
 - Collect high bits of each lane into the low 32-bits of the destination.
 - vbitmask16x8
 - Collect high bits of each lane into the low 32-bits of the destination.
 - vbitmask32x4
 - Collect high bits of each lane into the low 32-bits of the destination.
 - vbitmask64x2
 - Collect high bits of each lane into the low 32-bits of the destination.
 - vbitselect128
 dst = (c & x) | (!c & y)- vbnot128
 dst = !src1- vbor128
 dst = src1 | src2- vbxor128
 dst = src1 ^ src2- vceil32x4
 low128(dst) = ieee_ceil(low128(src))- vceil64x2
 low128(dst) = ieee_ceil(low128(src))- vconst128
 dst = imm- vdivf32x4
 low128(dst) = low128(src1) / low128(src2)- vdivf64x2
 dst = src1 / src2- veq8x16
 dst = src == dst- veq16x8
 dst = src == dst- veq32x4
 dst = src == dst- veq64x2
 dst = src == dst- veqf32x4
 dst = src == dst- veqf64x2
 dst = src == dst- vf32x4_
from_ i32x4_ s  - Int-to-float conversion (same as 
f32_from_x32_s) - vf32x4_
from_ i32x4_ u  - Int-to-float conversion (same as 
f32_from_x32_u) - vf64x2_
from_ i64x2_ s  - Int-to-float conversion (same as 
f64_from_x64_s) - vf64x2_
from_ i64x2_ u  - Int-to-float conversion (same as 
f64_from_x64_u) - vfdemote
 - Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.
 - vfloor32x4
 low128(dst) = ieee_floor(low128(src))- vfloor64x2
 low128(dst) = ieee_floor(low128(src))- vfma32x4
 dst = ieee_fma(a, b, c)- vfma64x2
 dst = ieee_fma(a, b, c)- vfpromotelow
 - Promotes the low two lanes of the f32x4 input to f64x2.
 - vi32x4_
from_ f32x4_ s  - Float-to-int conversion (same as 
x32_from_f32_s) - vi32x4_
from_ f32x4_ u  - Float-to-int conversion (same as 
x32_from_f32_u) - vi64x2_
from_ f64x2_ s  - Float-to-int conversion (same as 
x64_from_f64_s) - vi64x2_
from_ f64x2_ u  - Float-to-int conversion (same as 
x64_from_f64_u) - vinsertf32
 dst = src1; dst[lane] = src2- vinsertf64
 dst = src1; dst[lane] = src2- vinsertx8
 dst = src1; dst[lane] = src2- vinsertx16
 dst = src1; dst[lane] = src2- vinsertx32
 dst = src1; dst[lane] = src2- vinsertx64
 dst = src1; dst[lane] = src2- vload8x8_
s_ z  - Load the 64-bit source as i8x8 and sign-extend to i16x8.
 - vload8x8_
u_ z  - Load the 64-bit source as u8x8 and zero-extend to i16x8.
 - vload16x4le_
s_ z  - Load the 64-bit source as i16x4 and sign-extend to i32x4.
 - vload16x4le_
u_ z  - Load the 64-bit source as u16x4 and zero-extend to i32x4.
 - vload32x2le_
s_ z  - Load the 64-bit source as i32x2 and sign-extend to i64x2.
 - vload32x2le_
u_ z  - Load the 64-bit source as u32x2 and zero-extend to i64x2.
 - vload128le_
g32  dst = *(ptr + offset)- vload128le_
o32  dst = *addr- vload128le_
z  dst = *(ptr + offset)- vlteqf32x4
 dst = src <= dst- vlteqf64x2
 dst = src <= dst- vltf32x4
 dst = src < dst- vltf64x2
 dst = src < dst- vmax8x16_
s  dst = max(src1, src2)(signed)- vmax8x16_
u  dst = max(src1, src2)(unsigned)- vmax16x8_
s  dst = max(src1, src2)(signed)- vmax16x8_
u  dst = max(src1, src2)(unsigned)- vmax32x4_
s  dst = max(src1, src2)(signed)- vmax32x4_
u  dst = max(src1, src2)(unsigned)- vmaximumf32x4
 dst = ieee_maximum(src1, src2)- vmaximumf64x2
 dst = ieee_maximum(src1, src2)- vmin8x16_
s  dst = min(src1, src2)(signed)- vmin8x16_
u  dst = min(src1, src2)(unsigned)- vmin16x8_
s  dst = min(src1, src2)(signed)- vmin16x8_
u  dst = min(src1, src2)(unsigned)- vmin32x4_
s  dst = min(src1, src2)(signed)- vmin32x4_
u  dst = min(src1, src2)(unsigned)- vminimumf32x4
 dst = ieee_minimum(src1, src2)- vminimumf64x2
 dst = ieee_minimum(src1, src2)- vmov
 - Move between 
vregisters. - vmulf32x4
 low128(dst) = low128(src1) * low128(src2)- vmulf64x2
 dst = src1 * src2- vmuli8x16
 dst = src1 * src2- vmuli16x8
 dst = src1 * src2- vmuli32x4
 dst = src1 * src2- vmuli64x2
 dst = src1 * src2- vnarrow16x8_
s  - Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
 - vnarrow16x8_
u  - Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
 - vnarrow32x4_
s  - Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
 - vnarrow32x4_
u  - Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
 - vnarrow64x2_
s  - Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
 - vnarrow64x2_
u  - Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
 - vnearest32x4
 low128(dst) = ieee_nearest(low128(src))- vnearest64x2
 low128(dst) = ieee_nearest(low128(src))- vneg8x16
 dst = -src- vneg16x8
 dst = -src- vneg32x4
 dst = -src- vneg64x2
 dst = -src- vnegf32x4
 low128(dst) = -low128(src)- vnegf64x2
 dst = -src- vneq8x16
 dst = src != dst- vneq16x8
 dst = src != dst- vneq32x4
 dst = src != dst- vneq64x2
 dst = src != dst- vneqf32x4
 dst = src != dst- vneqf64x2
 dst = src != dst- vpopcnt8x16
 dst = count_ones(src)- vqmulrsi16x8
 dst = signed_saturate(src1 * src2 + (1 << (Q - 1)) >> Q)- vselect
 dst = low32(cond) ? if_nonzero : if_zero- vshli8x16
 dst = src1 << src2- vshli16x8
 dst = src1 << src2- vshli32x4
 dst = src1 << src2- vshli64x2
 dst = src1 << src2- vshri8x16_
s  dst = src1 >> src2(signed)- vshri8x16_
u  dst = src1 >> src2(unsigned)- vshri16x8_
s  dst = src1 >> src2(signed)- vshri16x8_
u  dst = src1 >> src2(unsigned)- vshri32x4_
s  dst = src1 >> src2(signed)- vshri32x4_
u  dst = src1 >> src2(unsigned)- vshri64x2_
s  dst = src1 >> src2(signed)- vshri64x2_
u  dst = src1 >> src2(unsigned)- vshuffle
 dst = shuffle(src1, src2, mask)- vslt8x16
 dst = src < dst(signed)- vslt16x8
 dst = src < dst(signed)- vslt32x4
 dst = src < dst(signed)- vslt64x2
 dst = src < dst(signed)- vslteq8x16
 dst = src <= dst(signed)- vslteq16x8
 dst = src <= dst(signed)- vslteq32x4
 dst = src <= dst(signed)- vslteq64x2
 dst = src <= dst(signed)- vsplatf32
 dst = splat(low32(src))- vsplatf64
 dst = splat(src)- vsplatx8
 dst = splat(low8(src))- vsplatx16
 dst = splat(low16(src))- vsplatx32
 dst = splat(low32(src))- vsplatx64
 dst = splat(src)- vsqrt32x4
 low128(dst) = ieee_sqrt(low128(src))- vsqrt64x2
 low128(dst) = ieee_sqrt(low128(src))- vstore128le_
g32  *(ptr + offset) = src- vstore128le_
o32  *addr = src- vstore128le_
z  *(ptr + offset) = src- vsubf32x4
 low128(dst) = low128(src1) - low128(src2)- vsubf64x2
 dst = src1 - src2- vsubi8x16
 dst = src1 - src2- vsubi8x16_
sat  dst = saturating_sub(src1, src2)- vsubi16x8
 dst = src1 - src2- vsubi16x8_
sat  dst = saturating_sub(src1, src2)- vsubi32x4
 dst = src1 - src2- vsubi64x2
 dst = src1 - src2- vsubu8x16_
sat  dst = saturating_sub(src1, src2)- vsubu16x8_
sat  dst = saturating_sub(src1, src2)- vswizzlei8x16
 dst = swizzle(src1, src2)- vtrunc32x4
 low128(dst) = ieee_trunc(low128(src))- vtrunc64x2
 low128(dst) = ieee_trunc(low128(src))- vult8x16
 dst = src < dst(unsigned)- vult16x8
 dst = src < dst(unsigned)- vult32x4
 dst = src < dst(unsigned)- vult64x2
 dst = src < dst(unsigned)- vulteq8x16
 dst = src <= dst(unsigned)- vulteq16x8
 dst = src <= dst(unsigned)- vulteq32x4
 dst = src <= dst(unsigned)- vulteq64x2
 dst = src <= dst(unsigned)- vunarrow64x2_
u  - Narrows the two 64x2 vectors, assuming all input lanes are unsigned, to half the width. Narrowing is unsigned and saturating.
 - vwidenhigh8x16_
s  - Widens the high lanes of the input vector, as signed, to twice the width.
 - vwidenhigh8x16_
u  - Widens the high lanes of the input vector, as unsigned, to twice the width.
 - vwidenhigh16x8_
s  - Widens the high lanes of the input vector, as signed, to twice the width.
 - vwidenhigh16x8_
u  - Widens the high lanes of the input vector, as unsigned, to twice the width.
 - vwidenhigh32x4_
s  - Widens the high lanes of the input vector, as signed, to twice the width.
 - vwidenhigh32x4_
u  - Widens the high lanes of the input vector, as unsigned, to twice the width.
 - vwidenlow8x16_
s  - Widens the low lanes of the input vector, as signed, to twice the width.
 - vwidenlow8x16_
u  - Widens the low lanes of the input vector, as unsigned, to twice the width.
 - vwidenlow16x8_
s  - Widens the low lanes of the input vector, as signed, to twice the width.
 - vwidenlow16x8_
u  - Widens the low lanes of the input vector, as unsigned, to twice the width.
 - vwidenlow32x4_
s  - Widens the low lanes of the input vector, as signed, to twice the width.
 - vwidenlow32x4_
u  - Widens the low lanes of the input vector, as unsigned, to twice the width.
 - x32_
from_ f32_ s  low32(dst) = checked_signed_from_f32(low32(src))- x32_
from_ f32_ s_ sat  low32(dst) = saturating_signed_from_f32(low32(src))- x32_
from_ f32_ u  low32(dst) = checked_unsigned_from_f32(low32(src))- x32_
from_ f32_ u_ sat  low32(dst) = saturating_unsigned_from_f32(low32(src))- x32_
from_ f64_ s  low32(dst) = checked_signed_from_f64(src)- x32_
from_ f64_ s_ sat  low32(dst) = saturating_signed_from_f64(src)- x32_
from_ f64_ u  low32(dst) = checked_unsigned_from_f64(src)- x32_
from_ f64_ u_ sat  low32(dst) = saturating_unsigned_from_f64(src)- x64_
from_ f32_ s  dst = checked_signed_from_f32(low32(src))- x64_
from_ f32_ s_ sat  dst = saturating_signed_from_f32(low32(src))- x64_
from_ f32_ u  dst = checked_unsigned_from_f32(low32(src))- x64_
from_ f32_ u_ sat  dst = saturating_unsigned_from_f32(low32(src))- x64_
from_ f64_ s  dst = checked_signed_from_f64(src)- x64_
from_ f64_ s_ sat  dst = saturating_signed_from_f64(src)- x64_
from_ f64_ u  dst = checked_unsigned_from_f64(src)- x64_
from_ f64_ u_ sat  dst = saturating_unsigned_from_f64(src)- xabs32
 low32(dst) = |low32(src)|- xabs64
 dst = |src|- xadd32
 - 32-bit wrapping addition: 
low32(dst) = low32(src1) + low32(src2). - xadd64
 - 64-bit wrapping addition: 
dst = src1 + src2. - xadd32_
u8  - Same as 
xadd32butsrc2is a zero-extended 8-bit immediate. - xadd32_
u32  - Same as 
xadd32butsrc2is a 32-bit immediate. - xadd32_
uoverflow_ trap  - 32-bit checked unsigned addition: 
low32(dst) = low32(src1) + low32(src2). - xadd64_
u8  - Same as 
xadd64butsrc2is a zero-extended 8-bit immediate. - xadd64_
u32  - Same as 
xadd64butsrc2is a zero-extended 32-bit immediate. - xadd64_
uoverflow_ trap  - 64-bit checked unsigned addition: 
dst = src1 + src2. - xadd128
 dst_hi:dst_lo = lhs_hi:lhs_lo + rhs_hi:rhs_lo- xband32
 low32(dst) = low32(src1) & low32(src2)- xband64
 dst = src1 & src2- xband32_
s8  - Same as 
xband32butsrc2is a sign-extended 8-bit immediate. - xband32_
s32  - Same as 
xband32butsrc2is a sign-extended 32-bit immediate. - xband64_
s8  - Same as 
xband64butsrc2is a sign-extended 8-bit immediate. - xband64_
s32  - Same as 
xband64butsrc2is a sign-extended 32-bit immediate. - xbmask32
 - low32(dst) = if low32(src) == 0 { 0 } else { -1 }
 - xbmask64
 - dst = if src == 0 { 0 } else { -1 }
 - xbnot32
 low32(dst) = !low32(src1)- xbnot64
 dst = !src1- xbor32
 low32(dst) = low32(src1) | low32(src2)- xbor64
 dst = src1 | src2- xbor32_
s8  - Same as 
xbor32butsrc2is a sign-extended 8-bit immediate. - xbor32_
s32  - Same as 
xbor32butsrc2is a sign-extended 32-bit immediate. - xbor64_
s8  - Same as 
xbor64butsrc2is a sign-extended 8-bit immediate. - xbor64_
s32  - Same as 
xbor64butsrc2is a sign-extended 32-bit immediate. - xbxor32
 low32(dst) = low32(src1) ^ low32(src2)- xbxor64
 dst = src1 ^ src2- xbxor32_
s8  - Same as 
xbxor32butsrc2is a sign-extended 8-bit immediate. - xbxor32_
s32  - Same as 
xbxor32butsrc2is a sign-extended 32-bit immediate. - xbxor64_
s8  - Same as 
xbxor64butsrc2is a sign-extended 8-bit immediate. - xbxor64_
s32  - Same as 
xbxor64butsrc2is a sign-extended 32-bit immediate. - xclz32
 low32(dst) = leading_zeros(low32(src))- xclz64
 dst = leading_zeros(src)- xconst8
 - Set 
dst = sign_extend(imm8). - xconst16
 - Set 
dst = sign_extend(imm16). - xconst32
 - Set 
dst = sign_extend(imm32). - xconst64
 - Set 
dst = imm64. - xctz32
 low32(dst) = trailing_zeros(low32(src))- xctz64
 dst = trailing_zeros(src)- xdiv32_
s  low32(dst) = low32(src1) / low32(src2)(signed)- xdiv32_
u  low32(dst) = low32(src1) / low32(src2)(unsigned)- xdiv64_
s  dst = src1 / src2(signed)- xdiv64_
u  dst = src1 / src2(unsigned)- xeq32
 low32(dst) = low32(src1) == low32(src2)- xeq64
 low32(dst) = src1 == src2- xextractv8x16
 low32(dst) = zext(src[lane])- xextractv16x8
 low32(dst) = zext(src[lane])- xextractv32x4
 low32(dst) = src[lane]- xextractv64x2
 dst = src[lane]- xjump
 - Unconditionally transfer control to the PC at specified register.
 - xload8_
s32_ g32  low32(dst) = sext_8_32(*addr)- xload8_
s32_ g32bne  low32(dst) = sext_8_32(*addr)- xload8_
s32_ o32  low32(dst) = sext_8_32(*addr)- xload8_
s32_ z  low32(dst) = sext_8_32(*addr)- xload8_
u32_ g32  low32(dst) = zext_8_32(*addr)- xload8_
u32_ g32bne  low32(dst) = zext_8_32(*addr)- xload8_
u32_ o32  low32(dst) = zext_8_32(*addr)- xload8_
u32_ z  low32(dst) = zext_8_32(*addr)- xload16be_
s32_ o32  low32(dst) = sext(*addr)- xload16be_
u32_ o32  low32(dst) = zext(*addr)- xload16le_
s32_ g32  low32(dst) = sext_16_32(*addr)- xload16le_
s32_ g32bne  low32(dst) = sext_16_32(*addr)- xload16le_
s32_ o32  low32(dst) = sext_16_32(*addr)- xload16le_
s32_ z  low32(dst) = sext_16_32(*addr)- xload16le_
u32_ g32  low32(dst) = zext_16_32(*addr)- xload16le_
u32_ g32bne  low32(dst) = zext_16_32(*addr)- xload16le_
u32_ o32  low32(dst) = zext_16_32(*addr)- xload16le_
u32_ z  low32(dst) = zext_16_32(*addr)- xload32be_
o32  low32(dst) = zext(*addr)- xload32le_
g32  low32(dst) = *addr- xload32le_
g32bne  low32(dst) = *addr- xload32le_
o32  low32(dst) = *addr- xload32le_
z  low32(dst) = *addr- xload64be_
o32  dst = *addr- xload64le_
g32  dst = *addr- xload64le_
g32bne  dst = *addr- xload64le_
o32  dst = *addr- xload64le_
z  dst = *addr- xmadd32
 low32(dst) = low32(src1) * low32(src2) + low32(src3)- xmadd64
 dst = src1 * src2 + src3- xmax32_
s  low32(dst) = max(low32(src1), low32(src2))(signed)- xmax32_
u  low32(dst) = max(low32(src1), low32(src2))(unsigned)- xmax64_
s  dst = max(src1, src2)(signed)- xmax64_
u  dst = max(src1, src2)(unsigned)- xmin32_
s  low32(dst) = min(low32(src1), low32(src2))(signed)- xmin32_
u  low32(dst) = min(low32(src1), low32(src2))(unsigned)- xmin64_
s  dst = min(src1, src2)(signed)- xmin64_
u  dst = min(src1, src2)(unsigned)- xmov
 - Move between 
xregisters. - xmov_fp
 - Gets the special “fp” register and moves it into 
dst. - xmov_lr
 - Gets the special “lr” register and moves it into 
dst. - xmul32
 low32(dst) = low32(src1) * low32(src2)- xmul64
 dst = src1 * src2- xmul32_
s8  - Same as 
xmul32butsrc2is a sign-extended 8-bit immediate. - xmul32_
s32  - Same as 
xmul32butsrc2is a sign-extended 32-bit immediate. - xmul64_
s8  - Same as 
xmul64butsrc2is a sign-extended 8-bit immediate. - xmul64_
s32  - Same as 
xmul64butsrc2is a sign-extended 32-bit immediate. - xmulhi64_
s  dst = high64(src1 * src2)(signed)- xmulhi64_
u  dst = high64(src1 * src2)(unsigned)- xneg32
 low32(dst) = -low32(src)- xneg64
 dst = -src- xneq32
 low32(dst) = low32(src1) != low32(src2)- xneq64
 low32(dst) = src1 != src2- xone
 - Set 
dst = 1 - xpcadd
 - Adds 
offsetto the pc of this instruction and stores it indst. - xpopcnt32
 low32(dst) = count_ones(low32(src))- xpopcnt64
 dst = count_ones(src)- xrem32_
s  low32(dst) = low32(src1) % low32(src2)(signed)- xrem32_
u  low32(dst) = low32(src1) % low32(src2)(unsigned)- xrem64_
s  dst = src1 % src2(signed)- xrem64_
u  dst = src1 % src2(unsigned)- xrotl32
 low32(dst) = rotate_left(low32(src1), low32(src2))- xrotl64
 dst = rotate_left(src1, src2)- xrotr32
 low32(dst) = rotate_right(low32(src1), low32(src2))- xrotr64
 dst = rotate_right(src1, src2)- xselect32
 low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)- xselect64
 dst = low32(cond) ? if_nonzero : if_zero- xshl32
 low32(dst) = low32(src1) << low5(src2)- xshl64
 dst = src1 << low6(src2)- xshl32_
u6  low32(dst) = low32(src1) << low5(src2)- xshl64_
u6  dst = src1 << low6(src2)- xshr32_
s  low32(dst) = low32(src1) >> low5(src2)- xshr32_
s_ u6  low32(dst) = low32(src1) >> low5(src2)- xshr32_
u  low32(dst) = low32(src1) >> low5(src2)- xshr32_
u_ u6  low32(dst) = low32(src1) >> low5(src2)- xshr64_
s  dst = src1 >> low6(src2)- xshr64_
s_ u6  dst = src1 >> low6(src2)- xshr64_
u  dst = src1 >> low6(src2)- xshr64_
u_ u6  dst = src1 >> low6(src2)- xslt32
 low32(dst) = low32(src1) < low32(src2)(signed)- xslt64
 low32(dst) = src1 < src2(signed)- xslteq32
 low32(dst) = low32(src1) <= low32(src2)(signed)- xslteq64
 low32(dst) = src1 <= src2(signed)- xstore8_
g32  *addr = low8(src)- xstore8_
g32bne  *addr = low8(src)- xstore8_
o32  *addr = low8(src)- xstore8_
z  *addr = low8(src)- xstore16be_
o32  *addr = low16(src)- xstore16le_
g32  *addr = low16(src)- xstore16le_
g32bne  *addr = low16(src)- xstore16le_
o32  *addr = low16(src)- xstore16le_
z  *addr = low16(src)- xstore32be_
o32  *addr = low32(src)- xstore32le_
g32  *addr = low32(src)- xstore32le_
g32bne  *addr = low32(src)- xstore32le_
o32  *addr = low32(src)- xstore32le_
z  *addr = low32(src)- xstore64be_
o32  *addr = low64(src)- xstore64le_
g32  *addr = src- xstore64le_
g32bne  *addr = src- xstore64le_
o32  *addr = src- xstore64le_
z  *addr = src- xsub32
 - 32-bit wrapping subtraction: 
low32(dst) = low32(src1) - low32(src2). - xsub64
 - 64-bit wrapping subtraction: 
dst = src1 - src2. - xsub32_
u8  - Same as 
xsub32butsrc2is a zero-extended 8-bit immediate. - xsub32_
u32  - Same as 
xsub32butsrc2is a 32-bit immediate. - xsub64_
u8  - Same as 
xsub64butsrc2is a zero-extended 8-bit immediate. - xsub64_
u32  - Same as 
xsub64butsrc2is a zero-extended 32-bit immediate. - xsub128
 dst_hi:dst_lo = lhs_hi:lhs_lo - rhs_hi:rhs_lo- xult32
 low32(dst) = low32(src1) < low32(src2)(unsigned)- xult64
 low32(dst) = src1 < src2(unsigned)- xulteq32
 low32(dst) = low32(src1) <= low32(src2)(unsigned)- xulteq64
 low32(dst) = src1 <= src2(unsigned)- xwidemul64_
s  dst_hi:dst_lo = sext(lhs) * sext(rhs)- xwidemul64_
u  dst_hi:dst_lo = zext(lhs) * zext(rhs)- xzero
 - Set 
dst = 0 - zext8
 dst = zext(low8(src))- zext16
 dst = zext(low16(src))- zext32
 dst = zext(low32(src))