#[repr(u16)]
pub enum ExtendedOpcode {
Trap = 0,
Nop = 1,
CallIndirectHost = 2,
Xpcadd = 3,
XmovFp = 4,
XmovLr = 5,
Bswap32 = 6,
Bswap64 = 7,
Xadd32UoverflowTrap = 8,
Xadd64UoverflowTrap = 9,
XMulHi64S = 10,
XMulHi64U = 11,
Xbmask32 = 12,
Xbmask64 = 13,
XLoad16BeU32O32 = 14,
XLoad16BeS32O32 = 15,
XLoad32BeO32 = 16,
XLoad64BeO32 = 17,
XStore16BeO32 = 18,
XStore32BeO32 = 19,
XStore64BeO32 = 20,
Fload32BeO32 = 21,
Fload64BeO32 = 22,
Fstore32BeO32 = 23,
Fstore64BeO32 = 24,
Fload32LeO32 = 25,
Fload64LeO32 = 26,
Fstore32LeO32 = 27,
Fstore64LeO32 = 28,
Fload32LeZ = 29,
Fload64LeZ = 30,
Fstore32LeZ = 31,
Fstore64LeZ = 32,
Fload32LeG32 = 33,
Fload64LeG32 = 34,
Fstore32LeG32 = 35,
Fstore64LeG32 = 36,
VLoad128O32 = 37,
Vstore128LeO32 = 38,
VLoad128Z = 39,
Vstore128LeZ = 40,
VLoad128G32 = 41,
Vstore128LeG32 = 42,
Fmov = 43,
Vmov = 44,
BitcastIntFromFloat32 = 45,
BitcastIntFromFloat64 = 46,
BitcastFloatFromInt32 = 47,
BitcastFloatFromInt64 = 48,
FConst32 = 49,
FConst64 = 50,
Feq32 = 51,
Fneq32 = 52,
Flt32 = 53,
Flteq32 = 54,
Feq64 = 55,
Fneq64 = 56,
Flt64 = 57,
Flteq64 = 58,
FSelect32 = 59,
FSelect64 = 60,
F32FromF64 = 61,
F64FromF32 = 62,
F32FromX32S = 63,
F32FromX32U = 64,
F32FromX64S = 65,
F32FromX64U = 66,
F64FromX32S = 67,
F64FromX32U = 68,
F64FromX64S = 69,
F64FromX64U = 70,
X32FromF32S = 71,
X32FromF32U = 72,
X32FromF64S = 73,
X32FromF64U = 74,
X64FromF32S = 75,
X64FromF32U = 76,
X64FromF64S = 77,
X64FromF64U = 78,
X32FromF32SSat = 79,
X32FromF32USat = 80,
X32FromF64SSat = 81,
X32FromF64USat = 82,
X64FromF32SSat = 83,
X64FromF32USat = 84,
X64FromF64SSat = 85,
X64FromF64USat = 86,
FCopySign32 = 87,
FCopySign64 = 88,
Fadd32 = 89,
Fsub32 = 90,
Vsubf32x4 = 91,
Fmul32 = 92,
Vmulf32x4 = 93,
Fdiv32 = 94,
Vdivf32x4 = 95,
Fmaximum32 = 96,
Fminimum32 = 97,
Ftrunc32 = 98,
Vtrunc32x4 = 99,
Vtrunc64x2 = 100,
Ffloor32 = 101,
Vfloor32x4 = 102,
Vfloor64x2 = 103,
Fceil32 = 104,
Vceil32x4 = 105,
Vceil64x2 = 106,
Fnearest32 = 107,
Fsqrt32 = 108,
Vsqrt32x4 = 109,
Vsqrt64x2 = 110,
Fneg32 = 111,
Vnegf32x4 = 112,
Fabs32 = 113,
Fadd64 = 114,
Fsub64 = 115,
Fmul64 = 116,
Fdiv64 = 117,
VDivF64x2 = 118,
Fmaximum64 = 119,
Fminimum64 = 120,
Ftrunc64 = 121,
Ffloor64 = 122,
Fceil64 = 123,
Fnearest64 = 124,
Vnearest32x4 = 125,
Vnearest64x2 = 126,
Fsqrt64 = 127,
Fneg64 = 128,
Fabs64 = 129,
Vconst128 = 130,
VAddI8x16 = 131,
VAddI16x8 = 132,
VAddI32x4 = 133,
VAddI64x2 = 134,
VAddF32x4 = 135,
VAddF64x2 = 136,
VAddI8x16Sat = 137,
VAddU8x16Sat = 138,
VAddI16x8Sat = 139,
VAddU16x8Sat = 140,
VAddpairwiseI16x8S = 141,
VAddpairwiseI32x4S = 142,
VShlI8x16 = 143,
VShlI16x8 = 144,
VShlI32x4 = 145,
VShlI64x2 = 146,
VShrI8x16S = 147,
VShrI16x8S = 148,
VShrI32x4S = 149,
VShrI64x2S = 150,
VShrI8x16U = 151,
VShrI16x8U = 152,
VShrI32x4U = 153,
VShrI64x2U = 154,
VSplatX8 = 155,
VSplatX16 = 156,
VSplatX32 = 157,
VSplatX64 = 158,
VSplatF32 = 159,
VSplatF64 = 160,
VLoad8x8SZ = 161,
VLoad8x8UZ = 162,
VLoad16x4LeSZ = 163,
VLoad16x4LeUZ = 164,
VLoad32x2LeSZ = 165,
VLoad32x2LeUZ = 166,
VBand128 = 167,
VBor128 = 168,
VBxor128 = 169,
VBnot128 = 170,
VBitselect128 = 171,
Vbitmask8x16 = 172,
Vbitmask16x8 = 173,
Vbitmask32x4 = 174,
Vbitmask64x2 = 175,
Valltrue8x16 = 176,
Valltrue16x8 = 177,
Valltrue32x4 = 178,
Valltrue64x2 = 179,
Vanytrue8x16 = 180,
Vanytrue16x8 = 181,
Vanytrue32x4 = 182,
Vanytrue64x2 = 183,
VF32x4FromI32x4S = 184,
VF32x4FromI32x4U = 185,
VF64x2FromI64x2S = 186,
VF64x2FromI64x2U = 187,
VI32x4FromF32x4S = 188,
VI32x4FromF32x4U = 189,
VI64x2FromF64x2S = 190,
VI64x2FromF64x2U = 191,
VWidenLow8x16S = 192,
VWidenLow8x16U = 193,
VWidenLow16x8S = 194,
VWidenLow16x8U = 195,
VWidenLow32x4S = 196,
VWidenLow32x4U = 197,
VWidenHigh8x16S = 198,
VWidenHigh8x16U = 199,
VWidenHigh16x8S = 200,
VWidenHigh16x8U = 201,
VWidenHigh32x4S = 202,
VWidenHigh32x4U = 203,
Vnarrow16x8S = 204,
Vnarrow16x8U = 205,
Vnarrow32x4S = 206,
Vnarrow32x4U = 207,
Vnarrow64x2S = 208,
Vnarrow64x2U = 209,
Vunarrow64x2U = 210,
VFpromoteLow = 211,
VFdemote = 212,
VSubI8x16 = 213,
VSubI16x8 = 214,
VSubI32x4 = 215,
VSubI64x2 = 216,
VSubF64x2 = 217,
VSubI8x16Sat = 218,
VSubU8x16Sat = 219,
VSubI16x8Sat = 220,
VSubU16x8Sat = 221,
VMulI8x16 = 222,
VMulI16x8 = 223,
VMulI32x4 = 224,
VMulI64x2 = 225,
VMulF64x2 = 226,
VQmulrsI16x8 = 227,
VPopcnt8x16 = 228,
XExtractV8x16 = 229,
XExtractV16x8 = 230,
XExtractV32x4 = 231,
XExtractV64x2 = 232,
FExtractV32x4 = 233,
FExtractV64x2 = 234,
VInsertX8 = 235,
VInsertX16 = 236,
VInsertX32 = 237,
VInsertX64 = 238,
VInsertF32 = 239,
VInsertF64 = 240,
Veq8x16 = 241,
Vneq8x16 = 242,
Vslt8x16 = 243,
Vslteq8x16 = 244,
Vult8x16 = 245,
Vulteq8x16 = 246,
Veq16x8 = 247,
Vneq16x8 = 248,
Vslt16x8 = 249,
Vslteq16x8 = 250,
Vult16x8 = 251,
Vulteq16x8 = 252,
Veq32x4 = 253,
Vneq32x4 = 254,
Vslt32x4 = 255,
Vslteq32x4 = 256,
Vult32x4 = 257,
Vulteq32x4 = 258,
Veq64x2 = 259,
Vneq64x2 = 260,
Vslt64x2 = 261,
Vslteq64x2 = 262,
Vult64x2 = 263,
Vulteq64x2 = 264,
Vneg8x16 = 265,
Vneg16x8 = 266,
Vneg32x4 = 267,
Vneg64x2 = 268,
VnegF64x2 = 269,
Vmin8x16S = 270,
Vmin8x16U = 271,
Vmin16x8S = 272,
Vmin16x8U = 273,
Vmax8x16S = 274,
Vmax8x16U = 275,
Vmax16x8S = 276,
Vmax16x8U = 277,
Vmin32x4S = 278,
Vmin32x4U = 279,
Vmax32x4S = 280,
Vmax32x4U = 281,
Vabs8x16 = 282,
Vabs16x8 = 283,
Vabs32x4 = 284,
Vabs64x2 = 285,
Vabsf32x4 = 286,
Vabsf64x2 = 287,
Vmaximumf32x4 = 288,
Vmaximumf64x2 = 289,
Vminimumf32x4 = 290,
Vminimumf64x2 = 291,
VShuffle = 292,
Vswizzlei8x16 = 293,
Vavground8x16 = 294,
Vavground16x8 = 295,
VeqF32x4 = 296,
VneqF32x4 = 297,
VltF32x4 = 298,
VlteqF32x4 = 299,
VeqF64x2 = 300,
VneqF64x2 = 301,
VltF64x2 = 302,
VlteqF64x2 = 303,
Vfma32x4 = 304,
Vfma64x2 = 305,
Vselect = 306,
Xadd128 = 307,
Xsub128 = 308,
Xwidemul64S = 309,
Xwidemul64U = 310,
}
An extended opcode.
Variants
Trap = 0
Raise a trap.
Nop = 1
Do nothing.
CallIndirectHost = 2
A special opcode to halt interpreter execution and yield control back to the host.
This opcode results in DoneReason::CallIndirectHost where the id here is shepherded along to the embedder. It's up to the embedder to determine what to do with the id and the current state of registers and the stack.
In Wasmtime this is used to implement interpreter-to-host calls. This is modeled as a call instruction where the first parameter is the native function pointer to invoke and all remaining parameters for the native function are in following parameter positions (e.g. x1, x2, …). The results of the host call are then stored in x0.
Handling this in Wasmtime is done through a "relocation" which is resolved at link-time when raw bytecode from Cranelift is assembled into the final object that Wasmtime will interpret.
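As a purely illustrative sketch (the table, names, and signatures below are hypothetical, not part of the pulley_interpreter API), an embedder receiving the shepherded id might dispatch to a native function roughly like this:

// Hypothetical embedder-side dispatch for a CallIndirectHost halt: the
// id selects a native entry point, and its result is what would then be
// written back to x0 before resuming the interpreter.
type HostFn = fn(&[u64]) -> u64;

fn dispatch_host_call(table: &[HostFn], id: usize, args: &[u64]) -> u64 {
    table[id](args)
}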
Xpcadd = 3
Adds offset to the pc of this instruction and stores it in dst.
XmovFp = 4
Gets the special “fp” register and moves it into dst.
XmovLr = 5
Gets the special “lr” register and moves it into dst.
Bswap32 = 6
dst = byteswap(low32(src))
Bswap64 = 7
dst = byteswap(src)
Xadd32UoverflowTrap = 8
32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2).
The upper 32-bits of dst are unmodified. Traps if the addition overflows.
Xadd64UoverflowTrap = 9
64-bit checked unsigned addition: dst = src1 + src2. Traps if the addition overflows.
XMulHi64S = 10
dst = high64(src1 * src2) (signed)
XMulHi64U = 11
dst = high64(src1 * src2) (unsigned)
Xbmask32 = 12
low32(dst) = if low32(src) == 0 { 0 } else { -1 }
Xbmask64 = 13
dst = if src == 0 { 0 } else { -1 }
XLoad16BeU32O32 = 14
low32(dst) = zext(*addr)
XLoad16BeS32O32 = 15
low32(dst) = sext(*addr)
XLoad32BeO32 = 16
low32(dst) = zext(*addr)
XLoad64BeO32 = 17
dst = *addr
XStore16BeO32 = 18
*addr = low16(src)
XStore32BeO32 = 19
*addr = low32(src)
XStore64BeO32 = 20
*addr = low64(src)
Fload32BeO32 = 21
low32(dst) = zext(*addr)
Fload64BeO32 = 22
dst = *addr
Fstore32BeO32 = 23
*addr = low32(src)
Fstore64BeO32 = 24
*addr = src
Fload32LeO32 = 25
low32(dst) = zext(*addr)
Fload64LeO32 = 26
dst = *addr
Fstore32LeO32 = 27
*addr = low32(src)
Fstore64LeO32 = 28
*addr = src
Fload32LeZ = 29
low32(dst) = zext(*addr)
Fload64LeZ = 30
dst = *addr
Fstore32LeZ = 31
*addr = low32(src)
Fstore64LeZ = 32
*addr = src
Fload32LeG32 = 33
low32(dst) = zext(*addr)
Fload64LeG32 = 34
dst = *addr
Fstore32LeG32 = 35
*addr = low32(src)
Fstore64LeG32 = 36
*addr = src
VLoad128O32 = 37
dst = *addr
Vstore128LeO32 = 38
*addr = src
VLoad128Z = 39
dst = *(ptr + offset)
Vstore128LeZ = 40
*(ptr + offset) = src
VLoad128G32 = 41
dst = *(ptr + offset)
Vstore128LeG32 = 42
*(ptr + offset) = src
Fmov = 43
Move between f registers.
Vmov = 44
Move between v registers.
BitcastIntFromFloat32 = 45
low32(dst) = bitcast low32(src) as i32
BitcastIntFromFloat64 = 46
dst = bitcast src as i64
BitcastFloatFromInt32 = 47
low32(dst) = bitcast low32(src) as f32
BitcastFloatFromInt64 = 48
dst = bitcast src as f64
FConst32 = 49
low32(dst) = bits
FConst64 = 50
dst = bits
Feq32 = 51
low32(dst) = zext(src1 == src2)
Fneq32 = 52
low32(dst) = zext(src1 != src2)
Flt32 = 53
low32(dst) = zext(src1 < src2)
Flteq32 = 54
low32(dst) = zext(src1 <= src2)
Feq64 = 55
low32(dst) = zext(src1 == src2)
Fneq64 = 56
low32(dst) = zext(src1 != src2)
Flt64 = 57
low32(dst) = zext(src1 < src2)
Flteq64 = 58
low32(dst) = zext(src1 <= src2)
FSelect32 = 59
low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
FSelect64 = 60
dst = low32(cond) ? if_nonzero : if_zero
F32FromF64 = 61
low32(dst) = demote(src)
F64FromF32 = 62
dst = promote(low32(src))
F32FromX32S = 63
low32(dst) = checked_f32_from_signed(low32(src))
F32FromX32U = 64
low32(dst) = checked_f32_from_unsigned(low32(src))
F32FromX64S = 65
low32(dst) = checked_f32_from_signed(src)
F32FromX64U = 66
low32(dst) = checked_f32_from_unsigned(src)
F64FromX32S = 67
dst = checked_f64_from_signed(low32(src))
F64FromX32U = 68
dst = checked_f64_from_unsigned(low32(src))
F64FromX64S = 69
dst = checked_f64_from_signed(src)
F64FromX64U = 70
dst = checked_f64_from_unsigned(src)
X32FromF32S = 71
low32(dst) = checked_signed_from_f32(low32(src))
X32FromF32U = 72
low32(dst) = checked_unsigned_from_f32(low32(src))
X32FromF64S = 73
low32(dst) = checked_signed_from_f64(src)
X32FromF64U = 74
low32(dst) = checked_unsigned_from_f64(src)
X64FromF32S = 75
dst = checked_signed_from_f32(low32(src))
X64FromF32U = 76
dst = checked_unsigned_from_f32(low32(src))
X64FromF64S = 77
dst = checked_signed_from_f64(src)
X64FromF64U = 78
dst = checked_unsigned_from_f64(src)
X32FromF32SSat = 79
low32(dst) = saturating_signed_from_f32(low32(src))
X32FromF32USat = 80
low32(dst) = saturating_unsigned_from_f32(low32(src))
X32FromF64SSat = 81
low32(dst) = saturating_signed_from_f64(src)
X32FromF64USat = 82
low32(dst) = saturating_unsigned_from_f64(src)
X64FromF32SSat = 83
dst = saturating_signed_from_f32(low32(src))
X64FromF32USat = 84
dst = saturating_unsigned_from_f32(low32(src))
X64FromF64SSat = 85
dst = saturating_signed_from_f64(src)
X64FromF64USat = 86
dst = saturating_unsigned_from_f64(src)
FCopySign32 = 87
low32(dst) = copysign(low32(src1), low32(src2))
FCopySign64 = 88
dst = copysign(src1, src2)
Fadd32 = 89
low32(dst) = low32(src1) + low32(src2)
Fsub32 = 90
low32(dst) = low32(src1) - low32(src2)
Vsubf32x4 = 91
low128(dst) = low128(src1) - low128(src2)
Fmul32 = 92
low32(dst) = low32(src1) * low32(src2)
Vmulf32x4 = 93
low128(dst) = low128(src1) * low128(src2)
Fdiv32 = 94
low32(dst) = low32(src1) / low32(src2)
Vdivf32x4 = 95
low128(dst) = low128(src1) / low128(src2)
Fmaximum32 = 96
low32(dst) = ieee_maximum(low32(src1), low32(src2))
Fminimum32 = 97
low32(dst) = ieee_minimum(low32(src1), low32(src2))
Ftrunc32 = 98
low32(dst) = ieee_trunc(low32(src))
Vtrunc32x4 = 99
low128(dst) = ieee_trunc(low128(src))
Vtrunc64x2 = 100
low128(dst) = ieee_trunc(low128(src))
Ffloor32 = 101
low32(dst) = ieee_floor(low32(src))
Vfloor32x4 = 102
low128(dst) = ieee_floor(low128(src))
Vfloor64x2 = 103
low128(dst) = ieee_floor(low128(src))
Fceil32 = 104
low32(dst) = ieee_ceil(low32(src))
Vceil32x4 = 105
low128(dst) = ieee_ceil(low128(src))
Vceil64x2 = 106
low128(dst) = ieee_ceil(low128(src))
Fnearest32 = 107
low32(dst) = ieee_nearest(low32(src))
Fsqrt32 = 108
low32(dst) = ieee_sqrt(low32(src))
Vsqrt32x4 = 109
low128(dst) = ieee_sqrt(low128(src))
Vsqrt64x2 = 110
low128(dst) = ieee_sqrt(low128(src))
Fneg32 = 111
low32(dst) = -low32(src)
Vnegf32x4 = 112
low128(dst) = -low128(src)
Fabs32 = 113
low32(dst) = |low32(src)|
Fadd64 = 114
dst = src1 + src2
Fsub64 = 115
dst = src1 - src2
Fmul64 = 116
dst = src1 * src2
Fdiv64 = 117
dst = src1 / src2
VDivF64x2 = 118
dst = src1 / src2
Fmaximum64 = 119
dst = ieee_maximum(src1, src2)
Fminimum64 = 120
dst = ieee_minimum(src1, src2)
Ftrunc64 = 121
dst = ieee_trunc(src)
Ffloor64 = 122
dst = ieee_floor(src)
Fceil64 = 123
dst = ieee_ceil(src)
Fnearest64 = 124
dst = ieee_nearest(src)
Vnearest32x4 = 125
low128(dst) = ieee_nearest(low128(src))
Vnearest64x2 = 126
low128(dst) = ieee_nearest(low128(src))
Fsqrt64 = 127
dst = ieee_sqrt(src)
Fneg64 = 128
dst = -src
Fabs64 = 129
dst = |src|
Vconst128 = 130
dst = imm
VAddI8x16 = 131
dst = src1 + src2
VAddI16x8 = 132
dst = src1 + src2
VAddI32x4 = 133
dst = src1 + src2
VAddI64x2 = 134
dst = src1 + src2
VAddF32x4 = 135
dst = src1 + src2
VAddF64x2 = 136
dst = src1 + src2
VAddI8x16Sat = 137
dst = saturating_add(src1, src2)
VAddU8x16Sat = 138
dst = saturating_add(src1, src2)
VAddI16x8Sat = 139
dst = saturating_add(src1, src2)
VAddU16x8Sat = 140
dst = saturating_add(src1, src2)
VAddpairwiseI16x8S = 141
dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]
VAddpairwiseI32x4S = 142
dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]
VShlI8x16 = 143
dst = src1 << src2
VShlI16x8 = 144
dst = src1 << src2
VShlI32x4 = 145
dst = src1 << src2
VShlI64x2 = 146
dst = src1 << src2
VShrI8x16S = 147
dst = src1 >> src2 (signed)
VShrI16x8S = 148
dst = src1 >> src2 (signed)
VShrI32x4S = 149
dst = src1 >> src2 (signed)
VShrI64x2S = 150
dst = src1 >> src2 (signed)
VShrI8x16U = 151
dst = src1 >> src2 (unsigned)
VShrI16x8U = 152
dst = src1 >> src2 (unsigned)
VShrI32x4U = 153
dst = src1 >> src2 (unsigned)
VShrI64x2U = 154
dst = src1 >> src2 (unsigned)
VSplatX8 = 155
dst = splat(low8(src))
VSplatX16 = 156
dst = splat(low16(src))
VSplatX32 = 157
dst = splat(low32(src))
VSplatX64 = 158
dst = splat(src)
VSplatF32 = 159
dst = splat(low32(src))
VSplatF64 = 160
dst = splat(src)
VLoad8x8SZ = 161
Load the 64-bit source as i8x8 and sign-extend to i16x8.
VLoad8x8UZ = 162
Load the 64-bit source as u8x8 and zero-extend to i16x8.
VLoad16x4LeSZ = 163
Load the 64-bit source as i16x4 and sign-extend to i32x4.
VLoad16x4LeUZ = 164
Load the 64-bit source as u16x4 and zero-extend to i32x4.
VLoad32x2LeSZ = 165
Load the 64-bit source as i32x2 and sign-extend to i64x2.
VLoad32x2LeUZ = 166
Load the 64-bit source as u32x2 and zero-extend to i64x2.
VBand128 = 167
dst = src1 & src2
VBor128 = 168
dst = src1 | src2
VBxor128 = 169
dst = src1 ^ src2
VBnot128 = 170
dst = !src1
VBitselect128 = 171
dst = (c & x) | (!c & y)
Vbitmask8x16 = 172
Collect high bits of each lane into the low 32-bits of the destination.
Vbitmask16x8 = 173
Collect high bits of each lane into the low 32-bits of the destination.
Vbitmask32x4 = 174
Collect high bits of each lane into the low 32-bits of the destination.
Vbitmask64x2 = 175
Collect high bits of each lane into the low 32-bits of the destination.
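A scalar sketch of the 8x16 case (the helper is hypothetical, not part of this crate): the high (sign) bit of each i8 lane becomes bit i of the low 32 bits of the destination.

// Vbitmask8x16 sketch: bit i of the result is the high bit of lane i;
// the remaining upper bits of the 32-bit value are zero.
fn bitmask8x16(lanes: [i8; 16]) -> u32 {
    let mut mask = 0u32;
    for (i, lane) in lanes.iter().enumerate() {
        if *lane < 0 {
            mask |= 1 << i;
        }
    }
    mask
}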
Valltrue8x16 = 176
Store whether all lanes are nonzero in dst.
Valltrue16x8 = 177
Store whether all lanes are nonzero in dst.
Valltrue32x4 = 178
Store whether all lanes are nonzero in dst.
Valltrue64x2 = 179
Store whether all lanes are nonzero in dst.
Vanytrue8x16 = 180
Store whether any lanes are nonzero in dst.
Vanytrue16x8 = 181
Store whether any lanes are nonzero in dst.
Vanytrue32x4 = 182
Store whether any lanes are nonzero in dst.
Vanytrue64x2 = 183
Store whether any lanes are nonzero in dst.
VF32x4FromI32x4S = 184
Int-to-float conversion (same as f32_from_x32_s)
VF32x4FromI32x4U = 185
Int-to-float conversion (same as f32_from_x32_u)
VF64x2FromI64x2S = 186
Int-to-float conversion (same as f64_from_x64_s)
VF64x2FromI64x2U = 187
Int-to-float conversion (same as f64_from_x64_u)
VI32x4FromF32x4S = 188
Float-to-int conversion (same as x32_from_f32_s)
VI32x4FromF32x4U = 189
Float-to-int conversion (same as x32_from_f32_u)
VI64x2FromF64x2S = 190
Float-to-int conversion (same as x64_from_f64_s)
VI64x2FromF64x2U = 191
Float-to-int conversion (same as x64_from_f64_u)
VWidenLow8x16S = 192
Widens the low lanes of the input vector, as signed, to twice the width.
VWidenLow8x16U = 193
Widens the low lanes of the input vector, as unsigned, to twice the width.
VWidenLow16x8S = 194
Widens the low lanes of the input vector, as signed, to twice the width.
VWidenLow16x8U = 195
Widens the low lanes of the input vector, as unsigned, to twice the width.
VWidenLow32x4S = 196
Widens the low lanes of the input vector, as signed, to twice the width.
VWidenLow32x4U = 197
Widens the low lanes of the input vector, as unsigned, to twice the width.
VWidenHigh8x16S = 198
Widens the high lanes of the input vector, as signed, to twice the width.
VWidenHigh8x16U = 199
Widens the high lanes of the input vector, as unsigned, to twice the width.
VWidenHigh16x8S = 200
Widens the high lanes of the input vector, as signed, to twice the width.
VWidenHigh16x8U = 201
Widens the high lanes of the input vector, as unsigned, to twice the width.
VWidenHigh32x4S = 202
Widens the high lanes of the input vector, as signed, to twice the width.
VWidenHigh32x4U = 203
Widens the high lanes of the input vector, as unsigned, to twice the width.
Vnarrow16x8S = 204
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Vnarrow16x8U = 205
Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
Vnarrow32x4S = 206
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Vnarrow32x4U = 207
Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
Vnarrow64x2S = 208
Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Vnarrow64x2U = 209
Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
Vunarrow64x2U = 210
Narrows the two 64x2 vectors, assuming all input lanes are unsigned, to half the width. Narrowing is unsigned and saturating.
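For illustration, a scalar sketch of the signed 16x8 narrow (the helper name is made up): the sixteen i16 lanes of the two inputs are clamped to the i8 range and packed into one i8x16 result.

// Vnarrow16x8S sketch: src1's lanes fill the low half of the result,
// src2's lanes the high half, each saturated to the i8 range.
fn narrow16x8_s(src1: [i16; 8], src2: [i16; 8]) -> [i8; 16] {
    let mut out = [0i8; 16];
    for (i, &lane) in src1.iter().chain(src2.iter()).enumerate() {
        out[i] = lane.clamp(i8::MIN as i16, i8::MAX as i16) as i8;
    }
    out
}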
VFpromoteLow = 211
Promotes the low two lanes of the f32x4 input to f64x2.
VFdemote = 212
Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.
VSubI8x16 = 213
dst = src1 - src2
VSubI16x8 = 214
dst = src1 - src2
VSubI32x4 = 215
dst = src1 - src2
VSubI64x2 = 216
dst = src1 - src2
VSubF64x2 = 217
dst = src1 - src2
VSubI8x16Sat = 218
dst = saturating_sub(src1, src2)
VSubU8x16Sat = 219
dst = saturating_sub(src1, src2)
VSubI16x8Sat = 220
dst = saturating_sub(src1, src2)
VSubU16x8Sat = 221
dst = saturating_sub(src1, src2)
VMulI8x16 = 222
dst = src1 * src2
VMulI16x8 = 223
dst = src1 * src2
VMulI32x4 = 224
dst = src1 * src2
VMulI64x2 = 225
dst = src1 * src2
VMulF64x2 = 226
dst = src1 * src2
VQmulrsI16x8 = 227
dst = signed_saturate(src1 * src2 + (1 << (Q - 1)) >> Q)
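With Q = 15 this is the usual rounding Q-format multiply; a single-lane sketch (hypothetical helper, not this crate's API):

// One i16 lane of VQmulrsI16x8: multiply in 32 bits, add the rounding
// constant 1 << (Q - 1), shift right by Q, then saturate back to i16.
fn q15_mulr_sat(a: i16, b: i16) -> i16 {
    const Q: u32 = 15;
    let rounded = ((a as i32) * (b as i32) + (1i32 << (Q - 1))) >> Q;
    rounded.clamp(i16::MIN as i32, i16::MAX as i32) as i16
}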
VPopcnt8x16 = 228
dst = count_ones(src)
XExtractV8x16 = 229
low32(dst) = zext(src[lane])
XExtractV16x8 = 230
low32(dst) = zext(src[lane])
XExtractV32x4 = 231
low32(dst) = src[lane]
XExtractV64x2 = 232
dst = src[lane]
FExtractV32x4 = 233
low32(dst) = src[lane]
FExtractV64x2 = 234
dst = src[lane]
VInsertX8 = 235
dst = src1; dst[lane] = src2
VInsertX16 = 236
dst = src1; dst[lane] = src2
VInsertX32 = 237
dst = src1; dst[lane] = src2
VInsertX64 = 238
dst = src1; dst[lane] = src2
VInsertF32 = 239
dst = src1; dst[lane] = src2
VInsertF64 = 240
dst = src1; dst[lane] = src2
Veq8x16 = 241
dst = src == dst
Vneq8x16 = 242
dst = src != dst
Vslt8x16 = 243
dst = src < dst (signed)
Vslteq8x16 = 244
dst = src <= dst (signed)
Vult8x16 = 245
dst = src < dst (unsigned)
Vulteq8x16 = 246
dst = src <= dst (unsigned)
Veq16x8 = 247
dst = src == dst
Vneq16x8 = 248
dst = src != dst
Vslt16x8 = 249
dst = src < dst (signed)
Vslteq16x8 = 250
dst = src <= dst (signed)
Vult16x8 = 251
dst = src < dst (unsigned)
Vulteq16x8 = 252
dst = src <= dst (unsigned)
Veq32x4 = 253
dst = src == dst
Vneq32x4 = 254
dst = src != dst
Vslt32x4 = 255
dst = src < dst (signed)
Vslteq32x4 = 256
dst = src <= dst (signed)
Vult32x4 = 257
dst = src < dst (unsigned)
Vulteq32x4 = 258
dst = src <= dst (unsigned)
Veq64x2 = 259
dst = src == dst
Vneq64x2 = 260
dst = src != dst
Vslt64x2 = 261
dst = src < dst (signed)
Vslteq64x2 = 262
dst = src <= dst (signed)
Vult64x2 = 263
dst = src < dst (unsigned)
Vulteq64x2 = 264
dst = src <= dst (unsigned)
Vneg8x16 = 265
dst = -src
Vneg16x8 = 266
dst = -src
Vneg32x4 = 267
dst = -src
Vneg64x2 = 268
dst = -src
VnegF64x2 = 269
dst = -src
Vmin8x16S = 270
dst = min(src1, src2) (signed)
Vmin8x16U = 271
dst = min(src1, src2) (unsigned)
Vmin16x8S = 272
dst = min(src1, src2) (signed)
Vmin16x8U = 273
dst = min(src1, src2) (unsigned)
Vmax8x16S = 274
dst = max(src1, src2) (signed)
Vmax8x16U = 275
dst = max(src1, src2) (unsigned)
Vmax16x8S = 276
dst = max(src1, src2) (signed)
Vmax16x8U = 277
dst = max(src1, src2) (unsigned)
Vmin32x4S = 278
dst = min(src1, src2) (signed)
Vmin32x4U = 279
dst = min(src1, src2) (unsigned)
Vmax32x4S = 280
dst = max(src1, src2) (signed)
Vmax32x4U = 281
dst = max(src1, src2) (unsigned)
Vabs8x16 = 282
dst = |src|
Vabs16x8 = 283
dst = |src|
Vabs32x4 = 284
dst = |src|
Vabs64x2 = 285
dst = |src|
Vabsf32x4 = 286
dst = |src|
Vabsf64x2 = 287
dst = |src|
Vmaximumf32x4 = 288
dst = ieee_maximum(src1, src2)
Vmaximumf64x2 = 289
dst = ieee_maximum(src1, src2)
Vminimumf32x4 = 290
dst = ieee_minimum(src1, src2)
Vminimumf64x2 = 291
dst = ieee_minimum(src1, src2)
VShuffle = 292
dst = shuffle(src1, src2, mask)
Vswizzlei8x16 = 293
dst = swizzle(src1, src2)
Vavground8x16 = 294
dst = (src1 + src2 + 1) // 2
Vavground16x8 = 295
dst = (src1 + src2 + 1) // 2
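A single-lane sketch of the 8x16 case, assuming unsigned lanes (the helper name is made up); the sum is widened so src1 + src2 + 1 cannot overflow before halving:

// Rounding average of one u8 lane; integer division truncates, so the
// +1 rounds halves up.
fn avg_round_u8(a: u8, b: u8) -> u8 {
    ((a as u16 + b as u16 + 1) / 2) as u8
}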
VeqF32x4 = 296
dst = src == dst
VneqF32x4 = 297
dst = src != dst
VltF32x4 = 298
dst = src < dst
VlteqF32x4 = 299
dst = src <= dst
VeqF64x2 = 300
dst = src == dst
VneqF64x2 = 301
dst = src != dst
VltF64x2 = 302
dst = src < dst
VlteqF64x2 = 303
dst = src <= dst
Vfma32x4 = 304
dst = ieee_fma(a, b, c)
Vfma64x2 = 305
dst = ieee_fma(a, b, c)
Vselect = 306
dst = low32(cond) ? if_nonzero : if_zero
Xadd128 = 307
dst_hi:dst_lo = lhs_hi:lhs_lo + rhs_hi:rhs_lo
Xsub128 = 308
dst_hi:dst_lo = lhs_hi:lhs_lo - rhs_hi:rhs_lo
Xwidemul64S = 309
dst_hi:dst_lo = sext(lhs) * sext(rhs)
Xwidemul64U = 310
dst_hi:dst_lo = zext(lhs) * zext(rhs)
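As an illustration of the widening semantics (the helper below is only a sketch, not part of this crate), Rust's native 128-bit integers express Xwidemul64U directly:

// Zero-extend both 64-bit operands, multiply to a full 128-bit product,
// and split it into the (dst_hi, dst_lo) halves.
fn widemul64_u(lhs: u64, rhs: u64) -> (u64, u64) {
    let product = (lhs as u128) * (rhs as u128);
    ((product >> 64) as u64, product as u64)
}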
Implementations
impl ExtendedOpcode
pub fn new(bytes: u16) -> Option<Self>
Create a new ExtendedOpcode from the given bytes.
Returns None if bytes is not a valid extended opcode.
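For example, a minimal decoding sketch (the import path below is assumed and may differ):

use pulley_interpreter::opcode::ExtendedOpcode; // module path assumed

// Decode a raw u16 into an ExtendedOpcode; `new` returns None rather
// than panicking on values that are not valid extended opcodes.
fn describe(bytes: u16) {
    match ExtendedOpcode::new(bytes) {
        Some(ExtendedOpcode::Trap) => println!("trap"),
        Some(op) => println!("extended opcode: {op:?}"),
        None => println!("{bytes} is not a valid extended opcode"),
    }
}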
pub unsafe fn unchecked_new(byte: u16) -> Self
Like new but does not check whether bytes is a valid opcode.
Safety
It is unsafe to pass bytes that is not a valid opcode.
Trait Implementations
impl Clone for ExtendedOpcode
fn clone(&self) -> ExtendedOpcode
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.
impl Debug for ExtendedOpcode
impl Decode for ExtendedOpcode
Available on crate feature decode only.