Enum ExtendedOpcode

#[repr(u16)]
pub enum ExtendedOpcode {
Trap = 0, Nop = 1, CallIndirectHost = 2, XmovFp = 3, XmovLr = 4, Bswap32 = 5, Bswap64 = 6, Xadd32UoverflowTrap = 7, Xadd64UoverflowTrap = 8, XMulHi64S = 9, XMulHi64U = 10, Xbmask32 = 11, Xbmask64 = 12, XLoad16BeU32O32 = 13, XLoad16BeS32O32 = 14, XLoad32BeO32 = 15, XLoad64BeO32 = 16, XStore16BeO32 = 17, XStore32BeO32 = 18, XStore64BeO32 = 19, Fload32BeO32 = 20, Fload64BeO32 = 21, Fstore32BeO32 = 22, Fstore64BeO32 = 23, Fload32LeO32 = 24, Fload64LeO32 = 25, Fstore32LeO32 = 26, Fstore64LeO32 = 27, Fload32LeZ = 28, Fload64LeZ = 29, Fstore32LeZ = 30, Fstore64LeZ = 31, Fload32LeG32 = 32, Fload64LeG32 = 33, Fstore32LeG32 = 34, Fstore64LeG32 = 35, VLoad128O32 = 36, Vstore128LeO32 = 37, VLoad128Z = 38, Vstore128LeZ = 39, VLoad128G32 = 40, Vstore128LeG32 = 41, Fmov = 42, Vmov = 43, BitcastIntFromFloat32 = 44, BitcastIntFromFloat64 = 45, BitcastFloatFromInt32 = 46, BitcastFloatFromInt64 = 47, FConst32 = 48, FConst64 = 49, Feq32 = 50, Fneq32 = 51, Flt32 = 52, Flteq32 = 53, Feq64 = 54, Fneq64 = 55, Flt64 = 56, Flteq64 = 57, FSelect32 = 58, FSelect64 = 59, F32FromF64 = 60, F64FromF32 = 61, F32FromX32S = 62, F32FromX32U = 63, F32FromX64S = 64, F32FromX64U = 65, F64FromX32S = 66, F64FromX32U = 67, F64FromX64S = 68, F64FromX64U = 69, X32FromF32S = 70, X32FromF32U = 71, X32FromF64S = 72, X32FromF64U = 73, X64FromF32S = 74, X64FromF32U = 75, X64FromF64S = 76, X64FromF64U = 77, X32FromF32SSat = 78, X32FromF32USat = 79, X32FromF64SSat = 80, X32FromF64USat = 81, X64FromF32SSat = 82, X64FromF32USat = 83, X64FromF64SSat = 84, X64FromF64USat = 85, FCopySign32 = 86, FCopySign64 = 87, Fadd32 = 88, Fsub32 = 89, Vsubf32x4 = 90, Fmul32 = 91, Vmulf32x4 = 92, Fdiv32 = 93, Vdivf32x4 = 94, Fmaximum32 = 95, Fminimum32 = 96, Ftrunc32 = 97, Vtrunc32x4 = 98, Vtrunc64x2 = 99, Ffloor32 = 100, Vfloor32x4 = 101, Vfloor64x2 = 102, Fceil32 = 103, Vceil32x4 = 104, Vceil64x2 = 105, Fnearest32 = 106, Fsqrt32 = 107, Vsqrt32x4 = 108, Vsqrt64x2 = 109, Fneg32 = 110, Vnegf32x4 = 111, Fabs32 = 112, Fadd64 = 113, Fsub64 = 114, Fmul64 = 115, Fdiv64 = 116, VDivF64x2 = 117, Fmaximum64 = 118, Fminimum64 = 119, Ftrunc64 = 120, Ffloor64 = 121, Fceil64 = 122, Fnearest64 = 123, Vnearest32x4 = 124, Vnearest64x2 = 125, Fsqrt64 = 126, Fneg64 = 127, Fabs64 = 128, Vconst128 = 129, VAddI8x16 = 130, VAddI16x8 = 131, VAddI32x4 = 132, VAddI64x2 = 133, VAddF32x4 = 134, VAddF64x2 = 135, VAddI8x16Sat = 136, VAddU8x16Sat = 137, VAddI16x8Sat = 138, VAddU16x8Sat = 139, VAddpairwiseI16x8S = 140, VAddpairwiseI32x4S = 141, VShlI8x16 = 142, VShlI16x8 = 143, VShlI32x4 = 144, VShlI64x2 = 145, VShrI8x16S = 146, VShrI16x8S = 147, VShrI32x4S = 148, VShrI64x2S = 149, VShrI8x16U = 150, VShrI16x8U = 151, VShrI32x4U = 152, VShrI64x2U = 153, VSplatX8 = 154, VSplatX16 = 155, VSplatX32 = 156, VSplatX64 = 157, VSplatF32 = 158, VSplatF64 = 159, VLoad8x8SZ = 160, VLoad8x8UZ = 161, VLoad16x4LeSZ = 162, VLoad16x4LeUZ = 163, VLoad32x2LeSZ = 164, VLoad32x2LeUZ = 165, VBand128 = 166, VBor128 = 167, VBxor128 = 168, VBnot128 = 169, VBitselect128 = 170, Vbitmask8x16 = 171, Vbitmask16x8 = 172, Vbitmask32x4 = 173, Vbitmask64x2 = 174, Valltrue8x16 = 175, Valltrue16x8 = 176, Valltrue32x4 = 177, Valltrue64x2 = 178, Vanytrue8x16 = 179, Vanytrue16x8 = 180, Vanytrue32x4 = 181, Vanytrue64x2 = 182, VF32x4FromI32x4S = 183, VF32x4FromI32x4U = 184, VF64x2FromI64x2S = 185, VF64x2FromI64x2U = 186, VI32x4FromF32x4S = 187, VI32x4FromF32x4U = 188, VI64x2FromF64x2S = 189, VI64x2FromF64x2U = 190, VWidenLow8x16S = 191, VWidenLow8x16U = 192, VWidenLow16x8S = 193, VWidenLow16x8U = 194,
VWidenLow32x4S = 195, VWidenLow32x4U = 196, VWidenHigh8x16S = 197, VWidenHigh8x16U = 198, VWidenHigh16x8S = 199, VWidenHigh16x8U = 200, VWidenHigh32x4S = 201, VWidenHigh32x4U = 202, Vnarrow16x8S = 203, Vnarrow16x8U = 204, Vnarrow32x4S = 205, Vnarrow32x4U = 206, Vnarrow64x2S = 207, Vnarrow64x2U = 208, Vunarrow64x2U = 209, VFpromoteLow = 210, VFdemote = 211, VSubI8x16 = 212, VSubI16x8 = 213, VSubI32x4 = 214, VSubI64x2 = 215, VSubF64x2 = 216, VSubI8x16Sat = 217, VSubU8x16Sat = 218, VSubI16x8Sat = 219, VSubU16x8Sat = 220, VMulI8x16 = 221, VMulI16x8 = 222, VMulI32x4 = 223, VMulI64x2 = 224, VMulF64x2 = 225, VQmulrsI16x8 = 226, VPopcnt8x16 = 227, XExtractV8x16 = 228, XExtractV16x8 = 229, XExtractV32x4 = 230, XExtractV64x2 = 231, FExtractV32x4 = 232, FExtractV64x2 = 233, VInsertX8 = 234, VInsertX16 = 235, VInsertX32 = 236, VInsertX64 = 237, VInsertF32 = 238, VInsertF64 = 239, Veq8x16 = 240, Vneq8x16 = 241, Vslt8x16 = 242, Vslteq8x16 = 243, Vult8x16 = 244, Vulteq8x16 = 245, Veq16x8 = 246, Vneq16x8 = 247, Vslt16x8 = 248, Vslteq16x8 = 249, Vult16x8 = 250, Vulteq16x8 = 251, Veq32x4 = 252, Vneq32x4 = 253, Vslt32x4 = 254, Vslteq32x4 = 255, Vult32x4 = 256, Vulteq32x4 = 257, Veq64x2 = 258, Vneq64x2 = 259, Vslt64x2 = 260, Vslteq64x2 = 261, Vult64x2 = 262, Vulteq64x2 = 263, Vneg8x16 = 264, Vneg16x8 = 265, Vneg32x4 = 266, Vneg64x2 = 267, VnegF64x2 = 268, Vmin8x16S = 269, Vmin8x16U = 270, Vmin16x8S = 271, Vmin16x8U = 272, Vmax8x16S = 273, Vmax8x16U = 274, Vmax16x8S = 275, Vmax16x8U = 276, Vmin32x4S = 277, Vmin32x4U = 278, Vmax32x4S = 279, Vmax32x4U = 280, Vabs8x16 = 281, Vabs16x8 = 282, Vabs32x4 = 283, Vabs64x2 = 284, Vabsf32x4 = 285, Vabsf64x2 = 286, Vmaximumf32x4 = 287, Vmaximumf64x2 = 288, Vminimumf32x4 = 289, Vminimumf64x2 = 290, VShuffle = 291, Vswizzlei8x16 = 292, Vavground8x16 = 293, Vavground16x8 = 294, VeqF32x4 = 295, VneqF32x4 = 296, VltF32x4 = 297, VlteqF32x4 = 298, VeqF64x2 = 299, VneqF64x2 = 300, VltF64x2 = 301, VlteqF64x2 = 302, Vfma32x4 = 303, Vfma64x2 = 304, Vselect = 305, Xadd128 = 306, Xsub128 = 307, Xwidemul64S = 308, Xwidemul64U = 309,
}

An extended opcode.

Variants§

§

Trap = 0

Raise a trap.

§

Nop = 1

Do nothing.

§

CallIndirectHost = 2

A special opcode to halt interpreter execution and yield control back to the host.

This opcode results in DoneReason::CallIndirectHost where the id here is shepherded along to the embedder. It’s up to the embedder to determine what to do with the id and the current state of registers and the stack.

In Wasmtime this is used to implement interpreter-to-host calls. This is modeled as a call instruction where the first parameter is the native function pointer to invoke and all remaining parameters for the native function are in the following parameter positions (e.g. x1, x2, …). The results of the host call are then stored in x0.

Handling this in Wasmtime is done through a “relocation” which is resolved at link-time when raw bytecode from Cranelift is assembled into the final object that Wasmtime will interpret.

§

XmovFp = 3

Gets the special “fp” register and moves it into dst.

§

XmovLr = 4

Gets the special “lr” register and moves it into dst.

§

Bswap32 = 5

dst = byteswap(low32(src))

§

Bswap64 = 6

dst = byteswap(src)
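
As a rough sketch, these two opcodes behave like Rust's built-in swap_bytes. Whether Bswap32 clears or preserves the upper 32 bits of dst is not specified above, so this models only the low 32 bits:

fn bswap32(src: u64) -> u32 {
    (src as u32).swap_bytes() // e.g. 0x1122_3344 becomes 0x4433_2211
}

fn bswap64(src: u64) -> u64 {
    src.swap_bytes()
}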

§

Xadd32UoverflowTrap = 7

32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2).

The upper 32-bits of dst are unmodified. Traps if the addition overflows.
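
A scalar sketch of the 32-bit form, with a placeholder Trap type standing in for the interpreter's trap path:

struct Trap;

fn xadd32_uoverflow_trap(src1: u32, src2: u32) -> Result<u32, Trap> {
    // checked_add returns None exactly when the unsigned addition overflows.
    src1.checked_add(src2).ok_or(Trap)
}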

§

Xadd64UoverflowTrap = 8

64-bit checked unsigned addition: dst = src1 + src2.

Traps if the addition overflows.

§

XMulHi64S = 9

dst = high64(src1 * src2) (signed)

§

XMulHi64U = 10

dst = high64(src1 * src2) (unsigned)
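
Both high-half multiplies can be modeled by widening to 128 bits and keeping the upper half, as in this sketch:

fn xmulhi64_s(src1: i64, src2: i64) -> i64 {
    (((src1 as i128) * (src2 as i128)) >> 64) as i64
}

fn xmulhi64_u(src1: u64, src2: u64) -> u64 {
    (((src1 as u128) * (src2 as u128)) >> 64) as u64
}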

§

Xbmask32 = 11

low32(dst) = if low32(src) == 0 { 0 } else { -1 }

§

Xbmask64 = 12

dst = if src == 0 { 0 } else { -1 }
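
In Rust terms the bmask operation is simply (sketching the 64-bit form):

fn xbmask64(src: u64) -> u64 {
    if src == 0 { 0 } else { u64::MAX } // u64::MAX is -1 in two's complement
}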

§

XLoad16BeU32O32 = 13

low32(dst) = zext(*addr)

§

XLoad16BeS32O32 = 14

low32(dst) = sext(*addr)

§

XLoad32BeO32 = 15

low32(dst) = zext(*addr)

§

XLoad64BeO32 = 16

dst = *addr

§

XStore16BeO32 = 17

*addr = low16(src)

§

XStore32BeO32 = 18

*addr = low32(src)

§

XStore64BeO32 = 19

*addr = low64(src)
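
A sketch of one big-endian load/store pair. How addr is computed (the O32 suffix suggests a base register plus 32-bit offset) is an assumption here; the sketch takes a raw, in-bounds pointer:

unsafe fn xload32_be(addr: *const u8) -> u64 {
    let mut buf = [0u8; 4];
    core::ptr::copy_nonoverlapping(addr, buf.as_mut_ptr(), 4);
    u32::from_be_bytes(buf) as u64 // zero-extended into the 64-bit destination
}

unsafe fn xstore32_be(addr: *mut u8, src: u64) {
    let buf = (src as u32).to_be_bytes(); // low32(src), big-endian byte order
    core::ptr::copy_nonoverlapping(buf.as_ptr(), addr, 4);
}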

§

Fload32BeO32 = 20

low32(dst) = zext(*addr)

§

Fload64BeO32 = 21

dst = *addr

§

Fstore32BeO32 = 22

*addr = low32(src)

§

Fstore64BeO32 = 23

*addr = src

§

Fload32LeO32 = 24

low32(dst) = zext(*addr)

§

Fload64LeO32 = 25

dst = *addr

§

Fstore32LeO32 = 26

*addr = low32(src)

§

Fstore64LeO32 = 27

*addr = src

§

Fload32LeZ = 28

low32(dst) = zext(*addr)

§

Fload64LeZ = 29

dst = *addr

§

Fstore32LeZ = 30

*addr = low32(src)

§

Fstore64LeZ = 31

*addr = src

§

Fload32LeG32 = 32

low32(dst) = zext(*addr)

§

Fload64LeG32 = 33

dst = *addr

§

Fstore32LeG32 = 34

*addr = low32(src)

§

Fstore64LeG32 = 35

*addr = src

§

VLoad128O32 = 36

dst = *addr

§

Vstore128LeO32 = 37

*addr = src

§

VLoad128Z = 38

dst = *(ptr + offset)

§

Vstore128LeZ = 39

*(ptr + offset) = src

§

VLoad128G32 = 40

dst = *(ptr + offset)

§

Vstore128LeG32 = 41

*(ptr + offset) = src

§

Fmov = 42

Move between f registers.

§

Vmov = 43

Move between v registers.

§

BitcastIntFromFloat32 = 44

low32(dst) = bitcast low32(src) as i32

§

BitcastIntFromFloat64 = 45

dst = bitcast src as i64

§

BitcastFloatFromInt32 = 46

low32(dst) = bitcast low32(src) as f32

§

BitcastFloatFromInt64 = 47

dst = bitcast src as f64
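
The 32-bit bitcasts correspond directly to Rust's to_bits/from_bits, as a sketch:

fn bitcast_int_from_float32(src: f32) -> u32 {
    src.to_bits()
}

fn bitcast_float_from_int32(src: u32) -> f32 {
    f32::from_bits(src)
}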

§

FConst32 = 48

low32(dst) = bits

§

FConst64 = 49

dst = bits

§

Feq32 = 50

low32(dst) = zext(src1 == src2)

§

Fneq32 = 51

low32(dst) = zext(src1 != src2)

§

Flt32 = 52

low32(dst) = zext(src1 < src2)

§

Flteq32 = 53

low32(dst) = zext(src1 <= src2)

§

Feq64 = 54

low32(dst) = zext(src1 == src2)

§

Fneq64 = 55

low32(dst) = zext(src1 != src2)

§

Flt64 = 56

low32(dst) = zext(src1 < src2)

§

Flteq64 = 57

low32(dst) = zext(src1 <= src2)
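
Each comparison produces a zero-extended boolean, so a scalar sketch is just a cast; note that ==, <, and <= are all false when either operand is NaN, per IEEE 754:

fn feq32(src1: f32, src2: f32) -> u32 {
    (src1 == src2) as u32 // 1 if equal, 0 otherwise
}

fn flteq64(src1: f64, src2: f64) -> u32 {
    (src1 <= src2) as u32
}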

§

FSelect32 = 58

low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)

§

FSelect64 = 59

dst = low32(cond) ? if_nonzero : if_zero

§

F32FromF64 = 60

low32(dst) = demote(src)

§

F64FromF32 = 61

dst = promote(low32(src))

§

F32FromX32S = 62

low32(dst) = checked_f32_from_signed(low32(src))

§

F32FromX32U = 63

low32(dst) = checked_f32_from_unsigned(low32(src))

§

F32FromX64S = 64

low32(dst) = checked_f32_from_signed(src)

§

F32FromX64U = 65

low32(dst) = checked_f32_from_unsigned(src)

§

F64FromX32S = 66

dst = checked_f64_from_signed(low32(src))

§

F64FromX32U = 67

dst = checked_f64_from_unsigned(low32(src))

§

F64FromX64S = 68

dst = checked_f64_from_signed(src)

§

F64FromX64U = 69

dst = checked_f64_from_unsigned(src)

§

X32FromF32S = 70

low32(dst) = checked_signed_from_f32(low32(src))

§

X32FromF32U = 71

low32(dst) = checked_unsigned_from_f32(low32(src))

§

X32FromF64S = 72

low32(dst) = checked_signed_from_f64(src)

§

X32FromF64U = 73

low32(dst) = checked_unsigned_from_f64(src)

§

X64FromF32S = 74

dst = checked_signed_from_f32(low32(src))

§

X64FromF32U = 75

dst = checked_unsigned_from_f32(low32(src))

§

X64FromF64S = 76

dst = checked_signed_from_f64(src)

§

X64FromF64U = 77

dst = checked_unsigned_from_f64(src)
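
A sketch of one checked conversion. The checked_* helpers trap instead of wrapping when the input is NaN or out of range; the exact boundary convention below mirrors WebAssembly's trapping truncations and is an assumption:

struct Trap;

fn checked_signed_from_f64(src: f64) -> Result<i64, Trap> {
    let t = src.trunc();
    // i64 values representable after truncation lie in [-2^63, 2^63).
    if t.is_nan() || t < -(2f64.powi(63)) || t >= 2f64.powi(63) {
        return Err(Trap);
    }
    Ok(t as i64)
}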

§

X32FromF32SSat = 78

low32(dst) = saturating_signed_from_f32(low32(src))

§

X32FromF32USat = 79

low32(dst) = saturating_unsigned_from_f32(low32(src))

§

X32FromF64SSat = 80

low32(dst) = saturating_signed_from_f64(src)

§

X32FromF64USat = 81

low32(dst) = saturating_unsigned_from_f64(src)

§

X64FromF32SSat = 82

dst = saturating_signed_from_f32(low32(src))

§

X64FromF32USat = 83

dst = saturating_unsigned_from_f32(low32(src))

§

X64FromF64SSat = 84

dst = saturating_signed_from_f64(src)

§

X64FromF64USat = 85

dst = saturating_unsigned_from_f64(src)
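
The saturating forms match Rust's own float-to-int as casts (since Rust 1.45), which clamp to the destination's bounds and map NaN to 0; whether NaN maps to 0 here as well is an assumption carried over from WebAssembly:

fn saturating_signed_from_f32(src: f32) -> i32 {
    src as i32
}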

§

FCopySign32 = 86

low32(dst) = copysign(low32(src1), low32(src2))

§

FCopySign64 = 87

dst = copysign(src1, src2)
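
This is Rust's copysign: the result takes its magnitude from src1 and its sign from src2:

fn fcopysign64(src1: f64, src2: f64) -> f64 {
    src1.copysign(src2)
}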

§

Fadd32 = 88

low32(dst) = low32(src1) + low32(src2)

§

Fsub32 = 89

low32(dst) = low32(src1) - low32(src2)

§

Vsubf32x4 = 90

low128(dst) = low128(src1) - low128(src2)

§

Fmul32 = 91

low32(dst) = low32(src1) * low32(src2)

§

Vmulf32x4 = 92

low128(dst) = low128(src1) * low128(src2)

§

Fdiv32 = 93

low32(dst) = low32(src1) / low32(src2)

§

Vdivf32x4 = 94

low128(dst) = low128(src1) / low128(src2)

§

Fmaximum32 = 95

low32(dst) = ieee_maximum(low32(src1), low32(src2))

§

Fminimum32 = 96

low32(dst) = ieee_minimum(low32(src1), low32(src2))

§

Ftrunc32 = 97

low32(dst) = ieee_trunc(low32(src))

§

Vtrunc32x4 = 98

low128(dst) = ieee_trunc(low128(src))

§

Vtrunc64x2 = 99

low128(dst) = ieee_trunc(low128(src))

§

Ffloor32 = 100

low32(dst) = ieee_floor(low32(src))

§

Vfloor32x4 = 101

low128(dst) = ieee_floor(low128(src))

§

Vfloor64x2 = 102

low128(dst) = ieee_floor(low128(src))

§

Fceil32 = 103

low32(dst) = ieee_ceil(low32(src))

§

Vceil32x4 = 104

low128(dst) = ieee_ceil(low128(src))

§

Vceil64x2 = 105

low128(dst) = ieee_ceil(low128(src))

§

Fnearest32 = 106

low32(dst) = ieee_nearest(low32(src))

§

Fsqrt32 = 107

low32(dst) = ieee_sqrt(low32(src))

§

Vsqrt32x4 = 108

low128(dst) = ieee_sqrt(low128(src))

§

Vsqrt64x2 = 109

low128(dst) = ieee_sqrt(low128(src))

§

Fneg32 = 110

low32(dst) = -low32(src)

§

Vnegf32x4 = 111

low128(dst) = -low128(src)

§

Fabs32 = 112

low32(dst) = |low32(src)|

§

Fadd64 = 113

dst = src1 + src2

§

Fsub64 = 114

dst = src1 - src2

§

Fmul64 = 115

dst = src1 * src2

§

Fdiv64 = 116

dst = src1 / src2

§

VDivF64x2 = 117

dst = src1 / src2

§

Fmaximum64 = 118

dst = ieee_maximum(src1, src2)

§

Fminimum64 = 119

dst = ieee_minimum(src1, src2)

§

Ftrunc64 = 120

dst = ieee_trunc(src)

§

Ffloor64 = 121

dst = ieee_floor(src)

§

Fceil64 = 122

dst = ieee_ceil(src)

§

Fnearest64 = 123

dst = ieee_nearest(src)

§

Vnearest32x4 = 124

low128(dst) = ieee_nearest(low128(src))

§

Vnearest64x2 = 125

low128(dst) = ieee_nearest(low128(src))

§

Fsqrt64 = 126

dst = ieee_sqrt(src)

§

Fneg64 = 127

dst = -src

§

Fabs64 = 128

dst = |src|

§

Vconst128 = 129

dst = imm

§

VAddI8x16 = 130

dst = src1 + src2

§

VAddI16x8 = 131

dst = src1 + src2

§

VAddI32x4 = 132

dst = src1 + src2

§

VAddI64x2 = 133

dst = src1 + src2

§

VAddF32x4 = 134

dst = src1 + src2

§

VAddF64x2 = 135

dst = src1 + src2

§

VAddI8x16Sat = 136

dst = saturating_add(src1, src2)

§

VAddU8x16Sat = 137

dst = saturating_add(src1, src2)

§

VAddI16x8Sat = 138

dst = saturating_add(src1, src2)

§

VAddU16x8Sat = 139

dst = saturating_add(src1, src2)

§

VAddpairwiseI16x8S = 140

dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]

§

VAddpairwiseI32x4S = 141

dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]

§

VShlI8x16 = 142

dst = src1 << src2

§

VShlI16x8 = 143

dst = src1 << src2

§

VShlI32x4 = 144

dst = src1 << src2

§

VShlI64x2 = 145

dst = src1 << src2

§

VShrI8x16S = 146

dst = src1 >> src2 (signed)

§

VShrI16x8S = 147

dst = src1 >> src2 (signed)

§

VShrI32x4S = 148

dst = src1 >> src2 (signed)

§

VShrI64x2S = 149

dst = src1 >> src2 (signed)

§

VShrI8x16U = 150

dst = src1 >> src2 (unsigned)

§

VShrI16x8U = 151

dst = src1 >> src2 (unsigned)

§

VShrI32x4U = 152

dst = src1 >> src2 (unsigned)

§

VShrI64x2U = 153

dst = src1 >> src2 (unsigned)
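
A per-lane sketch of one shift variant. Whether the shift amount is masked to the lane width (as in WebAssembly) is an assumption here:

fn vshr_i8x16_s(src1: [i8; 16], src2: u32) -> [i8; 16] {
    let shift = src2 % 8; // assumed: shift amount taken modulo the lane width
    src1.map(|lane| lane >> shift) // arithmetic shift preserves the sign bit
}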

§

VSplatX8 = 154

dst = splat(low8(src))

§

VSplatX16 = 155

dst = splat(low16(src))

§

VSplatX32 = 156

dst = splat(low32(src))

§

VSplatX64 = 157

dst = splat(src)

§

VSplatF32 = 158

dst = splat(low32(src))

§

VSplatF64 = 159

dst = splat(src)
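
Splat broadcasts one scalar to every lane, e.g. for the 32-bit case:

fn vsplat_x32(src: u64) -> [u32; 4] {
    [src as u32; 4] // low32(src) copied into all four lanes
}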

§

VLoad8x8SZ = 160

Load the 64-bit source as i8x8 and sign-extend to i16x8.

§

VLoad8x8UZ = 161

Load the 64-bit source as u8x8 and zero-extend to i16x8.

§

VLoad16x4LeSZ = 162

Load the 64-bit source as i16x4 and sign-extend to i32x4.

§

VLoad16x4LeUZ = 163

Load the 64-bit source as u16x4 and zero-extend to i32x4.

§

VLoad32x2LeSZ = 164

Load the 64-bit source as i32x2 and sign-extend to i64x2.

§

VLoad32x2LeUZ = 165

Load the 64-bit source as u32x2 and zero-extend to i64x2.

§

VBand128 = 166

dst = src1 & src2

§

VBor128 = 167

dst = src1 | src2

§

VBxor128 = 168

dst = src1 ^ src2

§

VBnot128 = 169

dst = !src1

§

VBitselect128 = 170

dst = (c & x) | (!c & y)
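
Bit-select picks each result bit from x where the corresponding bit of c is 1 and from y where it is 0:

fn vbitselect128(c: u128, x: u128, y: u128) -> u128 {
    (c & x) | (!c & y)
}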

§

Vbitmask8x16 = 171

Collect high bits of each lane into the low 32-bits of the destination.

§

Vbitmask16x8 = 172

Collect high bits of each lane into the low 32-bits of the destination.

§

Vbitmask32x4 = 173

Collect high bits of each lane into the low 32-bits of the destination.

§

Vbitmask64x2 = 174

Collect high bits of each lane into the low 32-bits of the destination.
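
A sketch of the 8x16 case, gathering each lane's high (sign) bit. Mapping lane i to result bit i is an assumption:

fn vbitmask8x16(lanes: [i8; 16]) -> u32 {
    let mut mask = 0u32;
    for (i, lane) in lanes.iter().enumerate() {
        if *lane < 0 {
            mask |= 1 << i; // lane's high bit set => result bit i set
        }
    }
    mask
}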

§

Valltrue8x16 = 175

Store whether all lanes are nonzero in dst.

§

Valltrue16x8 = 176

Store whether all lanes are nonzero in dst.

§

Valltrue32x4 = 177

Store whether all lanes are nonzero in dst.

§

Valltrue64x2 = 178

Store whether all lanes are nonzero in dst.

§

Vanytrue8x16 = 179

Store whether any lanes are nonzero in dst.

§

Vanytrue16x8 = 180

Store whether any lanes are nonzero in dst.

§

Vanytrue32x4 = 181

Store whether any lanes are nonzero in dst.

§

Vanytrue64x2 = 182

Store whether any lanes are nonzero in dst.
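
The alltrue/anytrue reductions collapse a vector to a zero-extended boolean, sketched here for 8x16 lanes:

fn valltrue8x16(lanes: [u8; 16]) -> u32 {
    lanes.iter().all(|&lane| lane != 0) as u32
}

fn vanytrue8x16(lanes: [u8; 16]) -> u32 {
    lanes.iter().any(|&lane| lane != 0) as u32
}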

§

VF32x4FromI32x4S = 183

Int-to-float conversion (same as f32_from_x32_s)

§

VF32x4FromI32x4U = 184

Int-to-float conversion (same as f32_from_x32_u)

§

VF64x2FromI64x2S = 185

Int-to-float conversion (same as f64_from_x64_s)

§

VF64x2FromI64x2U = 186

Int-to-float conversion (same as f64_from_x64_u)

§

VI32x4FromF32x4S = 187

Float-to-int conversion (same as x32_from_f32_s)

§

VI32x4FromF32x4U = 188

Float-to-int conversion (same as x32_from_f32_u)

§

VI64x2FromF64x2S = 189

Float-to-int conversion (same as x64_from_f64_s)

§

VI64x2FromF64x2U = 190

Float-to-int conversion (same as x64_from_f64_u)

§

VWidenLow8x16S = 191

Widens the low lanes of the input vector, as signed, to twice the width.

§

VWidenLow8x16U = 192

Widens the low lanes of the input vector, as unsigned, to twice the width.

§

VWidenLow16x8S = 193

Widens the low lanes of the input vector, as signed, to twice the width.

§

VWidenLow16x8U = 194

Widens the low lanes of the input vector, as unsigned, to twice the width.

§

VWidenLow32x4S = 195

Widens the low lanes of the input vector, as signed, to twice the width.

§

VWidenLow32x4U = 196

Widens the low lanes of the input vector, as unsigned, to twice the width.

§

VWidenHigh8x16S = 197

Widens the high lanes of the input vector, as signed, to twice the width.

§

VWidenHigh8x16U = 198

Widens the high lanes of the input vector, as unsigned, to twice the width.

§

VWidenHigh16x8S = 199

Widens the high lanes of the input vector, as signed, to twice the width.

§

VWidenHigh16x8U = 200

Widens the high lanes of the input vector, as unsigned, to twice the width.

§

VWidenHigh32x4S = 201

Widens the high lanes of the input vector, as signed, to twice the width.

§

VWidenHigh32x4U = 202

Widens the high lanes of the input vector, as unsigned, to twice the width.
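
A sketch of the 8x16 signed widening ops; "twice the width" means i8 lanes become i16 lanes, with the low and high variants selecting which half of the input survives:

fn vwiden_low8x16_s(src: [i8; 16]) -> [i16; 8] {
    core::array::from_fn(|i| src[i] as i16) // lanes 0..8, sign-extended
}

fn vwiden_high8x16_s(src: [i8; 16]) -> [i16; 8] {
    core::array::from_fn(|i| src[i + 8] as i16) // lanes 8..16, sign-extended
}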

§

Vnarrow16x8S = 203

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

§

Vnarrow16x8U = 204

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

§

Vnarrow32x4S = 205

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

§

Vnarrow32x4U = 206

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

§

Vnarrow64x2S = 207

Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.

§

Vnarrow64x2U = 208

Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.

§

Vunarrow64x2U = 209

Narrows the two 64x2 vectors, assuming all input lanes are unsigned, to half the width. Narrowing is unsigned and saturating.
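
A sketch of the signed 16x8 narrowing. Placing src1's lanes in the low half of the result and src2's in the high half is an assumption:

fn vnarrow16x8_s(src1: [i16; 8], src2: [i16; 8]) -> [i8; 16] {
    let sat = |lane: i16| lane.clamp(i8::MIN as i16, i8::MAX as i16) as i8;
    let mut dst = [0i8; 16];
    for i in 0..8 {
        dst[i] = sat(src1[i]);
        dst[i + 8] = sat(src2[i]);
    }
    dst
}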

§

VFpromoteLow = 210

Promotes the low two lanes of the f32x4 input to f64x2.

§

VFdemote = 211

Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.

§

VSubI8x16 = 212

dst = src1 - src2

§

VSubI16x8 = 213

dst = src1 - src2

§

VSubI32x4 = 214

dst = src1 - src2

§

VSubI64x2 = 215

dst = src1 - src2

§

VSubF64x2 = 216

dst = src1 - src2

§

VSubI8x16Sat = 217

dst = saturating_sub(src1, src2)

§

VSubU8x16Sat = 218

dst = saturating_sub(src1, src2)

§

VSubI16x8Sat = 219

dst = saturating_sub(src1, src2)

§

VSubU16x8Sat = 220

dst = saturating_sub(src1, src2)

§

VMulI8x16 = 221

dst = src1 * src2

§

VMulI16x8 = 222

dst = src1 * src2

§

VMulI32x4 = 223

dst = src1 * src2

§

VMulI64x2 = 224

dst = src1 * src2

§

VMulF64x2 = 225

dst = src1 * src2

§

VQmulrsI16x8 = 226

dst = signed_saturate((src1 * src2 + (1 << (Q - 1))) >> Q)
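
Per lane this is the classic Q15 rounding, saturating multiply (Q = 15 for 16-bit lanes), sketched as:

fn vqmulrs_i16(src1: i16, src2: i16) -> i16 {
    const Q: i32 = 15;
    let product = (src1 as i32) * (src2 as i32);
    let rounded = (product + (1 << (Q - 1))) >> Q;
    rounded.clamp(i16::MIN as i32, i16::MAX as i32) as i16
}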

§

VPopcnt8x16 = 227

dst = count_ones(src)

§

XExtractV8x16 = 228

low32(dst) = zext(src[lane])

§

XExtractV16x8 = 229

low32(dst) = zext(src[lane])

§

XExtractV32x4 = 230

low32(dst) = src[lane]

§

XExtractV64x2 = 231

dst = src[lane]

§

FExtractV32x4 = 232

low32(dst) = src[lane]

§

FExtractV64x2 = 233

dst = src[lane]

§

VInsertX8 = 234

dst = src1; dst[lane] = src2

§

VInsertX16 = 235

dst = src1; dst[lane] = src2

§

VInsertX32 = 236

dst = src1; dst[lane] = src2

§

VInsertX64 = 237

dst = src1; dst[lane] = src2

§

VInsertF32 = 238

dst = src1; dst[lane] = src2

§

VInsertF64 = 239

dst = src1; dst[lane] = src2
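
Insert writes one lane of a copy of src1, and the extract opcodes above read one lane out; a sketch for 32-bit lanes:

fn vinsert_x32(src1: [u32; 4], src2: u32, lane: usize) -> [u32; 4] {
    let mut dst = src1;
    dst[lane] = src2;
    dst
}

fn xextract_v32x4(src: [u32; 4], lane: usize) -> u32 {
    src[lane]
}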

§

Veq8x16 = 240

dst = src == dst

§

Vneq8x16 = 241

dst = src != dst

§

Vslt8x16 = 242

dst = src < dst (signed)

§

Vslteq8x16 = 243

dst = src <= dst (signed)

§

Vult8x16 = 244

dst = src < dst (unsigned)

§

Vulteq8x16 = 245

dst = src <= dst (unsigned)

§

Veq16x8 = 246

dst = src == dst

§

Vneq16x8 = 247

dst = src != dst

§

Vslt16x8 = 248

dst = src < dst (signed)

§

Vslteq16x8 = 249

dst = src <= dst (signed)

§

Vult16x8 = 250

dst = src < dst (unsigned)

§

Vulteq16x8 = 251

dst = src <= dst (unsigned)

§

Veq32x4 = 252

dst = src == dst

§

Vneq32x4 = 253

dst = src != dst

§

Vslt32x4 = 254

dst = src < dst (signed)

§

Vslteq32x4 = 255

dst = src <= dst (signed)

§

Vult32x4 = 256

dst = src < dst (unsigned)

§

Vulteq32x4 = 257

dst = src <= dst (unsigned)

§

Veq64x2 = 258

dst = src == dst

§

Vneq64x2 = 259

dst = src != dst

§

Vslt64x2 = 260

dst = src < dst (signed)

§

Vslteq64x2 = 261

dst = src <= dst (signed)

§

Vult64x2 = 262

dst = src < dst (unsigned)

§

Vulteq64x2 = 263

dst = src <= dst (unsigned)

§

Vneg8x16 = 264

dst = -src

§

Vneg16x8 = 265

dst = -src

§

Vneg32x4 = 266

dst = -src

§

Vneg64x2 = 267

dst = -src

§

VnegF64x2 = 268

dst = -src

§

Vmin8x16S = 269

dst = min(src1, src2) (signed)

§

Vmin8x16U = 270

dst = min(src1, src2) (unsigned)

§

Vmin16x8S = 271

dst = min(src1, src2) (signed)

§

Vmin16x8U = 272

dst = min(src1, src2) (unsigned)

§

Vmax8x16S = 273

dst = max(src1, src2) (signed)

§

Vmax8x16U = 274

dst = max(src1, src2) (unsigned)

§

Vmax16x8S = 275

dst = max(src1, src2) (signed)

§

Vmax16x8U = 276

dst = max(src1, src2) (unsigned)

§

Vmin32x4S = 277

dst = min(src1, src2) (signed)

§

Vmin32x4U = 278

dst = min(src1, src2) (unsigned)

§

Vmax32x4S = 279

dst = max(src1, src2) (signed)

§

Vmax32x4U = 280

dst = max(src1, src2) (unsigned)

§

Vabs8x16 = 281

dst = |src|

§

Vabs16x8 = 282

dst = |src|

§

Vabs32x4 = 283

dst = |src|

§

Vabs64x2 = 284

dst = |src|

§

Vabsf32x4 = 285

dst = |src|

§

Vabsf64x2 = 286

dst = |src|

§

Vmaximumf32x4 = 287

dst = ieee_maximum(src1, src2)

§

Vmaximumf64x2 = 288

dst = ieee_maximum(src1, src2)

§

Vminimumf32x4 = 289

dst = ieee_minimum(src1, src2)

§

Vminimumf64x2 = 290

dst = ieee_minimum(src1, src2)

§

VShuffle = 291

dst = shuffle(src1, src2, mask)
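
A byte-level sketch of shuffle, assuming mask indices 0..16 select from src1 and 16..32 select from src2 (as in WebAssembly's i8x16.shuffle):

fn vshuffle(src1: [u8; 16], src2: [u8; 16], mask: [u8; 16]) -> [u8; 16] {
    core::array::from_fn(|i| {
        let m = mask[i] as usize;
        if m < 16 { src1[m] } else { src2[m - 16] }
    })
}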

§

Vswizzlei8x16 = 292

dst = swizzle(src1, src2)

§

Vavground8x16 = 293

dst = (src1 + src2 + 1) // 2

§

Vavground16x8 = 294

dst = (src1 + src2 + 1) // 2
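
The rounding average widens before adding so the +1 cannot overflow; this sketch assumes unsigned lanes, as in WebAssembly's avgr_u:

fn vavground_u8(src1: u8, src2: u8) -> u8 {
    (((src1 as u16) + (src2 as u16) + 1) >> 1) as u8
}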

§

VeqF32x4 = 295

dst = src == dst

§

VneqF32x4 = 296

dst = src != dst

§

VltF32x4 = 297

dst = src < dst

§

VlteqF32x4 = 298

dst = src <= dst

§

VeqF64x2 = 299

dst = src == dst

§

VneqF64x2 = 300

dst = src != dst

§

VltF64x2 = 301

dst = src < dst

§

VlteqF64x2 = 302

dst = src <= dst

§

Vfma32x4 = 303

dst = ieee_fma(a, b, c)

§

Vfma64x2 = 304

dst = ieee_fma(a, b, c)
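
Per lane this is a fused multiply-add with a single rounding, which Rust exposes as mul_add:

fn vfma32x4(a: [f32; 4], b: [f32; 4], c: [f32; 4]) -> [f32; 4] {
    core::array::from_fn(|i| a[i].mul_add(b[i], c[i]))
}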

§

Vselect = 305

dst = low32(cond) ? if_nonzero : if_zero

§

Xadd128 = 306

dst_hi:dst_lo = lhs_hi:lhs_lo + rhs_hi:rhs_lo
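
The 128-bit add works on a lo/hi pair of 64-bit registers, propagating the carry out of the low half:

fn xadd128(lhs_hi: u64, lhs_lo: u64, rhs_hi: u64, rhs_lo: u64) -> (u64, u64) {
    let (lo, carry) = lhs_lo.overflowing_add(rhs_lo);
    let hi = lhs_hi.wrapping_add(rhs_hi).wrapping_add(carry as u64);
    (hi, lo) // (dst_hi, dst_lo)
}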

§

Xsub128 = 307

dst_hi:dst_lo = lhs_hi:lhs_lo - rhs_hi:rhs_lo

§

Xwidemul64S = 308

dst_hi:dst_lo = sext(lhs) * sext(rhs)

§

Xwidemul64U = 309

dst_hi:dst_lo = zext(lhs) * zext(rhs)
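
Both widening multiplies can be sketched via 128-bit arithmetic, splitting the full product into high and low halves:

fn xwidemul64_s(lhs: i64, rhs: i64) -> (u64, u64) {
    let full = (lhs as i128) * (rhs as i128);
    ((full >> 64) as u64, full as u64) // (dst_hi, dst_lo)
}

fn xwidemul64_u(lhs: u64, rhs: u64) -> (u64, u64) {
    let full = (lhs as u128) * (rhs as u128);
    ((full >> 64) as u64, full as u64)
}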

Implementations§

impl ExtendedOpcode

pub const MAX: u16 = 310u16

The value of the maximum defined extended opcode.

pub fn new(bytes: u16) -> Option<Self>

Create a new ExtendedOpcode from the given bytes.

Returns None if bytes is not a valid extended opcode.
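
A quick sanity check of the constructor against the discriminants above; the crate-root import path is an assumption:

use pulley_interpreter::ExtendedOpcode; // re-export path assumed

fn main() {
    assert_eq!(ExtendedOpcode::new(0), Some(ExtendedOpcode::Trap));
    assert_eq!(ExtendedOpcode::new(2), Some(ExtendedOpcode::CallIndirectHost));
    assert!(ExtendedOpcode::new(u16::MAX).is_none()); // far past any defined opcode
}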

pub unsafe fn unchecked_new(byte: u16) -> Self

Like new but does not check whether bytes is a valid opcode.

§Safety

It is unsafe to pass bytes that is not a valid opcode.

Trait Implementations§

impl Clone for ExtendedOpcode

fn clone(&self) -> ExtendedOpcode

Returns a copy of the value.

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source.

impl Debug for ExtendedOpcode

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.

impl Decode for ExtendedOpcode

Available on crate feature decode only.

fn decode<T>(bytecode: &mut T) -> Result<Self, T::Error>
where T: BytecodeStream,

Decode this type from the given bytecode stream.
impl Hash for ExtendedOpcode

fn hash<__H: Hasher>(&self, state: &mut __H)

Feeds this value into the given Hasher.

fn hash_slice<H>(data: &[Self], state: &mut H)
where H: Hasher, Self: Sized,

Feeds a slice of this type into the given Hasher.

impl Ord for ExtendedOpcode

fn cmp(&self, other: &ExtendedOpcode) -> Ordering

This method returns an Ordering between self and other.

fn max(self, other: Self) -> Self
where Self: Sized,

Compares and returns the maximum of two values.

fn min(self, other: Self) -> Self
where Self: Sized,

Compares and returns the minimum of two values.

fn clamp(self, min: Self, max: Self) -> Self
where Self: Sized,

Restrict a value to a certain interval.

impl PartialEq for ExtendedOpcode

fn eq(&self, other: &ExtendedOpcode) -> bool

Tests for self and other values to be equal, and is used by ==.

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.

impl PartialOrd for ExtendedOpcode

fn partial_cmp(&self, other: &ExtendedOpcode) -> Option<Ordering>

This method returns an ordering between self and other values if one exists.

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator.

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the <= operator.

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > operator.

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by the >= operator.

impl Copy for ExtendedOpcode

impl Eq for ExtendedOpcode

impl StructuralPartialEq for ExtendedOpcode

Blanket Implementations§

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> CloneToUninit for T
where T: Clone,

unsafe fn clone_to_uninit(&self, dst: *mut u8)

This is a nightly-only experimental API (clone_to_uninit). Performs copy-assignment from self to dst.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> ToOwned for T
where T: Clone,

type Owned = T

The resulting type after obtaining ownership.

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning.

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.