cranelift_codegen/isa/x64/encoding/evex.rs

//! Encodes EVEX instructions. These instructions are those added by the AVX-512 extensions. The
//! EVEX encoding requires a 4-byte prefix:
//!
//! Byte 0:  0x62
//!         ┌───┬───┬───┬───┬───┬───┬───┬───┐
//! Byte 1: │ R │ X │ B │ R'│ 0 │ 0 │ m │ m │
//!         ├───┼───┼───┼───┼───┼───┼───┼───┤
//! Byte 2: │ W │ v │ v │ v │ v │ 1 │ p │ p │
//!         ├───┼───┼───┼───┼───┼───┼───┼───┤
//! Byte 3: │ z │ L'│ L │ b │ V'│ a │ a │ a │
//!         └───┴───┴───┴───┴───┴───┴───┴───┘
//!
//! The prefix is then followed by the opcode byte, the ModR/M byte, and other optional suffixes
//! (e.g. SIB byte, displacements, immediates) based on the instruction (see section 2.6, Intel
//! Software Developer's Manual, volume 2A).
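//!
//! As a concrete example (taken from the `vpabsq` tests at the bottom of this file), the
//! register-to-register form `vpabsq %xmm1, %xmm0` emits the prefix bytes `0x62 0xF2 0xFD 0x08`,
//! followed by the opcode byte `0x1F` and the ModR/M byte `0xC1`.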

use super::rex::{self, LegacyPrefixes, OpcodeMap};
use crate::MachBuffer;
use crate::isa::x64::args::{Amode, Avx512TupleType};
use crate::isa::x64::inst::Inst;
use core::ops::RangeInclusive;

/// Constructs an EVEX-encoded instruction using a builder pattern. This approach makes it visually
/// easier to transform the manual's syntax, e.g. `EVEX.256.66.0F38.W1 1F /r`, into code:
/// `EvexInstruction::new().length(...).prefix(...).map(...).w(true).opcode(0x1F).reg(...).rm(...)`.
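///
/// As a rough sketch (mirroring the `vpabsq` test at the bottom of this file, and assuming `sink`
/// is a `MachBuffer<Inst>`), the 128-bit form `EVEX.128.66.0F38.W1 1F /r` of `vpabsq %xmm1, %xmm0`
/// can be built and emitted like this:
///
/// ```ignore
/// EvexInstruction::new()
///     .length(EvexVectorLength::V128)
///     .prefix(LegacyPrefixes::_66)
///     .map(OpcodeMap::_0F38)
///     .w(true)
///     .opcode(0x1F)
///     .reg(0u8) // %xmm0
///     .rm(1u8) // %xmm1
///     .tuple_type(Avx512TupleType::Full)
///     .encode(&mut sink);
/// ```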
pub struct EvexInstruction {
    bits: u32,
    opcode: u8,
    reg: Register,
    rm: RegisterOrAmode,
    tuple_type: Option<Avx512TupleType>,
    imm: Option<u8>,
}

/// Because some of the bit flags in the EVEX prefix are stored inverted and users of
/// `EvexInstruction` may choose to skip setting fields, we set some sane defaults here. Note that:
/// - the first byte is always `0x62`, but it appears at the end of the default `bits` value
///   because of the little-endian byte order
/// - some bits always hold fixed values: bits 10-11 are 0, bit 18 is 1
/// - the remaining set bits correspond to the inverted fields: R, X, B, R' (byte 1), vvvv
///   (byte 2), and V' (byte 3).
///
/// See the `default_emission` test for what these defaults are equivalent to (e.g. using RAX,
/// unsetting the W bit, etc.).
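///
/// As a reading aid, the default value `0x08_7C_F0_62` decomposes (little-endian) into `0x62`
/// (byte 0), `0xF0` (byte 1: the inverted R, X, B, and R' bits all set), `0x7C` (byte 2: the
/// inverted `vvvv` field set to `0b1111` plus the always-set bit 18), and `0x08` (byte 3: the
/// inverted V' bit set).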
impl Default for EvexInstruction {
    fn default() -> Self {
        Self {
            bits: 0x08_7C_F0_62,
            opcode: 0,
            reg: Register::default(),
            rm: RegisterOrAmode::Register(Register::default()),
            tuple_type: None,
            imm: None,
        }
    }
}

#[allow(non_upper_case_globals)] // This makes it easier to match the bit range names to the manual's names.
impl EvexInstruction {
    /// Construct a default EVEX instruction.
    pub fn new() -> Self {
        Self::default()
    }

    /// Set the vector length of the instruction. Note that there are sets of instructions (i.e.
    /// rounding, memory broadcast) that modify the same underlying bits--at some point (TODO) we
    /// can add a way to set those context bits and verify that both are not used (e.g. rounding
    /// AND length). For now, this method is very convenient.
    #[inline(always)]
    pub fn length(mut self, length: EvexVectorLength) -> Self {
        self.write(Self::LL, EvexContext::Other { length }.bits() as u32);
        self
    }

    /// Set the legacy prefix byte of the instruction: None | 66 | F2 | F3. EVEX instructions pack
    /// these into the prefix, not as separate bytes.
    #[inline(always)]
    pub fn prefix(mut self, prefix: LegacyPrefixes) -> Self {
        self.write(Self::pp, prefix.bits() as u32);
        self
    }

    /// Set the opcode map byte of the instruction: None | 0F | 0F38 | 0F3A. EVEX instructions pack
    /// these into the prefix, not as separate bytes.
    #[inline(always)]
    pub fn map(mut self, map: OpcodeMap) -> Self {
        self.write(Self::mm, map.bits() as u32);
        self
    }

    /// Set the W bit, typically used to indicate that the instruction operates on 64-bit operands
    /// (e.g. 64-bit lanes). EVEX packs this bit in the EVEX prefix; previous encodings used the REX
    /// prefix.
    #[inline(always)]
    pub fn w(mut self, w: bool) -> Self {
        self.write(Self::W, w as u32);
        self
    }

    /// Set the instruction opcode byte.
    #[inline(always)]
    pub fn opcode(mut self, opcode: u8) -> Self {
        self.opcode = opcode;
        self
    }

    /// Set the "tuple type", which is used to compute the compressed 8-bit displacement scaling
    /// when a memory operand is used.
    #[inline(always)]
    pub fn tuple_type(mut self, tt: Avx512TupleType) -> Self {
        self.tuple_type = Some(tt);
        self
    }

    /// Set the register to use for the `reg` bits; many instructions use this as the write operand.
    /// Setting this affects both the ModRM byte (`reg` section) and the EVEX prefix (the extension
    /// bits for register encodings of 8 and above).
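    ///
    /// For example, `xmm10` (hardware encoding `0b1010`) is stored as `R = !1 = 0` and
    /// `R' = !0 = 1`, since both extension bits are kept inverted in the prefix.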
    #[inline(always)]
    pub fn reg(mut self, reg: impl Into<Register>) -> Self {
        self.reg = reg.into();
        let r = !(self.reg.0 >> 3) & 1;
        let r_ = !(self.reg.0 >> 4) & 1;
        self.write(Self::R, r as u32);
        self.write(Self::R_, r_ as u32);
        self
    }

    /// Set the mask to use. See section 2.6 in the Intel Software Developer's Manual, volume 2A for
    /// more details.
    #[allow(dead_code)]
    #[inline(always)]
    pub fn mask(mut self, mask: EvexMasking) -> Self {
        self.write(Self::aaa, mask.aaa_bits() as u32);
        self.write(Self::z, mask.z_bit() as u32);
        self
    }

    /// Set the `vvvvv` register; some instructions allow using this as a second, non-destructive
    /// source register in 3-operand instructions (e.g. 2 read, 1 write).
    #[allow(dead_code)]
    #[inline(always)]
    pub fn vvvvv(mut self, reg: impl Into<Register>) -> Self {
        let reg = reg.into();
        self.write(Self::vvvv, !(reg.0 as u32) & 0b1111);
        self.write(Self::V_, !(reg.0 as u32 >> 4) & 0b1);
        self
    }

    /// Set the register to use for the `rm` bits; many instructions use this
    /// as the "read from register/memory" operand. Setting this affects both
    /// the ModRM byte (`rm` section) and the EVEX prefix (the extension bits
    /// for register encodings of 8 and above).
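    ///
    /// For a memory operand, the extension bits come from the base register (the B bit) and, for
    /// `ImmRegRegShift`, the index register (the X bit); e.g. `(%rbx, %rsi, 8)` stores both bits
    /// as 1 because neither register encoding needs a fourth bit and the bits are kept inverted.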
    #[inline(always)]
    pub fn rm(mut self, reg: impl Into<RegisterOrAmode>) -> Self {
        // NB: See Table 2-31. 32-Register Support in 64-bit Mode Using EVEX
        // with Embedded REX Bits
        self.rm = reg.into();
        let x = match &self.rm {
            RegisterOrAmode::Register(r) => r.0 >> 4,
            RegisterOrAmode::Amode(Amode::ImmRegRegShift { index, .. }) => {
                index.to_real_reg().unwrap().hw_enc() >> 3
            }

            // These two modes technically don't use the X bit, so leave it at
            // 0.
            RegisterOrAmode::Amode(Amode::ImmReg { .. }) => 0,
            RegisterOrAmode::Amode(Amode::RipRelative { .. }) => 0,
        };
        // The X bit is stored in an inverted format, so invert it here.
        self.write(Self::X, u32::from(!x & 1));

        let b = match &self.rm {
            RegisterOrAmode::Register(r) => r.0 >> 3,
            RegisterOrAmode::Amode(Amode::ImmReg { base, .. }) => {
                base.to_real_reg().unwrap().hw_enc() >> 3
            }
            RegisterOrAmode::Amode(Amode::ImmRegRegShift { base, .. }) => {
                base.to_real_reg().unwrap().hw_enc() >> 3
            }
            // The 4th bit of %rip is 0
            RegisterOrAmode::Amode(Amode::RipRelative { .. }) => 0,
        };
        // The B bit is stored in an inverted format, so invert it here.
        self.write(Self::B, u32::from(!b & 1));
        self
    }

    /// Set the imm byte.
    #[inline(always)]
    pub fn imm(mut self, imm: u8) -> Self {
        self.imm = Some(imm);
        self
    }

    /// Emit the EVEX-encoded instruction to the code sink:
    ///
    /// - the 4-byte EVEX prefix;
    /// - the opcode byte;
    /// - the ModR/M byte;
    /// - the SIB byte and displacement, if necessary;
    /// - an optional immediate byte, if one was set.
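    ///
    /// For instance, in the `vpabsq` tests below, `vpabsq (%rsi), %xmm12` emits the prefix
    /// `0x62 0x72 0xFD 0x08`, the opcode `0x1F`, and the ModR/M byte `0x26` (mod = 0b00,
    /// reg = xmm12 & 7, rm = %rsi); no SIB byte or displacement is needed.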
    pub fn encode(&self, sink: &mut MachBuffer<Inst>) {
        if let RegisterOrAmode::Amode(amode) = &self.rm {
            if let Some(trap_code) = amode.get_flags().trap_code() {
                sink.add_trap(trap_code);
            }
        }
        sink.put4(self.bits);
        sink.put1(self.opcode);

        match &self.rm {
            RegisterOrAmode::Register(reg) => {
                let rm: u8 = (*reg).into();
                sink.put1(rex::encode_modrm(3, self.reg.0 & 7, rm & 7));
            }
            RegisterOrAmode::Amode(amode) => {
                let scaling = self.scaling_for_8bit_disp();

                let bytes_at_end = if self.imm.is_some() { 1 } else { 0 };
                rex::emit_modrm_sib_disp(sink, self.reg.0 & 7, amode, bytes_at_end, Some(scaling));
            }
        }
        if let Some(imm) = self.imm {
            sink.put1(imm);
        }
    }

    // In order to simplify the encoding of the various bit ranges in the prefix, we specify those
    // ranges according to the table below (extracted from the Intel Software Developer's Manual,
    // volume 2A). Remember that, because we pack the 4-byte prefix into a little-endian `u32`, this
    // chart should be read from right-to-left, top-to-bottom. Note also that we start ranges at bit
    // 8, leaving bits 0-7 for the mandatory `0x62`.
    //         ┌───┬───┬───┬───┬───┬───┬───┬───┐
    // Byte 1: │ R │ X │ B │ R'│ 0 │ 0 │ m │ m │
    //         ├───┼───┼───┼───┼───┼───┼───┼───┤
    // Byte 2: │ W │ v │ v │ v │ v │ 1 │ p │ p │
    //         ├───┼───┼───┼───┼───┼───┼───┼───┤
    // Byte 3: │ z │ L'│ L │ b │ V'│ a │ a │ a │
    //         └───┴───┴───┴───┴───┴───┴───┴───┘

    // Byte 1:
    const mm: RangeInclusive<u8> = 8..=9;
    const R_: RangeInclusive<u8> = 12..=12;
    const B: RangeInclusive<u8> = 13..=13;
    const X: RangeInclusive<u8> = 14..=14;
    const R: RangeInclusive<u8> = 15..=15;

    // Byte 2:
    const pp: RangeInclusive<u8> = 16..=17;
    const vvvv: RangeInclusive<u8> = 19..=22;
    const W: RangeInclusive<u8> = 23..=23;

    // Byte 3:
    const aaa: RangeInclusive<u8> = 24..=26;
    const V_: RangeInclusive<u8> = 27..=27;
    const b: RangeInclusive<u8> = 28..=28;
    const LL: RangeInclusive<u8> = 29..=30;
    const z: RangeInclusive<u8> = 31..=31;

    // A convenience method for writing the `value` bits to the given range in `self.bits`.
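    // For example, `write(Self::LL, 0b01)` (a 256-bit vector length) clears bits 29-30 of
    // `self.bits` and then ORs in `0b01 << 29`.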
    #[inline]
    fn write(&mut self, range: RangeInclusive<u8>, value: u32) {
        assert!(ExactSizeIterator::len(&range) > 0);
        let size = range.end() - range.start() + 1; // Calculate the number of bits in the range.
        let mask: u32 = (1 << size) - 1; // Generate a bit mask.
        debug_assert!(
            value <= mask,
            "The written value should fit in {size} bits."
        );
        let mask_complement = !(mask << *range.start()); // Create the bitwise complement for the clear mask.
        self.bits &= mask_complement; // Clear the bits in `range`; otherwise the OR below may allow previously-set bits to slip through.
        let value = value << *range.start(); // Place the value in the correct location (assumes `value <= mask`).
        self.bits |= value; // Modify the bits in `range`.
    }

    /// A convenience method for reading the given range of bits in `self.bits`,
    /// shifted down to the LSB of the returned value.
    #[inline]
    fn read(&self, range: RangeInclusive<u8>) -> u32 {
        (self.bits >> range.start()) & ((1 << range.len()) - 1)
    }

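    /// Compute the scaling factor used for compressed 8-bit displacements (disp8*N); the factor
    /// depends on the tuple type and the encoded vector length. For example, in the `vpabsq` tests
    /// below, `16(%r15)` with the `Full` tuple type and a 128-bit length is encoded with a one-byte
    /// displacement of `0x01` (i.e. 16 / 16).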
    fn scaling_for_8bit_disp(&self) -> i8 {
        use Avx512TupleType::*;

        let vector_size_scaling = || match self.read(Self::LL) {
            0b00 => 16,
            0b01 => 32,
            0b10 => 64,
            _ => unreachable!(),
        };

        match self.tuple_type {
            Some(Full) => {
                if self.read(Self::b) == 1 {
                    if self.read(Self::W) == 0 { 4 } else { 8 }
                } else {
                    vector_size_scaling()
                }
            }
            Some(FullMem) => vector_size_scaling(),
            Some(Mem128) => 16,
            None => panic!("tuple type was not set"),
        }
    }
}

/// Describe the register index to use. This wrapper is a type-safe way to pass
/// around the registers defined in `inst/regs.rs`.
#[derive(Debug, Copy, Clone, Default)]
pub struct Register(u8);
impl From<u8> for Register {
    fn from(reg: u8) -> Self {
        debug_assert!(reg < 16);
        Self(reg)
    }
}
impl From<Register> for u8 {
    fn from(reg: Register) -> u8 {
        reg.0
    }
}

#[allow(missing_docs)]
#[derive(Debug, Clone)]
pub enum RegisterOrAmode {
    Register(Register),
    Amode(Amode),
}

impl From<u8> for RegisterOrAmode {
    fn from(reg: u8) -> Self {
        RegisterOrAmode::Register(reg.into())
    }
}

impl From<Amode> for RegisterOrAmode {
    fn from(amode: Amode) -> Self {
        RegisterOrAmode::Amode(amode)
    }
}

/// Defines the EVEX context for the `L'`, `L`, and `b` bits (bits 6:4 of the EVEX P2 byte). Table 2-36 in
/// section 2.6.10 (Intel Software Developer's Manual, volume 2A) describes how these bits can be
/// used together for certain classes of instructions; i.e., special care should be taken to ensure
/// that instructions use a correct `EvexContext`. Table 2-39 contains cases where opcodes can
/// result in #UD.
#[allow(dead_code, missing_docs)] // Rounding and broadcast modes are not yet used.
pub enum EvexContext {
    RoundingRegToRegFP {
        rc: EvexRoundingControl,
    },
    NoRoundingFP {
        sae: bool,
        length: EvexVectorLength,
    },
    MemoryOp {
        broadcast: bool,
        length: EvexVectorLength,
    },
    Other {
        length: EvexVectorLength,
    },
}

impl Default for EvexContext {
    fn default() -> Self {
        Self::Other {
            length: EvexVectorLength::default(),
        }
    }
}

impl EvexContext {
    /// Encode the `L'`, `L`, and `b` bits (bits 6:4 of EVEX P2 byte) for merging with the P2 byte.
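    ///
    /// For example, a 512-bit broadcast memory operand (`MemoryOp { broadcast: true, length:
    /// EvexVectorLength::V512 }`) encodes as `0b101`.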
    pub fn bits(&self) -> u8 {
        match self {
            Self::RoundingRegToRegFP { rc } => 0b001 | rc.bits() << 1,
            Self::NoRoundingFP { sae, length } => (*sae as u8) | length.bits() << 1,
            Self::MemoryOp { broadcast, length } => (*broadcast as u8) | length.bits() << 1,
            Self::Other { length } => length.bits() << 1,
        }
    }
}

/// The EVEX format allows choosing a vector length in the `L'` and `L` bits; see `EvexContext`.
#[allow(dead_code, missing_docs)] // Wider-length vectors are not yet used.
pub enum EvexVectorLength {
    V128,
    V256,
    V512,
}

impl EvexVectorLength {
    /// Encode the `L'` and `L` bits for merging with the P2 byte.
    fn bits(&self) -> u8 {
        match self {
            Self::V128 => 0b00,
            Self::V256 => 0b01,
            Self::V512 => 0b10,
            // 0b11 is reserved (#UD).
        }
    }
}

impl Default for EvexVectorLength {
    fn default() -> Self {
        Self::V128
    }
}

/// The EVEX format allows defining rounding control in the `L'` and `L` bits; see `EvexContext`.
#[allow(dead_code, missing_docs)] // Rounding controls are not yet used.
pub enum EvexRoundingControl {
    RNE,
    RD,
    RU,
    RZ,
}

impl EvexRoundingControl {
    /// Encode the `L'` and `L` bits for merging with the P2 byte.
    fn bits(&self) -> u8 {
        match self {
            Self::RNE => 0b00,
            Self::RD => 0b01,
            Self::RU => 0b10,
            Self::RZ => 0b11,
        }
    }
}

/// Defines the EVEX masking behavior; masking support is described in section 2.6.4 of the Intel
/// Software Developer's Manual, volume 2A.
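///
/// For example, `Merging { k: 1 }` encodes as `aaa = 0b001` with `z = 0`, while `Zeroing { k: 5 }`
/// encodes as `aaa = 0b101` with `z = 1`; `None` encodes as `aaa = 0b000` with `z = 0`.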
#[allow(dead_code, missing_docs)] // Masking is not yet used.
pub enum EvexMasking {
    None,
    Merging { k: u8 },
    Zeroing { k: u8 },
}

impl Default for EvexMasking {
    fn default() -> Self {
        EvexMasking::None
    }
}

impl EvexMasking {
    /// Encode the `z` bit for merging with the P2 byte.
    pub fn z_bit(&self) -> u8 {
        match self {
            Self::None | Self::Merging { .. } => 0,
            Self::Zeroing { .. } => 1,
        }
    }

    /// Encode the `aaa` bits for merging with the P2 byte.
    pub fn aaa_bits(&self) -> u8 {
        match self {
            Self::None => 0b000,
            Self::Merging { k } | Self::Zeroing { k } => {
                debug_assert!(*k <= 7);
                *k
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::ir::MemFlags;
    use crate::isa::x64::args::Gpr;
    use crate::isa::x64::inst::regs;
    use std::vec::Vec;

    // As a sanity test, we verify that the output of `xed-asmparse-main 'vpabsq xmm0{k0},
    // xmm1'` matches this EVEX encoding machinery.
    #[test]
    fn vpabsq() {
        let mut tmp = MachBuffer::<Inst>::new();
        let tests: &[(crate::Reg, RegisterOrAmode, Vec<u8>)] = &[
            // vpabsq %xmm1, %xmm0
            (
                regs::xmm0(),
                regs::xmm1().to_real_reg().unwrap().hw_enc().into(),
                vec![0x62, 0xf2, 0xfd, 0x08, 0x1f, 0xc1],
            ),
            // vpabsq %xmm8, %xmm10
            (
                regs::xmm10(),
                regs::xmm8().to_real_reg().unwrap().hw_enc().into(),
                vec![0x62, 0x52, 0xfd, 0x08, 0x1f, 0xd0],
            ),
            // vpabsq %xmm15, %xmm3
            (
                regs::xmm3(),
                regs::xmm15().to_real_reg().unwrap().hw_enc().into(),
                vec![0x62, 0xd2, 0xfd, 0x08, 0x1f, 0xdf],
            ),
            // vpabsq (%rsi), %xmm12
            (
                regs::xmm12(),
                Amode::ImmReg {
                    simm32: 0,
                    base: regs::rsi(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x72, 0xfd, 0x08, 0x1f, 0x26],
            ),
            // vpabsq 8(%r15), %xmm14
            (
                regs::xmm14(),
                Amode::ImmReg {
                    simm32: 8,
                    base: regs::r15(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x52, 0xfd, 0x08, 0x1f, 0xb7, 0x08, 0x00, 0x00, 0x00],
            ),
            // vpabsq 16(%r15), %xmm14
            (
                regs::xmm14(),
                Amode::ImmReg {
                    simm32: 16,
                    base: regs::r15(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x52, 0xfd, 0x08, 0x1f, 0x77, 0x01],
            ),
            // vpabsq 17(%rax), %xmm3
            (
                regs::xmm3(),
                Amode::ImmReg {
                    simm32: 17,
                    base: regs::rax(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0xf2, 0xfd, 0x08, 0x1f, 0x98, 0x11, 0x00, 0x00, 0x00],
            ),
            // vpabsq (%rbx, %rsi, 8), %xmm9
            (
                regs::xmm9(),
                Amode::ImmRegRegShift {
                    simm32: 0,
                    base: Gpr::unwrap_new(regs::rbx()),
                    index: Gpr::unwrap_new(regs::rsi()),
                    shift: 3,
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x72, 0xfd, 0x08, 0x1f, 0x0c, 0xf3],
            ),
            // vpabsq 1(%r11, %rdi, 4), %xmm13
            (
                regs::xmm13(),
                Amode::ImmRegRegShift {
                    simm32: 1,
                    base: Gpr::unwrap_new(regs::r11()),
                    index: Gpr::unwrap_new(regs::rdi()),
                    shift: 2,
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![
                    0x62, 0x52, 0xfd, 0x08, 0x1f, 0xac, 0xbb, 0x01, 0x00, 0x00, 0x00,
                ],
            ),
            // vpabsq 128(%rsp, %r10, 2), %xmm5
            (
                regs::xmm5(),
                Amode::ImmRegRegShift {
                    simm32: 128,
                    base: Gpr::unwrap_new(regs::rsp()),
                    index: Gpr::unwrap_new(regs::r10()),
                    shift: 1,
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0xb2, 0xfd, 0x08, 0x1f, 0x6c, 0x54, 0x08],
            ),
            // vpabsq 112(%rbp, %r13, 1), %xmm6
            (
                regs::xmm6(),
                Amode::ImmRegRegShift {
                    simm32: 112,
                    base: Gpr::unwrap_new(regs::rbp()),
                    index: Gpr::unwrap_new(regs::r13()),
                    shift: 0,
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0xb2, 0xfd, 0x08, 0x1f, 0x74, 0x2d, 0x07],
            ),
            // vpabsq (%rbp, %r13, 1), %xmm7
            (
                regs::xmm7(),
                Amode::ImmRegRegShift {
                    simm32: 0,
                    base: Gpr::unwrap_new(regs::rbp()),
                    index: Gpr::unwrap_new(regs::r13()),
                    shift: 0,
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0xb2, 0xfd, 0x08, 0x1f, 0x7c, 0x2d, 0x00],
            ),
            // vpabsq 2032(%r12), %xmm8
            (
                regs::xmm8(),
                Amode::ImmReg {
                    simm32: 2032,
                    base: regs::r12(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x52, 0xfd, 0x08, 0x1f, 0x44, 0x24, 0x7f],
            ),
            // vpabsq 2048(%r13), %xmm9
            (
                regs::xmm9(),
                Amode::ImmReg {
                    simm32: 2048,
                    base: regs::r13(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x52, 0xfd, 0x08, 0x1f, 0x8d, 0x00, 0x08, 0x00, 0x00],
            ),
            // vpabsq -16(%r14), %xmm10
            (
                regs::xmm10(),
                Amode::ImmReg {
                    simm32: -16,
                    base: regs::r14(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x52, 0xfd, 0x08, 0x1f, 0x56, 0xff],
            ),
            // vpabsq -5(%r15), %xmm11
            (
                regs::xmm11(),
                Amode::ImmReg {
                    simm32: -5,
                    base: regs::r15(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x52, 0xfd, 0x08, 0x1f, 0x9f, 0xfb, 0xff, 0xff, 0xff],
            ),
            // vpabsq -2048(%rdx), %xmm12
            (
                regs::xmm12(),
                Amode::ImmReg {
                    simm32: -2048,
                    base: regs::rdx(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x72, 0xfd, 0x08, 0x1f, 0x62, 0x80],
            ),
            // vpabsq -2064(%rsi), %xmm13
            (
                regs::xmm13(),
                Amode::ImmReg {
                    simm32: -2064,
                    base: regs::rsi(),
                    flags: MemFlags::trusted(),
                }
                .into(),
                vec![0x62, 0x72, 0xfd, 0x08, 0x1f, 0xae, 0xf0, 0xf7, 0xff, 0xff],
            ),
            // a: vpabsq a(%rip), %xmm14
            (
                regs::xmm14(),
                Amode::RipRelative {
                    target: tmp.get_label(),
                }
                .into(),
                vec![0x62, 0x72, 0xfd, 0x08, 0x1f, 0x35, 0xf6, 0xff, 0xff, 0xff],
            ),
        ];

        for (dst, src, encoding) in tests {
            let mut sink = MachBuffer::new();
            let label = sink.get_label();
            sink.bind_label(label, &mut Default::default());
            EvexInstruction::new()
                .prefix(LegacyPrefixes::_66)
                .map(OpcodeMap::_0F38)
                .w(true)
                .opcode(0x1F)
                .reg(dst.to_real_reg().unwrap().hw_enc())
                .rm(src.clone())
                .length(EvexVectorLength::V128)
                .tuple_type(Avx512TupleType::Full)
                .encode(&mut sink);
            let bytes0 = sink
                .finish(&Default::default(), &mut Default::default())
                .data;
            assert_eq!(
                bytes0.as_slice(),
                encoding.as_slice(),
                "dst={dst:?} src={src:?}"
            );
        }
    }

    /// Verify that the defaults are equivalent to an instruction with a `0x00` opcode using the
    /// "0" register (i.e. `rax`), with sane defaults for the various configurable parameters. This
    /// test is more interesting than it may appear because some of the parameters have flipped-bit
    /// representations (e.g. `vvvvv`) so emitting 0s as a default will not work.
    #[test]
    fn default_emission() {
        let mut sink = MachBuffer::new();
        EvexInstruction::new().encode(&mut sink);
        let bytes0 = sink
            .finish(&Default::default(), &mut Default::default())
            .data;

        let mut sink = MachBuffer::new();
        EvexInstruction::new()
            .length(EvexVectorLength::V128)
            .prefix(LegacyPrefixes::None)
            .map(OpcodeMap::None)
            .w(false)
            .opcode(0x00)
            .reg(regs::rax().to_real_reg().unwrap().hw_enc())
            .rm(regs::rax().to_real_reg().unwrap().hw_enc())
            .mask(EvexMasking::None)
            .encode(&mut sink);
        let bytes1 = sink
            .finish(&Default::default(), &mut Default::default())
            .data;

        assert_eq!(bytes0, bytes1);
    }
}