cranelift_codegen/isa/aarch64/inst/mod.rs

1//! This module defines aarch64-specific machine instruction types.
2
3use crate::binemit::{Addend, CodeOffset, Reloc};
4use crate::ir::types::{F16, F32, F64, F128, I8, I8X16, I16, I32, I64, I128};
5use crate::ir::{MemFlags, Type, types};
6use crate::isa::{CallConv, FunctionAlignment};
7use crate::machinst::*;
8use crate::{CodegenError, CodegenResult, settings};
9
10use crate::machinst::{PrettyPrint, Reg, RegClass, Writable};
11
12use alloc::vec::Vec;
13use core::slice;
14use smallvec::{SmallVec, smallvec};
15use std::fmt::Write;
16use std::string::{String, ToString};
17
18pub(crate) mod regs;
19pub(crate) use self::regs::*;
20pub mod imms;
21pub use self::imms::*;
22pub mod args;
23pub use self::args::*;
24pub mod emit;
25pub(crate) use self::emit::*;
26use crate::isa::aarch64::abi::AArch64MachineDeps;
27
28pub(crate) mod unwind;
29
30#[cfg(test)]
31mod emit_tests;
32
33//=============================================================================
34// Instructions (top level): definition
35
36pub use crate::isa::aarch64::lower::isle::generated_code::{
37    ALUOp, ALUOp3, AMode, APIKey, AtomicRMWLoopOp, AtomicRMWOp, BitOp, BranchTargetType, FPUOp1,
38    FPUOp2, FPUOp3, FpuRoundMode, FpuToIntOp, IntToFpuOp, MInst as Inst, MoveWideOp, VecALUModOp,
39    VecALUOp, VecExtendOp, VecLanesOp, VecMisc2, VecPairOp, VecRRLongOp, VecRRNarrowOp,
40    VecRRPairLongOp, VecRRRLongModOp, VecRRRLongOp, VecShiftImmModOp, VecShiftImmOp,
41};
42
/// A floating-point unit (FPU) operation with two args, a register and an immediate.
#[derive(Copy, Clone, Debug)]
pub enum FPUOpRI {
    /// Unsigned right shift, 32-bit. Rd = Rn >> #imm
    UShr32(FPURightShiftImm),
    /// Unsigned right shift, 64-bit. Rd = Rn >> #imm
    UShr64(FPURightShiftImm),
}
51
/// A floating-point unit (FPU) operation with two args, a register and
/// an immediate that modifies its dest (so takes that input value as a
/// separate virtual register).
#[derive(Copy, Clone, Debug)]
pub enum FPUOpRIMod {
    /// Shift left and insert, 32-bit. Rd |= Rn << #imm
    /// (NOTE(review): SLI preserves the low #imm bits of Rd rather than
    /// OR-ing them; the formula is an approximation.)
    Sli32(FPULeftShiftImm),
    /// Shift left and insert, 64-bit. Rd |= Rn << #imm
    Sli64(FPULeftShiftImm),
}
62
63impl BitOp {
64    /// Get the assembly mnemonic for this opcode.
65    pub fn op_str(&self) -> &'static str {
66        match self {
67            BitOp::RBit => "rbit",
68            BitOp::Clz => "clz",
69            BitOp::Cls => "cls",
70            BitOp::Rev16 => "rev16",
71            BitOp::Rev32 => "rev32",
72            BitOp::Rev64 => "rev64",
73        }
74    }
75}
76
/// Additional information for `return_call[_ind]` instructions, left out of
/// line to lower the size of the `Inst` enum.
///
/// `T` is the call-destination type; for indirect tail calls it is a
/// register (see `Inst::ReturnCallInd`), for direct ones presumably a
/// function reference — confirm against the `Inst::ReturnCall` definition.
#[derive(Clone, Debug)]
pub struct ReturnCallInfo<T> {
    /// Where this call is going to
    pub dest: T,
    /// Arguments to the call instruction.
    pub uses: CallArgList,
    /// The size of the new stack frame's stack arguments. This is necessary
    /// for copying the frame over our current frame. It must already be
    /// allocated on the stack.
    pub new_stack_arg_size: u32,
    /// API key to use to restore the return address, if any.
    pub key: Option<APIKey>,
}
92
/// Count how many of the low `num_half_words` 16-bit halfwords of `value`
/// are zero. Used by `load_constant` to decide whether starting with MOVZ
/// or MOVN needs fewer follow-up instructions.
fn count_zero_half_words(mut value: u64, num_half_words: u8) -> usize {
    (0..num_half_words)
        .map(|_| {
            // Inspect the current lowest halfword, then shift it out.
            let is_zero = value & 0xffff == 0;
            value >>= 16;
            is_zero
        })
        .filter(|&zero| zero)
        .count()
}
104
impl Inst {
    /// Create an instruction that loads a constant, using one of several options (MOVZ, MOVN,
    /// logical immediate, or constant pool).
    ///
    /// Returns between one and four instructions that leave `value` in `rd`.
    pub fn load_constant(rd: Writable<Reg>, value: u64) -> SmallVec<[Inst; 4]> {
        // NB: this is duplicated in `lower/isle.rs` and `inst.isle` right now,
        // if modifications are made here before this is deleted after moving to
        // ISLE then those locations should be updated as well.

        if let Some(imm) = MoveWideConst::maybe_from_u64(value) {
            // 16-bit immediate (shifted by 0, 16, 32 or 48 bits) in MOVZ
            smallvec![Inst::MovWide {
                op: MoveWideOp::MovZ,
                rd,
                imm,
                size: OperandSize::Size64
            }]
        } else if let Some(imm) = MoveWideConst::maybe_from_u64(!value) {
            // 16-bit immediate (shifted by 0, 16, 32 or 48 bits) in MOVN
            // (MOVN writes the bitwise NOT of its shifted immediate).
            smallvec![Inst::MovWide {
                op: MoveWideOp::MovN,
                rd,
                imm,
                size: OperandSize::Size64
            }]
        } else if let Some(imml) = ImmLogic::maybe_from_u64(value, I64) {
            // Weird logical-instruction immediate in ORR using zero register:
            // `orr rd, xzr, #imm` materializes a bitmask immediate.
            smallvec![Inst::AluRRImmLogic {
                alu_op: ALUOp::Orr,
                size: OperandSize::Size64,
                rd,
                rn: zero_reg(),
                imml,
            }]
        } else {
            // General case: build the constant 16 bits at a time — a first
            // MOVZ/MOVN followed by MOVKs merging in remaining halfwords.
            let mut insts = smallvec![];

            // If the top 32 bits are zero, use 32-bit `mov` operations.
            // `negated` is the bitwise inverse restricted to the halfwords
            // considered below (the shift pair clears the inverted high bits
            // in the 32-bit case).
            let (num_half_words, size, negated) = if value >> 32 == 0 {
                (2, OperandSize::Size32, (!value << 32) >> 32)
            } else {
                (4, OperandSize::Size64, !value)
            };

            // If the number of 0xffff half words is greater than the number of 0x0000 half words
            // it is more efficient to use `movn` for the first instruction.
            let first_is_inverted = count_zero_half_words(negated, num_half_words)
                > count_zero_half_words(value, num_half_words);

            // Either 0xffff or 0x0000 half words can be skipped, depending on the first
            // instruction used.
            let ignored_halfword = if first_is_inverted { 0xffff } else { 0 };

            // Collect the (index, halfword) pairs that must be written
            // explicitly; halfwords equal to `ignored_halfword` come for free
            // from the initial MOVZ/MOVN.
            let halfwords: SmallVec<[_; 4]> = (0..num_half_words)
                .filter_map(|i| {
                    let imm16 = (value >> (16 * i)) & 0xffff;
                    if imm16 == ignored_halfword {
                        None
                    } else {
                        Some((i, imm16))
                    }
                })
                .collect();

            let mut prev_result = None;
            for (i, imm16) in halfwords {
                let shift = i * 16;

                if let Some(rn) = prev_result {
                    // `rd` already holds a partial result; merge this
                    // halfword in with MOVK.
                    let imm = MoveWideConst::maybe_with_shift(imm16 as u16, shift).unwrap();
                    insts.push(Inst::MovK { rd, rn, imm, size });
                } else {
                    if first_is_inverted {
                        // MOVN writes the NOT of its shifted immediate, so
                        // pre-invert the halfword here.
                        let imm =
                            MoveWideConst::maybe_with_shift(((!imm16) & 0xffff) as u16, shift)
                                .unwrap();
                        insts.push(Inst::MovWide {
                            op: MoveWideOp::MovN,
                            rd,
                            imm,
                            size,
                        });
                    } else {
                        let imm = MoveWideConst::maybe_with_shift(imm16 as u16, shift).unwrap();
                        insts.push(Inst::MovWide {
                            op: MoveWideOp::MovZ,
                            rd,
                            imm,
                            size,
                        });
                    }
                }

                prev_result = Some(rd.to_reg());
            }

            // At least one halfword was emitted: a value whose halfwords all
            // equal `ignored_halfword` is all-zeros or all-ones, both handled
            // by the MOVZ/MOVN fast paths above.
            assert!(prev_result.is_some());

            insts
        }
    }

    /// Generic constructor for a load (zero-extending where appropriate).
    ///
    /// Integer types use the unsigned (zero-extending) load of matching
    /// width; vector/float types select an FPU load of the matching bit
    /// width. Panics (`unimplemented!`) on unsupported types.
    pub fn gen_load(into_reg: Writable<Reg>, mem: AMode, ty: Type, flags: MemFlags) -> Inst {
        match ty {
            I8 => Inst::ULoad8 {
                rd: into_reg,
                mem,
                flags,
            },
            I16 => Inst::ULoad16 {
                rd: into_reg,
                mem,
                flags,
            },
            I32 => Inst::ULoad32 {
                rd: into_reg,
                mem,
                flags,
            },
            I64 => Inst::ULoad64 {
                rd: into_reg,
                mem,
                flags,
            },
            _ => {
                if ty.is_vector() || ty.is_float() {
                    let bits = ty_bits(ty);
                    let rd = into_reg;

                    match bits {
                        128 => Inst::FpuLoad128 { rd, mem, flags },
                        64 => Inst::FpuLoad64 { rd, mem, flags },
                        32 => Inst::FpuLoad32 { rd, mem, flags },
                        16 => Inst::FpuLoad16 { rd, mem, flags },
                        _ => unimplemented!("gen_load({})", ty),
                    }
                } else {
                    unimplemented!("gen_load({})", ty);
                }
            }
        }
    }

    /// Generic constructor for a store.
    ///
    /// Mirrors `gen_load`: integer widths map to the integer stores,
    /// vector/float types to the FPU stores of the matching bit width.
    pub fn gen_store(mem: AMode, from_reg: Reg, ty: Type, flags: MemFlags) -> Inst {
        match ty {
            I8 => Inst::Store8 {
                rd: from_reg,
                mem,
                flags,
            },
            I16 => Inst::Store16 {
                rd: from_reg,
                mem,
                flags,
            },
            I32 => Inst::Store32 {
                rd: from_reg,
                mem,
                flags,
            },
            I64 => Inst::Store64 {
                rd: from_reg,
                mem,
                flags,
            },
            _ => {
                if ty.is_vector() || ty.is_float() {
                    let bits = ty_bits(ty);
                    let rd = from_reg;

                    match bits {
                        128 => Inst::FpuStore128 { rd, mem, flags },
                        64 => Inst::FpuStore64 { rd, mem, flags },
                        32 => Inst::FpuStore32 { rd, mem, flags },
                        16 => Inst::FpuStore16 { rd, mem, flags },
                        _ => unimplemented!("gen_store({})", ty),
                    }
                } else {
                    unimplemented!("gen_store({})", ty);
                }
            }
        }
    }

    /// What type does this load or store instruction access in memory? When
    /// uimm12 encoding is used, the size of this type is the amount that
    /// immediate offsets are scaled by.
    ///
    /// Returns `None` for non-memory instructions. 128-bit accesses report
    /// `I8X16`; only the 16-byte size matters for offset scaling here.
    pub fn mem_type(&self) -> Option<Type> {
        match self {
            Inst::ULoad8 { .. } => Some(I8),
            Inst::SLoad8 { .. } => Some(I8),
            Inst::ULoad16 { .. } => Some(I16),
            Inst::SLoad16 { .. } => Some(I16),
            Inst::ULoad32 { .. } => Some(I32),
            Inst::SLoad32 { .. } => Some(I32),
            Inst::ULoad64 { .. } => Some(I64),
            Inst::FpuLoad16 { .. } => Some(F16),
            Inst::FpuLoad32 { .. } => Some(F32),
            Inst::FpuLoad64 { .. } => Some(F64),
            Inst::FpuLoad128 { .. } => Some(I8X16),
            Inst::Store8 { .. } => Some(I8),
            Inst::Store16 { .. } => Some(I16),
            Inst::Store32 { .. } => Some(I32),
            Inst::Store64 { .. } => Some(I64),
            Inst::FpuStore16 { .. } => Some(F16),
            Inst::FpuStore32 { .. } => Some(F32),
            Inst::FpuStore64 { .. } => Some(F64),
            Inst::FpuStore128 { .. } => Some(I8X16),
            _ => None,
        }
    }
}
318
319//=============================================================================
320// Instructions: get_regs
321
322fn memarg_operands(memarg: &mut AMode, collector: &mut impl OperandVisitor) {
323    match memarg {
324        AMode::Unscaled { rn, .. } | AMode::UnsignedOffset { rn, .. } => {
325            collector.reg_use(rn);
326        }
327        AMode::RegReg { rn, rm, .. }
328        | AMode::RegScaled { rn, rm, .. }
329        | AMode::RegScaledExtended { rn, rm, .. }
330        | AMode::RegExtended { rn, rm, .. } => {
331            collector.reg_use(rn);
332            collector.reg_use(rm);
333        }
334        AMode::Label { .. } => {}
335        AMode::SPPreIndexed { .. } | AMode::SPPostIndexed { .. } => {}
336        AMode::FPOffset { .. } | AMode::IncomingArg { .. } => {}
337        AMode::SPOffset { .. } | AMode::SlotOffset { .. } => {}
338        AMode::RegOffset { rn, .. } => {
339            collector.reg_use(rn);
340        }
341        AMode::Const { .. } => {}
342    }
343}
344
345fn pairmemarg_operands(pairmemarg: &mut PairAMode, collector: &mut impl OperandVisitor) {
346    match pairmemarg {
347        PairAMode::SignedOffset { reg, .. } => {
348            collector.reg_use(reg);
349        }
350        PairAMode::SPPreIndexed { .. } | PairAMode::SPPostIndexed { .. } => {}
351    }
352}
353
354fn aarch64_get_operands(inst: &mut Inst, collector: &mut impl OperandVisitor) {
355    match inst {
356        Inst::AluRRR { rd, rn, rm, .. } => {
357            collector.reg_def(rd);
358            collector.reg_use(rn);
359            collector.reg_use(rm);
360        }
361        Inst::AluRRRR { rd, rn, rm, ra, .. } => {
362            collector.reg_def(rd);
363            collector.reg_use(rn);
364            collector.reg_use(rm);
365            collector.reg_use(ra);
366        }
367        Inst::AluRRImm12 { rd, rn, .. } => {
368            collector.reg_def(rd);
369            collector.reg_use(rn);
370        }
371        Inst::AluRRImmLogic { rd, rn, .. } => {
372            collector.reg_def(rd);
373            collector.reg_use(rn);
374        }
375        Inst::AluRRImmShift { rd, rn, .. } => {
376            collector.reg_def(rd);
377            collector.reg_use(rn);
378        }
379        Inst::AluRRRShift { rd, rn, rm, .. } => {
380            collector.reg_def(rd);
381            collector.reg_use(rn);
382            collector.reg_use(rm);
383        }
384        Inst::AluRRRExtend { rd, rn, rm, .. } => {
385            collector.reg_def(rd);
386            collector.reg_use(rn);
387            collector.reg_use(rm);
388        }
389        Inst::BitRR { rd, rn, .. } => {
390            collector.reg_def(rd);
391            collector.reg_use(rn);
392        }
393        Inst::ULoad8 { rd, mem, .. }
394        | Inst::SLoad8 { rd, mem, .. }
395        | Inst::ULoad16 { rd, mem, .. }
396        | Inst::SLoad16 { rd, mem, .. }
397        | Inst::ULoad32 { rd, mem, .. }
398        | Inst::SLoad32 { rd, mem, .. }
399        | Inst::ULoad64 { rd, mem, .. } => {
400            collector.reg_def(rd);
401            memarg_operands(mem, collector);
402        }
403        Inst::Store8 { rd, mem, .. }
404        | Inst::Store16 { rd, mem, .. }
405        | Inst::Store32 { rd, mem, .. }
406        | Inst::Store64 { rd, mem, .. } => {
407            collector.reg_use(rd);
408            memarg_operands(mem, collector);
409        }
410        Inst::StoreP64 { rt, rt2, mem, .. } => {
411            collector.reg_use(rt);
412            collector.reg_use(rt2);
413            pairmemarg_operands(mem, collector);
414        }
415        Inst::LoadP64 { rt, rt2, mem, .. } => {
416            collector.reg_def(rt);
417            collector.reg_def(rt2);
418            pairmemarg_operands(mem, collector);
419        }
420        Inst::Mov { rd, rm, .. } => {
421            collector.reg_def(rd);
422            collector.reg_use(rm);
423        }
424        Inst::MovFromPReg { rd, rm } => {
425            debug_assert!(rd.to_reg().is_virtual());
426            collector.reg_def(rd);
427            collector.reg_fixed_nonallocatable(*rm);
428        }
429        Inst::MovToPReg { rd, rm } => {
430            debug_assert!(rm.is_virtual());
431            collector.reg_fixed_nonallocatable(*rd);
432            collector.reg_use(rm);
433        }
434        Inst::MovK { rd, rn, .. } => {
435            collector.reg_use(rn);
436            collector.reg_reuse_def(rd, 0); // `rn` == `rd`.
437        }
438        Inst::MovWide { rd, .. } => {
439            collector.reg_def(rd);
440        }
441        Inst::CSel { rd, rn, rm, .. } => {
442            collector.reg_def(rd);
443            collector.reg_use(rn);
444            collector.reg_use(rm);
445        }
446        Inst::CSNeg { rd, rn, rm, .. } => {
447            collector.reg_def(rd);
448            collector.reg_use(rn);
449            collector.reg_use(rm);
450        }
451        Inst::CSet { rd, .. } | Inst::CSetm { rd, .. } => {
452            collector.reg_def(rd);
453        }
454        Inst::CCmp { rn, rm, .. } => {
455            collector.reg_use(rn);
456            collector.reg_use(rm);
457        }
458        Inst::CCmpImm { rn, .. } => {
459            collector.reg_use(rn);
460        }
461        Inst::AtomicRMWLoop {
462            op,
463            addr,
464            operand,
465            oldval,
466            scratch1,
467            scratch2,
468            ..
469        } => {
470            collector.reg_fixed_use(addr, xreg(25));
471            collector.reg_fixed_use(operand, xreg(26));
472            collector.reg_fixed_def(oldval, xreg(27));
473            collector.reg_fixed_def(scratch1, xreg(24));
474            if *op != AtomicRMWLoopOp::Xchg {
475                collector.reg_fixed_def(scratch2, xreg(28));
476            }
477        }
478        Inst::AtomicRMW { rs, rt, rn, .. } => {
479            collector.reg_use(rs);
480            collector.reg_def(rt);
481            collector.reg_use(rn);
482        }
483        Inst::AtomicCAS { rd, rs, rt, rn, .. } => {
484            collector.reg_reuse_def(rd, 1); // reuse `rs`.
485            collector.reg_use(rs);
486            collector.reg_use(rt);
487            collector.reg_use(rn);
488        }
489        Inst::AtomicCASLoop {
490            addr,
491            expected,
492            replacement,
493            oldval,
494            scratch,
495            ..
496        } => {
497            collector.reg_fixed_use(addr, xreg(25));
498            collector.reg_fixed_use(expected, xreg(26));
499            collector.reg_fixed_use(replacement, xreg(28));
500            collector.reg_fixed_def(oldval, xreg(27));
501            collector.reg_fixed_def(scratch, xreg(24));
502        }
503        Inst::LoadAcquire { rt, rn, .. } => {
504            collector.reg_use(rn);
505            collector.reg_def(rt);
506        }
507        Inst::StoreRelease { rt, rn, .. } => {
508            collector.reg_use(rn);
509            collector.reg_use(rt);
510        }
511        Inst::Fence {} | Inst::Csdb {} => {}
512        Inst::FpuMove32 { rd, rn } => {
513            collector.reg_def(rd);
514            collector.reg_use(rn);
515        }
516        Inst::FpuMove64 { rd, rn } => {
517            collector.reg_def(rd);
518            collector.reg_use(rn);
519        }
520        Inst::FpuMove128 { rd, rn } => {
521            collector.reg_def(rd);
522            collector.reg_use(rn);
523        }
524        Inst::FpuMoveFromVec { rd, rn, .. } => {
525            collector.reg_def(rd);
526            collector.reg_use(rn);
527        }
528        Inst::FpuExtend { rd, rn, .. } => {
529            collector.reg_def(rd);
530            collector.reg_use(rn);
531        }
532        Inst::FpuRR { rd, rn, .. } => {
533            collector.reg_def(rd);
534            collector.reg_use(rn);
535        }
536        Inst::FpuRRR { rd, rn, rm, .. } => {
537            collector.reg_def(rd);
538            collector.reg_use(rn);
539            collector.reg_use(rm);
540        }
541        Inst::FpuRRI { rd, rn, .. } => {
542            collector.reg_def(rd);
543            collector.reg_use(rn);
544        }
545        Inst::FpuRRIMod { rd, ri, rn, .. } => {
546            collector.reg_reuse_def(rd, 1); // reuse `ri`.
547            collector.reg_use(ri);
548            collector.reg_use(rn);
549        }
550        Inst::FpuRRRR { rd, rn, rm, ra, .. } => {
551            collector.reg_def(rd);
552            collector.reg_use(rn);
553            collector.reg_use(rm);
554            collector.reg_use(ra);
555        }
556        Inst::VecMisc { rd, rn, .. } => {
557            collector.reg_def(rd);
558            collector.reg_use(rn);
559        }
560
561        Inst::VecLanes { rd, rn, .. } => {
562            collector.reg_def(rd);
563            collector.reg_use(rn);
564        }
565        Inst::VecShiftImm { rd, rn, .. } => {
566            collector.reg_def(rd);
567            collector.reg_use(rn);
568        }
569        Inst::VecShiftImmMod { rd, ri, rn, .. } => {
570            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
571            collector.reg_use(ri);
572            collector.reg_use(rn);
573        }
574        Inst::VecExtract { rd, rn, rm, .. } => {
575            collector.reg_def(rd);
576            collector.reg_use(rn);
577            collector.reg_use(rm);
578        }
579        Inst::VecTbl { rd, rn, rm } => {
580            collector.reg_use(rn);
581            collector.reg_use(rm);
582            collector.reg_def(rd);
583        }
584        Inst::VecTblExt { rd, ri, rn, rm } => {
585            collector.reg_use(rn);
586            collector.reg_use(rm);
587            collector.reg_reuse_def(rd, 3); // `rd` == `ri`.
588            collector.reg_use(ri);
589        }
590
591        Inst::VecTbl2 { rd, rn, rn2, rm } => {
592            // Constrain to v30 / v31 so that we satisfy the "adjacent
593            // registers" constraint without use of pinned vregs in
594            // lowering.
595            collector.reg_fixed_use(rn, vreg(30));
596            collector.reg_fixed_use(rn2, vreg(31));
597            collector.reg_use(rm);
598            collector.reg_def(rd);
599        }
600        Inst::VecTbl2Ext {
601            rd,
602            ri,
603            rn,
604            rn2,
605            rm,
606        } => {
607            // Constrain to v30 / v31 so that we satisfy the "adjacent
608            // registers" constraint without use of pinned vregs in
609            // lowering.
610            collector.reg_fixed_use(rn, vreg(30));
611            collector.reg_fixed_use(rn2, vreg(31));
612            collector.reg_use(rm);
613            collector.reg_reuse_def(rd, 4); // `rd` == `ri`.
614            collector.reg_use(ri);
615        }
616        Inst::VecLoadReplicate { rd, rn, .. } => {
617            collector.reg_def(rd);
618            collector.reg_use(rn);
619        }
620        Inst::VecCSel { rd, rn, rm, .. } => {
621            collector.reg_def(rd);
622            collector.reg_use(rn);
623            collector.reg_use(rm);
624        }
625        Inst::FpuCmp { rn, rm, .. } => {
626            collector.reg_use(rn);
627            collector.reg_use(rm);
628        }
629        Inst::FpuLoad16 { rd, mem, .. } => {
630            collector.reg_def(rd);
631            memarg_operands(mem, collector);
632        }
633        Inst::FpuLoad32 { rd, mem, .. } => {
634            collector.reg_def(rd);
635            memarg_operands(mem, collector);
636        }
637        Inst::FpuLoad64 { rd, mem, .. } => {
638            collector.reg_def(rd);
639            memarg_operands(mem, collector);
640        }
641        Inst::FpuLoad128 { rd, mem, .. } => {
642            collector.reg_def(rd);
643            memarg_operands(mem, collector);
644        }
645        Inst::FpuStore16 { rd, mem, .. } => {
646            collector.reg_use(rd);
647            memarg_operands(mem, collector);
648        }
649        Inst::FpuStore32 { rd, mem, .. } => {
650            collector.reg_use(rd);
651            memarg_operands(mem, collector);
652        }
653        Inst::FpuStore64 { rd, mem, .. } => {
654            collector.reg_use(rd);
655            memarg_operands(mem, collector);
656        }
657        Inst::FpuStore128 { rd, mem, .. } => {
658            collector.reg_use(rd);
659            memarg_operands(mem, collector);
660        }
661        Inst::FpuLoadP64 { rt, rt2, mem, .. } => {
662            collector.reg_def(rt);
663            collector.reg_def(rt2);
664            pairmemarg_operands(mem, collector);
665        }
666        Inst::FpuStoreP64 { rt, rt2, mem, .. } => {
667            collector.reg_use(rt);
668            collector.reg_use(rt2);
669            pairmemarg_operands(mem, collector);
670        }
671        Inst::FpuLoadP128 { rt, rt2, mem, .. } => {
672            collector.reg_def(rt);
673            collector.reg_def(rt2);
674            pairmemarg_operands(mem, collector);
675        }
676        Inst::FpuStoreP128 { rt, rt2, mem, .. } => {
677            collector.reg_use(rt);
678            collector.reg_use(rt2);
679            pairmemarg_operands(mem, collector);
680        }
681        Inst::FpuToInt { rd, rn, .. } => {
682            collector.reg_def(rd);
683            collector.reg_use(rn);
684        }
685        Inst::IntToFpu { rd, rn, .. } => {
686            collector.reg_def(rd);
687            collector.reg_use(rn);
688        }
689        Inst::FpuCSel16 { rd, rn, rm, .. }
690        | Inst::FpuCSel32 { rd, rn, rm, .. }
691        | Inst::FpuCSel64 { rd, rn, rm, .. } => {
692            collector.reg_def(rd);
693            collector.reg_use(rn);
694            collector.reg_use(rm);
695        }
696        Inst::FpuRound { rd, rn, .. } => {
697            collector.reg_def(rd);
698            collector.reg_use(rn);
699        }
700        Inst::MovToFpu { rd, rn, .. } => {
701            collector.reg_def(rd);
702            collector.reg_use(rn);
703        }
704        Inst::FpuMoveFPImm { rd, .. } => {
705            collector.reg_def(rd);
706        }
707        Inst::MovToVec { rd, ri, rn, .. } => {
708            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
709            collector.reg_use(ri);
710            collector.reg_use(rn);
711        }
712        Inst::MovFromVec { rd, rn, .. } | Inst::MovFromVecSigned { rd, rn, .. } => {
713            collector.reg_def(rd);
714            collector.reg_use(rn);
715        }
716        Inst::VecDup { rd, rn, .. } => {
717            collector.reg_def(rd);
718            collector.reg_use(rn);
719        }
720        Inst::VecDupFromFpu { rd, rn, .. } => {
721            collector.reg_def(rd);
722            collector.reg_use(rn);
723        }
724        Inst::VecDupFPImm { rd, .. } => {
725            collector.reg_def(rd);
726        }
727        Inst::VecDupImm { rd, .. } => {
728            collector.reg_def(rd);
729        }
730        Inst::VecExtend { rd, rn, .. } => {
731            collector.reg_def(rd);
732            collector.reg_use(rn);
733        }
734        Inst::VecMovElement { rd, ri, rn, .. } => {
735            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
736            collector.reg_use(ri);
737            collector.reg_use(rn);
738        }
739        Inst::VecRRLong { rd, rn, .. } => {
740            collector.reg_def(rd);
741            collector.reg_use(rn);
742        }
743        Inst::VecRRNarrowLow { rd, rn, .. } => {
744            collector.reg_use(rn);
745            collector.reg_def(rd);
746        }
747        Inst::VecRRNarrowHigh { rd, ri, rn, .. } => {
748            collector.reg_use(rn);
749            collector.reg_reuse_def(rd, 2); // `rd` == `ri`.
750            collector.reg_use(ri);
751        }
752        Inst::VecRRPair { rd, rn, .. } => {
753            collector.reg_def(rd);
754            collector.reg_use(rn);
755        }
756        Inst::VecRRRLong { rd, rn, rm, .. } => {
757            collector.reg_def(rd);
758            collector.reg_use(rn);
759            collector.reg_use(rm);
760        }
761        Inst::VecRRRLongMod { rd, ri, rn, rm, .. } => {
762            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
763            collector.reg_use(ri);
764            collector.reg_use(rn);
765            collector.reg_use(rm);
766        }
767        Inst::VecRRPairLong { rd, rn, .. } => {
768            collector.reg_def(rd);
769            collector.reg_use(rn);
770        }
771        Inst::VecRRR { rd, rn, rm, .. } => {
772            collector.reg_def(rd);
773            collector.reg_use(rn);
774            collector.reg_use(rm);
775        }
776        Inst::VecRRRMod { rd, ri, rn, rm, .. } | Inst::VecFmlaElem { rd, ri, rn, rm, .. } => {
777            collector.reg_reuse_def(rd, 1); // `rd` == `ri`.
778            collector.reg_use(ri);
779            collector.reg_use(rn);
780            collector.reg_use(rm);
781        }
782        Inst::MovToNZCV { rn } => {
783            collector.reg_use(rn);
784        }
785        Inst::MovFromNZCV { rd } => {
786            collector.reg_def(rd);
787        }
788        Inst::Extend { rd, rn, .. } => {
789            collector.reg_def(rd);
790            collector.reg_use(rn);
791        }
792        Inst::Args { args } => {
793            for ArgPair { vreg, preg } in args {
794                collector.reg_fixed_def(vreg, *preg);
795            }
796        }
797        Inst::Rets { rets } => {
798            for RetPair { vreg, preg } in rets {
799                collector.reg_fixed_use(vreg, *preg);
800            }
801        }
802        Inst::Ret { .. } | Inst::AuthenticatedRet { .. } => {}
803        Inst::Jump { .. } => {}
804        Inst::Call { info, .. } => {
805            let CallInfo { uses, defs, .. } = &mut **info;
806            for CallArgPair { vreg, preg } in uses {
807                collector.reg_fixed_use(vreg, *preg);
808            }
809            for CallRetPair { vreg, location } in defs {
810                match location {
811                    RetLocation::Reg(preg, ..) => collector.reg_fixed_def(vreg, *preg),
812                    RetLocation::Stack(..) => collector.any_def(vreg),
813                }
814            }
815            collector.reg_clobbers(info.clobbers);
816        }
817        Inst::CallInd { info, .. } => {
818            let CallInfo {
819                dest, uses, defs, ..
820            } = &mut **info;
821            collector.reg_use(dest);
822            for CallArgPair { vreg, preg } in uses {
823                collector.reg_fixed_use(vreg, *preg);
824            }
825            for CallRetPair { vreg, location } in defs {
826                match location {
827                    RetLocation::Reg(preg, ..) => collector.reg_fixed_def(vreg, *preg),
828                    RetLocation::Stack(..) => collector.any_def(vreg),
829                }
830            }
831            collector.reg_clobbers(info.clobbers);
832        }
833        Inst::ReturnCall { info } => {
834            for CallArgPair { vreg, preg } in &mut info.uses {
835                collector.reg_fixed_use(vreg, *preg);
836            }
837        }
838        Inst::ReturnCallInd { info } => {
839            // TODO(https://github.com/bytecodealliance/regalloc2/issues/145):
840            // This shouldn't be a fixed register constraint, but it's not clear how to pick a
841            // register that won't be clobbered by the callee-save restore code emitted with a
842            // return_call_indirect.
843            collector.reg_fixed_use(&mut info.dest, xreg(1));
844            for CallArgPair { vreg, preg } in &mut info.uses {
845                collector.reg_fixed_use(vreg, *preg);
846            }
847        }
848        Inst::CondBr { kind, .. } => match kind {
849            CondBrKind::Zero(rt, _) | CondBrKind::NotZero(rt, _) => collector.reg_use(rt),
850            CondBrKind::Cond(_) => {}
851        },
852        Inst::TestBitAndBranch { rn, .. } => {
853            collector.reg_use(rn);
854        }
855        Inst::IndirectBr { rn, .. } => {
856            collector.reg_use(rn);
857        }
858        Inst::Nop0 | Inst::Nop4 => {}
859        Inst::Brk => {}
860        Inst::Udf { .. } => {}
861        Inst::TrapIf { kind, .. } => match kind {
862            CondBrKind::Zero(rt, _) | CondBrKind::NotZero(rt, _) => collector.reg_use(rt),
863            CondBrKind::Cond(_) => {}
864        },
865        Inst::Adr { rd, .. } | Inst::Adrp { rd, .. } => {
866            collector.reg_def(rd);
867        }
868        Inst::Word4 { .. } | Inst::Word8 { .. } => {}
869        Inst::JTSequence {
870            ridx, rtmp1, rtmp2, ..
871        } => {
872            collector.reg_use(ridx);
873            collector.reg_early_def(rtmp1);
874            collector.reg_early_def(rtmp2);
875        }
876        Inst::LoadExtName { rd, .. } => {
877            collector.reg_def(rd);
878        }
879        Inst::LoadAddr { rd, mem } => {
880            collector.reg_def(rd);
881            memarg_operands(mem, collector);
882        }
883        Inst::Paci { .. } | Inst::Xpaclri => {
884            // Neither LR nor SP is an allocatable register, so there is no need
885            // to do anything.
886        }
887        Inst::Bti { .. } => {}
888
889        Inst::ElfTlsGetAddr { rd, tmp, .. } => {
890            // TLSDESC has a very neat calling convention. It is required to preserve
891            // all registers except x0 and x30. X30 is non allocatable in cranelift since
892            // its the link register.
893            //
894            // Additionally we need a second register as a temporary register for the
895            // TLSDESC sequence. This register can be any register other than x0 (and x30).
896            collector.reg_fixed_def(rd, regs::xreg(0));
897            collector.reg_early_def(tmp);
898        }
899        Inst::MachOTlsGetAddr { rd, .. } => {
900            collector.reg_fixed_def(rd, regs::xreg(0));
901            let mut clobbers =
902                AArch64MachineDeps::get_regs_clobbered_by_call(CallConv::AppleAarch64, false);
903            clobbers.remove(regs::xreg_preg(0));
904            collector.reg_clobbers(clobbers);
905        }
906        Inst::Unwind { .. } => {}
907        Inst::EmitIsland { .. } => {}
908        Inst::DummyUse { reg } => {
909            collector.reg_use(reg);
910        }
911        Inst::StackProbeLoop { start, end, .. } => {
912            collector.reg_early_def(start);
913            collector.reg_use(end);
914        }
915    }
916}
917
918//=============================================================================
919// Instructions: misc functions and external interface
920
921impl MachInst for Inst {
922    type ABIMachineSpec = AArch64MachineDeps;
923    type LabelUse = LabelUse;
924
925    // "CLIF" in hex, to make the trap recognizable during
926    // debugging.
927    const TRAP_OPCODE: &'static [u8] = &0xc11f_u32.to_le_bytes();
928
929    fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
930        aarch64_get_operands(self, collector);
931    }
932
933    fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
934        match self {
935            &Inst::Mov {
936                size: OperandSize::Size64,
937                rd,
938                rm,
939            } => Some((rd, rm)),
940            &Inst::FpuMove64 { rd, rn } => Some((rd, rn)),
941            &Inst::FpuMove128 { rd, rn } => Some((rd, rn)),
942            _ => None,
943        }
944    }
945
946    fn is_included_in_clobbers(&self) -> bool {
947        let (caller, callee, is_exception) = match self {
948            Inst::Args { .. } => return false,
949            Inst::Call { info } => (
950                info.caller_conv,
951                info.callee_conv,
952                info.try_call_info.is_some(),
953            ),
954            Inst::CallInd { info } => (
955                info.caller_conv,
956                info.callee_conv,
957                info.try_call_info.is_some(),
958            ),
959            _ => return true,
960        };
961
962        // We exclude call instructions from the clobber-set when they are calls
963        // from caller to callee that both clobber the same register (such as
964        // using the same or similar ABIs). Such calls cannot possibly force any
965        // new registers to be saved in the prologue, because anything that the
966        // callee clobbers, the caller is also allowed to clobber. This both
967        // saves work and enables us to more precisely follow the
968        // half-caller-save, half-callee-save SysV ABI for some vector
969        // registers.
970        //
971        // See the note in [crate::isa::aarch64::abi::is_caller_save_reg] for
972        // more information on this ABI-implementation hack.
973        let caller_clobbers = AArch64MachineDeps::get_regs_clobbered_by_call(caller, is_exception);
974        let callee_clobbers = AArch64MachineDeps::get_regs_clobbered_by_call(callee, is_exception);
975
976        let mut all_clobbers = caller_clobbers;
977        all_clobbers.union_from(callee_clobbers);
978        all_clobbers != caller_clobbers
979    }
980
981    fn is_trap(&self) -> bool {
982        match self {
983            Self::Udf { .. } => true,
984            _ => false,
985        }
986    }
987
988    fn is_args(&self) -> bool {
989        match self {
990            Self::Args { .. } => true,
991            _ => false,
992        }
993    }
994
995    fn is_term(&self) -> MachTerminator {
996        match self {
997            &Inst::Rets { .. } => MachTerminator::Ret,
998            &Inst::ReturnCall { .. } | &Inst::ReturnCallInd { .. } => MachTerminator::RetCall,
999            &Inst::Jump { .. } => MachTerminator::Branch,
1000            &Inst::CondBr { .. } => MachTerminator::Branch,
1001            &Inst::TestBitAndBranch { .. } => MachTerminator::Branch,
1002            &Inst::IndirectBr { .. } => MachTerminator::Branch,
1003            &Inst::JTSequence { .. } => MachTerminator::Branch,
1004            &Inst::Call { ref info } if info.try_call_info.is_some() => MachTerminator::Branch,
1005            &Inst::CallInd { ref info } if info.try_call_info.is_some() => MachTerminator::Branch,
1006            _ => MachTerminator::None,
1007        }
1008    }
1009
1010    fn is_mem_access(&self) -> bool {
1011        match self {
1012            &Inst::ULoad8 { .. }
1013            | &Inst::SLoad8 { .. }
1014            | &Inst::ULoad16 { .. }
1015            | &Inst::SLoad16 { .. }
1016            | &Inst::ULoad32 { .. }
1017            | &Inst::SLoad32 { .. }
1018            | &Inst::ULoad64 { .. }
1019            | &Inst::LoadP64 { .. }
1020            | &Inst::FpuLoad16 { .. }
1021            | &Inst::FpuLoad32 { .. }
1022            | &Inst::FpuLoad64 { .. }
1023            | &Inst::FpuLoad128 { .. }
1024            | &Inst::FpuLoadP64 { .. }
1025            | &Inst::FpuLoadP128 { .. }
1026            | &Inst::Store8 { .. }
1027            | &Inst::Store16 { .. }
1028            | &Inst::Store32 { .. }
1029            | &Inst::Store64 { .. }
1030            | &Inst::StoreP64 { .. }
1031            | &Inst::FpuStore16 { .. }
1032            | &Inst::FpuStore32 { .. }
1033            | &Inst::FpuStore64 { .. }
1034            | &Inst::FpuStore128 { .. } => true,
1035            // TODO: verify this carefully
1036            _ => false,
1037        }
1038    }
1039
1040    fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Inst {
1041        let bits = ty.bits();
1042
1043        assert!(bits <= 128);
1044        assert!(to_reg.to_reg().class() == from_reg.class());
1045        match from_reg.class() {
1046            RegClass::Int => Inst::Mov {
1047                size: OperandSize::Size64,
1048                rd: to_reg,
1049                rm: from_reg,
1050            },
1051            RegClass::Float => {
1052                if bits > 64 {
1053                    Inst::FpuMove128 {
1054                        rd: to_reg,
1055                        rn: from_reg,
1056                    }
1057                } else {
1058                    Inst::FpuMove64 {
1059                        rd: to_reg,
1060                        rn: from_reg,
1061                    }
1062                }
1063            }
1064            RegClass::Vector => unreachable!(),
1065        }
1066    }
1067
1068    fn is_safepoint(&self) -> bool {
1069        match self {
1070            Inst::Call { .. } | Inst::CallInd { .. } => true,
1071            _ => false,
1072        }
1073    }
1074
1075    fn gen_dummy_use(reg: Reg) -> Inst {
1076        Inst::DummyUse { reg }
1077    }
1078
1079    fn gen_nop(preferred_size: usize) -> Inst {
1080        if preferred_size == 0 {
1081            return Inst::Nop0;
1082        }
1083        // We can't give a NOP (or any insn) < 4 bytes.
1084        assert!(preferred_size >= 4);
1085        Inst::Nop4
1086    }
1087
1088    fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
1089        match ty {
1090            I8 => Ok((&[RegClass::Int], &[I8])),
1091            I16 => Ok((&[RegClass::Int], &[I16])),
1092            I32 => Ok((&[RegClass::Int], &[I32])),
1093            I64 => Ok((&[RegClass::Int], &[I64])),
1094            F16 => Ok((&[RegClass::Float], &[F16])),
1095            F32 => Ok((&[RegClass::Float], &[F32])),
1096            F64 => Ok((&[RegClass::Float], &[F64])),
1097            F128 => Ok((&[RegClass::Float], &[F128])),
1098            I128 => Ok((&[RegClass::Int, RegClass::Int], &[I64, I64])),
1099            _ if ty.is_vector() && ty.bits() <= 128 => {
1100                let types = &[types::I8X2, types::I8X4, types::I8X8, types::I8X16];
1101                Ok((
1102                    &[RegClass::Float],
1103                    slice::from_ref(&types[ty.bytes().ilog2() as usize - 1]),
1104                ))
1105            }
1106            _ if ty.is_dynamic_vector() => Ok((&[RegClass::Float], &[I8X16])),
1107            _ => Err(CodegenError::Unsupported(format!(
1108                "Unexpected SSA-value type: {ty}"
1109            ))),
1110        }
1111    }
1112
1113    fn canonical_type_for_rc(rc: RegClass) -> Type {
1114        match rc {
1115            RegClass::Float => types::I8X16,
1116            RegClass::Int => types::I64,
1117            RegClass::Vector => unreachable!(),
1118        }
1119    }
1120
1121    fn gen_jump(target: MachLabel) -> Inst {
1122        Inst::Jump {
1123            dest: BranchTarget::Label(target),
1124        }
1125    }
1126
1127    fn worst_case_size() -> CodeOffset {
1128        // The maximum size, in bytes, of any `Inst`'s emitted code. We have at least one case of
1129        // an 8-instruction sequence (saturating int-to-float conversions) with three embedded
1130        // 64-bit f64 constants.
1131        //
1132        // Note that inline jump-tables handle island/pool insertion separately, so we do not need
1133        // to account for them here (otherwise the worst case would be 2^31 * 4, clearly not
1134        // feasible for other reasons).
1135        44
1136    }
1137
1138    fn ref_type_regclass(_: &settings::Flags) -> RegClass {
1139        RegClass::Int
1140    }
1141
1142    fn gen_block_start(
1143        is_indirect_branch_target: bool,
1144        is_forward_edge_cfi_enabled: bool,
1145    ) -> Option<Self> {
1146        if is_indirect_branch_target && is_forward_edge_cfi_enabled {
1147            Some(Inst::Bti {
1148                targets: BranchTargetType::J,
1149            })
1150        } else {
1151            None
1152        }
1153    }
1154
1155    fn function_alignment() -> FunctionAlignment {
1156        // We use 32-byte alignment for performance reasons, but for correctness
1157        // we would only need 4-byte alignment.
1158        FunctionAlignment {
1159            minimum: 4,
1160            preferred: 32,
1161        }
1162    }
1163}
1164
1165//=============================================================================
1166// Pretty-printing of instructions.
1167
1168fn mem_finalize_for_show(mem: &AMode, access_ty: Type, state: &EmitState) -> (String, String) {
1169    let (mem_insts, mem) = mem_finalize(None, mem, access_ty, state);
1170    let mut mem_str = mem_insts
1171        .into_iter()
1172        .map(|inst| inst.print_with_state(&mut EmitState::default()))
1173        .collect::<Vec<_>>()
1174        .join(" ; ");
1175    if !mem_str.is_empty() {
1176        mem_str += " ; ";
1177    }
1178
1179    let mem = mem.pretty_print(access_ty.bytes() as u8);
1180    (mem_str, mem)
1181}
1182
1183fn pretty_print_try_call(info: &TryCallInfo) -> String {
1184    let dests = info
1185        .exception_dests
1186        .iter()
1187        .map(|(tag, label)| format!("{tag:?}: {label:?}"))
1188        .collect::<Vec<_>>()
1189        .join(", ");
1190    format!("; b {:?}; catch [{dests}]", info.continuation)
1191}
1192
1193impl Inst {
1194    fn print_with_state(&self, state: &mut EmitState) -> String {
1195        fn op_name(alu_op: ALUOp) -> &'static str {
1196            match alu_op {
1197                ALUOp::Add => "add",
1198                ALUOp::Sub => "sub",
1199                ALUOp::Orr => "orr",
1200                ALUOp::And => "and",
1201                ALUOp::AndS => "ands",
1202                ALUOp::Eor => "eor",
1203                ALUOp::AddS => "adds",
1204                ALUOp::SubS => "subs",
1205                ALUOp::SMulH => "smulh",
1206                ALUOp::UMulH => "umulh",
1207                ALUOp::SDiv => "sdiv",
1208                ALUOp::UDiv => "udiv",
1209                ALUOp::AndNot => "bic",
1210                ALUOp::OrrNot => "orn",
1211                ALUOp::EorNot => "eon",
1212                ALUOp::Extr => "extr",
1213                ALUOp::Lsr => "lsr",
1214                ALUOp::Asr => "asr",
1215                ALUOp::Lsl => "lsl",
1216                ALUOp::Adc => "adc",
1217                ALUOp::AdcS => "adcs",
1218                ALUOp::Sbc => "sbc",
1219                ALUOp::SbcS => "sbcs",
1220            }
1221        }
1222
1223        match self {
1224            &Inst::Nop0 => "nop-zero-len".to_string(),
1225            &Inst::Nop4 => "nop".to_string(),
1226            &Inst::AluRRR {
1227                alu_op,
1228                size,
1229                rd,
1230                rn,
1231                rm,
1232            } => {
1233                let op = op_name(alu_op);
1234                let rd = pretty_print_ireg(rd.to_reg(), size);
1235                let rn = pretty_print_ireg(rn, size);
1236                let rm = pretty_print_ireg(rm, size);
1237                format!("{op} {rd}, {rn}, {rm}")
1238            }
1239            &Inst::AluRRRR {
1240                alu_op,
1241                size,
1242                rd,
1243                rn,
1244                rm,
1245                ra,
1246            } => {
1247                let (op, da_size) = match alu_op {
1248                    ALUOp3::MAdd => ("madd", size),
1249                    ALUOp3::MSub => ("msub", size),
1250                    ALUOp3::UMAddL => ("umaddl", OperandSize::Size64),
1251                    ALUOp3::SMAddL => ("smaddl", OperandSize::Size64),
1252                };
1253                let rd = pretty_print_ireg(rd.to_reg(), da_size);
1254                let rn = pretty_print_ireg(rn, size);
1255                let rm = pretty_print_ireg(rm, size);
1256                let ra = pretty_print_ireg(ra, da_size);
1257
1258                format!("{op} {rd}, {rn}, {rm}, {ra}")
1259            }
1260            &Inst::AluRRImm12 {
1261                alu_op,
1262                size,
1263                rd,
1264                rn,
1265                ref imm12,
1266            } => {
1267                let op = op_name(alu_op);
1268                let rd = pretty_print_ireg(rd.to_reg(), size);
1269                let rn = pretty_print_ireg(rn, size);
1270
1271                if imm12.bits == 0 && alu_op == ALUOp::Add && size.is64() {
1272                    // special-case MOV (used for moving into SP).
1273                    format!("mov {rd}, {rn}")
1274                } else {
1275                    let imm12 = imm12.pretty_print(0);
1276                    format!("{op} {rd}, {rn}, {imm12}")
1277                }
1278            }
1279            &Inst::AluRRImmLogic {
1280                alu_op,
1281                size,
1282                rd,
1283                rn,
1284                ref imml,
1285            } => {
1286                let op = op_name(alu_op);
1287                let rd = pretty_print_ireg(rd.to_reg(), size);
1288                let rn = pretty_print_ireg(rn, size);
1289                let imml = imml.pretty_print(0);
1290                format!("{op} {rd}, {rn}, {imml}")
1291            }
1292            &Inst::AluRRImmShift {
1293                alu_op,
1294                size,
1295                rd,
1296                rn,
1297                ref immshift,
1298            } => {
1299                let op = op_name(alu_op);
1300                let rd = pretty_print_ireg(rd.to_reg(), size);
1301                let rn = pretty_print_ireg(rn, size);
1302                let immshift = immshift.pretty_print(0);
1303                format!("{op} {rd}, {rn}, {immshift}")
1304            }
1305            &Inst::AluRRRShift {
1306                alu_op,
1307                size,
1308                rd,
1309                rn,
1310                rm,
1311                ref shiftop,
1312            } => {
1313                let op = op_name(alu_op);
1314                let rd = pretty_print_ireg(rd.to_reg(), size);
1315                let rn = pretty_print_ireg(rn, size);
1316                let rm = pretty_print_ireg(rm, size);
1317                let shiftop = shiftop.pretty_print(0);
1318                format!("{op} {rd}, {rn}, {rm}, {shiftop}")
1319            }
1320            &Inst::AluRRRExtend {
1321                alu_op,
1322                size,
1323                rd,
1324                rn,
1325                rm,
1326                ref extendop,
1327            } => {
1328                let op = op_name(alu_op);
1329                let rd = pretty_print_ireg(rd.to_reg(), size);
1330                let rn = pretty_print_ireg(rn, size);
1331                let rm = pretty_print_ireg(rm, size);
1332                let extendop = extendop.pretty_print(0);
1333                format!("{op} {rd}, {rn}, {rm}, {extendop}")
1334            }
1335            &Inst::BitRR { op, size, rd, rn } => {
1336                let op = op.op_str();
1337                let rd = pretty_print_ireg(rd.to_reg(), size);
1338                let rn = pretty_print_ireg(rn, size);
1339                format!("{op} {rd}, {rn}")
1340            }
1341            &Inst::ULoad8 { rd, ref mem, .. }
1342            | &Inst::SLoad8 { rd, ref mem, .. }
1343            | &Inst::ULoad16 { rd, ref mem, .. }
1344            | &Inst::SLoad16 { rd, ref mem, .. }
1345            | &Inst::ULoad32 { rd, ref mem, .. }
1346            | &Inst::SLoad32 { rd, ref mem, .. }
1347            | &Inst::ULoad64 { rd, ref mem, .. } => {
1348                let is_unscaled = match &mem {
1349                    &AMode::Unscaled { .. } => true,
1350                    _ => false,
1351                };
1352                let (op, size) = match (self, is_unscaled) {
1353                    (&Inst::ULoad8 { .. }, false) => ("ldrb", OperandSize::Size32),
1354                    (&Inst::ULoad8 { .. }, true) => ("ldurb", OperandSize::Size32),
1355                    (&Inst::SLoad8 { .. }, false) => ("ldrsb", OperandSize::Size64),
1356                    (&Inst::SLoad8 { .. }, true) => ("ldursb", OperandSize::Size64),
1357                    (&Inst::ULoad16 { .. }, false) => ("ldrh", OperandSize::Size32),
1358                    (&Inst::ULoad16 { .. }, true) => ("ldurh", OperandSize::Size32),
1359                    (&Inst::SLoad16 { .. }, false) => ("ldrsh", OperandSize::Size64),
1360                    (&Inst::SLoad16 { .. }, true) => ("ldursh", OperandSize::Size64),
1361                    (&Inst::ULoad32 { .. }, false) => ("ldr", OperandSize::Size32),
1362                    (&Inst::ULoad32 { .. }, true) => ("ldur", OperandSize::Size32),
1363                    (&Inst::SLoad32 { .. }, false) => ("ldrsw", OperandSize::Size64),
1364                    (&Inst::SLoad32 { .. }, true) => ("ldursw", OperandSize::Size64),
1365                    (&Inst::ULoad64 { .. }, false) => ("ldr", OperandSize::Size64),
1366                    (&Inst::ULoad64 { .. }, true) => ("ldur", OperandSize::Size64),
1367                    _ => unreachable!(),
1368                };
1369
1370                let rd = pretty_print_ireg(rd.to_reg(), size);
1371                let mem = mem.clone();
1372                let access_ty = self.mem_type().unwrap();
1373                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1374
1375                format!("{mem_str}{op} {rd}, {mem}")
1376            }
1377            &Inst::Store8 { rd, ref mem, .. }
1378            | &Inst::Store16 { rd, ref mem, .. }
1379            | &Inst::Store32 { rd, ref mem, .. }
1380            | &Inst::Store64 { rd, ref mem, .. } => {
1381                let is_unscaled = match &mem {
1382                    &AMode::Unscaled { .. } => true,
1383                    _ => false,
1384                };
1385                let (op, size) = match (self, is_unscaled) {
1386                    (&Inst::Store8 { .. }, false) => ("strb", OperandSize::Size32),
1387                    (&Inst::Store8 { .. }, true) => ("sturb", OperandSize::Size32),
1388                    (&Inst::Store16 { .. }, false) => ("strh", OperandSize::Size32),
1389                    (&Inst::Store16 { .. }, true) => ("sturh", OperandSize::Size32),
1390                    (&Inst::Store32 { .. }, false) => ("str", OperandSize::Size32),
1391                    (&Inst::Store32 { .. }, true) => ("stur", OperandSize::Size32),
1392                    (&Inst::Store64 { .. }, false) => ("str", OperandSize::Size64),
1393                    (&Inst::Store64 { .. }, true) => ("stur", OperandSize::Size64),
1394                    _ => unreachable!(),
1395                };
1396
1397                let rd = pretty_print_ireg(rd, size);
1398                let mem = mem.clone();
1399                let access_ty = self.mem_type().unwrap();
1400                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1401
1402                format!("{mem_str}{op} {rd}, {mem}")
1403            }
1404            &Inst::StoreP64 {
1405                rt, rt2, ref mem, ..
1406            } => {
1407                let rt = pretty_print_ireg(rt, OperandSize::Size64);
1408                let rt2 = pretty_print_ireg(rt2, OperandSize::Size64);
1409                let mem = mem.clone();
1410                let mem = mem.pretty_print_default();
1411                format!("stp {rt}, {rt2}, {mem}")
1412            }
1413            &Inst::LoadP64 {
1414                rt, rt2, ref mem, ..
1415            } => {
1416                let rt = pretty_print_ireg(rt.to_reg(), OperandSize::Size64);
1417                let rt2 = pretty_print_ireg(rt2.to_reg(), OperandSize::Size64);
1418                let mem = mem.clone();
1419                let mem = mem.pretty_print_default();
1420                format!("ldp {rt}, {rt2}, {mem}")
1421            }
1422            &Inst::Mov { size, rd, rm } => {
1423                let rd = pretty_print_ireg(rd.to_reg(), size);
1424                let rm = pretty_print_ireg(rm, size);
1425                format!("mov {rd}, {rm}")
1426            }
1427            &Inst::MovFromPReg { rd, rm } => {
1428                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1429                let rm = show_ireg_sized(rm.into(), OperandSize::Size64);
1430                format!("mov {rd}, {rm}")
1431            }
1432            &Inst::MovToPReg { rd, rm } => {
1433                let rd = show_ireg_sized(rd.into(), OperandSize::Size64);
1434                let rm = pretty_print_ireg(rm, OperandSize::Size64);
1435                format!("mov {rd}, {rm}")
1436            }
1437            &Inst::MovWide {
1438                op,
1439                rd,
1440                ref imm,
1441                size,
1442            } => {
1443                let op_str = match op {
1444                    MoveWideOp::MovZ => "movz",
1445                    MoveWideOp::MovN => "movn",
1446                };
1447                let rd = pretty_print_ireg(rd.to_reg(), size);
1448                let imm = imm.pretty_print(0);
1449                format!("{op_str} {rd}, {imm}")
1450            }
1451            &Inst::MovK {
1452                rd,
1453                rn,
1454                ref imm,
1455                size,
1456            } => {
1457                let rn = pretty_print_ireg(rn, size);
1458                let rd = pretty_print_ireg(rd.to_reg(), size);
1459                let imm = imm.pretty_print(0);
1460                format!("movk {rd}, {rn}, {imm}")
1461            }
1462            &Inst::CSel { rd, rn, rm, cond } => {
1463                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1464                let rn = pretty_print_ireg(rn, OperandSize::Size64);
1465                let rm = pretty_print_ireg(rm, OperandSize::Size64);
1466                let cond = cond.pretty_print(0);
1467                format!("csel {rd}, {rn}, {rm}, {cond}")
1468            }
1469            &Inst::CSNeg { rd, rn, rm, cond } => {
1470                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1471                let rn = pretty_print_ireg(rn, OperandSize::Size64);
1472                let rm = pretty_print_ireg(rm, OperandSize::Size64);
1473                let cond = cond.pretty_print(0);
1474                format!("csneg {rd}, {rn}, {rm}, {cond}")
1475            }
1476            &Inst::CSet { rd, cond } => {
1477                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1478                let cond = cond.pretty_print(0);
1479                format!("cset {rd}, {cond}")
1480            }
1481            &Inst::CSetm { rd, cond } => {
1482                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1483                let cond = cond.pretty_print(0);
1484                format!("csetm {rd}, {cond}")
1485            }
1486            &Inst::CCmp {
1487                size,
1488                rn,
1489                rm,
1490                nzcv,
1491                cond,
1492            } => {
1493                let rn = pretty_print_ireg(rn, size);
1494                let rm = pretty_print_ireg(rm, size);
1495                let nzcv = nzcv.pretty_print(0);
1496                let cond = cond.pretty_print(0);
1497                format!("ccmp {rn}, {rm}, {nzcv}, {cond}")
1498            }
1499            &Inst::CCmpImm {
1500                size,
1501                rn,
1502                imm,
1503                nzcv,
1504                cond,
1505            } => {
1506                let rn = pretty_print_ireg(rn, size);
1507                let imm = imm.pretty_print(0);
1508                let nzcv = nzcv.pretty_print(0);
1509                let cond = cond.pretty_print(0);
1510                format!("ccmp {rn}, {imm}, {nzcv}, {cond}")
1511            }
1512            &Inst::AtomicRMW {
1513                rs, rt, rn, ty, op, ..
1514            } => {
1515                let op = match op {
1516                    AtomicRMWOp::Add => "ldaddal",
1517                    AtomicRMWOp::Clr => "ldclral",
1518                    AtomicRMWOp::Eor => "ldeoral",
1519                    AtomicRMWOp::Set => "ldsetal",
1520                    AtomicRMWOp::Smax => "ldsmaxal",
1521                    AtomicRMWOp::Umax => "ldumaxal",
1522                    AtomicRMWOp::Smin => "ldsminal",
1523                    AtomicRMWOp::Umin => "lduminal",
1524                    AtomicRMWOp::Swp => "swpal",
1525                };
1526
1527                let size = OperandSize::from_ty(ty);
1528                let rs = pretty_print_ireg(rs, size);
1529                let rt = pretty_print_ireg(rt.to_reg(), size);
1530                let rn = pretty_print_ireg(rn, OperandSize::Size64);
1531
1532                let ty_suffix = match ty {
1533                    I8 => "b",
1534                    I16 => "h",
1535                    _ => "",
1536                };
1537                format!("{op}{ty_suffix} {rs}, {rt}, [{rn}]")
1538            }
1539            &Inst::AtomicRMWLoop {
1540                ty,
1541                op,
1542                addr,
1543                operand,
1544                oldval,
1545                scratch1,
1546                scratch2,
1547                ..
1548            } => {
1549                let op = match op {
1550                    AtomicRMWLoopOp::Add => "add",
1551                    AtomicRMWLoopOp::Sub => "sub",
1552                    AtomicRMWLoopOp::Eor => "eor",
1553                    AtomicRMWLoopOp::Orr => "orr",
1554                    AtomicRMWLoopOp::And => "and",
1555                    AtomicRMWLoopOp::Nand => "nand",
1556                    AtomicRMWLoopOp::Smin => "smin",
1557                    AtomicRMWLoopOp::Smax => "smax",
1558                    AtomicRMWLoopOp::Umin => "umin",
1559                    AtomicRMWLoopOp::Umax => "umax",
1560                    AtomicRMWLoopOp::Xchg => "xchg",
1561                };
1562                let addr = pretty_print_ireg(addr, OperandSize::Size64);
1563                let operand = pretty_print_ireg(operand, OperandSize::Size64);
1564                let oldval = pretty_print_ireg(oldval.to_reg(), OperandSize::Size64);
1565                let scratch1 = pretty_print_ireg(scratch1.to_reg(), OperandSize::Size64);
1566                let scratch2 = pretty_print_ireg(scratch2.to_reg(), OperandSize::Size64);
1567                format!(
1568                    "atomic_rmw_loop_{}_{} addr={} operand={} oldval={} scratch1={} scratch2={}",
1569                    op,
1570                    ty.bits(),
1571                    addr,
1572                    operand,
1573                    oldval,
1574                    scratch1,
1575                    scratch2,
1576                )
1577            }
            // LSE compare-and-swap with acquire-release semantics; the
            // mnemonic suffix (b/h/none) is selected by the access width.
            &Inst::AtomicCAS {
                rd, rs, rt, rn, ty, ..
            } => {
                let op = match ty {
                    I8 => "casalb",
                    I16 => "casalh",
                    I32 | I64 => "casal",
                    _ => panic!("Unsupported type: {ty}"),
                };
                let size = OperandSize::from_ty(ty);
                let rd = pretty_print_ireg(rd.to_reg(), size);
                let rs = pretty_print_ireg(rs, size),;
                let rt = pretty_print_ireg(rt, size);
                // The address register is always printed at full 64-bit width.
                let rn = pretty_print_ireg(rn, OperandSize::Size64);

                format!("{op} {rd}, {rs}, {rt}, [{rn}]")
            }
            // Pseudo-instruction: an expanded (load/store-exclusive style)
            // CAS loop, printed with all virtual operands for readability.
            &Inst::AtomicCASLoop {
                ty,
                addr,
                expected,
                replacement,
                oldval,
                scratch,
                ..
            } => {
                let addr = pretty_print_ireg(addr, OperandSize::Size64);
                let expected = pretty_print_ireg(expected, OperandSize::Size64);
                let replacement = pretty_print_ireg(replacement, OperandSize::Size64);
                let oldval = pretty_print_ireg(oldval.to_reg(), OperandSize::Size64);
                let scratch = pretty_print_ireg(scratch.to_reg(), OperandSize::Size64);
                format!(
                    "atomic_cas_loop_{} addr={}, expect={}, replacement={}, oldval={}, scratch={}",
                    ty.bits(),
                    addr,
                    expected,
                    replacement,
                    oldval,
                    scratch,
                )
            }
            // Load-acquire (ldarb/ldarh/ldar); sub-word accesses are printed
            // with a 32-bit destination register.
            &Inst::LoadAcquire {
                access_ty, rt, rn, ..
            } => {
                let (op, ty) = match access_ty {
                    I8 => ("ldarb", I32),
                    I16 => ("ldarh", I32),
                    I32 => ("ldar", I32),
                    I64 => ("ldar", I64),
                    _ => panic!("Unsupported type: {access_ty}"),
                };
                let size = OperandSize::from_ty(ty);
                let rn = pretty_print_ireg(rn, OperandSize::Size64);
                let rt = pretty_print_ireg(rt.to_reg(), size);
                format!("{op} {rt}, [{rn}]")
            }
            // Store-release (stlrb/stlrh/stlr), mirroring LoadAcquire above.
            &Inst::StoreRelease {
                access_ty, rt, rn, ..
            } => {
                let (op, ty) = match access_ty {
                    I8 => ("stlrb", I32),
                    I16 => ("stlrh", I32),
                    I32 => ("stlr", I32),
                    I64 => ("stlr", I64),
                    _ => panic!("Unsupported type: {access_ty}"),
                };
                let size = OperandSize::from_ty(ty);
                let rn = pretty_print_ireg(rn, OperandSize::Size64);
                let rt = pretty_print_ireg(rt, size);
                format!("{op} {rt}, [{rn}]")
            }
1649            &Inst::Fence {} => {
1650                format!("dmb ish")
1651            }
1652            &Inst::Csdb {} => {
1653                format!("csdb")
1654            }
            // Scalar FP register-to-register moves at 32 and 64 bits.
            &Inst::FpuMove32 { rd, rn } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32);
                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size32);
                format!("fmov {rd}, {rn}")
            }
            &Inst::FpuMove64 { rd, rn } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size64);
                format!("fmov {rd}, {rn}")
            }
            // A full 128-bit move is printed as a whole-vector byte move.
            &Inst::FpuMove128 { rd, rn } => {
                let rd = pretty_print_reg(rd.to_reg());
                let rn = pretty_print_reg(rn);
                format!("mov {rd}.16b, {rn}.16b")
            }
            // Move a single vector lane into a scalar FP register.
            &Inst::FpuMoveFromVec { rd, rn, idx, size } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), size.lane_size());
                let rn = pretty_print_vreg_element(rn, idx as usize, size.lane_size());
                format!("mov {rd}, {rn}")
            }
            &Inst::FpuExtend { rd, rn, size } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
                let rn = pretty_print_vreg_scalar(rn, size);
                format!("fmov {rd}, {rn}")
            }
            // One-operand scalar FP ops. For the fcvt conversions the
            // destination width differs from the source, so it is computed
            // separately from `size`.
            &Inst::FpuRR {
                fpu_op,
                size,
                rd,
                rn,
            } => {
                let op = match fpu_op {
                    FPUOp1::Abs => "fabs",
                    FPUOp1::Neg => "fneg",
                    FPUOp1::Sqrt => "fsqrt",
                    FPUOp1::Cvt32To64 | FPUOp1::Cvt64To32 => "fcvt",
                };
                let dst_size = match fpu_op {
                    FPUOp1::Cvt32To64 => ScalarSize::Size64,
                    FPUOp1::Cvt64To32 => ScalarSize::Size32,
                    _ => size,
                };
                let rd = pretty_print_vreg_scalar(rd.to_reg(), dst_size);
                let rn = pretty_print_vreg_scalar(rn, size);
                format!("{op} {rd}, {rn}")
            }
            // Two-operand scalar FP arithmetic.
            &Inst::FpuRRR {
                fpu_op,
                size,
                rd,
                rn,
                rm,
            } => {
                let op = match fpu_op {
                    FPUOp2::Add => "fadd",
                    FPUOp2::Sub => "fsub",
                    FPUOp2::Mul => "fmul",
                    FPUOp2::Div => "fdiv",
                    FPUOp2::Max => "fmax",
                    FPUOp2::Min => "fmin",
                };
                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
                let rn = pretty_print_vreg_scalar(rn, size);
                let rm = pretty_print_vreg_scalar(rm, size);
                format!("{op} {rd}, {rn}, {rm}")
            }
            // FP shift by immediate. The 32-bit form is printed with a 2x32
            // vector arrangement, the 64-bit form as a scalar register.
            &Inst::FpuRRI { fpu_op, rd, rn } => {
                let (op, imm, vector) = match fpu_op {
                    FPUOpRI::UShr32(imm) => ("ushr", imm.pretty_print(0), true),
                    FPUOpRI::UShr64(imm) => ("ushr", imm.pretty_print(0), false),
                };

                let (rd, rn) = if vector {
                    (
                        pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size32x2),
                        pretty_print_vreg_vector(rn, VectorSize::Size32x2),
                    )
                } else {
                    (
                        pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64),
                        pretty_print_vreg_scalar(rn, ScalarSize::Size64),
                    )
                };
                format!("{op} {rd}, {rn}, {imm}")
            }
            // Like FpuRRI, but the operation also reads its destination, so
            // the incoming dest value `ri` is printed as an extra operand.
            &Inst::FpuRRIMod { fpu_op, rd, ri, rn } => {
                let (op, imm, vector) = match fpu_op {
                    FPUOpRIMod::Sli32(imm) => ("sli", imm.pretty_print(0), true),
                    FPUOpRIMod::Sli64(imm) => ("sli", imm.pretty_print(0), false),
                };

                let (rd, ri, rn) = if vector {
                    (
                        pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size32x2),
                        pretty_print_vreg_vector(ri, VectorSize::Size32x2),
                        pretty_print_vreg_vector(rn, VectorSize::Size32x2),
                    )
                } else {
                    (
                        pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64),
                        pretty_print_vreg_scalar(ri, ScalarSize::Size64),
                        pretty_print_vreg_scalar(rn, ScalarSize::Size64),
                    )
                };
                format!("{op} {rd}, {ri}, {rn}, {imm}")
            }
            // Fused three-operand scalar FP ops (multiply-add family).
            &Inst::FpuRRRR {
                fpu_op,
                size,
                rd,
                rn,
                rm,
                ra,
            } => {
                let op = match fpu_op {
                    FPUOp3::MAdd => "fmadd",
                    FPUOp3::MSub => "fmsub",
                    FPUOp3::NMAdd => "fnmadd",
                    FPUOp3::NMSub => "fnmsub",
                };
                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
                let rn = pretty_print_vreg_scalar(rn, size);
                let rm = pretty_print_vreg_scalar(rm, size);
                let ra = pretty_print_vreg_scalar(ra, size);
                format!("{op} {rd}, {rn}, {rm}, {ra}")
            }
            // Scalar FP compare; has no destination register.
            &Inst::FpuCmp { size, rn, rm } => {
                let rn = pretty_print_vreg_scalar(rn, size);
                let rm = pretty_print_vreg_scalar(rm, size);
                format!("fcmp {rn}, {rm}")
            }
            // FP loads/stores. `mem_finalize_for_show` may return a prefix
            // (`mem_str`) of extra instructions needed to materialize the
            // amode; it is printed before the load/store itself.
            &Inst::FpuLoad16 { rd, ref mem, .. } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size16);
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
                format!("{mem_str}ldr {rd}, {mem}")
            }
            &Inst::FpuLoad32 { rd, ref mem, .. } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32);
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
                format!("{mem_str}ldr {rd}, {mem}")
            }
            &Inst::FpuLoad64 { rd, ref mem, .. } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
                format!("{mem_str}ldr {rd}, {mem}")
            }
            // 128-bit load: rewrite the printed register name to use the `q`
            // (quad) prefix, keeping the register number.
            &Inst::FpuLoad128 { rd, ref mem, .. } => {
                let rd = pretty_print_reg(rd.to_reg());
                let rd = "q".to_string() + &rd[1..];
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
                format!("{mem_str}ldr {rd}, {mem}")
            }
            &Inst::FpuStore16 { rd, ref mem, .. } => {
                let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size16);
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
                format!("{mem_str}str {rd}, {mem}")
            }
            &Inst::FpuStore32 { rd, ref mem, .. } => {
                let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size32);
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
                format!("{mem_str}str {rd}, {mem}")
            }
            &Inst::FpuStore64 { rd, ref mem, .. } => {
                let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size64);
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
                format!("{mem_str}str {rd}, {mem}")
            }
            // 128-bit store: same `q`-prefix rewrite as FpuLoad128.
            &Inst::FpuStore128 { rd, ref mem, .. } => {
                let rd = pretty_print_reg(rd);
                let rd = "q".to_string() + &rd[1..];
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
                format!("{mem_str}str {rd}, {mem}")
            }
            // Pair loads/stores print their amode directly (no finalization
            // prefix), via `pretty_print_default`.
            &Inst::FpuLoadP64 {
                rt, rt2, ref mem, ..
            } => {
                let rt = pretty_print_vreg_scalar(rt.to_reg(), ScalarSize::Size64);
                let rt2 = pretty_print_vreg_scalar(rt2.to_reg(), ScalarSize::Size64);
                let mem = mem.clone();
                let mem = mem.pretty_print_default();

                format!("ldp {rt}, {rt2}, {mem}")
            }
            &Inst::FpuStoreP64 {
                rt, rt2, ref mem, ..
            } => {
                let rt = pretty_print_vreg_scalar(rt, ScalarSize::Size64);
                let rt2 = pretty_print_vreg_scalar(rt2, ScalarSize::Size64);
                let mem = mem.clone();
                let mem = mem.pretty_print_default();

                format!("stp {rt}, {rt2}, {mem}")
            }
            &Inst::FpuLoadP128 {
                rt, rt2, ref mem, ..
            } => {
                let rt = pretty_print_vreg_scalar(rt.to_reg(), ScalarSize::Size128);
                let rt2 = pretty_print_vreg_scalar(rt2.to_reg(), ScalarSize::Size128);
                let mem = mem.clone();
                let mem = mem.pretty_print_default();

                format!("ldp {rt}, {rt2}, {mem}")
            }
            &Inst::FpuStoreP128 {
                rt, rt2, ref mem, ..
            } => {
                let rt = pretty_print_vreg_scalar(rt, ScalarSize::Size128);
                let rt2 = pretty_print_vreg_scalar(rt2, ScalarSize::Size128);
                let mem = mem.clone();
                let mem = mem.pretty_print_default();

                format!("stp {rt}, {rt2}, {mem}")
            }
            // FP -> integer conversions: mnemonic plus source (FP scalar) and
            // destination (integer) widths are all derived from the op.
            &Inst::FpuToInt { op, rd, rn } => {
                let (op, sizesrc, sizedest) = match op {
                    FpuToIntOp::F32ToI32 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size32),
                    FpuToIntOp::F32ToU32 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size32),
                    FpuToIntOp::F32ToI64 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size64),
                    FpuToIntOp::F32ToU64 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size64),
                    FpuToIntOp::F64ToI32 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size32),
                    FpuToIntOp::F64ToU32 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size32),
                    FpuToIntOp::F64ToI64 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size64),
                    FpuToIntOp::F64ToU64 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size64),
                };
                let rd = pretty_print_ireg(rd.to_reg(), sizedest);
                let rn = pretty_print_vreg_scalar(rn, sizesrc);
                format!("{op} {rd}, {rn}")
            }
            // Integer -> FP conversions; the mirror image of FpuToInt.
            &Inst::IntToFpu { op, rd, rn } => {
                let (op, sizesrc, sizedest) = match op {
                    IntToFpuOp::I32ToF32 => ("scvtf", OperandSize::Size32, ScalarSize::Size32),
                    IntToFpuOp::U32ToF32 => ("ucvtf", OperandSize::Size32, ScalarSize::Size32),
                    IntToFpuOp::I64ToF32 => ("scvtf", OperandSize::Size64, ScalarSize::Size32),
                    IntToFpuOp::U64ToF32 => ("ucvtf", OperandSize::Size64, ScalarSize::Size32),
                    IntToFpuOp::I32ToF64 => ("scvtf", OperandSize::Size32, ScalarSize::Size64),
                    IntToFpuOp::U32ToF64 => ("ucvtf", OperandSize::Size32, ScalarSize::Size64),
                    IntToFpuOp::I64ToF64 => ("scvtf", OperandSize::Size64, ScalarSize::Size64),
                    IntToFpuOp::U64ToF64 => ("ucvtf", OperandSize::Size64, ScalarSize::Size64),
                };
                let rd = pretty_print_vreg_scalar(rd.to_reg(), sizedest);
                let rn = pretty_print_ireg(rn, sizesrc);
                format!("{op} {rd}, {rn}")
            }
            // Conditional FP selects at 16/32/64 bits.
            &Inst::FpuCSel16 { rd, rn, rm, cond } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size16);
                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size16);
                let rm = pretty_print_vreg_scalar(rm, ScalarSize::Size16);
                let cond = cond.pretty_print(0);
                format!("fcsel {rd}, {rn}, {rm}, {cond}")
            }
            &Inst::FpuCSel32 { rd, rn, rm, cond } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32);
                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size32);
                let rm = pretty_print_vreg_scalar(rm, ScalarSize::Size32);
                let cond = cond.pretty_print(0);
                format!("fcsel {rd}, {rn}, {rm}, {cond}")
            }
            &Inst::FpuCSel64 { rd, rn, rm, cond } => {
                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
                let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size64);
                let rm = pretty_print_vreg_scalar(rm, ScalarSize::Size64);
                let cond = cond.pretty_print(0);
                format!("fcsel {rd}, {rn}, {rm}, {cond}")
            }
            // FP rounding; each mode encodes both rounding direction and width.
            &Inst::FpuRound { op, rd, rn } => {
                let (inst, size) = match op {
                    FpuRoundMode::Minus32 => ("frintm", ScalarSize::Size32),
                    FpuRoundMode::Minus64 => ("frintm", ScalarSize::Size64),
                    FpuRoundMode::Plus32 => ("frintp", ScalarSize::Size32),
                    FpuRoundMode::Plus64 => ("frintp", ScalarSize::Size64),
                    FpuRoundMode::Zero32 => ("frintz", ScalarSize::Size32),
                    FpuRoundMode::Zero64 => ("frintz", ScalarSize::Size64),
                    FpuRoundMode::Nearest32 => ("frintn", ScalarSize::Size32),
                    FpuRoundMode::Nearest64 => ("frintn", ScalarSize::Size64),
                };
                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
                let rn = pretty_print_vreg_scalar(rn, size);
                format!("{inst} {rd}, {rn}")
            }
            // Move from an integer register into an FP/vector register.
            &Inst::MovToFpu { rd, rn, size } => {
                let operand_size = size.operand_size();
                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
                let rn = pretty_print_ireg(rn, operand_size);
                format!("fmov {rd}, {rn}")
            }
            // Materialize an FP immediate into a scalar FP register.
            &Inst::FpuMoveFPImm { rd, imm, size } => {
                let imm = imm.pretty_print(0);
                let rd = pretty_print_vreg_scalar(rd.to_reg(), size);

                format!("fmov {rd}, {imm}")
            }
            // Insert an integer register into one vector lane; `ri` is the
            // incoming value of the destination (the op modifies its dest).
            &Inst::MovToVec {
                rd,
                ri,
                rn,
                idx,
                size,
            } => {
                let rd = pretty_print_vreg_element(rd.to_reg(), idx as usize, size.lane_size());
                let ri = pretty_print_vreg_element(ri, idx as usize, size.lane_size());
                let rn = pretty_print_ireg(rn, size.operand_size());
                format!("mov {rd}, {ri}, {rn}")
            }
            // Extract a lane into an integer register; sub-32-bit lanes use
            // the `umov` mnemonic, 32/64-bit lanes plain `mov`.
            &Inst::MovFromVec { rd, rn, idx, size } => {
                let op = match size {
                    ScalarSize::Size8 => "umov",
                    ScalarSize::Size16 => "umov",
                    ScalarSize::Size32 => "mov",
                    ScalarSize::Size64 => "mov",
                    _ => unimplemented!(),
                };
                let rd = pretty_print_ireg(rd.to_reg(), size.operand_size());
                let rn = pretty_print_vreg_element(rn, idx as usize, size);
                format!("{op} {rd}, {rn}")
            }
            // Sign-extending lane extract.
            &Inst::MovFromVecSigned {
                rd,
                rn,
                idx,
                size,
                scalar_size,
            } => {
                let rd = pretty_print_ireg(rd.to_reg(), scalar_size);
                let rn = pretty_print_vreg_element(rn, idx as usize, size.lane_size());
                format!("smov {rd}, {rn}")
            }
            // Broadcast an integer register into all lanes.
            &Inst::VecDup { rd, rn, size } => {
                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
                let rn = pretty_print_ireg(rn, size.operand_size());
                format!("dup {rd}, {rn}")
            }
            // Broadcast one lane of an FP/vector register into all lanes.
            &Inst::VecDupFromFpu { rd, rn, size, lane } => {
                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
                let rn = pretty_print_vreg_element(rn, lane.into(), size.lane_size());
                format!("dup {rd}, {rn}")
            }
            // Broadcast an FP immediate into all lanes.
            &Inst::VecDupFPImm { rd, imm, size } => {
                let imm = imm.pretty_print(0);
                let rd = pretty_print_vreg_vector(rd.to_reg(), size);

                format!("fmov {rd}, {imm}")
            }
            // Broadcast an integer immediate; `invert` selects mvni over movi.
            &Inst::VecDupImm {
                rd,
                imm,
                invert,
                size,
            } => {
                let imm = imm.pretty_print(0);
                let op = if invert { "mvni" } else { "movi" };
                let rd = pretty_print_vreg_vector(rd.to_reg(), size);

                format!("{op} {rd}, {imm}")
            }
            // Lane-widening extends. The "2" mnemonic variants read the high
            // half of a 128-bit source; the others read a 64-bit source.
            &Inst::VecExtend {
                t,
                rd,
                rn,
                high_half,
                lane_size,
            } => {
                let vec64 = VectorSize::from_lane_size(lane_size.narrow(), false);
                let vec128 = VectorSize::from_lane_size(lane_size.narrow(), true);
                let rd_size = VectorSize::from_lane_size(lane_size, true);
                let (op, rn_size) = match (t, high_half) {
                    (VecExtendOp::Sxtl, false) => ("sxtl", vec64),
                    (VecExtendOp::Sxtl, true) => ("sxtl2", vec128),
                    (VecExtendOp::Uxtl, false) => ("uxtl", vec64),
                    (VecExtendOp::Uxtl, true) => ("uxtl2", vec128),
                };
                let rd = pretty_print_vreg_vector(rd.to_reg(), rd_size);
                let rn = pretty_print_vreg_vector(rn, rn_size);
                format!("{op} {rd}, {rn}")
            }
            // Lane-to-lane move; modifies its destination, so the incoming
            // dest value `ri` is printed as an extra operand.
            &Inst::VecMovElement {
                rd,
                ri,
                rn,
                dest_idx,
                src_idx,
                size,
            } => {
                let rd =
                    pretty_print_vreg_element(rd.to_reg(), dest_idx as usize, size.lane_size());
                let ri = pretty_print_vreg_element(ri, dest_idx as usize, size.lane_size());
                let rn = pretty_print_vreg_element(rn, src_idx as usize, size.lane_size());
                format!("mov {rd}, {ri}, {rn}")
            }
            // Widening one-operand vector ops. Each table entry gives the
            // mnemonic, dest/src arrangements, and an optional immediate
            // suffix (the shll shift amount).
            &Inst::VecRRLong {
                op,
                rd,
                rn,
                high_half,
            } => {
                let (op, rd_size, size, suffix) = match (op, high_half) {
                    (VecRRLongOp::Fcvtl16, false) => {
                        ("fcvtl", VectorSize::Size32x4, VectorSize::Size16x4, "")
                    }
                    (VecRRLongOp::Fcvtl16, true) => {
                        ("fcvtl2", VectorSize::Size32x4, VectorSize::Size16x8, "")
                    }
                    (VecRRLongOp::Fcvtl32, false) => {
                        ("fcvtl", VectorSize::Size64x2, VectorSize::Size32x2, "")
                    }
                    (VecRRLongOp::Fcvtl32, true) => {
                        ("fcvtl2", VectorSize::Size64x2, VectorSize::Size32x4, "")
                    }
                    (VecRRLongOp::Shll8, false) => {
                        ("shll", VectorSize::Size16x8, VectorSize::Size8x8, ", #8")
                    }
                    (VecRRLongOp::Shll8, true) => {
                        ("shll2", VectorSize::Size16x8, VectorSize::Size8x16, ", #8")
                    }
                    (VecRRLongOp::Shll16, false) => {
                        ("shll", VectorSize::Size32x4, VectorSize::Size16x4, ", #16")
                    }
                    (VecRRLongOp::Shll16, true) => {
                        ("shll2", VectorSize::Size32x4, VectorSize::Size16x8, ", #16")
                    }
                    (VecRRLongOp::Shll32, false) => {
                        ("shll", VectorSize::Size64x2, VectorSize::Size32x2, ", #32")
                    }
                    (VecRRLongOp::Shll32, true) => {
                        ("shll2", VectorSize::Size64x2, VectorSize::Size32x4, ", #32")
                    }
                };
                let rd = pretty_print_vreg_vector(rd.to_reg(), rd_size);
                let rn = pretty_print_vreg_vector(rn, size);

                format!("{op} {rd}, {rn}{suffix}")
            }
            // Narrowing one-operand vector ops; Low and High variants share a
            // body and are distinguished again via `match self` below. The
            // High variant additionally prints its incoming dest value `ri`.
            &Inst::VecRRNarrowLow {
                op,
                rd,
                rn,
                lane_size,
                ..
            }
            | &Inst::VecRRNarrowHigh {
                op,
                rd,
                rn,
                lane_size,
                ..
            } => {
                let vec64 = VectorSize::from_lane_size(lane_size, false);
                let vec128 = VectorSize::from_lane_size(lane_size, true);
                let rn_size = VectorSize::from_lane_size(lane_size.widen(), true);
                let high_half = match self {
                    &Inst::VecRRNarrowLow { .. } => false,
                    &Inst::VecRRNarrowHigh { .. } => true,
                    _ => unreachable!(),
                };
                let (op, rd_size) = match (op, high_half) {
                    (VecRRNarrowOp::Xtn, false) => ("xtn", vec64),
                    (VecRRNarrowOp::Xtn, true) => ("xtn2", vec128),
                    (VecRRNarrowOp::Sqxtn, false) => ("sqxtn", vec64),
                    (VecRRNarrowOp::Sqxtn, true) => ("sqxtn2", vec128),
                    (VecRRNarrowOp::Sqxtun, false) => ("sqxtun", vec64),
                    (VecRRNarrowOp::Sqxtun, true) => ("sqxtun2", vec128),
                    (VecRRNarrowOp::Uqxtn, false) => ("uqxtn", vec64),
                    (VecRRNarrowOp::Uqxtn, true) => ("uqxtn2", vec128),
                    (VecRRNarrowOp::Fcvtn, false) => ("fcvtn", vec64),
                    (VecRRNarrowOp::Fcvtn, true) => ("fcvtn2", vec128),
                };
                let rn = pretty_print_vreg_vector(rn, rn_size);
                let rd = pretty_print_vreg_vector(rd.to_reg(), rd_size);
                let ri = match self {
                    &Inst::VecRRNarrowLow { .. } => "".to_string(),
                    &Inst::VecRRNarrowHigh { ri, .. } => {
                        format!("{}, ", pretty_print_vreg_vector(ri, rd_size))
                    }
                    _ => unreachable!(),
                };

                format!("{op} {rd}, {ri}{rn}")
            }
            // Pairwise reduction to a 64-bit scalar.
            &Inst::VecRRPair { op, rd, rn } => {
                let op = match op {
                    VecPairOp::Addp => "addp",
                };
                let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
                let rn = pretty_print_vreg_vector(rn, VectorSize::Size64x2);

                format!("{op} {rd}, {rn}")
            }
            // Pairwise widening adds; dest/src arrangements come from the op.
            &Inst::VecRRPairLong { op, rd, rn } => {
                let (op, dest, src) = match op {
                    VecRRPairLongOp::Saddlp8 => {
                        ("saddlp", VectorSize::Size16x8, VectorSize::Size8x16)
                    }
                    VecRRPairLongOp::Saddlp16 => {
                        ("saddlp", VectorSize::Size32x4, VectorSize::Size16x8)
                    }
                    VecRRPairLongOp::Uaddlp8 => {
                        ("uaddlp", VectorSize::Size16x8, VectorSize::Size8x16)
                    }
                    VecRRPairLongOp::Uaddlp16 => {
                        ("uaddlp", VectorSize::Size32x4, VectorSize::Size16x8)
                    }
                };
                let rd = pretty_print_vreg_vector(rd.to_reg(), dest);
                let rn = pretty_print_vreg_vector(rn, src);

                format!("{op} {rd}, {rn}")
            }
            // Two-operand vector ALU ops. Bitwise ops (and/bic/orr/eor) are
            // always printed with the 8x16 byte arrangement regardless of the
            // instruction's nominal lane size.
            &Inst::VecRRR {
                rd,
                rn,
                rm,
                alu_op,
                size,
            } => {
                let (op, size) = match alu_op {
                    VecALUOp::Sqadd => ("sqadd", size),
                    VecALUOp::Uqadd => ("uqadd", size),
                    VecALUOp::Sqsub => ("sqsub", size),
                    VecALUOp::Uqsub => ("uqsub", size),
                    VecALUOp::Cmeq => ("cmeq", size),
                    VecALUOp::Cmge => ("cmge", size),
                    VecALUOp::Cmgt => ("cmgt", size),
                    VecALUOp::Cmhs => ("cmhs", size),
                    VecALUOp::Cmhi => ("cmhi", size),
                    VecALUOp::Fcmeq => ("fcmeq", size),
                    VecALUOp::Fcmgt => ("fcmgt", size),
                    VecALUOp::Fcmge => ("fcmge", size),
                    VecALUOp::And => ("and", VectorSize::Size8x16),
                    VecALUOp::Bic => ("bic", VectorSize::Size8x16),
                    VecALUOp::Orr => ("orr", VectorSize::Size8x16),
                    VecALUOp::Eor => ("eor", VectorSize::Size8x16),
                    VecALUOp::Umaxp => ("umaxp", size),
                    VecALUOp::Add => ("add", size),
                    VecALUOp::Sub => ("sub", size),
                    VecALUOp::Mul => ("mul", size),
                    VecALUOp::Sshl => ("sshl", size),
                    VecALUOp::Ushl => ("ushl", size),
                    VecALUOp::Umin => ("umin", size),
                    VecALUOp::Smin => ("smin", size),
                    VecALUOp::Umax => ("umax", size),
                    VecALUOp::Smax => ("smax", size),
                    VecALUOp::Urhadd => ("urhadd", size),
                    VecALUOp::Fadd => ("fadd", size),
                    VecALUOp::Fsub => ("fsub", size),
                    VecALUOp::Fdiv => ("fdiv", size),
                    VecALUOp::Fmax => ("fmax", size),
                    VecALUOp::Fmin => ("fmin", size),
                    VecALUOp::Fmul => ("fmul", size),
                    VecALUOp::Addp => ("addp", size),
                    VecALUOp::Zip1 => ("zip1", size),
                    VecALUOp::Zip2 => ("zip2", size),
                    VecALUOp::Sqrdmulh => ("sqrdmulh", size),
                    VecALUOp::Uzp1 => ("uzp1", size),
                    VecALUOp::Uzp2 => ("uzp2", size),
                    VecALUOp::Trn1 => ("trn1", size),
                    VecALUOp::Trn2 => ("trn2", size),
                };
                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
                let rn = pretty_print_vreg_vector(rn, size);
                let rm = pretty_print_vreg_vector(rm, size);
                format!("{op} {rd}, {rn}, {rm}")
            }
            // Vector ALU ops that modify their destination; the incoming dest
            // value `ri` is printed as an extra operand. bsl is printed with
            // the byte arrangement like the bitwise ops above.
            &Inst::VecRRRMod {
                rd,
                ri,
                rn,
                rm,
                alu_op,
                size,
            } => {
                let (op, size) = match alu_op {
                    VecALUModOp::Bsl => ("bsl", VectorSize::Size8x16),
                    VecALUModOp::Fmla => ("fmla", size),
                    VecALUModOp::Fmls => ("fmls", size),
                };
                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
                let ri = pretty_print_vreg_vector(ri, size);
                let rn = pretty_print_vreg_vector(rn, size);
                let rm = pretty_print_vreg_vector(rm, size);
                format!("{op} {rd}, {ri}, {rn}, {rm}")
            }
            // fmla/fmls by a single lane of rm (indexed multiply-accumulate);
            // only the Fmla/Fmls variants of VecALUModOp are valid here.
            &Inst::VecFmlaElem {
                rd,
                ri,
                rn,
                rm,
                alu_op,
                size,
                idx,
            } => {
                let (op, size) = match alu_op {
                    VecALUModOp::Fmla => ("fmla", size),
                    VecALUModOp::Fmls => ("fmls", size),
                    _ => unreachable!(),
                };
                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
                let ri = pretty_print_vreg_vector(ri, size);
                let rn = pretty_print_vreg_vector(rn, size);
                let rm = pretty_print_vreg_element(rm, idx.into(), size.lane_size());
                format!("{op} {rd}, {ri}, {rn}, {rm}")
            }
2271            &Inst::VecRRRLong {
2272                rd,
2273                rn,
2274                rm,
2275                alu_op,
2276                high_half,
2277            } => {
2278                let (op, dest_size, src_size) = match (alu_op, high_half) {
2279                    (VecRRRLongOp::Smull8, false) => {
2280                        ("smull", VectorSize::Size16x8, VectorSize::Size8x8)
2281                    }
2282                    (VecRRRLongOp::Smull8, true) => {
2283                        ("smull2", VectorSize::Size16x8, VectorSize::Size8x16)
2284                    }
2285                    (VecRRRLongOp::Smull16, false) => {
2286                        ("smull", VectorSize::Size32x4, VectorSize::Size16x4)
2287                    }
2288                    (VecRRRLongOp::Smull16, true) => {
2289                        ("smull2", VectorSize::Size32x4, VectorSize::Size16x8)
2290                    }
2291                    (VecRRRLongOp::Smull32, false) => {
2292                        ("smull", VectorSize::Size64x2, VectorSize::Size32x2)
2293                    }
2294                    (VecRRRLongOp::Smull32, true) => {
2295                        ("smull2", VectorSize::Size64x2, VectorSize::Size32x4)
2296                    }
2297                    (VecRRRLongOp::Umull8, false) => {
2298                        ("umull", VectorSize::Size16x8, VectorSize::Size8x8)
2299                    }
2300                    (VecRRRLongOp::Umull8, true) => {
2301                        ("umull2", VectorSize::Size16x8, VectorSize::Size8x16)
2302                    }
2303                    (VecRRRLongOp::Umull16, false) => {
2304                        ("umull", VectorSize::Size32x4, VectorSize::Size16x4)
2305                    }
2306                    (VecRRRLongOp::Umull16, true) => {
2307                        ("umull2", VectorSize::Size32x4, VectorSize::Size16x8)
2308                    }
2309                    (VecRRRLongOp::Umull32, false) => {
2310                        ("umull", VectorSize::Size64x2, VectorSize::Size32x2)
2311                    }
2312                    (VecRRRLongOp::Umull32, true) => {
2313                        ("umull2", VectorSize::Size64x2, VectorSize::Size32x4)
2314                    }
2315                };
2316                let rd = pretty_print_vreg_vector(rd.to_reg(), dest_size);
2317                let rn = pretty_print_vreg_vector(rn, src_size);
2318                let rm = pretty_print_vreg_vector(rm, src_size);
2319                format!("{op} {rd}, {rn}, {rm}")
2320            }
2321            &Inst::VecRRRLongMod {
2322                rd,
2323                ri,
2324                rn,
2325                rm,
2326                alu_op,
2327                high_half,
2328            } => {
2329                let (op, dest_size, src_size) = match (alu_op, high_half) {
2330                    (VecRRRLongModOp::Umlal8, false) => {
2331                        ("umlal", VectorSize::Size16x8, VectorSize::Size8x8)
2332                    }
2333                    (VecRRRLongModOp::Umlal8, true) => {
2334                        ("umlal2", VectorSize::Size16x8, VectorSize::Size8x16)
2335                    }
2336                    (VecRRRLongModOp::Umlal16, false) => {
2337                        ("umlal", VectorSize::Size32x4, VectorSize::Size16x4)
2338                    }
2339                    (VecRRRLongModOp::Umlal16, true) => {
2340                        ("umlal2", VectorSize::Size32x4, VectorSize::Size16x8)
2341                    }
2342                    (VecRRRLongModOp::Umlal32, false) => {
2343                        ("umlal", VectorSize::Size64x2, VectorSize::Size32x2)
2344                    }
2345                    (VecRRRLongModOp::Umlal32, true) => {
2346                        ("umlal2", VectorSize::Size64x2, VectorSize::Size32x4)
2347                    }
2348                };
2349                let rd = pretty_print_vreg_vector(rd.to_reg(), dest_size);
2350                let ri = pretty_print_vreg_vector(ri, dest_size);
2351                let rn = pretty_print_vreg_vector(rn, src_size);
2352                let rm = pretty_print_vreg_vector(rm, src_size);
2353                format!("{op} {rd}, {ri}, {rn}, {rm}")
2354            }
2355            &Inst::VecMisc { op, rd, rn, size } => {
2356                let (op, size, suffix) = match op {
2357                    VecMisc2::Not => (
2358                        "mvn",
2359                        if size.is_128bits() {
2360                            VectorSize::Size8x16
2361                        } else {
2362                            VectorSize::Size8x8
2363                        },
2364                        "",
2365                    ),
2366                    VecMisc2::Neg => ("neg", size, ""),
2367                    VecMisc2::Abs => ("abs", size, ""),
2368                    VecMisc2::Fabs => ("fabs", size, ""),
2369                    VecMisc2::Fneg => ("fneg", size, ""),
2370                    VecMisc2::Fsqrt => ("fsqrt", size, ""),
2371                    VecMisc2::Rev16 => ("rev16", size, ""),
2372                    VecMisc2::Rev32 => ("rev32", size, ""),
2373                    VecMisc2::Rev64 => ("rev64", size, ""),
2374                    VecMisc2::Fcvtzs => ("fcvtzs", size, ""),
2375                    VecMisc2::Fcvtzu => ("fcvtzu", size, ""),
2376                    VecMisc2::Scvtf => ("scvtf", size, ""),
2377                    VecMisc2::Ucvtf => ("ucvtf", size, ""),
2378                    VecMisc2::Frintn => ("frintn", size, ""),
2379                    VecMisc2::Frintz => ("frintz", size, ""),
2380                    VecMisc2::Frintm => ("frintm", size, ""),
2381                    VecMisc2::Frintp => ("frintp", size, ""),
2382                    VecMisc2::Cnt => ("cnt", size, ""),
2383                    VecMisc2::Cmeq0 => ("cmeq", size, ", #0"),
2384                    VecMisc2::Cmge0 => ("cmge", size, ", #0"),
2385                    VecMisc2::Cmgt0 => ("cmgt", size, ", #0"),
2386                    VecMisc2::Cmle0 => ("cmle", size, ", #0"),
2387                    VecMisc2::Cmlt0 => ("cmlt", size, ", #0"),
2388                    VecMisc2::Fcmeq0 => ("fcmeq", size, ", #0.0"),
2389                    VecMisc2::Fcmge0 => ("fcmge", size, ", #0.0"),
2390                    VecMisc2::Fcmgt0 => ("fcmgt", size, ", #0.0"),
2391                    VecMisc2::Fcmle0 => ("fcmle", size, ", #0.0"),
2392                    VecMisc2::Fcmlt0 => ("fcmlt", size, ", #0.0"),
2393                };
2394                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2395                let rn = pretty_print_vreg_vector(rn, size);
2396                format!("{op} {rd}, {rn}{suffix}")
2397            }
2398            &Inst::VecLanes { op, rd, rn, size } => {
2399                let op = match op {
2400                    VecLanesOp::Uminv => "uminv",
2401                    VecLanesOp::Addv => "addv",
2402                };
2403                let rd = pretty_print_vreg_scalar(rd.to_reg(), size.lane_size());
2404                let rn = pretty_print_vreg_vector(rn, size);
2405                format!("{op} {rd}, {rn}")
2406            }
2407            &Inst::VecShiftImm {
2408                op,
2409                rd,
2410                rn,
2411                size,
2412                imm,
2413            } => {
2414                let op = match op {
2415                    VecShiftImmOp::Shl => "shl",
2416                    VecShiftImmOp::Ushr => "ushr",
2417                    VecShiftImmOp::Sshr => "sshr",
2418                };
2419                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2420                let rn = pretty_print_vreg_vector(rn, size);
2421                format!("{op} {rd}, {rn}, #{imm}")
2422            }
2423            &Inst::VecShiftImmMod {
2424                op,
2425                rd,
2426                ri,
2427                rn,
2428                size,
2429                imm,
2430            } => {
2431                let op = match op {
2432                    VecShiftImmModOp::Sli => "sli",
2433                };
2434                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2435                let ri = pretty_print_vreg_vector(ri, size);
2436                let rn = pretty_print_vreg_vector(rn, size);
2437                format!("{op} {rd}, {ri}, {rn}, #{imm}")
2438            }
2439            &Inst::VecExtract { rd, rn, rm, imm4 } => {
2440                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2441                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2442                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2443                format!("ext {rd}, {rn}, {rm}, #{imm4}")
2444            }
2445            &Inst::VecTbl { rd, rn, rm } => {
2446                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2447                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2448                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2449                format!("tbl {rd}, {{ {rn} }}, {rm}")
2450            }
2451            &Inst::VecTblExt { rd, ri, rn, rm } => {
2452                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2453                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2454                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2455                let ri = pretty_print_vreg_vector(ri, VectorSize::Size8x16);
2456                format!("tbx {rd}, {ri}, {{ {rn} }}, {rm}")
2457            }
2458            &Inst::VecTbl2 { rd, rn, rn2, rm } => {
2459                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2460                let rn2 = pretty_print_vreg_vector(rn2, VectorSize::Size8x16);
2461                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2462                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2463                format!("tbl {rd}, {{ {rn}, {rn2} }}, {rm}")
2464            }
2465            &Inst::VecTbl2Ext {
2466                rd,
2467                ri,
2468                rn,
2469                rn2,
2470                rm,
2471            } => {
2472                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2473                let rn2 = pretty_print_vreg_vector(rn2, VectorSize::Size8x16);
2474                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2475                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2476                let ri = pretty_print_vreg_vector(ri, VectorSize::Size8x16);
2477                format!("tbx {rd}, {ri}, {{ {rn}, {rn2} }}, {rm}")
2478            }
2479            &Inst::VecLoadReplicate { rd, rn, size, .. } => {
2480                let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2481                let rn = pretty_print_reg(rn);
2482
2483                format!("ld1r {{ {rd} }}, [{rn}]")
2484            }
2485            &Inst::VecCSel { rd, rn, rm, cond } => {
2486                let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2487                let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2488                let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2489                let cond = cond.pretty_print(0);
2490                format!("vcsel {rd}, {rn}, {rm}, {cond} (if-then-else diamond)")
2491            }
2492            &Inst::MovToNZCV { rn } => {
2493                let rn = pretty_print_reg(rn);
2494                format!("msr nzcv, {rn}")
2495            }
2496            &Inst::MovFromNZCV { rd } => {
2497                let rd = pretty_print_reg(rd.to_reg());
2498                format!("mrs {rd}, nzcv")
2499            }
2500            &Inst::Extend {
2501                rd,
2502                rn,
2503                signed: false,
2504                from_bits: 1,
2505                ..
2506            } => {
2507                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size32);
2508                let rn = pretty_print_ireg(rn, OperandSize::Size32);
2509                format!("and {rd}, {rn}, #1")
2510            }
2511            &Inst::Extend {
2512                rd,
2513                rn,
2514                signed: false,
2515                from_bits: 32,
2516                to_bits: 64,
2517            } => {
2518                // The case of a zero extension from 32 to 64 bits, is implemented
2519                // with a "mov" to a 32-bit (W-reg) dest, because this zeroes
2520                // the top 32 bits.
2521                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size32);
2522                let rn = pretty_print_ireg(rn, OperandSize::Size32);
2523                format!("mov {rd}, {rn}")
2524            }
2525            &Inst::Extend {
2526                rd,
2527                rn,
2528                signed,
2529                from_bits,
2530                to_bits,
2531            } => {
2532                assert!(from_bits <= to_bits);
2533                let op = match (signed, from_bits) {
2534                    (false, 8) => "uxtb",
2535                    (true, 8) => "sxtb",
2536                    (false, 16) => "uxth",
2537                    (true, 16) => "sxth",
2538                    (true, 32) => "sxtw",
2539                    (true, _) => "sbfx",
2540                    (false, _) => "ubfx",
2541                };
2542                if op == "sbfx" || op == "ubfx" {
2543                    let dest_size = OperandSize::from_bits(to_bits);
2544                    let rd = pretty_print_ireg(rd.to_reg(), dest_size);
2545                    let rn = pretty_print_ireg(rn, dest_size);
2546                    format!("{op} {rd}, {rn}, #0, #{from_bits}")
2547                } else {
2548                    let dest_size = if signed {
2549                        OperandSize::from_bits(to_bits)
2550                    } else {
2551                        OperandSize::Size32
2552                    };
2553                    let rd = pretty_print_ireg(rd.to_reg(), dest_size);
2554                    let rn = pretty_print_ireg(rn, OperandSize::from_bits(from_bits));
2555                    format!("{op} {rd}, {rn}")
2556                }
2557            }
2558            &Inst::Call { ref info } => {
2559                let try_call = info
2560                    .try_call_info
2561                    .as_ref()
2562                    .map(|tci| pretty_print_try_call(tci))
2563                    .unwrap_or_default();
2564                format!("bl 0{try_call}")
2565            }
2566            &Inst::CallInd { ref info } => {
2567                let rn = pretty_print_reg(info.dest);
2568                let try_call = info
2569                    .try_call_info
2570                    .as_ref()
2571                    .map(|tci| pretty_print_try_call(tci))
2572                    .unwrap_or_default();
2573                format!("blr {rn}{try_call}")
2574            }
2575            &Inst::ReturnCall { ref info } => {
2576                let mut s = format!(
2577                    "return_call {:?} new_stack_arg_size:{}",
2578                    info.dest, info.new_stack_arg_size
2579                );
2580                for ret in &info.uses {
2581                    let preg = pretty_print_reg(ret.preg);
2582                    let vreg = pretty_print_reg(ret.vreg);
2583                    write!(&mut s, " {vreg}={preg}").unwrap();
2584                }
2585                s
2586            }
2587            &Inst::ReturnCallInd { ref info } => {
2588                let callee = pretty_print_reg(info.dest);
2589                let mut s = format!(
2590                    "return_call_ind {callee} new_stack_arg_size:{}",
2591                    info.new_stack_arg_size
2592                );
2593                for ret in &info.uses {
2594                    let preg = pretty_print_reg(ret.preg);
2595                    let vreg = pretty_print_reg(ret.vreg);
2596                    write!(&mut s, " {vreg}={preg}").unwrap();
2597                }
2598                s
2599            }
2600            &Inst::Args { ref args } => {
2601                let mut s = "args".to_string();
2602                for arg in args {
2603                    let preg = pretty_print_reg(arg.preg);
2604                    let def = pretty_print_reg(arg.vreg.to_reg());
2605                    write!(&mut s, " {def}={preg}").unwrap();
2606                }
2607                s
2608            }
2609            &Inst::Rets { ref rets } => {
2610                let mut s = "rets".to_string();
2611                for ret in rets {
2612                    let preg = pretty_print_reg(ret.preg);
2613                    let vreg = pretty_print_reg(ret.vreg);
2614                    write!(&mut s, " {vreg}={preg}").unwrap();
2615                }
2616                s
2617            }
2618            &Inst::Ret {} => "ret".to_string(),
2619            &Inst::AuthenticatedRet { key, is_hint } => {
2620                let key = match key {
2621                    APIKey::AZ => "az",
2622                    APIKey::BZ => "bz",
2623                    APIKey::ASP => "asp",
2624                    APIKey::BSP => "bsp",
2625                };
2626                match is_hint {
2627                    false => format!("reta{key}"),
2628                    true => format!("auti{key} ; ret"),
2629                }
2630            }
2631            &Inst::Jump { ref dest } => {
2632                let dest = dest.pretty_print(0);
2633                format!("b {dest}")
2634            }
2635            &Inst::CondBr {
2636                ref taken,
2637                ref not_taken,
2638                ref kind,
2639            } => {
2640                let taken = taken.pretty_print(0);
2641                let not_taken = not_taken.pretty_print(0);
2642                match kind {
2643                    &CondBrKind::Zero(reg, size) => {
2644                        let reg = pretty_print_reg_sized(reg, size);
2645                        format!("cbz {reg}, {taken} ; b {not_taken}")
2646                    }
2647                    &CondBrKind::NotZero(reg, size) => {
2648                        let reg = pretty_print_reg_sized(reg, size);
2649                        format!("cbnz {reg}, {taken} ; b {not_taken}")
2650                    }
2651                    &CondBrKind::Cond(c) => {
2652                        let c = c.pretty_print(0);
2653                        format!("b.{c} {taken} ; b {not_taken}")
2654                    }
2655                }
2656            }
2657            &Inst::TestBitAndBranch {
2658                kind,
2659                ref taken,
2660                ref not_taken,
2661                rn,
2662                bit,
2663            } => {
2664                let cond = match kind {
2665                    TestBitAndBranchKind::Z => "z",
2666                    TestBitAndBranchKind::NZ => "nz",
2667                };
2668                let taken = taken.pretty_print(0);
2669                let not_taken = not_taken.pretty_print(0);
2670                let rn = pretty_print_reg(rn);
2671                format!("tb{cond} {rn}, #{bit}, {taken} ; b {not_taken}")
2672            }
2673            &Inst::IndirectBr { rn, .. } => {
2674                let rn = pretty_print_reg(rn);
2675                format!("br {rn}")
2676            }
2677            &Inst::Brk => "brk #0xf000".to_string(),
2678            &Inst::Udf { .. } => "udf #0xc11f".to_string(),
2679            &Inst::TrapIf {
2680                ref kind,
2681                trap_code,
2682            } => match kind {
2683                &CondBrKind::Zero(reg, size) => {
2684                    let reg = pretty_print_reg_sized(reg, size);
2685                    format!("cbz {reg}, #trap={trap_code}")
2686                }
2687                &CondBrKind::NotZero(reg, size) => {
2688                    let reg = pretty_print_reg_sized(reg, size);
2689                    format!("cbnz {reg}, #trap={trap_code}")
2690                }
2691                &CondBrKind::Cond(c) => {
2692                    let c = c.pretty_print(0);
2693                    format!("b.{c} #trap={trap_code}")
2694                }
2695            },
2696            &Inst::Adr { rd, off } => {
2697                let rd = pretty_print_reg(rd.to_reg());
2698                format!("adr {rd}, pc+{off}")
2699            }
2700            &Inst::Adrp { rd, off } => {
2701                let rd = pretty_print_reg(rd.to_reg());
2702                // This instruction addresses 4KiB pages, so multiply it by the page size.
2703                let byte_offset = off * 4096;
2704                format!("adrp {rd}, pc+{byte_offset}")
2705            }
2706            &Inst::Word4 { data } => format!("data.i32 {data}"),
2707            &Inst::Word8 { data } => format!("data.i64 {data}"),
2708            &Inst::JTSequence {
2709                default,
2710                ref targets,
2711                ridx,
2712                rtmp1,
2713                rtmp2,
2714                ..
2715            } => {
2716                let ridx = pretty_print_reg(ridx);
2717                let rtmp1 = pretty_print_reg(rtmp1.to_reg());
2718                let rtmp2 = pretty_print_reg(rtmp2.to_reg());
2719                let default_target = BranchTarget::Label(default).pretty_print(0);
2720                format!(
2721                    concat!(
2722                        "b.hs {} ; ",
2723                        "csel {}, xzr, {}, hs ; ",
2724                        "csdb ; ",
2725                        "adr {}, pc+16 ; ",
2726                        "ldrsw {}, [{}, {}, uxtw #2] ; ",
2727                        "add {}, {}, {} ; ",
2728                        "br {} ; ",
2729                        "jt_entries {:?}"
2730                    ),
2731                    default_target,
2732                    rtmp2,
2733                    ridx,
2734                    rtmp1,
2735                    rtmp2,
2736                    rtmp1,
2737                    rtmp2,
2738                    rtmp1,
2739                    rtmp1,
2740                    rtmp2,
2741                    rtmp1,
2742                    targets
2743                )
2744            }
2745            &Inst::LoadExtName {
2746                rd,
2747                ref name,
2748                offset,
2749            } => {
2750                let rd = pretty_print_reg(rd.to_reg());
2751                format!("load_ext_name {rd}, {name:?}+{offset}")
2752            }
2753            &Inst::LoadAddr { rd, ref mem } => {
2754                // TODO: we really should find a better way to avoid duplication of
2755                // this logic between `emit()` and `show_rru()` -- a separate 1-to-N
2756                // expansion stage (i.e., legalization, but without the slow edit-in-place
2757                // of the existing legalization framework).
2758                let mem = mem.clone();
2759                let (mem_insts, mem) = mem_finalize(None, &mem, I8, state);
2760                let mut ret = String::new();
2761                for inst in mem_insts.into_iter() {
2762                    ret.push_str(&inst.print_with_state(&mut EmitState::default()));
2763                }
2764                let (reg, index_reg, offset) = match mem {
2765                    AMode::RegExtended { rn, rm, extendop } => (rn, Some((rm, extendop)), 0),
2766                    AMode::Unscaled { rn, simm9 } => (rn, None, simm9.value()),
2767                    AMode::UnsignedOffset { rn, uimm12 } => (rn, None, uimm12.value() as i32),
2768                    _ => panic!("Unsupported case for LoadAddr: {mem:?}"),
2769                };
2770                let abs_offset = if offset < 0 {
2771                    -offset as u64
2772                } else {
2773                    offset as u64
2774                };
2775                let alu_op = if offset < 0 { ALUOp::Sub } else { ALUOp::Add };
2776
2777                if let Some((idx, extendop)) = index_reg {
2778                    let add = Inst::AluRRRExtend {
2779                        alu_op: ALUOp::Add,
2780                        size: OperandSize::Size64,
2781                        rd,
2782                        rn: reg,
2783                        rm: idx,
2784                        extendop,
2785                    };
2786
2787                    ret.push_str(&add.print_with_state(&mut EmitState::default()));
2788                } else if offset == 0 {
2789                    let mov = Inst::gen_move(rd, reg, I64);
2790                    ret.push_str(&mov.print_with_state(&mut EmitState::default()));
2791                } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) {
2792                    let add = Inst::AluRRImm12 {
2793                        alu_op,
2794                        size: OperandSize::Size64,
2795                        rd,
2796                        rn: reg,
2797                        imm12,
2798                    };
2799                    ret.push_str(&add.print_with_state(&mut EmitState::default()));
2800                } else {
2801                    let tmp = writable_spilltmp_reg();
2802                    for inst in Inst::load_constant(tmp, abs_offset).into_iter() {
2803                        ret.push_str(&inst.print_with_state(&mut EmitState::default()));
2804                    }
2805                    let add = Inst::AluRRR {
2806                        alu_op,
2807                        size: OperandSize::Size64,
2808                        rd,
2809                        rn: reg,
2810                        rm: tmp.to_reg(),
2811                    };
2812                    ret.push_str(&add.print_with_state(&mut EmitState::default()));
2813                }
2814                ret
2815            }
2816            &Inst::Paci { key } => {
2817                let key = match key {
2818                    APIKey::AZ => "az",
2819                    APIKey::BZ => "bz",
2820                    APIKey::ASP => "asp",
2821                    APIKey::BSP => "bsp",
2822                };
2823
2824                "paci".to_string() + key
2825            }
2826            &Inst::Xpaclri => "xpaclri".to_string(),
2827            &Inst::Bti { targets } => {
2828                let targets = match targets {
2829                    BranchTargetType::None => "",
2830                    BranchTargetType::C => " c",
2831                    BranchTargetType::J => " j",
2832                    BranchTargetType::JC => " jc",
2833                };
2834
2835                "bti".to_string() + targets
2836            }
2837            &Inst::EmitIsland { needed_space } => format!("emit_island {needed_space}"),
2838
2839            &Inst::ElfTlsGetAddr {
2840                ref symbol,
2841                rd,
2842                tmp,
2843            } => {
2844                let rd = pretty_print_reg(rd.to_reg());
2845                let tmp = pretty_print_reg(tmp.to_reg());
2846                format!("elf_tls_get_addr {}, {}, {}", rd, tmp, symbol.display(None))
2847            }
2848            &Inst::MachOTlsGetAddr { ref symbol, rd } => {
2849                let rd = pretty_print_reg(rd.to_reg());
2850                format!("macho_tls_get_addr {}, {}", rd, symbol.display(None))
2851            }
2852            &Inst::Unwind { ref inst } => {
2853                format!("unwind {inst:?}")
2854            }
2855            &Inst::DummyUse { reg } => {
2856                let reg = pretty_print_reg(reg);
2857                format!("dummy_use {reg}")
2858            }
2859            &Inst::StackProbeLoop { start, end, step } => {
2860                let start = pretty_print_reg(start.to_reg());
2861                let end = pretty_print_reg(end);
2862                let step = step.pretty_print(0);
2863                format!("stack_probe_loop {start}, {end}, {step}")
2864            }
2865        }
2866    }
2867}
2868
2869//=============================================================================
2870// Label fixups and jump veneers.
2871
/// Different forms of label references for different instruction formats.
///
/// Each variant corresponds to one way an AArch64 instruction encodes a
/// PC-relative immediate; the mask/shift details live in the
/// `MachInstLabelUse` impl below.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LabelUse {
    /// 14-bit branch offset (conditional branches). PC-rel, offset is imm <<
    /// 2. Immediate is 14 signed bits, in bits 18:5. Used by tbz and tbnz.
    Branch14,
    /// 19-bit branch offset (conditional branches). PC-rel, offset is imm << 2. Immediate is 19
    /// signed bits, in bits 23:5. Used by cbz, cbnz, b.cond.
    Branch19,
    /// 26-bit branch offset (unconditional branches). PC-rel, offset is imm << 2. Immediate is 26
    /// signed bits, in bits 25:0. Used by b, bl.
    Branch26,
    #[allow(dead_code)]
    /// 19-bit offset for LDR (load literal). PC-rel, offset is imm << 2. Immediate is 19 signed bits,
    /// in bits 23:5.
    Ldr19,
    #[allow(dead_code)]
    /// 21-bit offset for ADR (get address of label). PC-rel, offset is not shifted. Immediate is
    /// 21 signed bits, with high 19 bits in bits 23:5 and low 2 bits in bits 30:29.
    Adr21,
    /// 32-bit PC relative constant offset (from address of constant itself),
    /// signed. Used in jump tables.
    PCRel32,
}
2896
2897impl MachInstLabelUse for LabelUse {
2898    /// Alignment for veneer code. Every AArch64 instruction must be 4-byte-aligned.
2899    const ALIGN: CodeOffset = 4;
2900
2901    /// Maximum PC-relative range (positive), inclusive.
2902    fn max_pos_range(self) -> CodeOffset {
2903        match self {
2904            // N-bit immediate, left-shifted by 2, for (N+2) bits of total
2905            // range. Signed, so +2^(N+1) from zero. Likewise for two other
2906            // shifted cases below.
2907            LabelUse::Branch14 => (1 << 15) - 1,
2908            LabelUse::Branch19 => (1 << 20) - 1,
2909            LabelUse::Branch26 => (1 << 27) - 1,
2910            LabelUse::Ldr19 => (1 << 20) - 1,
2911            // Adr does not shift its immediate, so the 21-bit immediate gives 21 bits of total
2912            // range.
2913            LabelUse::Adr21 => (1 << 20) - 1,
2914            LabelUse::PCRel32 => 0x7fffffff,
2915        }
2916    }
2917
2918    /// Maximum PC-relative range (negative).
2919    fn max_neg_range(self) -> CodeOffset {
2920        // All forms are twos-complement signed offsets, so negative limit is one more than
2921        // positive limit.
2922        self.max_pos_range() + 1
2923    }
2924
2925    /// Size of window into code needed to do the patch.
2926    fn patch_size(self) -> CodeOffset {
2927        // Patch is on one instruction only for all of these label reference types.
2928        4
2929    }
2930
2931    /// Perform the patch.
2932    fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
2933        let pc_rel = (label_offset as i64) - (use_offset as i64);
2934        debug_assert!(pc_rel <= self.max_pos_range() as i64);
2935        debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
2936        let pc_rel = pc_rel as u32;
2937        let insn_word = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
2938        let mask = match self {
2939            LabelUse::Branch14 => 0x0007ffe0, // bits 18..5 inclusive
2940            LabelUse::Branch19 => 0x00ffffe0, // bits 23..5 inclusive
2941            LabelUse::Branch26 => 0x03ffffff, // bits 25..0 inclusive
2942            LabelUse::Ldr19 => 0x00ffffe0,    // bits 23..5 inclusive
2943            LabelUse::Adr21 => 0x60ffffe0,    // bits 30..29, 25..5 inclusive
2944            LabelUse::PCRel32 => 0xffffffff,
2945        };
2946        let pc_rel_shifted = match self {
2947            LabelUse::Adr21 | LabelUse::PCRel32 => pc_rel,
2948            _ => {
2949                debug_assert!(pc_rel & 3 == 0);
2950                pc_rel >> 2
2951            }
2952        };
2953        let pc_rel_inserted = match self {
2954            LabelUse::Branch14 => (pc_rel_shifted & 0x3fff) << 5,
2955            LabelUse::Branch19 | LabelUse::Ldr19 => (pc_rel_shifted & 0x7ffff) << 5,
2956            LabelUse::Branch26 => pc_rel_shifted & 0x3ffffff,
2957            LabelUse::Adr21 => (pc_rel_shifted & 0x7ffff) << 5 | (pc_rel_shifted & 0x180000) << 10,
2958            LabelUse::PCRel32 => pc_rel_shifted,
2959        };
2960        let is_add = match self {
2961            LabelUse::PCRel32 => true,
2962            _ => false,
2963        };
2964        let insn_word = if is_add {
2965            insn_word.wrapping_add(pc_rel_inserted)
2966        } else {
2967            (insn_word & !mask) | pc_rel_inserted
2968        };
2969        buffer[0..4].clone_from_slice(&u32::to_le_bytes(insn_word));
2970    }
2971
2972    /// Is a veneer supported for this label reference type?
2973    fn supports_veneer(self) -> bool {
2974        match self {
2975            LabelUse::Branch14 | LabelUse::Branch19 => true, // veneer is a Branch26
2976            LabelUse::Branch26 => true,                      // veneer is a PCRel32
2977            _ => false,
2978        }
2979    }
2980
2981    /// How large is the veneer, if supported?
2982    fn veneer_size(self) -> CodeOffset {
2983        match self {
2984            LabelUse::Branch14 | LabelUse::Branch19 => 4,
2985            LabelUse::Branch26 => 20,
2986            _ => unreachable!(),
2987        }
2988    }
2989
2990    fn worst_case_veneer_size() -> CodeOffset {
2991        20
2992    }
2993
2994    /// Generate a veneer into the buffer, given that this veneer is at `veneer_offset`, and return
2995    /// an offset and label-use for the veneer's use of the original label.
2996    fn generate_veneer(
2997        self,
2998        buffer: &mut [u8],
2999        veneer_offset: CodeOffset,
3000    ) -> (CodeOffset, LabelUse) {
3001        match self {
3002            LabelUse::Branch14 | LabelUse::Branch19 => {
3003                // veneer is a Branch26 (unconditional branch). Just encode directly here -- don't
3004                // bother with constructing an Inst.
3005                let insn_word = 0b000101 << 26;
3006                buffer[0..4].clone_from_slice(&u32::to_le_bytes(insn_word));
3007                (veneer_offset, LabelUse::Branch26)
3008            }
3009
3010            // This is promoting a 26-bit call/jump to a 32-bit call/jump to
3011            // get a further range. This jump translates to a jump to a
3012            // relative location based on the address of the constant loaded
3013            // from here.
3014            //
3015            // If this path is taken from a call instruction then caller-saved
3016            // registers are available (minus arguments), so x16/x17 are
3017            // available. Otherwise for intra-function jumps we also reserve
3018            // x16/x17 as spill-style registers. In both cases these are
3019            // available for us to use.
3020            LabelUse::Branch26 => {
3021                let tmp1 = regs::spilltmp_reg();
3022                let tmp1_w = regs::writable_spilltmp_reg();
3023                let tmp2 = regs::tmp2_reg();
3024                let tmp2_w = regs::writable_tmp2_reg();
3025                // ldrsw x16, 16
3026                let ldr = emit::enc_ldst_imm19(0b1001_1000, 16 / 4, tmp1);
3027                // adr x17, 12
3028                let adr = emit::enc_adr(12, tmp2_w);
3029                // add x16, x16, x17
3030                let add = emit::enc_arith_rrr(0b10001011_000, 0, tmp1_w, tmp1, tmp2);
3031                // br x16
3032                let br = emit::enc_br(tmp1);
3033                buffer[0..4].clone_from_slice(&u32::to_le_bytes(ldr));
3034                buffer[4..8].clone_from_slice(&u32::to_le_bytes(adr));
3035                buffer[8..12].clone_from_slice(&u32::to_le_bytes(add));
3036                buffer[12..16].clone_from_slice(&u32::to_le_bytes(br));
3037                // the 4-byte signed immediate we'll load is after these
3038                // instructions, 16-bytes in.
3039                (veneer_offset + 16, LabelUse::PCRel32)
3040            }
3041
3042            _ => panic!("Unsupported label-reference type for veneer generation!"),
3043        }
3044    }
3045
3046    fn from_reloc(reloc: Reloc, addend: Addend) -> Option<LabelUse> {
3047        match (reloc, addend) {
3048            (Reloc::Arm64Call, 0) => Some(LabelUse::Branch26),
3049            _ => None,
3050        }
3051    }
3052}
3053
#[cfg(test)]
mod tests {
    use super::*;

    /// Guard against unintentionally growing the `Inst` enum: a larger
    /// `Inst` inflates every instruction buffer during compilation, so any
    /// size change should be a deliberate decision.
    #[test]
    fn inst_size_test() {
        let is_small_ptr = cfg!(target_pointer_width = "32");
        let is_arm32 = cfg!(target_arch = "arm");
        let expected = match (is_small_ptr, is_arm32) {
            (true, false) => 28,
            _ => 32,
        };
        assert_eq!(expected, core::mem::size_of::<Inst>());
    }
}