// cranelift_codegen/isa/aarch64/inst/emit.rs

//! AArch64 ISA: binary code emission.

use cranelift_control::ControlPlane;

use crate::ir::{self, types::*};
use crate::isa::aarch64::inst::*;
use crate::trace;

/// Memory addressing mode finalization: convert "special" modes (e.g.,
/// generic arbitrary stack offset) into real addressing modes, possibly by
/// emitting some helper instructions that come immediately before the use
/// of this amode.
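///
/// For example, an offset that fits in a signed 9-bit immediate becomes a
/// single `Unscaled` amode with no helper instructions, while an offset that
/// fits neither `SImm9` nor a scaled `UImm12` is materialized into the spill
/// temporary register and addressed via `RegExtended` (see the body below).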
pub fn mem_finalize(
    sink: Option<&mut MachBuffer<Inst>>,
    mem: &AMode,
    access_ty: Type,
    state: &EmitState,
) -> (SmallVec<[Inst; 4]>, AMode) {
    match mem {
        &AMode::RegOffset { off, .. }
        | &AMode::SPOffset { off }
        | &AMode::FPOffset { off }
        | &AMode::IncomingArg { off }
        | &AMode::SlotOffset { off } => {
            let basereg = match mem {
                &AMode::RegOffset { rn, .. } => rn,
                &AMode::SPOffset { .. }
                | &AMode::SlotOffset { .. }
                | &AMode::IncomingArg { .. } => stack_reg(),
                &AMode::FPOffset { .. } => fp_reg(),
                _ => unreachable!(),
            };
            let off = match mem {
                &AMode::IncomingArg { .. } => {
                    let frame_layout = state.frame_layout();
                    i64::from(
                        frame_layout.setup_area_size
                            + frame_layout.tail_args_size
                            + frame_layout.clobber_size
                            + frame_layout.fixed_frame_storage_size
                            + frame_layout.outgoing_args_size,
                    ) - off
                }
                &AMode::SlotOffset { .. } => {
                    let adj = i64::from(state.frame_layout().outgoing_args_size);
                    trace!(
                        "mem_finalize: slot offset {} + adj {} -> {}",
                        off,
                        adj,
                        off + adj
                    );
                    off + adj
                }
                _ => off,
            };

            if let Some(simm9) = SImm9::maybe_from_i64(off) {
                let mem = AMode::Unscaled { rn: basereg, simm9 };
                (smallvec![], mem)
            } else if let Some(uimm12) = UImm12Scaled::maybe_from_i64(off, access_ty) {
                let mem = AMode::UnsignedOffset {
                    rn: basereg,
                    uimm12,
                };
                (smallvec![], mem)
            } else {
                let tmp = writable_spilltmp_reg();
                (
                    Inst::load_constant(tmp, off as u64, &mut |_| tmp),
                    AMode::RegExtended {
                        rn: basereg,
                        rm: tmp.to_reg(),
                        extendop: ExtendOp::SXTX,
                    },
                )
            }
        }

        AMode::Const { addr } => {
            let sink = match sink {
                Some(sink) => sink,
                None => return (smallvec![], mem.clone()),
            };
            let label = sink.get_label_for_constant(*addr);
            let label = MemLabel::Mach(label);
            (smallvec![], AMode::Label { label })
        }

        _ => (smallvec![], mem.clone()),
    }
}

//=============================================================================
// Instructions and subcomponents: emission

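// Each `enc_*` helper below assembles a 32-bit instruction word by shifting
// named fields into their architectural bit positions and OR-ing them
// together. As a sketch: `enc_arith_rrr(0b10001011_000, 0b000000, rd, rn, rm)`
// produces `ADD Xd, Xn, Xm`, with Rd in bits 0..=4, Rn in bits 5..=9, and Rm
// in bits 16..=20.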
pub(crate) fn machreg_to_gpr(m: Reg) -> u32 {
    assert_eq!(m.class(), RegClass::Int);
    u32::from(m.to_real_reg().unwrap().hw_enc() & 31)
}

pub(crate) fn machreg_to_vec(m: Reg) -> u32 {
    assert_eq!(m.class(), RegClass::Float);
    u32::from(m.to_real_reg().unwrap().hw_enc())
}

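// Like the helpers above, but class-agnostic: used by the load/store
// encodings below, where the Rt field may name either a GPR or a vector
// register depending on the opcode.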
fn machreg_to_gpr_or_vec(m: Reg) -> u32 {
    u32::from(m.to_real_reg().unwrap().hw_enc() & 31)
}

pub(crate) fn enc_arith_rrr(
    bits_31_21: u32,
    bits_15_10: u32,
    rd: Writable<Reg>,
    rn: Reg,
    rm: Reg,
) -> u32 {
    (bits_31_21 << 21)
        | (bits_15_10 << 10)
        | machreg_to_gpr(rd.to_reg())
        | (machreg_to_gpr(rn) << 5)
        | (machreg_to_gpr(rm) << 16)
}

fn enc_arith_rr_imm12(
    bits_31_24: u32,
    immshift: u32,
    imm12: u32,
    rn: Reg,
    rd: Writable<Reg>,
) -> u32 {
    (bits_31_24 << 24)
        | (immshift << 22)
        | (imm12 << 10)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr(rd.to_reg())
}

fn enc_arith_rr_imml(bits_31_23: u32, imm_bits: u32, rn: Reg, rd: Writable<Reg>) -> u32 {
    (bits_31_23 << 23) | (imm_bits << 10) | (machreg_to_gpr(rn) << 5) | machreg_to_gpr(rd.to_reg())
}

fn enc_arith_rrrr(top11: u32, rm: Reg, bit15: u32, ra: Reg, rn: Reg, rd: Writable<Reg>) -> u32 {
    (top11 << 21)
        | (machreg_to_gpr(rm) << 16)
        | (bit15 << 15)
        | (machreg_to_gpr(ra) << 10)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr(rd.to_reg())
}

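// B/BL carry a 26-bit offset measured in 4-byte instruction units, giving a
// branch range of +/-128 MiB.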
fn enc_jump26(op_31_26: u32, off_26_0: u32) -> u32 {
    assert!(off_26_0 < (1 << 26));
    (op_31_26 << 26) | off_26_0
}

fn enc_cmpbr(op_31_24: u32, off_18_0: u32, reg: Reg) -> u32 {
    assert!(off_18_0 < (1 << 19));
    (op_31_24 << 24) | (off_18_0 << 5) | machreg_to_gpr(reg)
}

fn enc_cbr(op_31_24: u32, off_18_0: u32, op_4: u32, cond: u32) -> u32 {
    assert!(off_18_0 < (1 << 19));
    assert!(cond < (1 << 4));
    (op_31_24 << 24) | (off_18_0 << 5) | (op_4 << 4) | cond
}

/// Set the size bit of an instruction.
fn enc_op_size(op: u32, size: OperandSize) -> u32 {
    (op & !(1 << 31)) | (size.sf_bit() << 31)
}

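// Conditional branches come in two shapes: CBZ/CBNZ compare a register
// against zero and branch, while B.cond branches on the condition flags.
// Both use a 19-bit offset in 4-byte units.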
fn enc_conditional_br(taken: BranchTarget, kind: CondBrKind) -> u32 {
    match kind {
        CondBrKind::Zero(reg, size) => enc_op_size(
            enc_cmpbr(0b0_011010_0, taken.as_offset19_or_zero(), reg),
            size,
        ),
        CondBrKind::NotZero(reg, size) => enc_op_size(
            enc_cmpbr(0b0_011010_1, taken.as_offset19_or_zero(), reg),
            size,
        ),
        CondBrKind::Cond(c) => enc_cbr(0b01010100, taken.as_offset19_or_zero(), 0b0, c.bits()),
    }
}

fn enc_test_bit_and_branch(
    kind: TestBitAndBranchKind,
    taken: BranchTarget,
    reg: Reg,
    bit: u8,
) -> u32 {
    assert!(bit < 64);
    let op_31 = u32::from(bit >> 5);
    let op_23_19 = u32::from(bit & 0b11111);
    let op_30_24 = 0b0110110
        | match kind {
            TestBitAndBranchKind::Z => 0,
            TestBitAndBranchKind::NZ => 1,
        };
    (op_31 << 31)
        | (op_30_24 << 24)
        | (op_23_19 << 19)
        | (taken.as_offset14_or_zero() << 5)
        | machreg_to_gpr(reg)
}

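// MOVZ/MOVN place a 16-bit immediate chunk (at shift 0, 16, 32, or 48) and
// zero/invert the rest of the register; MOVK (`enc_movk` below) inserts a
// chunk while keeping the other bits. A full 64-bit constant therefore costs
// at most one MOVZ/MOVN plus three MOVKs.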
fn enc_move_wide(op: MoveWideOp, rd: Writable<Reg>, imm: MoveWideConst, size: OperandSize) -> u32 {
    assert!(imm.shift <= 0b11);
    let op = match op {
        MoveWideOp::MovN => 0b00,
        MoveWideOp::MovZ => 0b10,
    };
    0x12800000
        | size.sf_bit() << 31
        | op << 29
        | u32::from(imm.shift) << 21
        | u32::from(imm.bits) << 5
        | machreg_to_gpr(rd.to_reg())
}

fn enc_movk(rd: Writable<Reg>, imm: MoveWideConst, size: OperandSize) -> u32 {
    assert!(imm.shift <= 0b11);
    0x72800000
        | size.sf_bit() << 31
        | u32::from(imm.shift) << 21
        | u32::from(imm.bits) << 5
        | machreg_to_gpr(rd.to_reg())
}

fn enc_ldst_pair(op_31_22: u32, simm7: SImm7Scaled, rn: Reg, rt: Reg, rt2: Reg) -> u32 {
    (op_31_22 << 22)
        | (simm7.bits() << 15)
        | (machreg_to_gpr(rt2) << 10)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr(rt)
}

fn enc_ldst_simm9(op_31_22: u32, simm9: SImm9, op_11_10: u32, rn: Reg, rd: Reg) -> u32 {
    (op_31_22 << 22)
        | (simm9.bits() << 12)
        | (op_11_10 << 10)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr_or_vec(rd)
}

fn enc_ldst_uimm12(op_31_22: u32, uimm12: UImm12Scaled, rn: Reg, rd: Reg) -> u32 {
    (op_31_22 << 22)
        | (0b1 << 24)
        | (uimm12.bits() << 10)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr_or_vec(rd)
}

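// Register-offset form of loads/stores: bit 21 is set, the extend/shift mode
// lives in bits 13..=15, and the S bit (bit 12) selects whether rm is scaled
// by the access size.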
fn enc_ldst_reg(
    op_31_22: u32,
    rn: Reg,
    rm: Reg,
    s_bit: bool,
    extendop: Option<ExtendOp>,
    rd: Reg,
) -> u32 {
    let s_bit = if s_bit { 1 } else { 0 };
    let extend_bits = match extendop {
        Some(ExtendOp::UXTW) => 0b010,
        Some(ExtendOp::SXTW) => 0b110,
        Some(ExtendOp::SXTX) => 0b111,
        None => 0b011, // LSL
        _ => panic!("bad extend mode for ld/st AMode"),
    };
    (op_31_22 << 22)
        | (1 << 21)
        | (machreg_to_gpr(rm) << 16)
        | (extend_bits << 13)
        | (s_bit << 12)
        | (0b10 << 10)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr_or_vec(rd)
}

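// PC-relative literal form (e.g. `LDR Xt, <label>`): imm19 is the offset
// from the current instruction, in 4-byte units.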
pub(crate) fn enc_ldst_imm19(op_31_24: u32, imm19: u32, rd: Reg) -> u32 {
    (op_31_24 << 24) | (imm19 << 5) | machreg_to_gpr_or_vec(rd)
}

fn enc_ldst_vec(q: u32, size: u32, rn: Reg, rt: Writable<Reg>) -> u32 {
    debug_assert_eq!(q & 0b1, q);
    debug_assert_eq!(size & 0b11, size);
    0b0_0_0011010_10_00000_110_0_00_00000_00000
        | q << 30
        | size << 10
        | machreg_to_gpr(rn) << 5
        | machreg_to_vec(rt.to_reg())
}

fn enc_ldst_vec_pair(
    opc: u32,
    amode: u32,
    is_load: bool,
    simm7: SImm7Scaled,
    rn: Reg,
    rt: Reg,
    rt2: Reg,
) -> u32 {
    debug_assert_eq!(opc & 0b11, opc);
    debug_assert_eq!(amode & 0b11, amode);

    0b00_10110_00_0_0000000_00000_00000_00000
        | opc << 30
        | amode << 23
        | (is_load as u32) << 22
        | simm7.bits() << 15
        | machreg_to_vec(rt2) << 10
        | machreg_to_gpr(rn) << 5
        | machreg_to_vec(rt)
}

fn enc_vec_rrr(top11: u32, rm: Reg, bit15_10: u32, rn: Reg, rd: Writable<Reg>) -> u32 {
    (top11 << 21)
        | (machreg_to_vec(rm) << 16)
        | (bit15_10 << 10)
        | (machreg_to_vec(rn) << 5)
        | machreg_to_vec(rd.to_reg())
}

fn enc_vec_rrr_long(
    q: u32,
    u: u32,
    size: u32,
    bit14: u32,
    rm: Reg,
    rn: Reg,
    rd: Writable<Reg>,
) -> u32 {
    debug_assert_eq!(q & 0b1, q);
    debug_assert_eq!(u & 0b1, u);
    debug_assert_eq!(size & 0b11, size);
    debug_assert_eq!(bit14 & 0b1, bit14);

    0b0_0_0_01110_00_1_00000_100000_00000_00000
        | q << 30
        | u << 29
        | size << 22
        | bit14 << 14
        | (machreg_to_vec(rm) << 16)
        | (machreg_to_vec(rn) << 5)
        | machreg_to_vec(rd.to_reg())
}

fn enc_bit_rr(size: u32, opcode2: u32, opcode1: u32, rn: Reg, rd: Writable<Reg>) -> u32 {
    (0b01011010110 << 21)
        | size << 31
        | opcode2 << 16
        | opcode1 << 10
        | machreg_to_gpr(rn) << 5
        | machreg_to_gpr(rd.to_reg())
}

pub(crate) fn enc_br(rn: Reg) -> u32 {
    0b1101011_0000_11111_000000_00000_00000 | (machreg_to_gpr(rn) << 5)
}

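// ADR/ADRP split their 21-bit offset into immlo (bits 29..=30) and immhi
// (bits 5..=23). ADR addresses bytes relative to the PC; ADRP addresses
// 4 KiB pages.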
pub(crate) fn enc_adr_inst(opcode: u32, off: i32, rd: Writable<Reg>) -> u32 {
    let off = u32::try_from(off).unwrap();
    let immlo = off & 3;
    let immhi = (off >> 2) & ((1 << 19) - 1);
    opcode | (immlo << 29) | (immhi << 5) | machreg_to_gpr(rd.to_reg())
}

pub(crate) fn enc_adr(off: i32, rd: Writable<Reg>) -> u32 {
    let opcode = 0b00010000 << 24;
    enc_adr_inst(opcode, off, rd)
}

pub(crate) fn enc_adrp(off: i32, rd: Writable<Reg>) -> u32 {
    let opcode = 0b10010000 << 24;
    enc_adr_inst(opcode, off, rd)
}

fn enc_csel(rd: Writable<Reg>, rn: Reg, rm: Reg, cond: Cond, op: u32, o2: u32) -> u32 {
    debug_assert_eq!(op & 0b1, op);
    debug_assert_eq!(o2 & 0b1, o2);
    0b100_11010100_00000_0000_00_00000_00000
        | (op << 30)
        | (machreg_to_gpr(rm) << 16)
        | (cond.bits() << 12)
        | (o2 << 10)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr(rd.to_reg())
}

fn enc_fcsel(rd: Writable<Reg>, rn: Reg, rm: Reg, cond: Cond, size: ScalarSize) -> u32 {
    0b000_11110_00_1_00000_0000_11_00000_00000
        | (size.ftype() << 22)
        | (machreg_to_vec(rm) << 16)
        | (machreg_to_vec(rn) << 5)
        | machreg_to_vec(rd.to_reg())
        | (cond.bits() << 12)
}

fn enc_ccmp(size: OperandSize, rn: Reg, rm: Reg, nzcv: NZCV, cond: Cond) -> u32 {
    0b0_1_1_11010010_00000_0000_00_00000_0_0000
        | size.sf_bit() << 31
        | machreg_to_gpr(rm) << 16
        | cond.bits() << 12
        | machreg_to_gpr(rn) << 5
        | nzcv.bits()
}

fn enc_ccmp_imm(size: OperandSize, rn: Reg, imm: UImm5, nzcv: NZCV, cond: Cond) -> u32 {
    0b0_1_1_11010010_00000_0000_10_00000_0_0000
        | size.sf_bit() << 31
        | imm.bits() << 16
        | cond.bits() << 12
        | machreg_to_gpr(rn) << 5
        | nzcv.bits()
}

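// BFM-family bitfield moves: `immr` is the right-rotate applied to the
// source and `imms` marks the most significant bit of the bitfield. The
// shift aliases emitted from `Inst::AluRRImmShift` below are expressed in
// these terms, e.g. `LSL #n` is `UBFM rd, rn, #((size - n) % size),
// #(size - 1 - n)`.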
fn enc_bfm(opc: u8, size: OperandSize, rd: Writable<Reg>, rn: Reg, immr: u8, imms: u8) -> u32 {
    match size {
        OperandSize::Size64 => {
            debug_assert!(immr <= 63);
            debug_assert!(imms <= 63);
        }
        OperandSize::Size32 => {
            debug_assert!(immr <= 31);
            debug_assert!(imms <= 31);
        }
    }
    debug_assert_eq!(opc & 0b11, opc);
    let n_bit = size.sf_bit();
    0b0_00_100110_0_000000_000000_00000_00000
        | size.sf_bit() << 31
        | u32::from(opc) << 29
        | n_bit << 22
        | u32::from(immr) << 16
        | u32::from(imms) << 10
        | machreg_to_gpr(rn) << 5
        | machreg_to_gpr(rd.to_reg())
}

fn enc_vecmov(is_16b: bool, rd: Writable<Reg>, rn: Reg) -> u32 {
    0b00001110_101_00000_00011_1_00000_00000
        | ((is_16b as u32) << 30)
        | machreg_to_vec(rd.to_reg())
        | (machreg_to_vec(rn) << 16)
        | (machreg_to_vec(rn) << 5)
}

fn enc_fpurr(top22: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
    (top22 << 10) | (machreg_to_vec(rn) << 5) | machreg_to_vec(rd.to_reg())
}

fn enc_fpurrr(top22: u32, rd: Writable<Reg>, rn: Reg, rm: Reg) -> u32 {
    (top22 << 10)
        | (machreg_to_vec(rm) << 16)
        | (machreg_to_vec(rn) << 5)
        | machreg_to_vec(rd.to_reg())
}

fn enc_fpurrrr(top17: u32, rd: Writable<Reg>, rn: Reg, rm: Reg, ra: Reg) -> u32 {
    (top17 << 15)
        | (machreg_to_vec(rm) << 16)
        | (machreg_to_vec(ra) << 10)
        | (machreg_to_vec(rn) << 5)
        | machreg_to_vec(rd.to_reg())
}

fn enc_fcmp(size: ScalarSize, rn: Reg, rm: Reg) -> u32 {
    0b000_11110_00_1_00000_00_1000_00000_00000
        | (size.ftype() << 22)
        | (machreg_to_vec(rm) << 16)
        | (machreg_to_vec(rn) << 5)
}

fn enc_fputoint(top16: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
    (top16 << 16) | (machreg_to_vec(rn) << 5) | machreg_to_gpr(rd.to_reg())
}

fn enc_inttofpu(top16: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
    (top16 << 16) | (machreg_to_gpr(rn) << 5) | machreg_to_vec(rd.to_reg())
}

fn enc_fround(top22: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
    (top22 << 10) | (machreg_to_vec(rn) << 5) | machreg_to_vec(rd.to_reg())
}

fn enc_vec_rr_misc(qu: u32, size: u32, bits_12_16: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
    debug_assert_eq!(qu & 0b11, qu);
    debug_assert_eq!(size & 0b11, size);
    debug_assert_eq!(bits_12_16 & 0b11111, bits_12_16);
    let bits = 0b0_00_01110_00_10000_00000_10_00000_00000;
    bits | qu << 29
        | size << 22
        | bits_12_16 << 12
        | machreg_to_vec(rn) << 5
        | machreg_to_vec(rd.to_reg())
}

fn enc_vec_rr_pair(bits_12_16: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
    debug_assert_eq!(bits_12_16 & 0b11111, bits_12_16);

    0b010_11110_11_11000_11011_10_00000_00000
        | bits_12_16 << 12
        | machreg_to_vec(rn) << 5
        | machreg_to_vec(rd.to_reg())
}

fn enc_vec_rr_pair_long(u: u32, enc_size: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
    debug_assert_eq!(u & 0b1, u);
    debug_assert_eq!(enc_size & 0b1, enc_size);

    0b0_1_0_01110_00_10000_00_0_10_10_00000_00000
        | u << 29
        | enc_size << 22
        | machreg_to_vec(rn) << 5
        | machreg_to_vec(rd.to_reg())
}

fn enc_vec_lanes(q: u32, u: u32, size: u32, opcode: u32, rd: Writable<Reg>, rn: Reg) -> u32 {
    debug_assert_eq!(q & 0b1, q);
    debug_assert_eq!(u & 0b1, u);
    debug_assert_eq!(size & 0b11, size);
    debug_assert_eq!(opcode & 0b11111, opcode);
    0b0_0_0_01110_00_11000_0_0000_10_00000_00000
        | q << 30
        | u << 29
        | size << 22
        | opcode << 12
        | machreg_to_vec(rn) << 5
        | machreg_to_vec(rd.to_reg())
}

fn enc_tbl(is_extension: bool, len: u32, rd: Writable<Reg>, rn: Reg, rm: Reg) -> u32 {
    debug_assert_eq!(len & 0b11, len);
    0b0_1_001110_000_00000_0_00_0_00_00000_00000
        | (machreg_to_vec(rm) << 16)
        | len << 13
        | (is_extension as u32) << 12
        | (machreg_to_vec(rn) << 5)
        | machreg_to_vec(rd.to_reg())
}

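// 0xD5033BBF is the fixed encoding of `DMB ISH`: a full data memory barrier
// over the inner-shareable domain.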
fn enc_dmb_ish() -> u32 {
    0xD5033BBF
}

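// Armv8.1 LSE read-modify-write atomics (the LDADDAL/SWPAL family, with
// acquire-release semantics). The `AtomicRMWLoop` pseudo-instruction emitted
// further below is the LL/SC-based fallback built from `enc_ldaxr` and
// `enc_stlxr`.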
fn enc_acq_rel(ty: Type, op: AtomicRMWOp, rs: Reg, rt: Writable<Reg>, rn: Reg) -> u32 {
    assert!(machreg_to_gpr(rt.to_reg()) != 31);
    let sz = match ty {
        I64 => 0b11,
        I32 => 0b10,
        I16 => 0b01,
        I8 => 0b00,
        _ => unreachable!(),
    };
    let bit15 = match op {
        AtomicRMWOp::Swp => 0b1,
        _ => 0b0,
    };
    let op = match op {
        AtomicRMWOp::Add => 0b000,
        AtomicRMWOp::Clr => 0b001,
        AtomicRMWOp::Eor => 0b010,
        AtomicRMWOp::Set => 0b011,
        AtomicRMWOp::Smax => 0b100,
        AtomicRMWOp::Smin => 0b101,
        AtomicRMWOp::Umax => 0b110,
        AtomicRMWOp::Umin => 0b111,
        AtomicRMWOp::Swp => 0b000,
    };
    0b00_111_000_111_00000_0_000_00_00000_00000
        | (sz << 30)
        | (machreg_to_gpr(rs) << 16)
        | bit15 << 15
        | (op << 12)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr(rt.to_reg())
}

fn enc_ldar(ty: Type, rt: Writable<Reg>, rn: Reg) -> u32 {
    let sz = match ty {
        I64 => 0b11,
        I32 => 0b10,
        I16 => 0b01,
        I8 => 0b00,
        _ => unreachable!(),
    };
    0b00_001000_1_1_0_11111_1_11111_00000_00000
        | (sz << 30)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr(rt.to_reg())
}

fn enc_stlr(ty: Type, rt: Reg, rn: Reg) -> u32 {
    let sz = match ty {
        I64 => 0b11,
        I32 => 0b10,
        I16 => 0b01,
        I8 => 0b00,
        _ => unreachable!(),
    };
    0b00_001000_100_11111_1_11111_00000_00000
        | (sz << 30)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr(rt)
}

fn enc_ldaxr(ty: Type, rt: Writable<Reg>, rn: Reg) -> u32 {
    let sz = match ty {
        I64 => 0b11,
        I32 => 0b10,
        I16 => 0b01,
        I8 => 0b00,
        _ => unreachable!(),
    };
    0b00_001000_0_1_0_11111_1_11111_00000_00000
        | (sz << 30)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr(rt.to_reg())
}

fn enc_stlxr(ty: Type, rs: Writable<Reg>, rt: Reg, rn: Reg) -> u32 {
    let sz = match ty {
        I64 => 0b11,
        I32 => 0b10,
        I16 => 0b01,
        I8 => 0b00,
        _ => unreachable!(),
    };
    0b00_001000_000_00000_1_11111_00000_00000
        | (sz << 30)
        | (machreg_to_gpr(rs.to_reg()) << 16)
        | (machreg_to_gpr(rn) << 5)
        | machreg_to_gpr(rt)
}

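// Compare-and-swap with acquire-release ordering (CASAL, also from the LSE
// extension): rs holds the expected value on entry and receives the old
// memory value on return.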
fn enc_cas(size: u32, rs: Writable<Reg>, rt: Reg, rn: Reg) -> u32 {
    debug_assert_eq!(size & 0b11, size);

    0b00_0010001_1_1_00000_1_11111_00000_00000
        | size << 30
        | machreg_to_gpr(rs.to_reg()) << 16
        | machreg_to_gpr(rn) << 5
        | machreg_to_gpr(rt)
}

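// ASIMD modified-immediate form: the 8-bit immediate is split across the
// instruction word as abc (bits 16..=18) and defgh (bits 5..=9), with cmode
// selecting how it expands into a vector constant.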
fn enc_asimd_mod_imm(rd: Writable<Reg>, q_op: u32, cmode: u32, imm: u8) -> u32 {
    let abc = (imm >> 5) as u32;
    let defgh = (imm & 0b11111) as u32;

    debug_assert_eq!(cmode & 0b1111, cmode);
    debug_assert_eq!(q_op & 0b11, q_op);

    0b0_0_0_0111100000_000_0000_01_00000_00000
        | (q_op << 29)
        | (abc << 16)
        | (cmode << 12)
        | (defgh << 5)
        | machreg_to_vec(rd.to_reg())
}

/// State carried between emissions of a sequence of instructions.
#[derive(Default, Clone, Debug)]
pub struct EmitState {
    /// The user stack map for the upcoming instruction, as provided to
    /// `pre_safepoint()`.
    user_stack_map: Option<ir::UserStackMap>,

    /// Only used during fuzz-testing. Otherwise, it is a zero-sized struct
    /// and optimized away at compile time. See [cranelift_control].
    ctrl_plane: ControlPlane,

    frame_layout: FrameLayout,
}

impl MachInstEmitState<Inst> for EmitState {
    fn new(abi: &Callee<AArch64MachineDeps>, ctrl_plane: ControlPlane) -> Self {
        EmitState {
            user_stack_map: None,
            ctrl_plane,
            frame_layout: abi.frame_layout().clone(),
        }
    }

    fn pre_safepoint(&mut self, user_stack_map: Option<ir::UserStackMap>) {
        self.user_stack_map = user_stack_map;
    }

    fn ctrl_plane_mut(&mut self) -> &mut ControlPlane {
        &mut self.ctrl_plane
    }

    fn take_ctrl_plane(self) -> ControlPlane {
        self.ctrl_plane
    }

    fn frame_layout(&self) -> &FrameLayout {
        &self.frame_layout
    }
}

impl EmitState {
    fn take_stack_map(&mut self) -> Option<ir::UserStackMap> {
        self.user_stack_map.take()
    }

    fn clear_post_insn(&mut self) {
        self.user_stack_map = None;
    }
}

/// Constant state used during function compilation.
pub struct EmitInfo(settings::Flags);

impl EmitInfo {
    /// Create a constant state for emission of instructions.
    pub fn new(flags: settings::Flags) -> Self {
        Self(flags)
    }
}

impl MachInstEmit for Inst {
    type State = EmitState;
    type Info = EmitInfo;

    fn emit(&self, sink: &mut MachBuffer<Inst>, emit_info: &Self::Info, state: &mut EmitState) {
        // N.B.: we *must* not exceed the "worst-case size" used to compute
        // where to insert islands, except when islands are explicitly triggered
        // (with an `EmitIsland`). We check this in debug builds. This is `mut`
        // to allow disabling the check for `JTSequence`, which is always
        // emitted following an `EmitIsland`.
        let mut start_off = sink.cur_offset();

        match self {
            &Inst::AluRRR {
                alu_op,
                size,
                rd,
                rn,
                rm,
            } => {
                debug_assert!(match alu_op {
                    ALUOp::SMulH | ALUOp::UMulH => size == OperandSize::Size64,
                    _ => true,
                });
                let top11 = match alu_op {
                    ALUOp::Add => 0b00001011_000,
                    ALUOp::Adc => 0b00011010_000,
                    ALUOp::AdcS => 0b00111010_000,
                    ALUOp::Sub => 0b01001011_000,
                    ALUOp::Sbc => 0b01011010_000,
                    ALUOp::SbcS => 0b01111010_000,
                    ALUOp::Orr => 0b00101010_000,
                    ALUOp::And => 0b00001010_000,
                    ALUOp::AndS => 0b01101010_000,
                    ALUOp::Eor => 0b01001010_000,
                    ALUOp::OrrNot => 0b00101010_001,
                    ALUOp::AndNot => 0b00001010_001,
                    ALUOp::EorNot => 0b01001010_001,
                    ALUOp::AddS => 0b00101011_000,
                    ALUOp::SubS => 0b01101011_000,
                    ALUOp::SDiv | ALUOp::UDiv => 0b00011010_110,
                    ALUOp::Extr | ALUOp::Lsr | ALUOp::Asr | ALUOp::Lsl => 0b00011010_110,
                    ALUOp::SMulH => 0b10011011_010,
                    ALUOp::UMulH => 0b10011011_110,
                };

                let top11 = top11 | size.sf_bit() << 10;
                let bit15_10 = match alu_op {
                    ALUOp::SDiv => 0b000011,
                    ALUOp::UDiv => 0b000010,
                    ALUOp::Extr => 0b001011,
                    ALUOp::Lsr => 0b001001,
                    ALUOp::Asr => 0b001010,
                    ALUOp::Lsl => 0b001000,
                    ALUOp::SMulH | ALUOp::UMulH => 0b011111,
                    _ => 0b000000,
                };
                debug_assert_ne!(writable_stack_reg(), rd);
                // The stack pointer is the zero register in this context, so this might be an
                // indication that something is wrong.
                debug_assert_ne!(stack_reg(), rn);
                debug_assert_ne!(stack_reg(), rm);
                sink.put4(enc_arith_rrr(top11, bit15_10, rd, rn, rm));
            }
            &Inst::AluRRRR {
                alu_op,
                size,
                rd,
                rm,
                rn,
                ra,
            } => {
                let (top11, bit15) = match alu_op {
                    ALUOp3::MAdd => (0b0_00_11011_000, 0),
                    ALUOp3::MSub => (0b0_00_11011_000, 1),
                    ALUOp3::UMAddL => {
                        debug_assert!(size == OperandSize::Size32);
                        (0b1_00_11011_1_01, 0)
                    }
                    ALUOp3::SMAddL => {
                        debug_assert!(size == OperandSize::Size32);
                        (0b1_00_11011_0_01, 0)
                    }
                };
                let top11 = top11 | size.sf_bit() << 10;
                sink.put4(enc_arith_rrrr(top11, rm, bit15, ra, rn, rd));
            }
            &Inst::AluRRImm12 {
                alu_op,
                size,
                rd,
                rn,
                ref imm12,
            } => {
                let top8 = match alu_op {
                    ALUOp::Add => 0b000_10001,
                    ALUOp::Sub => 0b010_10001,
                    ALUOp::AddS => 0b001_10001,
                    ALUOp::SubS => 0b011_10001,
                    _ => unimplemented!("{:?}", alu_op),
                };
                let top8 = top8 | size.sf_bit() << 7;
                sink.put4(enc_arith_rr_imm12(
                    top8,
                    imm12.shift_bits(),
                    imm12.imm_bits(),
                    rn,
                    rd,
                ));
            }
            &Inst::AluRRImmLogic {
                alu_op,
                size,
                rd,
                rn,
                ref imml,
            } => {
                let (top9, inv) = match alu_op {
                    ALUOp::Orr => (0b001_100100, false),
                    ALUOp::And => (0b000_100100, false),
                    ALUOp::AndS => (0b011_100100, false),
                    ALUOp::Eor => (0b010_100100, false),
                    ALUOp::OrrNot => (0b001_100100, true),
                    ALUOp::AndNot => (0b000_100100, true),
                    ALUOp::EorNot => (0b010_100100, true),
                    _ => unimplemented!("{:?}", alu_op),
                };
                let top9 = top9 | size.sf_bit() << 8;
                let imml = if inv { imml.invert() } else { *imml };
                sink.put4(enc_arith_rr_imml(top9, imml.enc_bits(), rn, rd));
            }

            &Inst::AluRRImmShift {
                alu_op,
                size,
                rd,
                rn,
                ref immshift,
            } => {
                let amt = immshift.value();
                let (top10, immr, imms) = match alu_op {
                    ALUOp::Extr => (0b0001001110, machreg_to_gpr(rn), u32::from(amt)),
                    ALUOp::Lsr => (0b0101001100, u32::from(amt), 0b011111),
                    ALUOp::Asr => (0b0001001100, u32::from(amt), 0b011111),
                    ALUOp::Lsl => {
                        let bits = if size.is64() { 64 } else { 32 };
                        (
                            0b0101001100,
                            u32::from((bits - amt) % bits),
                            u32::from(bits - 1 - amt),
                        )
                    }
                    _ => unimplemented!("{:?}", alu_op),
                };
                let top10 = top10 | size.sf_bit() << 9 | size.sf_bit();
                let imms = match alu_op {
                    ALUOp::Lsr | ALUOp::Asr => imms | size.sf_bit() << 5,
                    _ => imms,
                };
                sink.put4(
                    (top10 << 22)
                        | (immr << 16)
                        | (imms << 10)
                        | (machreg_to_gpr(rn) << 5)
                        | machreg_to_gpr(rd.to_reg()),
                );
            }

            &Inst::AluRRRShift {
                alu_op,
                size,
                rd,
                rn,
                rm,
                ref shiftop,
            } => {
                let top11: u32 = match alu_op {
                    ALUOp::Add => 0b000_01011000,
                    ALUOp::AddS => 0b001_01011000,
                    ALUOp::Sub => 0b010_01011000,
                    ALUOp::SubS => 0b011_01011000,
                    ALUOp::Orr => 0b001_01010000,
                    ALUOp::And => 0b000_01010000,
                    ALUOp::AndS => 0b011_01010000,
                    ALUOp::Eor => 0b010_01010000,
                    ALUOp::OrrNot => 0b001_01010001,
                    ALUOp::EorNot => 0b010_01010001,
                    ALUOp::AndNot => 0b000_01010001,
                    ALUOp::Extr => 0b000_10011100,
                    _ => unimplemented!("{:?}", alu_op),
                };
                let top11 = top11 | size.sf_bit() << 10;
                let top11 = top11 | (u32::from(shiftop.op().bits()) << 1);
                let bits_15_10 = u32::from(shiftop.amt().value());
                sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm));
            }

            &Inst::AluRRRExtend {
                alu_op,
                size,
                rd,
                rn,
                rm,
                extendop,
            } => {
                let top11: u32 = match alu_op {
                    ALUOp::Add => 0b00001011001,
                    ALUOp::Sub => 0b01001011001,
                    ALUOp::AddS => 0b00101011001,
                    ALUOp::SubS => 0b01101011001,
                    _ => unimplemented!("{:?}", alu_op),
                };
                let top11 = top11 | size.sf_bit() << 10;
                let bits_15_10 = u32::from(extendop.bits()) << 3;
                sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm));
            }

            &Inst::BitRR {
                op, size, rd, rn, ..
            } => {
                let (op1, op2) = match op {
                    BitOp::RBit => (0b00000, 0b000000),
                    BitOp::Clz => (0b00000, 0b000100),
                    BitOp::Cls => (0b00000, 0b000101),
                    BitOp::Rev16 => (0b00000, 0b000001),
                    BitOp::Rev32 => (0b00000, 0b000010),
                    BitOp::Rev64 => (0b00000, 0b000011),
                };
                sink.put4(enc_bit_rr(size.sf_bit(), op1, op2, rn, rd))
            }

            &Inst::ULoad8 { rd, ref mem, flags }
            | &Inst::SLoad8 { rd, ref mem, flags }
            | &Inst::ULoad16 { rd, ref mem, flags }
            | &Inst::SLoad16 { rd, ref mem, flags }
            | &Inst::ULoad32 { rd, ref mem, flags }
            | &Inst::SLoad32 { rd, ref mem, flags }
            | &Inst::ULoad64 {
                rd, ref mem, flags, ..
            }
            | &Inst::FpuLoad16 { rd, ref mem, flags }
            | &Inst::FpuLoad32 { rd, ref mem, flags }
            | &Inst::FpuLoad64 { rd, ref mem, flags }
            | &Inst::FpuLoad128 { rd, ref mem, flags } => {
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_insts, mem) = mem_finalize(Some(sink), &mem, access_ty, state);

                for inst in mem_insts.into_iter() {
                    inst.emit(sink, emit_info, state);
                }

                // ldst encoding helpers take Reg, not Writable<Reg>.
                let rd = rd.to_reg();

                // This is the base opcode (top 10 bits) for the "unscaled
                // immediate" form (Unscaled). Other addressing modes will OR in
                // other values for bits 24/25 (bits 1/2 of this constant).
                let op = match self {
                    Inst::ULoad8 { .. } => 0b0011100001,
                    Inst::SLoad8 { .. } => 0b0011100010,
                    Inst::ULoad16 { .. } => 0b0111100001,
                    Inst::SLoad16 { .. } => 0b0111100010,
                    Inst::ULoad32 { .. } => 0b1011100001,
                    Inst::SLoad32 { .. } => 0b1011100010,
                    Inst::ULoad64 { .. } => 0b1111100001,
                    Inst::FpuLoad16 { .. } => 0b0111110001,
                    Inst::FpuLoad32 { .. } => 0b1011110001,
                    Inst::FpuLoad64 { .. } => 0b1111110001,
                    Inst::FpuLoad128 { .. } => 0b0011110011,
                    _ => unreachable!(),
                };

                if let Some(trap_code) = flags.trap_code() {
                    // Register the offset at which the actual load instruction starts.
                    sink.add_trap(trap_code);
                }

                match &mem {
                    &AMode::Unscaled { rn, simm9 } => {
                        let reg = rn;
                        sink.put4(enc_ldst_simm9(op, simm9, 0b00, reg, rd));
                    }
                    &AMode::UnsignedOffset { rn, uimm12 } => {
                        let reg = rn;
                        sink.put4(enc_ldst_uimm12(op, uimm12, reg, rd));
                    }
                    &AMode::RegReg { rn, rm } => {
                        let r1 = rn;
                        let r2 = rm;
                        sink.put4(enc_ldst_reg(
                            op, r1, r2, /* scaled = */ false, /* extendop = */ None, rd,
                        ));
                    }
                    &AMode::RegScaled { rn, rm } | &AMode::RegScaledExtended { rn, rm, .. } => {
                        let r1 = rn;
                        let r2 = rm;
                        let extendop = match &mem {
                            &AMode::RegScaled { .. } => None,
                            &AMode::RegScaledExtended { extendop, .. } => Some(extendop),
                            _ => unreachable!(),
                        };
                        sink.put4(enc_ldst_reg(
                            op, r1, r2, /* scaled = */ true, extendop, rd,
                        ));
                    }
                    &AMode::RegExtended { rn, rm, extendop } => {
                        let r1 = rn;
                        let r2 = rm;
                        sink.put4(enc_ldst_reg(
                            op,
                            r1,
                            r2,
                            /* scaled = */ false,
                            Some(extendop),
                            rd,
                        ));
                    }
                    &AMode::Label { ref label } => {
                        let offset = match label {
                            // cast i32 to u32 (two's-complement)
                            MemLabel::PCRel(off) => *off as u32,
                            // Emit a relocation into the `MachBuffer`
                            // for the label that's being loaded from and
                            // encode an address of 0 in its place which will
                            // get filled in by relocation resolution later on.
                            MemLabel::Mach(label) => {
                                sink.use_label_at_offset(
                                    sink.cur_offset(),
                                    *label,
                                    LabelUse::Ldr19,
                                );
                                0
                            }
                        } / 4;
                        assert!(offset < (1 << 19));
                        match self {
                            &Inst::ULoad32 { .. } => {
                                sink.put4(enc_ldst_imm19(0b00011000, offset, rd));
                            }
                            &Inst::SLoad32 { .. } => {
                                sink.put4(enc_ldst_imm19(0b10011000, offset, rd));
                            }
                            &Inst::FpuLoad32 { .. } => {
                                sink.put4(enc_ldst_imm19(0b00011100, offset, rd));
                            }
                            &Inst::ULoad64 { .. } => {
                                sink.put4(enc_ldst_imm19(0b01011000, offset, rd));
                            }
                            &Inst::FpuLoad64 { .. } => {
                                sink.put4(enc_ldst_imm19(0b01011100, offset, rd));
                            }
                            &Inst::FpuLoad128 { .. } => {
                                sink.put4(enc_ldst_imm19(0b10011100, offset, rd));
                            }
                            _ => panic!("Unsupported size for LDR from constant pool!"),
                        }
                    }
                    &AMode::SPPreIndexed { simm9 } => {
                        let reg = stack_reg();
                        sink.put4(enc_ldst_simm9(op, simm9, 0b11, reg, rd));
                    }
                    &AMode::SPPostIndexed { simm9 } => {
                        let reg = stack_reg();
                        sink.put4(enc_ldst_simm9(op, simm9, 0b01, reg, rd));
                    }
                    // Eliminated by `mem_finalize()` above.
                    &AMode::SPOffset { .. }
                    | &AMode::FPOffset { .. }
                    | &AMode::IncomingArg { .. }
                    | &AMode::SlotOffset { .. }
                    | &AMode::Const { .. }
                    | &AMode::RegOffset { .. } => {
                        panic!("Should not see {mem:?} here!")
                    }
                }
            }

            &Inst::Store8 { rd, ref mem, flags }
            | &Inst::Store16 { rd, ref mem, flags }
            | &Inst::Store32 { rd, ref mem, flags }
            | &Inst::Store64 { rd, ref mem, flags }
            | &Inst::FpuStore16 { rd, ref mem, flags }
            | &Inst::FpuStore32 { rd, ref mem, flags }
            | &Inst::FpuStore64 { rd, ref mem, flags }
            | &Inst::FpuStore128 { rd, ref mem, flags } => {
                let mem = mem.clone();
                let access_ty = self.mem_type().unwrap();
                let (mem_insts, mem) = mem_finalize(Some(sink), &mem, access_ty, state);

                for inst in mem_insts.into_iter() {
                    inst.emit(sink, emit_info, state);
                }

                let op = match self {
                    Inst::Store8 { .. } => 0b0011100000,
                    Inst::Store16 { .. } => 0b0111100000,
                    Inst::Store32 { .. } => 0b1011100000,
                    Inst::Store64 { .. } => 0b1111100000,
                    Inst::FpuStore16 { .. } => 0b0111110000,
                    Inst::FpuStore32 { .. } => 0b1011110000,
                    Inst::FpuStore64 { .. } => 0b1111110000,
                    Inst::FpuStore128 { .. } => 0b0011110010,
                    _ => unreachable!(),
                };

                if let Some(trap_code) = flags.trap_code() {
                    // Register the offset at which the actual store instruction starts.
                    sink.add_trap(trap_code);
                }

                match &mem {
                    &AMode::Unscaled { rn, simm9 } => {
                        let reg = rn;
                        sink.put4(enc_ldst_simm9(op, simm9, 0b00, reg, rd));
                    }
                    &AMode::UnsignedOffset { rn, uimm12 } => {
                        let reg = rn;
                        sink.put4(enc_ldst_uimm12(op, uimm12, reg, rd));
                    }
                    &AMode::RegReg { rn, rm } => {
                        let r1 = rn;
                        let r2 = rm;
                        sink.put4(enc_ldst_reg(
                            op, r1, r2, /* scaled = */ false, /* extendop = */ None, rd,
                        ));
                    }
                    &AMode::RegScaled { rn, rm } | &AMode::RegScaledExtended { rn, rm, .. } => {
                        let r1 = rn;
                        let r2 = rm;
                        let extendop = match &mem {
                            &AMode::RegScaled { .. } => None,
                            &AMode::RegScaledExtended { extendop, .. } => Some(extendop),
                            _ => unreachable!(),
                        };
                        sink.put4(enc_ldst_reg(
                            op, r1, r2, /* scaled = */ true, extendop, rd,
                        ));
                    }
                    &AMode::RegExtended { rn, rm, extendop } => {
                        let r1 = rn;
                        let r2 = rm;
                        sink.put4(enc_ldst_reg(
                            op,
                            r1,
                            r2,
                            /* scaled = */ false,
                            Some(extendop),
                            rd,
                        ));
                    }
                    &AMode::Label { .. } => {
                        panic!("Store to a MemLabel not implemented!");
                    }
                    &AMode::SPPreIndexed { simm9 } => {
                        let reg = stack_reg();
                        sink.put4(enc_ldst_simm9(op, simm9, 0b11, reg, rd));
                    }
                    &AMode::SPPostIndexed { simm9 } => {
                        let reg = stack_reg();
                        sink.put4(enc_ldst_simm9(op, simm9, 0b01, reg, rd));
                    }
                    // Eliminated by `mem_finalize()` above.
                    &AMode::SPOffset { .. }
                    | &AMode::FPOffset { .. }
                    | &AMode::IncomingArg { .. }
                    | &AMode::SlotOffset { .. }
                    | &AMode::Const { .. }
                    | &AMode::RegOffset { .. } => {
                        panic!("Should not see {mem:?} here!")
                    }
                }
            }

            &Inst::StoreP64 {
                rt,
                rt2,
                ref mem,
                flags,
            } => {
                let mem = mem.clone();
                if let Some(trap_code) = flags.trap_code() {
                    // Register the offset at which the actual store instruction starts.
                    sink.add_trap(trap_code);
                }
                match &mem {
                    &PairAMode::SignedOffset { reg, simm7 } => {
                        assert_eq!(simm7.scale_ty, I64);
                        sink.put4(enc_ldst_pair(0b1010100100, simm7, reg, rt, rt2));
                    }
                    &PairAMode::SPPreIndexed { simm7 } => {
                        assert_eq!(simm7.scale_ty, I64);
                        let reg = stack_reg();
                        sink.put4(enc_ldst_pair(0b1010100110, simm7, reg, rt, rt2));
                    }
                    &PairAMode::SPPostIndexed { simm7 } => {
                        assert_eq!(simm7.scale_ty, I64);
                        let reg = stack_reg();
                        sink.put4(enc_ldst_pair(0b1010100010, simm7, reg, rt, rt2));
                    }
                }
            }
            &Inst::LoadP64 {
                rt,
                rt2,
                ref mem,
                flags,
            } => {
                let rt = rt.to_reg();
                let rt2 = rt2.to_reg();
                let mem = mem.clone();
                if let Some(trap_code) = flags.trap_code() {
                    // Register the offset at which the actual load instruction starts.
                    sink.add_trap(trap_code);
                }

                match &mem {
                    &PairAMode::SignedOffset { reg, simm7 } => {
                        assert_eq!(simm7.scale_ty, I64);
                        sink.put4(enc_ldst_pair(0b1010100101, simm7, reg, rt, rt2));
                    }
                    &PairAMode::SPPreIndexed { simm7 } => {
                        assert_eq!(simm7.scale_ty, I64);
                        let reg = stack_reg();
                        sink.put4(enc_ldst_pair(0b1010100111, simm7, reg, rt, rt2));
                    }
                    &PairAMode::SPPostIndexed { simm7 } => {
                        assert_eq!(simm7.scale_ty, I64);
                        let reg = stack_reg();
                        sink.put4(enc_ldst_pair(0b1010100011, simm7, reg, rt, rt2));
                    }
                }
            }
            &Inst::FpuLoadP64 {
                rt,
                rt2,
                ref mem,
                flags,
            }
            | &Inst::FpuLoadP128 {
                rt,
                rt2,
                ref mem,
                flags,
            } => {
                let rt = rt.to_reg();
                let rt2 = rt2.to_reg();
                let mem = mem.clone();

                if let Some(trap_code) = flags.trap_code() {
                    // Register the offset at which the actual load instruction starts.
                    sink.add_trap(trap_code);
                }

                let opc = match self {
                    &Inst::FpuLoadP64 { .. } => 0b01,
                    &Inst::FpuLoadP128 { .. } => 0b10,
                    _ => unreachable!(),
                };

                match &mem {
                    &PairAMode::SignedOffset { reg, simm7 } => {
                        assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16);
                        sink.put4(enc_ldst_vec_pair(opc, 0b10, true, simm7, reg, rt, rt2));
                    }
                    &PairAMode::SPPreIndexed { simm7 } => {
                        assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16);
                        let reg = stack_reg();
                        sink.put4(enc_ldst_vec_pair(opc, 0b11, true, simm7, reg, rt, rt2));
                    }
                    &PairAMode::SPPostIndexed { simm7 } => {
                        assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16);
                        let reg = stack_reg();
                        sink.put4(enc_ldst_vec_pair(opc, 0b01, true, simm7, reg, rt, rt2));
                    }
                }
            }
            &Inst::FpuStoreP64 {
                rt,
                rt2,
                ref mem,
                flags,
            }
            | &Inst::FpuStoreP128 {
                rt,
                rt2,
                ref mem,
                flags,
            } => {
                let mem = mem.clone();

                if let Some(trap_code) = flags.trap_code() {
                    // Register the offset at which the actual store instruction starts.
                    sink.add_trap(trap_code);
                }

                let opc = match self {
                    &Inst::FpuStoreP64 { .. } => 0b01,
                    &Inst::FpuStoreP128 { .. } => 0b10,
                    _ => unreachable!(),
                };

                match &mem {
                    &PairAMode::SignedOffset { reg, simm7 } => {
                        assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16);
                        sink.put4(enc_ldst_vec_pair(opc, 0b10, false, simm7, reg, rt, rt2));
                    }
                    &PairAMode::SPPreIndexed { simm7 } => {
                        assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16);
                        let reg = stack_reg();
                        sink.put4(enc_ldst_vec_pair(opc, 0b11, false, simm7, reg, rt, rt2));
                    }
                    &PairAMode::SPPostIndexed { simm7 } => {
                        assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16);
                        let reg = stack_reg();
                        sink.put4(enc_ldst_vec_pair(opc, 0b01, false, simm7, reg, rt, rt2));
                    }
                }
            }
            &Inst::Mov { size, rd, rm } => {
                assert!(rd.to_reg().class() == rm.class());
                assert!(rm.class() == RegClass::Int);

                match size {
                    OperandSize::Size64 => {
                        // A MOV to SP would be interpreted as a MOV to XZR
                        // instead, and our codegen should never MOV to XZR.
                        assert!(rd.to_reg() != stack_reg());

                        if rm == stack_reg() {
                            // We can't use ORR here, so use an `add rd, sp, #0` instead.
                            let imm12 = Imm12::maybe_from_u64(0).unwrap();
                            sink.put4(enc_arith_rr_imm12(
                                0b100_10001,
                                imm12.shift_bits(),
                                imm12.imm_bits(),
                                rm,
                                rd,
                            ));
                        } else {
                            // Encoded as ORR rd, rm, zero.
                            sink.put4(enc_arith_rrr(0b10101010_000, 0b000_000, rd, zero_reg(), rm));
                        }
                    }
                    OperandSize::Size32 => {
                        // A MOV to SP would be interpreted as a MOV to XZR
                        // instead, and our codegen should never MOV to XZR.
                        assert!(machreg_to_gpr(rd.to_reg()) != 31);
                        // Encoded as ORR rd, rm, zero.
                        sink.put4(enc_arith_rrr(0b00101010_000, 0b000_000, rd, zero_reg(), rm));
                    }
                }
            }
1374            &Inst::MovFromPReg { rd, rm } => {
1375                let rm: Reg = rm.into();
1376                debug_assert!([
1377                    regs::fp_reg(),
1378                    regs::stack_reg(),
1379                    regs::link_reg(),
1380                    regs::pinned_reg()
1381                ]
1382                .contains(&rm));
1383                assert!(rm.class() == RegClass::Int);
1384                assert!(rd.to_reg().class() == rm.class());
1385                let size = OperandSize::Size64;
1386                Inst::Mov { size, rd, rm }.emit(sink, emit_info, state);
1387            }
1388            &Inst::MovToPReg { rd, rm } => {
1389                let rd: Writable<Reg> = Writable::from_reg(rd.into());
1390                debug_assert!([
1391                    regs::fp_reg(),
1392                    regs::stack_reg(),
1393                    regs::link_reg(),
1394                    regs::pinned_reg()
1395                ]
1396                .contains(&rd.to_reg()));
1397                assert!(rd.to_reg().class() == RegClass::Int);
1398                assert!(rm.class() == rd.to_reg().class());
1399                let size = OperandSize::Size64;
1400                Inst::Mov { size, rd, rm }.emit(sink, emit_info, state);
1401            }
1402            &Inst::MovWide { op, rd, imm, size } => {
1403                sink.put4(enc_move_wide(op, rd, imm, size));
1404            }
1405            &Inst::MovK { rd, rn, imm, size } => {
1406                debug_assert_eq!(rn, rd.to_reg());
1407                sink.put4(enc_movk(rd, imm, size));
1408            }
1409            &Inst::CSel { rd, rn, rm, cond } => {
1410                sink.put4(enc_csel(rd, rn, rm, cond, 0, 0));
1411            }
1412            &Inst::CSNeg { rd, rn, rm, cond } => {
1413                sink.put4(enc_csel(rd, rn, rm, cond, 1, 1));
1414            }
1415            &Inst::CSet { rd, cond } => {
1416                sink.put4(enc_csel(rd, zero_reg(), zero_reg(), cond.invert(), 0, 1));
1417            }
1418            &Inst::CSetm { rd, cond } => {
1419                sink.put4(enc_csel(rd, zero_reg(), zero_reg(), cond.invert(), 1, 0));
1420            }
1421            &Inst::CCmp {
1422                size,
1423                rn,
1424                rm,
1425                nzcv,
1426                cond,
1427            } => {
                sink.put4(enc_ccmp(size, rn, rm, nzcv, cond));
            }
            &Inst::CCmpImm {
                size,
                rn,
                imm,
                nzcv,
                cond,
            } => {
                sink.put4(enc_ccmp_imm(size, rn, imm, nzcv, cond));
            }
            &Inst::AtomicRMW {
                ty,
                op,
                rs,
                rt,
                rn,
                flags,
            } => {
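                // A single FEAT_LSE read-modify-write atomic (e.g. ldaddal):
                // `rs` supplies the operand, `rt` receives the old value, and
                // `rn` holds the address.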
                if let Some(trap_code) = flags.trap_code() {
                    sink.add_trap(trap_code);
                }

                sink.put4(enc_acq_rel(ty, op, rs, rt, rn));
            }
            &Inst::AtomicRMWLoop { ty, op, flags, .. } => {
                /* Emit this:
                     again:
                      ldaxr{,b,h}  x/w27, [x25]
                      // maybe sign extend
                      op          x28, x27, x26 // op is add,sub,and,orr,eor
                      stlxr{,b,h}  w24, x/w28, [x25]
                      cbnz        x24, again

                   Operand conventions:
                      IN:  x25 (addr), x26 (2nd arg for op)
                      OUT: x27 (old value), x24 (trashed), x28 (trashed)

                   It is unfortunate that, per the ARM documentation, x28 cannot be used for
                   both the store-data and success-flag operands of stlxr.  This causes the
                   instruction's behaviour to be "CONSTRAINED UNPREDICTABLE", so we use x24
                   instead for the success-flag.
                */
                // TODO: We should not hardcode registers here; a better idea would be to
                // pass some scratch registers in the AtomicRMWLoop pseudo-instruction and use those.
                let xzr = zero_reg();
                let x24 = xreg(24);
                let x25 = xreg(25);
                let x26 = xreg(26);
                let x27 = xreg(27);
                let x28 = xreg(28);
                let x24wr = writable_xreg(24);
                let x27wr = writable_xreg(27);
                let x28wr = writable_xreg(28);
                let again_label = sink.get_label();

                // again:
                sink.bind_label(again_label, &mut state.ctrl_plane);

                if let Some(trap_code) = flags.trap_code() {
                    sink.add_trap(trap_code);
                }

                sink.put4(enc_ldaxr(ty, x27wr, x25)); // ldaxr x27, [x25]
                let size = OperandSize::from_ty(ty);
                let sign_ext = match op {
                    AtomicRMWLoopOp::Smin | AtomicRMWLoopOp::Smax => match ty {
                        I16 => Some((ExtendOp::SXTH, 16)),
                        I8 => Some((ExtendOp::SXTB, 8)),
                        _ => None,
                    },
                    _ => None,
                };

                // sxt{b|h} the loaded result if necessary.
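                // (Smin/Smax compare at full register width below, so the
                // sub-word value must carry the correct sign first.)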
                if let Some((_, from_bits)) = sign_ext {
                    Inst::Extend {
                        rd: x27wr,
                        rn: x27,
                        signed: true,
                        from_bits,
                        to_bits: size.bits(),
                    }
                    .emit(sink, emit_info, state);
                }

                match op {
                    AtomicRMWLoopOp::Xchg => {} // do nothing
                    AtomicRMWLoopOp::Nand => {
                        // and x28, x27, x26
                        // mvn x28, x28

                        Inst::AluRRR {
                            alu_op: ALUOp::And,
                            size,
                            rd: x28wr,
                            rn: x27,
                            rm: x26,
                        }
                        .emit(sink, emit_info, state);

                        Inst::AluRRR {
                            alu_op: ALUOp::OrrNot,
                            size,
                            rd: x28wr,
                            rn: xzr,
                            rm: x28,
                        }
                        .emit(sink, emit_info, state);
                    }
                    AtomicRMWLoopOp::Umin
                    | AtomicRMWLoopOp::Umax
                    | AtomicRMWLoopOp::Smin
                    | AtomicRMWLoopOp::Smax => {
                        // cmp x27, x26 {?sxt}
                        // csel.op x28, x27, x26

                        let cond = match op {
                            AtomicRMWLoopOp::Umin => Cond::Lo,
                            AtomicRMWLoopOp::Umax => Cond::Hi,
                            AtomicRMWLoopOp::Smin => Cond::Lt,
                            AtomicRMWLoopOp::Smax => Cond::Gt,
                            _ => unreachable!(),
                        };

                        if let Some((extendop, _)) = sign_ext {
                            Inst::AluRRRExtend {
                                alu_op: ALUOp::SubS,
                                size,
                                rd: writable_zero_reg(),
                                rn: x27,
                                rm: x26,
                                extendop,
                            }
                            .emit(sink, emit_info, state);
                        } else {
                            Inst::AluRRR {
                                alu_op: ALUOp::SubS,
                                size,
                                rd: writable_zero_reg(),
                                rn: x27,
                                rm: x26,
                            }
                            .emit(sink, emit_info, state);
                        }

                        Inst::CSel {
                            cond,
                            rd: x28wr,
                            rn: x27,
                            rm: x26,
                        }
                        .emit(sink, emit_info, state);
                    }
                    _ => {
                        // add/sub/and/orr/eor x28, x27, x26
                        let alu_op = match op {
                            AtomicRMWLoopOp::Add => ALUOp::Add,
                            AtomicRMWLoopOp::Sub => ALUOp::Sub,
                            AtomicRMWLoopOp::And => ALUOp::And,
                            AtomicRMWLoopOp::Orr => ALUOp::Orr,
                            AtomicRMWLoopOp::Eor => ALUOp::Eor,
                            AtomicRMWLoopOp::Nand
                            | AtomicRMWLoopOp::Umin
                            | AtomicRMWLoopOp::Umax
                            | AtomicRMWLoopOp::Smin
                            | AtomicRMWLoopOp::Smax
                            | AtomicRMWLoopOp::Xchg => unreachable!(),
                        };

                        Inst::AluRRR {
                            alu_op,
                            size,
                            rd: x28wr,
                            rn: x27,
                            rm: x26,
                        }
                        .emit(sink, emit_info, state);
                    }
                }

                if let Some(trap_code) = flags.trap_code() {
                    sink.add_trap(trap_code);
                }
                if op == AtomicRMWLoopOp::Xchg {
                    sink.put4(enc_stlxr(ty, x24wr, x26, x25)); // stlxr w24, x26, [x25]
                } else {
                    sink.put4(enc_stlxr(ty, x24wr, x28, x25)); // stlxr w24, x28, [x25]
                }

                // cbnz w24, again
                // Note that we're actually testing x24, relying on the default
                // zero-high-half rule in the assignment that `stlxr` does.
                let br_offset = sink.cur_offset();
                sink.put4(enc_conditional_br(
                    BranchTarget::Label(again_label),
                    CondBrKind::NotZero(x24, OperandSize::Size64),
                ));
                sink.use_label_at_offset(br_offset, again_label, LabelUse::Branch19);
            }
            &Inst::AtomicCAS {
                rd,
                rs,
                rt,
                rn,
                ty,
                flags,
            } => {
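                // FEAT_LSE compare-and-swap: `rs` (tied to `rd`) supplies the
                // expected value and receives the value loaded from `[rn]`;
                // `rt` supplies the replacement value.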
                debug_assert_eq!(rd.to_reg(), rs);
                let size = match ty {
                    I8 => 0b00,
                    I16 => 0b01,
                    I32 => 0b10,
                    I64 => 0b11,
                    _ => panic!("Unsupported type: {ty}"),
                };

                if let Some(trap_code) = flags.trap_code() {
                    sink.add_trap(trap_code);
                }

                sink.put4(enc_cas(size, rd, rt, rn));
            }
            &Inst::AtomicCASLoop { ty, flags, .. } => {
                /* Emit this:
                    again:
                     ldaxr{,b,h} x/w27, [x25]
                     cmp         x27, x/w26 uxt{b,h}
                     b.ne        out
                     stlxr{,b,h} w24, x/w28, [x25]
                     cbnz        x24, again
                    out:

                  Operand conventions:
                     IN:  x25 (addr), x26 (expected value), x28 (replacement value)
                     OUT: x27 (old value), x24 (trashed)
                */
                let x24 = xreg(24);
                let x25 = xreg(25);
                let x26 = xreg(26);
                let x27 = xreg(27);
                let x28 = xreg(28);
                let xzrwr = writable_zero_reg();
                let x24wr = writable_xreg(24);
                let x27wr = writable_xreg(27);
                let again_label = sink.get_label();
                let out_label = sink.get_label();

                // again:
                sink.bind_label(again_label, &mut state.ctrl_plane);

                if let Some(trap_code) = flags.trap_code() {
                    sink.add_trap(trap_code);
                }

                // ldaxr x27, [x25]
                sink.put4(enc_ldaxr(ty, x27wr, x25));

                // The top 32 bits are zero-extended by the ldaxr, so we don't
                // have to use UXTW, just the x-form of the register.
                let (bit21, extend_op) = match ty {
                    I8 => (0b1, 0b000000),
                    I16 => (0b1, 0b001000),
                    _ => (0b0, 0b000000),
                };
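                // Setting bit 21 selects the extended-register form of SUBS;
                // the extend field requests UXTB/UXTH so that only the low
                // byte/halfword of x26 participates in the comparison.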
                let bits_31_21 = 0b111_01011_000 | bit21;
                // cmp x27, x26 (== subs xzr, x27, x26)
                sink.put4(enc_arith_rrr(bits_31_21, extend_op, xzrwr, x27, x26));

                // b.ne out
                let br_out_offset = sink.cur_offset();
                sink.put4(enc_conditional_br(
                    BranchTarget::Label(out_label),
                    CondBrKind::Cond(Cond::Ne),
                ));
                sink.use_label_at_offset(br_out_offset, out_label, LabelUse::Branch19);

                if let Some(trap_code) = flags.trap_code() {
                    sink.add_trap(trap_code);
                }

                sink.put4(enc_stlxr(ty, x24wr, x28, x25)); // stlxr w24, x28, [x25]

                // cbnz w24, again.
                // Note that we're actually testing x24, relying on the default
                // zero-high-half rule in the assignment that `stlxr` does.
                let br_again_offset = sink.cur_offset();
                sink.put4(enc_conditional_br(
                    BranchTarget::Label(again_label),
                    CondBrKind::NotZero(x24, OperandSize::Size64),
                ));
                sink.use_label_at_offset(br_again_offset, again_label, LabelUse::Branch19);

                // out:
                sink.bind_label(out_label, &mut state.ctrl_plane);
            }
            &Inst::LoadAcquire {
                access_ty,
                rt,
                rn,
                flags,
            } => {
                if let Some(trap_code) = flags.trap_code() {
                    sink.add_trap(trap_code);
                }

                sink.put4(enc_ldar(access_ty, rt, rn));
            }
            &Inst::StoreRelease {
                access_ty,
                rt,
                rn,
                flags,
            } => {
                if let Some(trap_code) = flags.trap_code() {
                    sink.add_trap(trap_code);
                }

                sink.put4(enc_stlr(access_ty, rt, rn));
            }
            &Inst::Fence {} => {
                sink.put4(enc_dmb_ish()); // dmb ish
            }
            &Inst::Csdb {} => {
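                // CSDB: consumption-of-speculative-data barrier (a HINT-space
                // encoding, hence the raw constant).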
                sink.put4(0xd503229f);
            }
            &Inst::FpuMove32 { rd, rn } => {
                sink.put4(enc_fpurr(0b000_11110_00_1_000000_10000, rd, rn));
            }
            &Inst::FpuMove64 { rd, rn } => {
                sink.put4(enc_fpurr(0b000_11110_01_1_000000_10000, rd, rn));
            }
            &Inst::FpuMove128 { rd, rn } => {
                sink.put4(enc_vecmov(/* 16b = */ true, rd, rn));
            }
            &Inst::FpuMoveFromVec { rd, rn, idx, size } => {
                let (imm5, shift, mask) = match size.lane_size() {
                    ScalarSize::Size32 => (0b00100, 3, 0b011),
                    ScalarSize::Size64 => (0b01000, 4, 0b001),
                    _ => unimplemented!(),
                };
                debug_assert_eq!(idx & mask, idx);
                let imm5 = imm5 | ((idx as u32) << shift);
                sink.put4(
                    0b010_11110000_00000_000001_00000_00000
                        | (imm5 << 16)
                        | (machreg_to_vec(rn) << 5)
                        | machreg_to_vec(rd.to_reg()),
                );
            }
            &Inst::FpuExtend { rd, rn, size } => {
                sink.put4(enc_fpurr(
                    0b000_11110_00_1_000000_10000 | (size.ftype() << 12),
                    rd,
                    rn,
                ));
            }
            &Inst::FpuRR {
                fpu_op,
                size,
                rd,
                rn,
            } => {
                let top22 = match fpu_op {
                    FPUOp1::Abs => 0b000_11110_00_1_000001_10000,
                    FPUOp1::Neg => 0b000_11110_00_1_000010_10000,
                    FPUOp1::Sqrt => 0b000_11110_00_1_000011_10000,
                    FPUOp1::Cvt32To64 => {
                        debug_assert_eq!(size, ScalarSize::Size32);
                        0b000_11110_00_1_000101_10000
                    }
                    FPUOp1::Cvt64To32 => {
                        debug_assert_eq!(size, ScalarSize::Size64);
                        0b000_11110_01_1_000100_10000
                    }
                };
                let top22 = top22 | size.ftype() << 12;
                sink.put4(enc_fpurr(top22, rd, rn));
            }
            &Inst::FpuRRR {
                fpu_op,
                size,
                rd,
                rn,
                rm,
            } => {
                let top22 = match fpu_op {
                    FPUOp2::Add => 0b000_11110_00_1_00000_001010,
                    FPUOp2::Sub => 0b000_11110_00_1_00000_001110,
                    FPUOp2::Mul => 0b000_11110_00_1_00000_000010,
                    FPUOp2::Div => 0b000_11110_00_1_00000_000110,
                    FPUOp2::Max => 0b000_11110_00_1_00000_010010,
                    FPUOp2::Min => 0b000_11110_00_1_00000_010110,
                };
                let top22 = top22 | size.ftype() << 12;
                sink.put4(enc_fpurrr(top22, rd, rn, rm));
            }
            &Inst::FpuRRI { fpu_op, rd, rn } => match fpu_op {
                FPUOpRI::UShr32(imm) => {
                    debug_assert_eq!(32, imm.lane_size_in_bits);
                    sink.put4(
                        0b0_0_1_011110_0000000_00_0_0_0_1_00000_00000
                            | imm.enc() << 16
                            | machreg_to_vec(rn) << 5
                            | machreg_to_vec(rd.to_reg()),
                    )
                }
                FPUOpRI::UShr64(imm) => {
                    debug_assert_eq!(64, imm.lane_size_in_bits);
                    sink.put4(
                        0b01_1_111110_0000000_00_0_0_0_1_00000_00000
                            | imm.enc() << 16
                            | machreg_to_vec(rn) << 5
                            | machreg_to_vec(rd.to_reg()),
                    )
                }
            },
            &Inst::FpuRRIMod { fpu_op, rd, ri, rn } => {
                debug_assert_eq!(rd.to_reg(), ri);
                match fpu_op {
                    FPUOpRIMod::Sli64(imm) => {
                        debug_assert_eq!(64, imm.lane_size_in_bits);
                        sink.put4(
                            0b01_1_111110_0000000_010101_00000_00000
                                | imm.enc() << 16
                                | machreg_to_vec(rn) << 5
                                | machreg_to_vec(rd.to_reg()),
                        )
                    }
                    FPUOpRIMod::Sli32(imm) => {
                        debug_assert_eq!(32, imm.lane_size_in_bits);
                        sink.put4(
                            0b0_0_1_011110_0000000_010101_00000_00000
                                | imm.enc() << 16
                                | machreg_to_vec(rn) << 5
                                | machreg_to_vec(rd.to_reg()),
                        )
                    }
                }
            }
            &Inst::FpuRRRR {
                fpu_op,
                size,
                rd,
                rn,
                rm,
                ra,
            } => {
                let top17 = match fpu_op {
                    FPUOp3::MAdd => 0b000_11111_00_0_00000_0,
                    FPUOp3::MSub => 0b000_11111_00_0_00000_1,
                    FPUOp3::NMAdd => 0b000_11111_00_1_00000_0,
                    FPUOp3::NMSub => 0b000_11111_00_1_00000_1,
                };
                let top17 = top17 | size.ftype() << 7;
                sink.put4(enc_fpurrrr(top17, rd, rn, rm, ra));
            }
            &Inst::VecMisc { op, rd, rn, size } => {
                let (q, enc_size) = size.enc_size();
                let (u, bits_12_16, size) = match op {
                    VecMisc2::Not => (0b1, 0b00101, 0b00),
                    VecMisc2::Neg => (0b1, 0b01011, enc_size),
                    VecMisc2::Abs => (0b0, 0b01011, enc_size),
                    VecMisc2::Fabs => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b0, 0b01111, enc_size)
                    }
                    VecMisc2::Fneg => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b1, 0b01111, enc_size)
                    }
                    VecMisc2::Fsqrt => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b1, 0b11111, enc_size)
                    }
                    VecMisc2::Rev16 => {
                        debug_assert_eq!(size, VectorSize::Size8x16);
                        (0b0, 0b00001, enc_size)
                    }
                    VecMisc2::Rev32 => {
                        debug_assert!(size == VectorSize::Size8x16 || size == VectorSize::Size16x8);
                        (0b1, 0b00000, enc_size)
                    }
                    VecMisc2::Rev64 => {
                        debug_assert!(
                            size == VectorSize::Size8x16
                                || size == VectorSize::Size16x8
                                || size == VectorSize::Size32x4
                        );
                        (0b0, 0b00000, enc_size)
                    }
                    VecMisc2::Fcvtzs => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b0, 0b11011, enc_size)
                    }
                    VecMisc2::Fcvtzu => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b1, 0b11011, enc_size)
                    }
                    VecMisc2::Scvtf => {
                        debug_assert!(size == VectorSize::Size32x4 || size == VectorSize::Size64x2);
                        (0b0, 0b11101, enc_size & 0b1)
                    }
                    VecMisc2::Ucvtf => {
                        debug_assert!(size == VectorSize::Size32x4 || size == VectorSize::Size64x2);
                        (0b1, 0b11101, enc_size & 0b1)
                    }
                    VecMisc2::Frintn => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b0, 0b11000, enc_size & 0b01)
                    }
                    VecMisc2::Frintz => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b0, 0b11001, enc_size)
                    }
                    VecMisc2::Frintm => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b0, 0b11001, enc_size & 0b01)
                    }
                    VecMisc2::Frintp => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b0, 0b11000, enc_size)
                    }
                    VecMisc2::Cnt => {
                        debug_assert!(size == VectorSize::Size8x8 || size == VectorSize::Size8x16);
                        (0b0, 0b00101, enc_size)
                    }
                    VecMisc2::Cmeq0 => (0b0, 0b01001, enc_size),
                    VecMisc2::Cmge0 => (0b1, 0b01000, enc_size),
                    VecMisc2::Cmgt0 => (0b0, 0b01000, enc_size),
                    VecMisc2::Cmle0 => (0b1, 0b01001, enc_size),
                    VecMisc2::Cmlt0 => (0b0, 0b01010, enc_size),
                    VecMisc2::Fcmeq0 => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b0, 0b01101, enc_size)
                    }
                    VecMisc2::Fcmge0 => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b1, 0b01100, enc_size)
                    }
                    VecMisc2::Fcmgt0 => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b0, 0b01100, enc_size)
                    }
                    VecMisc2::Fcmle0 => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b1, 0b01101, enc_size)
                    }
                    VecMisc2::Fcmlt0 => {
                        debug_assert!(
                            size == VectorSize::Size32x2
                                || size == VectorSize::Size32x4
                                || size == VectorSize::Size64x2
                        );
                        (0b0, 0b01110, enc_size)
                    }
                };
                sink.put4(enc_vec_rr_misc((q << 1) | u, size, bits_12_16, rd, rn));
            }
            &Inst::VecLanes { op, rd, rn, size } => {
                let (q, size) = match size {
                    VectorSize::Size8x8 => (0b0, 0b00),
                    VectorSize::Size8x16 => (0b1, 0b00),
                    VectorSize::Size16x4 => (0b0, 0b01),
                    VectorSize::Size16x8 => (0b1, 0b01),
                    VectorSize::Size32x4 => (0b1, 0b10),
                    _ => unreachable!(),
                };
                let (u, opcode) = match op {
                    VecLanesOp::Uminv => (0b1, 0b11010),
                    VecLanesOp::Addv => (0b0, 0b11011),
                };
                sink.put4(enc_vec_lanes(q, u, size, opcode, rd, rn));
            }
            &Inst::VecShiftImm {
                op,
                rd,
                rn,
                size,
                imm,
            } => {
                let (is_shr, mut template) = match op {
                    VecShiftImmOp::Ushr => (true, 0b_001_011110_0000_000_000001_00000_00000_u32),
                    VecShiftImmOp::Sshr => (true, 0b_000_011110_0000_000_000001_00000_00000_u32),
                    VecShiftImmOp::Shl => (false, 0b_000_011110_0000_000_010101_00000_00000_u32),
                };
                if size.is_128bits() {
                    template |= 0b1 << 30;
                }
                let imm = imm as u32;
                // Deal with the somewhat strange encoding scheme for, and limits on,
                // the shift amount.
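                // immh:immb encodes `lane_bits + amount` for left shifts and
                // `2 * lane_bits - amount` for right shifts; the leading one
                // bit of immh doubles as the lane-size selector.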
                let immh_immb = match (size.lane_size(), is_shr) {
                    (ScalarSize::Size64, true) if imm >= 1 && imm <= 64 => {
                        0b_1000_000_u32 | (64 - imm)
                    }
                    (ScalarSize::Size32, true) if imm >= 1 && imm <= 32 => {
                        0b_0100_000_u32 | (32 - imm)
                    }
                    (ScalarSize::Size16, true) if imm >= 1 && imm <= 16 => {
                        0b_0010_000_u32 | (16 - imm)
                    }
                    (ScalarSize::Size8, true) if imm >= 1 && imm <= 8 => {
                        0b_0001_000_u32 | (8 - imm)
                    }
                    (ScalarSize::Size64, false) if imm <= 63 => 0b_1000_000_u32 | imm,
                    (ScalarSize::Size32, false) if imm <= 31 => 0b_0100_000_u32 | imm,
                    (ScalarSize::Size16, false) if imm <= 15 => 0b_0010_000_u32 | imm,
                    (ScalarSize::Size8, false) if imm <= 7 => 0b_0001_000_u32 | imm,
                    _ => panic!(
                        "aarch64: Inst::VecShiftImm: emit: invalid op/size/imm {op:?}, {size:?}, {imm:?}"
                    ),
                };
                let rn_enc = machreg_to_vec(rn);
                let rd_enc = machreg_to_vec(rd.to_reg());
                sink.put4(template | (immh_immb << 16) | (rn_enc << 5) | rd_enc);
            }
            &Inst::VecShiftImmMod {
                op,
                rd,
                ri,
                rn,
                size,
                imm,
            } => {
                debug_assert_eq!(rd.to_reg(), ri);
                let (is_shr, mut template) = match op {
                    VecShiftImmModOp::Sli => (false, 0b_001_011110_0000_000_010101_00000_00000_u32),
                };
                if size.is_128bits() {
                    template |= 0b1 << 30;
                }
                let imm = imm as u32;
                // Deal with the somewhat strange encoding scheme for, and limits on,
                // the shift amount.
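                // As above: immh:immb encodes `lane_bits + amount` for left
                // shifts and `2 * lane_bits - amount` for right shifts.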
                let immh_immb = match (size.lane_size(), is_shr) {
                    (ScalarSize::Size64, true) if imm >= 1 && imm <= 64 => {
                        0b_1000_000_u32 | (64 - imm)
                    }
                    (ScalarSize::Size32, true) if imm >= 1 && imm <= 32 => {
                        0b_0100_000_u32 | (32 - imm)
                    }
                    (ScalarSize::Size16, true) if imm >= 1 && imm <= 16 => {
                        0b_0010_000_u32 | (16 - imm)
                    }
                    (ScalarSize::Size8, true) if imm >= 1 && imm <= 8 => {
                        0b_0001_000_u32 | (8 - imm)
                    }
                    (ScalarSize::Size64, false) if imm <= 63 => 0b_1000_000_u32 | imm,
                    (ScalarSize::Size32, false) if imm <= 31 => 0b_0100_000_u32 | imm,
                    (ScalarSize::Size16, false) if imm <= 15 => 0b_0010_000_u32 | imm,
                    (ScalarSize::Size8, false) if imm <= 7 => 0b_0001_000_u32 | imm,
                    _ => panic!(
                        "aarch64: Inst::VecShiftImmMod: emit: invalid op/size/imm {op:?}, {size:?}, {imm:?}"
                    ),
                };
                let rn_enc = machreg_to_vec(rn);
                let rd_enc = machreg_to_vec(rd.to_reg());
                sink.put4(template | (immh_immb << 16) | (rn_enc << 5) | rd_enc);
            }
            &Inst::VecExtract { rd, rn, rm, imm4 } => {
                if imm4 < 16 {
                    let template = 0b_01_101110_000_00000_0_0000_0_00000_00000_u32;
                    let rm_enc = machreg_to_vec(rm);
                    let rn_enc = machreg_to_vec(rn);
                    let rd_enc = machreg_to_vec(rd.to_reg());
                    sink.put4(
                        template | (rm_enc << 16) | ((imm4 as u32) << 11) | (rn_enc << 5) | rd_enc,
                    );
                } else {
                    panic!("aarch64: Inst::VecExtract: emit: invalid extract index {imm4}");
                }
            }
            &Inst::VecTbl { rd, rn, rm } => {
                sink.put4(enc_tbl(/* is_extension = */ false, 0b00, rd, rn, rm));
            }
            &Inst::VecTblExt { rd, ri, rn, rm } => {
                debug_assert_eq!(rd.to_reg(), ri);
                sink.put4(enc_tbl(/* is_extension = */ true, 0b00, rd, rn, rm));
            }
            &Inst::VecTbl2 { rd, rn, rn2, rm } => {
                assert_eq!(machreg_to_vec(rn2), (machreg_to_vec(rn) + 1) % 32);
                sink.put4(enc_tbl(/* is_extension = */ false, 0b01, rd, rn, rm));
            }
            &Inst::VecTbl2Ext {
                rd,
                ri,
                rn,
                rn2,
                rm,
            } => {
                debug_assert_eq!(rd.to_reg(), ri);
                assert_eq!(machreg_to_vec(rn2), (machreg_to_vec(rn) + 1) % 32);
                sink.put4(enc_tbl(/* is_extension = */ true, 0b01, rd, rn, rm));
            }
            &Inst::FpuCmp { size, rn, rm } => {
                sink.put4(enc_fcmp(size, rn, rm));
            }
            &Inst::FpuToInt { op, rd, rn } => {
                let top16 = match op {
                    // FCVTZS (32/32-bit)
                    FpuToIntOp::F32ToI32 => 0b000_11110_00_1_11_000,
                    // FCVTZU (32/32-bit)
                    FpuToIntOp::F32ToU32 => 0b000_11110_00_1_11_001,
                    // FCVTZS (32/64-bit)
                    FpuToIntOp::F32ToI64 => 0b100_11110_00_1_11_000,
                    // FCVTZU (32/64-bit)
                    FpuToIntOp::F32ToU64 => 0b100_11110_00_1_11_001,
                    // FCVTZS (64/32-bit)
                    FpuToIntOp::F64ToI32 => 0b000_11110_01_1_11_000,
                    // FCVTZU (64/32-bit)
                    FpuToIntOp::F64ToU32 => 0b000_11110_01_1_11_001,
                    // FCVTZS (64/64-bit)
                    FpuToIntOp::F64ToI64 => 0b100_11110_01_1_11_000,
                    // FCVTZU (64/64-bit)
                    FpuToIntOp::F64ToU64 => 0b100_11110_01_1_11_001,
                };
                sink.put4(enc_fputoint(top16, rd, rn));
            }
            &Inst::IntToFpu { op, rd, rn } => {
                let top16 = match op {
                    // SCVTF (32/32-bit)
                    IntToFpuOp::I32ToF32 => 0b000_11110_00_1_00_010,
                    // UCVTF (32/32-bit)
                    IntToFpuOp::U32ToF32 => 0b000_11110_00_1_00_011,
                    // SCVTF (64/32-bit)
                    IntToFpuOp::I64ToF32 => 0b100_11110_00_1_00_010,
                    // UCVTF (64/32-bit)
                    IntToFpuOp::U64ToF32 => 0b100_11110_00_1_00_011,
                    // SCVTF (32/64-bit)
                    IntToFpuOp::I32ToF64 => 0b000_11110_01_1_00_010,
                    // UCVTF (32/64-bit)
                    IntToFpuOp::U32ToF64 => 0b000_11110_01_1_00_011,
                    // SCVTF (64/64-bit)
                    IntToFpuOp::I64ToF64 => 0b100_11110_01_1_00_010,
                    // UCVTF (64/64-bit)
                    IntToFpuOp::U64ToF64 => 0b100_11110_01_1_00_011,
                };
                sink.put4(enc_inttofpu(top16, rd, rn));
            }
            &Inst::FpuCSel16 { rd, rn, rm, cond } => {
                sink.put4(enc_fcsel(rd, rn, rm, cond, ScalarSize::Size16));
            }
            &Inst::FpuCSel32 { rd, rn, rm, cond } => {
                sink.put4(enc_fcsel(rd, rn, rm, cond, ScalarSize::Size32));
            }
            &Inst::FpuCSel64 { rd, rn, rm, cond } => {
                sink.put4(enc_fcsel(rd, rn, rm, cond, ScalarSize::Size64));
            }
            &Inst::FpuRound { op, rd, rn } => {
                let top22 = match op {
                    FpuRoundMode::Minus32 => 0b000_11110_00_1_001_010_10000,
                    FpuRoundMode::Minus64 => 0b000_11110_01_1_001_010_10000,
                    FpuRoundMode::Plus32 => 0b000_11110_00_1_001_001_10000,
                    FpuRoundMode::Plus64 => 0b000_11110_01_1_001_001_10000,
                    FpuRoundMode::Zero32 => 0b000_11110_00_1_001_011_10000,
                    FpuRoundMode::Zero64 => 0b000_11110_01_1_001_011_10000,
                    FpuRoundMode::Nearest32 => 0b000_11110_00_1_001_000_10000,
                    FpuRoundMode::Nearest64 => 0b000_11110_01_1_001_000_10000,
                };
                sink.put4(enc_fround(top22, rd, rn));
            }
            &Inst::MovToFpu { rd, rn, size } => {
                let template = match size {
                    ScalarSize::Size16 => 0b000_11110_11_1_00_111_000000_00000_00000,
                    ScalarSize::Size32 => 0b000_11110_00_1_00_111_000000_00000_00000,
                    ScalarSize::Size64 => 0b100_11110_01_1_00_111_000000_00000_00000,
                    _ => unreachable!(),
                };
                sink.put4(template | (machreg_to_gpr(rn) << 5) | machreg_to_vec(rd.to_reg()));
            }
            &Inst::FpuMoveFPImm { rd, imm, size } => {
                sink.put4(
                    0b000_11110_00_1_00_000_000100_00000_00000
                        | size.ftype() << 22
                        | ((imm.enc_bits() as u32) << 13)
                        | machreg_to_vec(rd.to_reg()),
                );
            }
            &Inst::MovToVec {
                rd,
                ri,
                rn,
                idx,
                size,
            } => {
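                // INS (general): insert a general-purpose register into one
                // lane of the vector, leaving the other lanes unchanged
                // (hence the tied `ri` input).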
                debug_assert_eq!(rd.to_reg(), ri);
                let (imm5, shift) = match size.lane_size() {
                    ScalarSize::Size8 => (0b00001, 1),
                    ScalarSize::Size16 => (0b00010, 2),
                    ScalarSize::Size32 => (0b00100, 3),
                    ScalarSize::Size64 => (0b01000, 4),
                    _ => unreachable!(),
                };
                debug_assert_eq!(idx & (0b11111 >> shift), idx);
                let imm5 = imm5 | ((idx as u32) << shift);
                sink.put4(
                    0b010_01110000_00000_0_0011_1_00000_00000
                        | (imm5 << 16)
                        | (machreg_to_gpr(rn) << 5)
                        | machreg_to_vec(rd.to_reg()),
                );
            }
            &Inst::MovFromVec { rd, rn, idx, size } => {
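                // UMOV: move one vector lane to a general-purpose register,
                // zero-extending it to the full register width.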
                let (q, imm5, shift, mask) = match size {
                    ScalarSize::Size8 => (0b0, 0b00001, 1, 0b1111),
                    ScalarSize::Size16 => (0b0, 0b00010, 2, 0b0111),
                    ScalarSize::Size32 => (0b0, 0b00100, 3, 0b0011),
                    ScalarSize::Size64 => (0b1, 0b01000, 4, 0b0001),
                    _ => panic!("Unexpected scalar FP operand size: {size:?}"),
                };
                debug_assert_eq!(idx & mask, idx);
                let imm5 = imm5 | ((idx as u32) << shift);
                sink.put4(
                    0b000_01110000_00000_0_0111_1_00000_00000
                        | (q << 30)
                        | (imm5 << 16)
                        | (machreg_to_vec(rn) << 5)
                        | machreg_to_gpr(rd.to_reg()),
                );
            }
            &Inst::MovFromVecSigned {
                rd,
                rn,
                idx,
                size,
                scalar_size,
            } => {
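                // SMOV: move one vector lane to a general-purpose register,
                // sign-extending it to the 32- or 64-bit scalar size.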
                let (imm5, shift, half) = match size {
                    VectorSize::Size8x8 => (0b00001, 1, true),
                    VectorSize::Size8x16 => (0b00001, 1, false),
                    VectorSize::Size16x4 => (0b00010, 2, true),
                    VectorSize::Size16x8 => (0b00010, 2, false),
                    VectorSize::Size32x2 => {
                        debug_assert_ne!(scalar_size, OperandSize::Size32);
                        (0b00100, 3, true)
                    }
                    VectorSize::Size32x4 => {
                        debug_assert_ne!(scalar_size, OperandSize::Size32);
                        (0b00100, 3, false)
                    }
                    _ => panic!("Unexpected vector operand size"),
                };
                debug_assert_eq!(idx & (0b11111 >> (half as u32 + shift)), idx);
                let imm5 = imm5 | ((idx as u32) << shift);
                sink.put4(
                    0b000_01110000_00000_0_0101_1_00000_00000
                        | (scalar_size.is64() as u32) << 30
                        | (imm5 << 16)
                        | (machreg_to_vec(rn) << 5)
                        | machreg_to_gpr(rd.to_reg()),
                );
            }
            &Inst::VecDup { rd, rn, size } => {
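                // DUP (general): broadcast a general-purpose register into
                // every lane of the vector.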
                let q = size.is_128bits() as u32;
                let imm5 = match size.lane_size() {
                    ScalarSize::Size8 => 0b00001,
                    ScalarSize::Size16 => 0b00010,
                    ScalarSize::Size32 => 0b00100,
                    ScalarSize::Size64 => 0b01000,
                    _ => unreachable!(),
                };
                sink.put4(
                    0b0_0_0_01110000_00000_000011_00000_00000
                        | (q << 30)
                        | (imm5 << 16)
                        | (machreg_to_gpr(rn) << 5)
                        | machreg_to_vec(rd.to_reg()),
                );
            }
            &Inst::VecDupFromFpu { rd, rn, size, lane } => {
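                // DUP (element): broadcast one lane of `rn` into every lane
                // of `rd`; the lane index is folded into imm5 below.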
                let q = size.is_128bits() as u32;
                let imm5 = match size.lane_size() {
                    ScalarSize::Size8 => {
                        assert!(lane < 16);
                        0b00001 | (u32::from(lane) << 1)
                    }
                    ScalarSize::Size16 => {
                        assert!(lane < 8);
                        0b00010 | (u32::from(lane) << 2)
                    }
                    ScalarSize::Size32 => {
                        assert!(lane < 4);
                        0b00100 | (u32::from(lane) << 3)
                    }
                    ScalarSize::Size64 => {
                        assert!(lane < 2);
                        0b01000 | (u32::from(lane) << 4)
                    }
                    _ => unimplemented!(),
                };
                sink.put4(
                    0b000_01110000_00000_000001_00000_00000
                        | (q << 30)
                        | (imm5 << 16)
                        | (machreg_to_vec(rn) << 5)
                        | machreg_to_vec(rd.to_reg()),
                );
            }
            &Inst::VecDupFPImm { rd, imm, size } => {
                let imm = imm.enc_bits();
                let op = match size.lane_size() {
                    ScalarSize::Size32 => 0,
                    ScalarSize::Size64 => 1,
                    _ => unimplemented!(),
                };
                let q_op = op | ((size.is_128bits() as u32) << 1);

                sink.put4(enc_asimd_mod_imm(rd, q_op, 0b1111, imm));
            }
            &Inst::VecDupImm {
                rd,
                imm,
                invert,
                size,
            } => {
                let (imm, shift, shift_ones) = imm.value();
                let (op, cmode) = match size.lane_size() {
                    ScalarSize::Size8 => {
                        assert!(!invert);
                        assert_eq!(shift, 0);

                        (0, 0b1110)
                    }
                    ScalarSize::Size16 => {
                        let s = shift & 8;

                        assert!(!shift_ones);
                        assert_eq!(s, shift);

                        (invert as u32, 0b1000 | (s >> 2))
                    }
                    ScalarSize::Size32 => {
                        if shift_ones {
                            assert!(shift == 8 || shift == 16);

                            (invert as u32, 0b1100 | (shift >> 4))
                        } else {
                            let s = shift & 24;

                            assert_eq!(s, shift);

                            (invert as u32, 0b0000 | (s >> 2))
                        }
                    }
                    ScalarSize::Size64 => {
                        assert!(!invert);
                        assert_eq!(shift, 0);

                        (1, 0b1110)
                    }
                    _ => unreachable!(),
                };
                let q_op = op | ((size.is_128bits() as u32) << 1);

                sink.put4(enc_asimd_mod_imm(rd, q_op, cmode, imm));
            }
            &Inst::VecExtend {
                t,
                rd,
                rn,
                high_half,
                lane_size,
            } => {
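                // SXTL/UXTL are aliases of SSHLL/USHLL with a zero shift;
                // immh (with immb == 0) encodes the source lane size, and the
                // destination lanes are twice as wide.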
                let immh = match lane_size {
                    ScalarSize::Size16 => 0b001,
                    ScalarSize::Size32 => 0b010,
                    ScalarSize::Size64 => 0b100,
                    _ => panic!("Unexpected VecExtend to lane size of {lane_size:?}"),
                };
                let u = match t {
                    VecExtendOp::Sxtl => 0b0,
                    VecExtendOp::Uxtl => 0b1,
                };
                sink.put4(
                    0b000_011110_0000_000_101001_00000_00000
                        | ((high_half as u32) << 30)
                        | (u << 29)
                        | (immh << 19)
                        | (machreg_to_vec(rn) << 5)
                        | machreg_to_vec(rd.to_reg()),
                );
            }
            &Inst::VecRRLong {
                op,
                rd,
                rn,
                high_half,
            } => {
                let (u, size, bits_12_16) = match op {
                    VecRRLongOp::Fcvtl16 => (0b0, 0b00, 0b10111),
                    VecRRLongOp::Fcvtl32 => (0b0, 0b01, 0b10111),
                    VecRRLongOp::Shll8 => (0b1, 0b00, 0b10011),
                    VecRRLongOp::Shll16 => (0b1, 0b01, 0b10011),
                    VecRRLongOp::Shll32 => (0b1, 0b10, 0b10011),
                };

                sink.put4(enc_vec_rr_misc(
                    ((high_half as u32) << 1) | u,
                    size,
                    bits_12_16,
                    rd,
                    rn,
                ));
            }
            &Inst::VecRRNarrowLow {
                op,
                rd,
                rn,
                lane_size,
            }
            | &Inst::VecRRNarrowHigh {
                op,
                rd,
                rn,
                lane_size,
                ..
            } => {
                let high_half = match self {
                    &Inst::VecRRNarrowLow { .. } => false,
                    &Inst::VecRRNarrowHigh { .. } => true,
                    _ => unreachable!(),
                };

                let size = match lane_size {
                    ScalarSize::Size8 => 0b00,
                    ScalarSize::Size16 => 0b01,
                    ScalarSize::Size32 => 0b10,
                    _ => panic!("unsupported size: {lane_size:?}"),
                };

                // Floats use a single bit to encode either half or single precision.
                let size = match op {
                    VecRRNarrowOp::Fcvtn => size >> 1,
                    _ => size,
                };

                let (u, bits_12_16) = match op {
                    VecRRNarrowOp::Xtn => (0b0, 0b10010),
                    VecRRNarrowOp::Sqxtn => (0b0, 0b10100),
                    VecRRNarrowOp::Sqxtun => (0b1, 0b10010),
                    VecRRNarrowOp::Uqxtn => (0b1, 0b10100),
                    VecRRNarrowOp::Fcvtn => (0b0, 0b10110),
                };

                sink.put4(enc_vec_rr_misc(
                    ((high_half as u32) << 1) | u,
                    size,
                    bits_12_16,
                    rd,
                    rn,
                ));
            }
            &Inst::VecMovElement {
                rd,
                ri,
                rn,
                dest_idx,
                src_idx,
                size,
            } => {
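                // INS (element): imm5 selects the destination lane (and the
                // lane size), imm4 selects the source lane.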
2543                debug_assert_eq!(rd.to_reg(), ri);
2544                let (imm5, shift) = match size.lane_size() {
2545                    ScalarSize::Size8 => (0b00001, 1),
2546                    ScalarSize::Size16 => (0b00010, 2),
2547                    ScalarSize::Size32 => (0b00100, 3),
2548                    ScalarSize::Size64 => (0b01000, 4),
2549                    _ => unreachable!(),
2550                };
2551                let mask = 0b11111 >> shift;
2552                debug_assert_eq!(dest_idx & mask, dest_idx);
2553                debug_assert_eq!(src_idx & mask, src_idx);
2554                let imm4 = (src_idx as u32) << (shift - 1);
2555                let imm5 = imm5 | ((dest_idx as u32) << shift);
2556                sink.put4(
2557                    0b011_01110000_00000_0_0000_1_00000_00000
2558                        | (imm5 << 16)
2559                        | (imm4 << 11)
2560                        | (machreg_to_vec(rn) << 5)
2561                        | machreg_to_vec(rd.to_reg()),
2562                );
2563            }
2564            &Inst::VecRRPair { op, rd, rn } => {
2565                let bits_12_16 = match op {
2566                    VecPairOp::Addp => 0b11011,
2567                };
2568
2569                sink.put4(enc_vec_rr_pair(bits_12_16, rd, rn));
2570            }
2571            &Inst::VecRRRLong {
2572                rd,
2573                rn,
2574                rm,
2575                alu_op,
2576                high_half,
2577            } => {
2578                let (u, size, bit14) = match alu_op {
2579                    VecRRRLongOp::Smull8 => (0b0, 0b00, 0b1),
2580                    VecRRRLongOp::Smull16 => (0b0, 0b01, 0b1),
2581                    VecRRRLongOp::Smull32 => (0b0, 0b10, 0b1),
2582                    VecRRRLongOp::Umull8 => (0b1, 0b00, 0b1),
2583                    VecRRRLongOp::Umull16 => (0b1, 0b01, 0b1),
2584                    VecRRRLongOp::Umull32 => (0b1, 0b10, 0b1),
2585                };
2586                sink.put4(enc_vec_rrr_long(
2587                    high_half as u32,
2588                    u,
2589                    size,
2590                    bit14,
2591                    rm,
2592                    rn,
2593                    rd,
2594                ));
2595            }
2596            &Inst::VecRRRLongMod {
2597                rd,
2598                ri,
2599                rn,
2600                rm,
2601                alu_op,
2602                high_half,
2603            } => {
2604                debug_assert_eq!(rd.to_reg(), ri);
2605                let (u, size, bit14) = match alu_op {
2606                    VecRRRLongModOp::Umlal8 => (0b1, 0b00, 0b0),
2607                    VecRRRLongModOp::Umlal16 => (0b1, 0b01, 0b0),
2608                    VecRRRLongModOp::Umlal32 => (0b1, 0b10, 0b0),
2609                };
2610                sink.put4(enc_vec_rrr_long(
2611                    high_half as u32,
2612                    u,
2613                    size,
2614                    bit14,
2615                    rm,
2616                    rn,
2617                    rd,
2618                ));
2619            }
2620            &Inst::VecRRPairLong { op, rd, rn } => {
2621                let (u, size) = match op {
2622                    VecRRPairLongOp::Saddlp8 => (0b0, 0b0),
2623                    VecRRPairLongOp::Uaddlp8 => (0b1, 0b0),
2624                    VecRRPairLongOp::Saddlp16 => (0b0, 0b1),
2625                    VecRRPairLongOp::Uaddlp16 => (0b1, 0b1),
2626                };
2627
2628                sink.put4(enc_vec_rr_pair_long(u, size, rd, rn));
2629            }
2630            &Inst::VecRRR {
2631                rd,
2632                rn,
2633                rm,
2634                alu_op,
2635                size,
2636            } => {
2637                let (q, enc_size) = size.enc_size();
2638                let is_float = match alu_op {
2639                    VecALUOp::Fcmeq
2640                    | VecALUOp::Fcmgt
2641                    | VecALUOp::Fcmge
2642                    | VecALUOp::Fadd
2643                    | VecALUOp::Fsub
2644                    | VecALUOp::Fdiv
2645                    | VecALUOp::Fmax
2646                    | VecALUOp::Fmin
2647                    | VecALUOp::Fmul => true,
2648                    _ => false,
2649                };
2650
                let (top11, bit15_10) = match alu_op {
                    VecALUOp::Sqadd => (0b000_01110_00_1 | enc_size << 1, 0b000011),
                    VecALUOp::Sqsub => (0b000_01110_00_1 | enc_size << 1, 0b001011),
                    VecALUOp::Uqadd => (0b001_01110_00_1 | enc_size << 1, 0b000011),
                    VecALUOp::Uqsub => (0b001_01110_00_1 | enc_size << 1, 0b001011),
                    VecALUOp::Cmeq => (0b001_01110_00_1 | enc_size << 1, 0b100011),
                    VecALUOp::Cmge => (0b000_01110_00_1 | enc_size << 1, 0b001111),
                    VecALUOp::Cmgt => (0b000_01110_00_1 | enc_size << 1, 0b001101),
                    VecALUOp::Cmhi => (0b001_01110_00_1 | enc_size << 1, 0b001101),
                    VecALUOp::Cmhs => (0b001_01110_00_1 | enc_size << 1, 0b001111),
                    VecALUOp::Fcmeq => (0b000_01110_00_1, 0b111001),
                    VecALUOp::Fcmgt => (0b001_01110_10_1, 0b111001),
                    VecALUOp::Fcmge => (0b001_01110_00_1, 0b111001),
                    // The following logical instructions operate on bytes, so are not encoded differently
                    // for the different vector types.
                    VecALUOp::And => (0b000_01110_00_1, 0b000111),
                    VecALUOp::Bic => (0b000_01110_01_1, 0b000111),
                    VecALUOp::Orr => (0b000_01110_10_1, 0b000111),
                    VecALUOp::Eor => (0b001_01110_00_1, 0b000111),
                    VecALUOp::Umaxp => {
                        debug_assert_ne!(size, VectorSize::Size64x2);

                        (0b001_01110_00_1 | enc_size << 1, 0b101001)
                    }
                    VecALUOp::Add => (0b000_01110_00_1 | enc_size << 1, 0b100001),
                    VecALUOp::Sub => (0b001_01110_00_1 | enc_size << 1, 0b100001),
                    VecALUOp::Mul => {
                        debug_assert_ne!(size, VectorSize::Size64x2);
                        (0b000_01110_00_1 | enc_size << 1, 0b100111)
                    }
                    VecALUOp::Sshl => (0b000_01110_00_1 | enc_size << 1, 0b010001),
                    VecALUOp::Ushl => (0b001_01110_00_1 | enc_size << 1, 0b010001),
                    VecALUOp::Umin => {
                        debug_assert_ne!(size, VectorSize::Size64x2);

                        (0b001_01110_00_1 | enc_size << 1, 0b011011)
                    }
                    VecALUOp::Smin => {
                        debug_assert_ne!(size, VectorSize::Size64x2);

                        (0b000_01110_00_1 | enc_size << 1, 0b011011)
                    }
                    VecALUOp::Umax => {
                        debug_assert_ne!(size, VectorSize::Size64x2);

                        (0b001_01110_00_1 | enc_size << 1, 0b011001)
                    }
                    VecALUOp::Smax => {
                        debug_assert_ne!(size, VectorSize::Size64x2);

                        (0b000_01110_00_1 | enc_size << 1, 0b011001)
                    }
                    VecALUOp::Urhadd => {
                        debug_assert_ne!(size, VectorSize::Size64x2);

                        (0b001_01110_00_1 | enc_size << 1, 0b000101)
                    }
                    VecALUOp::Fadd => (0b000_01110_00_1, 0b110101),
                    VecALUOp::Fsub => (0b000_01110_10_1, 0b110101),
                    VecALUOp::Fdiv => (0b001_01110_00_1, 0b111111),
                    VecALUOp::Fmax => (0b000_01110_00_1, 0b111101),
                    VecALUOp::Fmin => (0b000_01110_10_1, 0b111101),
                    VecALUOp::Fmul => (0b001_01110_00_1, 0b110111),
                    VecALUOp::Addp => (0b000_01110_00_1 | enc_size << 1, 0b101111),
                    VecALUOp::Zip1 => (0b01001110_00_0 | enc_size << 1, 0b001110),
                    VecALUOp::Zip2 => (0b01001110_00_0 | enc_size << 1, 0b011110),
                    VecALUOp::Sqrdmulh => {
                        debug_assert!(
                            size.lane_size() == ScalarSize::Size16
                                || size.lane_size() == ScalarSize::Size32
                        );

                        (0b001_01110_00_1 | enc_size << 1, 0b101101)
                    }
                    VecALUOp::Uzp1 => (0b01001110_00_0 | enc_size << 1, 0b000110),
                    VecALUOp::Uzp2 => (0b01001110_00_0 | enc_size << 1, 0b010110),
                    VecALUOp::Trn1 => (0b01001110_00_0 | enc_size << 1, 0b001010),
                    VecALUOp::Trn2 => (0b01001110_00_0 | enc_size << 1, 0b011010),
                };
                let top11 = if is_float {
                    top11 | size.enc_float_size() << 1
                } else {
                    top11
                };
                sink.put4(enc_vec_rrr(top11 | q << 9, rm, bit15_10, rn, rd));
            }
            &Inst::VecRRRMod {
                rd,
                ri,
                rn,
                rm,
                alu_op,
                size,
            } => {
                debug_assert_eq!(rd.to_reg(), ri);
                let (q, _enc_size) = size.enc_size();

                let (top11, bit15_10) = match alu_op {
                    VecALUModOp::Bsl => (0b001_01110_01_1, 0b000111),
                    VecALUModOp::Fmla => {
                        (0b000_01110_00_1 | (size.enc_float_size() << 1), 0b110011)
                    }
                    VecALUModOp::Fmls => {
                        (0b000_01110_10_1 | (size.enc_float_size() << 1), 0b110011)
                    }
                };
                sink.put4(enc_vec_rrr(top11 | q << 9, rm, bit15_10, rn, rd));
            }
            &Inst::VecFmlaElem {
                rd,
                ri,
                rn,
                rm,
                alu_op,
                size,
                idx,
            } => {
                debug_assert_eq!(rd.to_reg(), ri);
                let idx = u32::from(idx);

                let (q, _size) = size.enc_size();
                let o2 = match alu_op {
                    VecALUModOp::Fmla => 0b0,
                    VecALUModOp::Fmls => 0b1,
                    _ => unreachable!(),
                };

                let (h, l) = match size {
                    VectorSize::Size32x4 => {
                        assert!(idx < 4);
                        (idx >> 1, idx & 1)
                    }
                    VectorSize::Size64x2 => {
                        assert!(idx < 2);
                        (idx, 0)
                    }
                    _ => unreachable!(),
                };

                let top11 = 0b000_011111_00 | (q << 9) | (size.enc_float_size() << 1) | l;
                let bit15_10 = 0b000100 | (o2 << 4) | (h << 1);
                sink.put4(enc_vec_rrr(top11, rm, bit15_10, rn, rd));
            }
            &Inst::VecLoadReplicate {
                rd,
                rn,
                size,
                flags,
            } => {
                let (q, size) = size.enc_size();

                if let Some(trap_code) = flags.trap_code() {
                    // Register the offset at which the actual load instruction starts.
                    sink.add_trap(trap_code);
                }

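                // LD1R: load a single element and replicate it to all lanes.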
                sink.put4(enc_ldst_vec(q, size, rn, rd));
            }
            &Inst::VecCSel { rd, rn, rm, cond } => {
                /* Emit this:
                      b.cond  else
                      mov     rd, rm
                      b       out
                     else:
                      mov     rd, rn
                     out:

                   Note: we could do better in the cases where rd == rn or rd == rm.
                */
                let else_label = sink.get_label();
                let out_label = sink.get_label();

                // b.cond else
                let br_else_offset = sink.cur_offset();
                sink.put4(enc_conditional_br(
                    BranchTarget::Label(else_label),
                    CondBrKind::Cond(cond),
                ));
                sink.use_label_at_offset(br_else_offset, else_label, LabelUse::Branch19);

                // mov rd, rm
                sink.put4(enc_vecmov(/* 16b = */ true, rd, rm));

                // b out
                let b_out_offset = sink.cur_offset();
                sink.use_label_at_offset(b_out_offset, out_label, LabelUse::Branch26);
                sink.add_uncond_branch(b_out_offset, b_out_offset + 4, out_label);
                sink.put4(enc_jump26(0b000101, 0 /* will be fixed up later */));

                // else:
                sink.bind_label(else_label, &mut state.ctrl_plane);

                // mov rd, rn
                sink.put4(enc_vecmov(/* 16b = */ true, rd, rn));

                // out:
                sink.bind_label(out_label, &mut state.ctrl_plane);
            }
            &Inst::MovToNZCV { rn } => {
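                // msr nzcv, xn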
                sink.put4(0xd51b4200 | machreg_to_gpr(rn));
            }
            &Inst::MovFromNZCV { rd } => {
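                // mrs xd, nzcv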
                sink.put4(0xd53b4200 | machreg_to_gpr(rd.to_reg()));
            }
            &Inst::Extend {
                rd,
                rn,
                signed: false,
                from_bits: 1,
                to_bits,
            } => {
                assert!(to_bits <= 64);
                // Reduce zero-extend-from-1-bit to:
                // - and rd, rn, #1
                // Note: this is special-cased because UBFX may take more cycles
                // than AND on smaller cores.
                let imml = ImmLogic::maybe_from_u64(1, I32).unwrap();
                Inst::AluRRImmLogic {
                    alu_op: ALUOp::And,
                    size: OperandSize::Size32,
                    rd,
                    rn,
                    imml,
                }
                .emit(sink, emit_info, state);
            }
            &Inst::Extend {
                rd,
                rn,
                signed: false,
                from_bits: 32,
                to_bits: 64,
            } => {
                let mov = Inst::Mov {
                    size: OperandSize::Size32,
                    rd,
                    rm: rn,
                };
                mov.emit(sink, emit_info, state);
            }
            &Inst::Extend {
                rd,
                rn,
                signed,
                from_bits,
                to_bits,
            } => {
                let (opc, size) = if signed {
                    (0b00, OperandSize::from_bits(to_bits))
                } else {
                    (0b10, OperandSize::Size32)
                };
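                // SBFM/UBFM with immr=0, imms=from_bits-1 extends the low
                // `from_bits` bits; e.g. sxtb is an alias of `sbfm rd, rn, #0, #7`.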
                sink.put4(enc_bfm(opc, size, rd, rn, 0, from_bits - 1));
            }
            &Inst::Jump { ref dest } => {
                let off = sink.cur_offset();
                // If the jump target is a label, record the use so a fixup can occur later.
                if let Some(l) = dest.as_label() {
                    sink.use_label_at_offset(off, l, LabelUse::Branch26);
                    sink.add_uncond_branch(off, off + 4, l);
                }
                // Emit the jump itself.
                sink.put4(enc_jump26(0b000101, dest.as_offset26_or_zero()));
            }
            &Inst::Args { .. } | &Inst::Rets { .. } => {
                // Nothing: this is a pseudoinstruction that serves
                // only to constrain registers at a certain point.
            }
            &Inst::Ret {} => {
                sink.put4(0xd65f03c0);
            }
            &Inst::AuthenticatedRet { key, is_hint } => {
                let (op2, is_hint) = match key {
                    APIKey::AZ => (0b100, true),
                    APIKey::ASP => (0b101, is_hint),
                    APIKey::BZ => (0b110, true),
                    APIKey::BSP => (0b111, is_hint),
                };

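                // The zero-modifier keys (AZ/BZ) have no RETA* form, so they
                // always take the hint path: an AUTI* hint followed by a plain RET.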
                if is_hint {
                    sink.put4(key.enc_auti_hint());
                    Inst::Ret {}.emit(sink, emit_info, state);
                } else {
                    sink.put4(0xd65f0bff | (op2 << 9)); // reta{key}
                }
            }
            &Inst::Call { ref info } => {
                let user_stack_map = state.take_stack_map();
                sink.add_reloc(Reloc::Arm64Call, &info.dest, 0);
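                // `bl 0`; the Arm64Call relocation above fills in the real target.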
                sink.put4(enc_jump26(0b100101, 0));
                if let Some(s) = user_stack_map {
                    let offset = sink.cur_offset();
                    sink.push_user_stack_map(state, offset, s);
                }
                sink.add_call_site();

                if info.callee_pop_size > 0 {
                    let callee_pop_size =
                        i32::try_from(info.callee_pop_size).expect("callee popped more than 2GB");
                    for inst in AArch64MachineDeps::gen_sp_reg_adjust(-callee_pop_size) {
                        inst.emit(sink, emit_info, state);
                    }
                }
            }
            &Inst::CallInd { ref info } => {
                let user_stack_map = state.take_stack_map();
                sink.put4(
                    0b1101011_0001_11111_000000_00000_00000 | (machreg_to_gpr(info.dest) << 5),
                );
                if let Some(s) = user_stack_map {
                    let offset = sink.cur_offset();
                    sink.push_user_stack_map(state, offset, s);
                }
                sink.add_call_site();

                if info.callee_pop_size > 0 {
                    let callee_pop_size =
                        i32::try_from(info.callee_pop_size).expect("callee popped more than 2GB");
                    for inst in AArch64MachineDeps::gen_sp_reg_adjust(-callee_pop_size) {
                        inst.emit(sink, emit_info, state);
                    }
                }
            }
            &Inst::ReturnCall { ref info } => {
                emit_return_call_common_sequence(sink, emit_info, state, info);

                // Note: this is not `Inst::Jump { .. }.emit(..)` because we
                // have different metadata in this case: we don't have a label
                // for the target, but rather a function relocation.
                sink.add_reloc(Reloc::Arm64Call, &info.dest, 0);
                sink.put4(enc_jump26(0b000101, 0));
                sink.add_call_site();

                // `emit_return_call_common_sequence` emits an island if
                // necessary, so we can safely disable the worst-case-size check
                // in this case.
                start_off = sink.cur_offset();
            }
            &Inst::ReturnCallInd { ref info } => {
                emit_return_call_common_sequence(sink, emit_info, state, info);

                Inst::IndirectBr {
                    rn: info.dest,
                    targets: vec![],
                }
                .emit(sink, emit_info, state);
                sink.add_call_site();

                // `emit_return_call_common_sequence` emits an island if
                // necessary, so we can safely disable the worst-case-size check
                // in this case.
                start_off = sink.cur_offset();
            }
            &Inst::CondBr {
                taken,
                not_taken,
                kind,
            } => {
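                // A two-way conditional branch lowers to `b.<cond> taken` plus
                // `b not_taken`. Registering the inverted form with the buffer
                // lets its branch-folding logic flip or elide either part.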
                // Conditional part first.
                let cond_off = sink.cur_offset();
                if let Some(l) = taken.as_label() {
                    sink.use_label_at_offset(cond_off, l, LabelUse::Branch19);
                    let inverted = enc_conditional_br(taken, kind.invert()).to_le_bytes();
                    sink.add_cond_branch(cond_off, cond_off + 4, l, &inverted[..]);
                }
                sink.put4(enc_conditional_br(taken, kind));

                // Unconditional part next.
                let uncond_off = sink.cur_offset();
                if let Some(l) = not_taken.as_label() {
                    sink.use_label_at_offset(uncond_off, l, LabelUse::Branch26);
                    sink.add_uncond_branch(uncond_off, uncond_off + 4, l);
                }
                sink.put4(enc_jump26(0b000101, not_taken.as_offset26_or_zero()));
            }
            &Inst::TestBitAndBranch {
                taken,
                not_taken,
                kind,
                rn,
                bit,
            } => {
                // Emit the conditional branch first
                let cond_off = sink.cur_offset();
                if let Some(l) = taken.as_label() {
                    sink.use_label_at_offset(cond_off, l, LabelUse::Branch14);
                    let inverted =
                        enc_test_bit_and_branch(kind.complement(), taken, rn, bit).to_le_bytes();
                    sink.add_cond_branch(cond_off, cond_off + 4, l, &inverted[..]);
                }
                sink.put4(enc_test_bit_and_branch(kind, taken, rn, bit));

                // Unconditional part next.
                let uncond_off = sink.cur_offset();
                if let Some(l) = not_taken.as_label() {
                    sink.use_label_at_offset(uncond_off, l, LabelUse::Branch26);
                    sink.add_uncond_branch(uncond_off, uncond_off + 4, l);
                }
                sink.put4(enc_jump26(0b000101, not_taken.as_offset26_or_zero()));
            }
            &Inst::TrapIf { kind, trap_code } => {
                let label = sink.defer_trap(trap_code);
                // condbr KIND, LABEL
                let off = sink.cur_offset();
                sink.put4(enc_conditional_br(BranchTarget::Label(label), kind));
                sink.use_label_at_offset(off, label, LabelUse::Branch19);
            }
            &Inst::IndirectBr { rn, .. } => {
                sink.put4(enc_br(rn));
            }
            &Inst::Nop0 => {}
            &Inst::Nop4 => {
                sink.put4(0xd503201f);
            }
            &Inst::Brk => {
                sink.put4(0xd4200000);
            }
            &Inst::Udf { trap_code } => {
                sink.add_trap(trap_code);
                sink.put_data(Inst::TRAP_OPCODE);
            }
            &Inst::Adr { rd, off } => {
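                // The ADR immediate is a 21-bit signed byte offset (+/-1 MiB).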
                assert!(off > -(1 << 20));
                assert!(off < (1 << 20));
                sink.put4(enc_adr(off, rd));
            }
            &Inst::Adrp { rd, off } => {
                assert!(off > -(1 << 20));
                assert!(off < (1 << 20));
                sink.put4(enc_adrp(off, rd));
            }
            &Inst::Word4 { data } => {
                sink.put4(data);
            }
            &Inst::Word8 { data } => {
                sink.put8(data);
            }
            &Inst::JTSequence {
                ridx,
                rtmp1,
                rtmp2,
                default,
                ref targets,
                ..
            } => {
                // This sequence is *one* instruction in the vcode, and is expanded only here at
                // emission time, because we cannot allow the regalloc to insert spills/reloads in
                // the middle; we depend on hardcoded PC-rel addressing below.
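                //
                // Schematically, the emitted sequence is:
                //
                //   b.hs default
                //   csel rtmp2, xzr, ridx, hs
                //   csdb
                //   adr rtmp1, <jt>
                //   ldrsw rtmp2, [rtmp1, rtmp2, uxtw #2]
                //   add rtmp1, rtmp1, rtmp2
                //   br rtmp1
                // <jt>:
                //   .word target0 - <jt>
                //   ...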

                // Branch to the default target when the condition code from the
                // prior comparison indicates that the index is out of range.
                let br =
                    enc_conditional_br(BranchTarget::Label(default), CondBrKind::Cond(Cond::Hs));

                // No need to inform the sink's branch folding logic about this branch, because it
                // will not be merged with any other branch, flipped, or elided (it is not preceded
                // or succeeded by any other branch). Just emit it with the label use.
                let default_br_offset = sink.cur_offset();
                sink.use_label_at_offset(default_br_offset, default, LabelUse::Branch19);
                sink.put4(br);

                // Overwrite the index with a zero when the above
                // branch misspeculates (Spectre mitigation). Save the
                // resulting index in rtmp2.
                let inst = Inst::CSel {
                    rd: rtmp2,
                    cond: Cond::Hs,
                    rn: zero_reg(),
                    rm: ridx,
                };
                inst.emit(sink, emit_info, state);
                // Prevent any data value speculation.
                Inst::Csdb.emit(sink, emit_info, state);

                // Load address of jump table
                let inst = Inst::Adr { rd: rtmp1, off: 16 };
                inst.emit(sink, emit_info, state);
                // Load value out of jump table
                let inst = Inst::SLoad32 {
                    rd: rtmp2,
                    mem: AMode::reg_plus_reg_scaled_extended(
                        rtmp1.to_reg(),
                        rtmp2.to_reg(),
                        ExtendOp::UXTW,
                    ),
                    flags: MemFlags::trusted(),
                };
                inst.emit(sink, emit_info, state);
                // Add base of jump table to jump-table-sourced block offset
                let inst = Inst::AluRRR {
                    alu_op: ALUOp::Add,
                    size: OperandSize::Size64,
                    rd: rtmp1,
                    rn: rtmp1.to_reg(),
                    rm: rtmp2.to_reg(),
                };
                inst.emit(sink, emit_info, state);
                // Branch to computed address. (`targets` here is only used for successor queries
                // and is not needed for emission.)
                let inst = Inst::IndirectBr {
                    rn: rtmp1.to_reg(),
                    targets: vec![],
                };
                inst.emit(sink, emit_info, state);
                // Emit jump table (table of 32-bit offsets).
                let jt_off = sink.cur_offset();
                for &target in targets.iter() {
                    let word_off = sink.cur_offset();
                    // off_into_table is an addend here embedded in the label to be later patched
                    // at the end of codegen. The offset is initially relative to this jump table
                    // entry; with the extra addend, it'll be relative to the jump table's start,
                    // after patching.
                    let off_into_table = word_off - jt_off;
                    sink.use_label_at_offset(word_off, target, LabelUse::PCRel32);
                    sink.put4(off_into_table);
                }

                // Lowering produces an EmitIsland before using a JTSequence, so we can safely
                // disable the worst-case-size check in this case.
                start_off = sink.cur_offset();
            }
            &Inst::LoadExtName {
                rd,
                ref name,
                offset,
            } => {
                if emit_info.0.is_pic() {
                    // See this Compiler Explorer example for the variations of this
                    // with and without BTI & PAUTH:
                    // https://godbolt.org/z/ncqjbbvvn
                    //
                    // Emit the following code:
                    //   adrp    rd, :got:X
                    //   ldr     rd, [rd, :got_lo12:X]

                    // adrp rd, symbol
                    sink.add_reloc(Reloc::Aarch64AdrGotPage21, &**name, 0);
                    let inst = Inst::Adrp { rd, off: 0 };
                    inst.emit(sink, emit_info, state);

                    // ldr rd, [rd, :got_lo12:X]
                    sink.add_reloc(Reloc::Aarch64Ld64GotLo12Nc, &**name, 0);
                    let inst = Inst::ULoad64 {
                        rd,
                        mem: AMode::reg(rd.to_reg()),
                        flags: MemFlags::trusted(),
                    };
                    inst.emit(sink, emit_info, state);
                } else {
                    // With absolute offsets we set up a load from a preallocated space, and then jump
                    // over it.
                    //
                    // Emit the following code:
                    //   ldr     rd, #8
                    //   b       #0x10
                    //   <8 byte space>

                    let inst = Inst::ULoad64 {
                        rd,
                        mem: AMode::Label {
                            label: MemLabel::PCRel(8),
                        },
                        flags: MemFlags::trusted(),
                    };
                    inst.emit(sink, emit_info, state);
                    let inst = Inst::Jump {
                        dest: BranchTarget::ResolvedOffset(12),
                    };
                    inst.emit(sink, emit_info, state);
                    sink.add_reloc(Reloc::Abs8, &**name, offset);
                    sink.put8(0);
                }
            }
            &Inst::LoadAddr { rd, ref mem } => {
                let mem = mem.clone();
                let (mem_insts, mem) = mem_finalize(Some(sink), &mem, I8, state);
                for inst in mem_insts.into_iter() {
                    inst.emit(sink, emit_info, state);
                }

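                // After `mem_finalize`, only three amode shapes remain: base +
                // extended index, base + simm9, and base + scaled uimm12.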
                let (reg, index_reg, offset) = match mem {
                    AMode::RegExtended { rn, rm, extendop } => {
                        let r = rn;
                        (r, Some((rm, extendop)), 0)
                    }
                    AMode::Unscaled { rn, simm9 } => {
                        let r = rn;
                        (r, None, simm9.value())
                    }
                    AMode::UnsignedOffset { rn, uimm12 } => {
                        let r = rn;
                        (r, None, uimm12.value() as i32)
                    }
                    _ => panic!("Unsupported case for LoadAddr: {mem:?}"),
                };
                let abs_offset = if offset < 0 {
                    -offset as u64
                } else {
                    offset as u64
                };
                let alu_op = if offset < 0 { ALUOp::Sub } else { ALUOp::Add };

                if let Some((idx, extendop)) = index_reg {
                    let add = Inst::AluRRRExtend {
                        alu_op: ALUOp::Add,
                        size: OperandSize::Size64,
                        rd,
                        rn: reg,
                        rm: idx,
                        extendop,
                    };

                    add.emit(sink, emit_info, state);
                } else if offset == 0 {
                    if reg != rd.to_reg() {
                        let mov = Inst::Mov {
                            size: OperandSize::Size64,
                            rd,
                            rm: reg,
                        };

                        mov.emit(sink, emit_info, state);
                    }
                } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) {
                    let add = Inst::AluRRImm12 {
                        alu_op,
                        size: OperandSize::Size64,
                        rd,
                        rn: reg,
                        imm12,
                    };
                    add.emit(sink, emit_info, state);
                } else {
                    // Use `tmp2` here: `reg` may be `spilltmp` if the `AMode` on this instruction
                    // was initially an `SPOffset`. Assert that `tmp2` is truly free to use. Note
                    // that no other instructions will be inserted here (we're emitting directly),
                    // and a live range of `tmp2` should not span this instruction, so this use
                    // should otherwise be correct.
                    debug_assert!(rd.to_reg() != tmp2_reg());
                    debug_assert!(reg != tmp2_reg());
                    let tmp = writable_tmp2_reg();
                    for insn in Inst::load_constant(tmp, abs_offset, &mut |_| tmp).into_iter() {
                        insn.emit(sink, emit_info, state);
                    }
                    let add = Inst::AluRRR {
                        alu_op,
                        size: OperandSize::Size64,
                        rd,
                        rn: reg,
                        rm: tmp.to_reg(),
                    };
                    add.emit(sink, emit_info, state);
                }
            }
            &Inst::Paci { key } => {
                let (crm, op2) = match key {
                    APIKey::AZ => (0b0011, 0b000),
                    APIKey::ASP => (0b0011, 0b001),
                    APIKey::BZ => (0b0011, 0b010),
                    APIKey::BSP => (0b0011, 0b011),
                };

                sink.put4(0xd503211f | (crm << 8) | (op2 << 5));
            }
            &Inst::Xpaclri => sink.put4(0xd50320ff),
            &Inst::Bti { targets } => {
                let targets = match targets {
                    BranchTargetType::None => 0b00,
                    BranchTargetType::C => 0b01,
                    BranchTargetType::J => 0b10,
                    BranchTargetType::JC => 0b11,
                };

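                // BTI lives in the HINT space; `targets` selects the none/c/j/jc variant.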
                sink.put4(0xd503241f | targets << 6);
            }
            &Inst::EmitIsland { needed_space } => {
                if sink.island_needed(needed_space + 4) {
                    let jump_around_label = sink.get_label();
                    let jmp = Inst::Jump {
                        dest: BranchTarget::Label(jump_around_label),
                    };
                    jmp.emit(sink, emit_info, state);
                    sink.emit_island(needed_space + 4, &mut state.ctrl_plane);
                    sink.bind_label(jump_around_label, &mut state.ctrl_plane);
                }
            }

            &Inst::ElfTlsGetAddr {
                ref symbol,
                rd,
                tmp,
            } => {
                assert_eq!(xreg(0), rd.to_reg());

                // See the original proposal for TLSDESC.
                // http://www.fsfla.org/~lxoliva/writeups/TLS/paper-lk2006.pdf
                //
                // Implement the TLSDESC instruction sequence:
                //   adrp x0, :tlsdesc:tlsvar
                //   ldr  tmp, [x0, :tlsdesc_lo12:tlsvar]
                //   add  x0, x0, :tlsdesc_lo12:tlsvar
                //   blr  tmp
                //   mrs  tmp, tpidr_el0
                //   add  x0, x0, tmp
                //
                // This is the instruction sequence that GCC emits for ELF GD TLS
                // relocations on aarch64; see: https://gcc.godbolt.org/z/e4j7MdErh

                // adrp x0, :tlsdesc:tlsvar
                sink.add_reloc(Reloc::Aarch64TlsDescAdrPage21, &**symbol, 0);
                Inst::Adrp { rd, off: 0 }.emit(sink, emit_info, state);

                // ldr  tmp, [x0, :tlsdesc_lo12:tlsvar]
                sink.add_reloc(Reloc::Aarch64TlsDescLd64Lo12, &**symbol, 0);
                Inst::ULoad64 {
                    rd: tmp,
                    mem: AMode::reg(rd.to_reg()),
                    flags: MemFlags::trusted(),
                }
                .emit(sink, emit_info, state);

                // add x0, x0, :tlsdesc_lo12:tlsvar
                sink.add_reloc(Reloc::Aarch64TlsDescAddLo12, &**symbol, 0);
                Inst::AluRRImm12 {
                    alu_op: ALUOp::Add,
                    size: OperandSize::Size64,
                    rd,
                    rn: rd.to_reg(),
                    imm12: Imm12::maybe_from_u64(0).unwrap(),
                }
                .emit(sink, emit_info, state);

                // blr tmp
                sink.add_reloc(Reloc::Aarch64TlsDescCall, &**symbol, 0);
                Inst::CallInd {
                    info: crate::isa::Box::new(CallInfo::empty(tmp.to_reg(), CallConv::SystemV)),
                }
                .emit(sink, emit_info, state);

                // mrs tmp, tpidr_el0
                sink.put4(0xd53bd040 | machreg_to_gpr(tmp.to_reg()));

                // add x0, x0, tmp
                Inst::AluRRR {
                    alu_op: ALUOp::Add,
                    size: OperandSize::Size64,
                    rd,
                    rn: rd.to_reg(),
                    rm: tmp.to_reg(),
                }
                .emit(sink, emit_info, state);
            }

            &Inst::MachOTlsGetAddr { ref symbol, rd } => {
                // Each thread-local variable gets a descriptor whose first xword
                // is a pointer to a function that takes the descriptor's address
                // in x0; when that function returns, x0 contains the address of
                // the thread-local variable.
                //
                // What we want to emit is basically:
                //
                // adrp x0, <label>@TLVPPAGE  ; Load the address of the page of the thread local variable pointer (TLVP)
                // ldr x0, [x0, <label>@TLVPPAGEOFF] ; Load the descriptor's address into x0
                // ldr x1, [x0] ; Load the function pointer (the first part of the descriptor)
                // blr x1 ; Call the function pointer with the descriptor address in x0
                // ; x0 now contains the TLV address

                assert_eq!(xreg(0), rd.to_reg());
                let rtmp = writable_xreg(1);

                // adrp x0, <label>@TLVPPAGE
                sink.add_reloc(Reloc::MachOAarch64TlsAdrPage21, symbol, 0);
                sink.put4(0x90000000);

                // ldr x0, [x0, <label>@TLVPPAGEOFF]
                sink.add_reloc(Reloc::MachOAarch64TlsAdrPageOff12, symbol, 0);
                sink.put4(0xf9400000);

                // load [x0] into temp register
                Inst::ULoad64 {
                    rd: rtmp,
                    mem: AMode::reg(rd.to_reg()),
                    flags: MemFlags::trusted(),
                }
                .emit(sink, emit_info, state);

                // call function pointer in temp register
                Inst::CallInd {
                    info: crate::isa::Box::new(CallInfo::empty(
                        rtmp.to_reg(),
                        CallConv::AppleAarch64,
                    )),
                }
                .emit(sink, emit_info, state);
            }

            &Inst::Unwind { ref inst } => {
                sink.add_unwind(inst.clone());
            }

            &Inst::DummyUse { .. } => {}

            &Inst::StackProbeLoop { start, end, step } => {
                assert!(emit_info.0.enable_probestack());

                // The loop generated here uses `start` as a counter register,
                // counting downward until its negation reaches `end`. In other
                // words, `start` is an offset from `sp` that we're testing, and
                // `end` is the maximum size we need to test. The loop looks like:
                //
                //      loop_start:
                //          sub start, start, #step
                //          stur xzr, [sp, start]
                //          cmn start, end
                //          b.gt loop_start
                //      loop_end:
                //
                // Note that this loop cannot use the spilltmp and tmp2
                // registers, as those are currently used as inputs to this
                // loop when generating the instruction. This means that some
                // more flavorful address modes and lowerings need to be
                // avoided.
                //
                // Perhaps someone more clever than I can figure out how to use
                // `subs` or the like and skip the `cmn`, but I can't figure it
                // out at this time.

                let loop_start = sink.get_label();
                sink.bind_label(loop_start, &mut state.ctrl_plane);

                Inst::AluRRImm12 {
                    alu_op: ALUOp::Sub,
                    size: OperandSize::Size64,
                    rd: start,
                    rn: start.to_reg(),
                    imm12: step,
                }
                .emit(sink, emit_info, state);
                Inst::Store32 {
                    rd: regs::zero_reg(),
                    mem: AMode::RegReg {
                        rn: regs::stack_reg(),
                        rm: start.to_reg(),
                    },
                    flags: MemFlags::trusted(),
                }
                .emit(sink, emit_info, state);
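                // `cmn start, end` is `adds xzr, start, end`: set the flags for
                // the conditional branch below without writing a register.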
                Inst::AluRRR {
                    alu_op: ALUOp::AddS,
                    size: OperandSize::Size64,
                    rd: regs::writable_zero_reg(),
                    rn: start.to_reg(),
                    rm: end,
                }
                .emit(sink, emit_info, state);

                let loop_end = sink.get_label();
                Inst::CondBr {
                    taken: BranchTarget::Label(loop_start),
                    not_taken: BranchTarget::Label(loop_end),
                    kind: CondBrKind::Cond(Cond::Gt),
                }
                .emit(sink, emit_info, state);
                sink.bind_label(loop_end, &mut state.ctrl_plane);
            }
        }

        let end_off = sink.cur_offset();
        debug_assert!(
            (end_off - start_off) <= Inst::worst_case_size()
                || matches!(self, Inst::EmitIsland { .. }),
            "Worst-case size exceeded for {:?}: {}",
            self,
            end_off - start_off
        );

        state.clear_post_insn();
    }

    fn pretty_print_inst(&self, state: &mut Self::State) -> String {
        self.print_with_state(state)
    }
}

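/// Emits the code shared by `ReturnCall` and `ReturnCallInd`: restore
/// clobbered registers, pop the frame (including any tail-call argument
/// over-allocation), and re-authenticate the return address if required.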
fn emit_return_call_common_sequence<T>(
    sink: &mut MachBuffer<Inst>,
    emit_info: &EmitInfo,
    state: &mut EmitState,
    info: &ReturnCallInfo<T>,
) {
    for inst in
        AArch64MachineDeps::gen_clobber_restore(CallConv::Tail, &emit_info.0, state.frame_layout())
    {
        inst.emit(sink, emit_info, state);
    }

    let setup_area_size = state.frame_layout().setup_area_size;
    if setup_area_size > 0 {
        // N.B.: sp is already adjusted to the appropriate place by the
        // clobber-restore code (which also frees the fixed frame). Hence, there
        // is no need for the usual `mov sp, fp` here.

        // `ldp fp, lr, [sp], #16`
        Inst::LoadP64 {
            rt: writable_fp_reg(),
            rt2: writable_link_reg(),
            mem: PairAMode::SPPostIndexed {
                // TODO: we could fold the increment for incoming_args_diff here, as long as that
                // value is less than 502*8, by adding it to `setup_area_size`.
                // https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/LDP--Load-Pair-of-Registers-
                simm7: SImm7Scaled::maybe_from_i64(i64::from(setup_area_size), types::I64).unwrap(),
            },
            flags: MemFlags::trusted(),
        }
        .emit(sink, emit_info, state);
    }

    // Adjust SP to account for the possible over-allocation in the prologue.
    let incoming_args_diff = state.frame_layout().tail_args_size - info.new_stack_arg_size;
    if incoming_args_diff > 0 {
        for inst in
            AArch64MachineDeps::gen_sp_reg_adjust(i32::try_from(incoming_args_diff).unwrap())
        {
            inst.emit(sink, emit_info, state);
        }
    }

    if let Some(key) = info.key {
        sink.put4(key.enc_auti_hint());
    }
}