1use crate::binemit::{Addend, CodeOffset, Reloc};
4use crate::ir::types::{F16, F32, F64, F128, I8, I8X16, I16, I32, I64, I128};
5use crate::ir::{MemFlags, Type, types};
6use crate::isa::{CallConv, FunctionAlignment};
7use crate::machinst::*;
8use crate::{CodegenError, CodegenResult, settings};
9
10use crate::machinst::{PrettyPrint, Reg, RegClass, Writable};
11
12use alloc::vec::Vec;
13use core::slice;
14use smallvec::{SmallVec, smallvec};
15use std::fmt::Write;
16use std::string::{String, ToString};
17
18pub(crate) mod regs;
19pub(crate) use self::regs::*;
20pub mod imms;
21pub use self::imms::*;
22pub mod args;
23pub use self::args::*;
24pub mod emit;
25pub(crate) use self::emit::*;
26use crate::isa::aarch64::abi::AArch64MachineDeps;
27
28pub(crate) mod unwind;
29
30#[cfg(test)]
31mod emit_tests;
32
33pub use crate::isa::aarch64::lower::isle::generated_code::{
37 ALUOp, ALUOp3, AMode, APIKey, AtomicRMWLoopOp, AtomicRMWOp, BitOp, BranchTargetType, FPUOp1,
38 FPUOp2, FPUOp3, FpuRoundMode, FpuToIntOp, IntToFpuOp, MInst as Inst, MoveWideOp, VecALUModOp,
39 VecALUOp, VecExtendOp, VecLanesOp, VecMisc2, VecPairOp, VecRRLongOp, VecRRNarrowOp,
40 VecRRPairLongOp, VecRRRLongModOp, VecRRRLongOp, VecShiftImmModOp, VecShiftImmOp,
41};
42
/// A floating-point/vector unit operation taking one register source and a
/// right-shift immediate.
#[derive(Copy, Clone, Debug)]
pub enum FPUOpRI {
    /// Unsigned shift right, 32-bit lane.
    UShr32(FPURightShiftImm),
    /// Unsigned shift right, 64-bit lane.
    UShr64(FPURightShiftImm),
}
51
/// A floating-point/vector unit operation taking one register source, a
/// left-shift immediate, and a register destination that is also read
/// (modified in place).
#[derive(Copy, Clone, Debug)]
pub enum FPUOpRIMod {
    /// Shift left and insert, 32-bit lane.
    Sli32(FPULeftShiftImm),
    /// Shift left and insert, 64-bit lane.
    Sli64(FPULeftShiftImm),
}
62
63impl BitOp {
64 pub fn op_str(&self) -> &'static str {
66 match self {
67 BitOp::RBit => "rbit",
68 BitOp::Clz => "clz",
69 BitOp::Cls => "cls",
70 BitOp::Rev16 => "rev16",
71 BitOp::Rev32 => "rev32",
72 BitOp::Rev64 => "rev64",
73 }
74 }
75}
76
/// Out-of-line payload for return-call (tail-call) instructions.
#[derive(Clone, Debug)]
pub struct ReturnCallInfo<T> {
    /// The call destination (direct target or, for the indirect form, a
    /// register).
    pub dest: T,
    /// Arguments to the callee, each paired with its fixed register.
    pub uses: CallArgList,
    /// The size of the stack-argument area of the new (callee) frame, in
    /// bytes.
    pub new_stack_arg_size: u32,
    /// Key to use when authenticating the return address, if any.
    /// NOTE(review): presumably a pointer-authentication key — confirm
    /// against the emit code for return calls.
    pub key: Option<APIKey>,
}
92
/// Count how many of the low `num_half_words` 16-bit chunks of `value` are
/// zero. Used below to decide whether a constant is cheaper to build on an
/// all-zeros (`movz`) or all-ones (`movn`) background.
fn count_zero_half_words(value: u64, num_half_words: u8) -> usize {
    (0..num_half_words)
        .filter(|i| (value >> (16 * u32::from(*i))) & 0xffff == 0)
        .count()
}
104
impl Inst {
    /// Create instructions that materialize the 64-bit constant `value`
    /// into `rd`, using at most four instructions.
    pub fn load_constant(rd: Writable<Reg>, value: u64) -> SmallVec<[Inst; 4]> {
        if let Some(imm) = MoveWideConst::maybe_from_u64(value) {
            // One instruction: the value fits a single `movz` immediate.
            smallvec![Inst::MovWide {
                op: MoveWideOp::MovZ,
                rd,
                imm,
                size: OperandSize::Size64
            }]
        } else if let Some(imm) = MoveWideConst::maybe_from_u64(!value) {
            // One instruction: the bitwise complement fits a single `movn`
            // immediate.
            smallvec![Inst::MovWide {
                op: MoveWideOp::MovN,
                rd,
                imm,
                size: OperandSize::Size64
            }]
        } else if let Some(imml) = ImmLogic::maybe_from_u64(value, I64) {
            // One instruction: the value is a valid logical immediate, so
            // materialize it via `orr rd, xzr, #imm`.
            smallvec![Inst::AluRRImmLogic {
                alu_op: ALUOp::Orr,
                size: OperandSize::Size64,
                rd,
                rn: zero_reg(),
                imml,
            }]
        } else {
            let mut insts = smallvec![];

            // If the upper 32 bits are zero, a 32-bit-sized first move
            // zero-extends implicitly, so only two half-words matter.
            // `negated` is the complement restricted to those half-words.
            let (num_half_words, size, negated) = if value >> 32 == 0 {
                (2, OperandSize::Size32, (!value << 32) >> 32)
            } else {
                (4, OperandSize::Size64, !value)
            };

            // Start from an all-ones background (`movn`) when the inverted
            // value has strictly more zero half-words: that minimizes the
            // number of follow-up `movk` patches.
            let first_is_inverted = count_zero_half_words(negated, num_half_words)
                > count_zero_half_words(value, num_half_words);

            // Half-words already equal to the background pattern need no
            // explicit instruction.
            let ignored_halfword = if first_is_inverted { 0xffff } else { 0 };

            // Collect (index, half-word) pairs that must be written.
            let halfwords: SmallVec<[_; 4]> = (0..num_half_words)
                .filter_map(|i| {
                    let imm16 = (value >> (16 * i)) & 0xffff;
                    if imm16 == ignored_halfword {
                        None
                    } else {
                        Some((i, imm16))
                    }
                })
                .collect();

            let mut prev_result = None;
            for (i, imm16) in halfwords {
                let shift = i * 16;

                if let Some(rn) = prev_result {
                    // Every instruction after the first patches the
                    // previous partial result with `movk`.
                    let imm = MoveWideConst::maybe_with_shift(imm16 as u16, shift).unwrap();
                    insts.push(Inst::MovK { rd, rn, imm, size });
                } else {
                    if first_is_inverted {
                        // `movn` writes the complement of its immediate, so
                        // emit the inverted half-word.
                        let imm =
                            MoveWideConst::maybe_with_shift(((!imm16) & 0xffff) as u16, shift)
                                .unwrap();
                        insts.push(Inst::MovWide {
                            op: MoveWideOp::MovN,
                            rd,
                            imm,
                            size,
                        });
                    } else {
                        let imm = MoveWideConst::maybe_with_shift(imm16 as u16, shift).unwrap();
                        insts.push(Inst::MovWide {
                            op: MoveWideOp::MovZ,
                            rd,
                            imm,
                            size,
                        });
                    }
                }

                prev_result = Some(rd.to_reg());
            }

            // The single-instruction fast paths above cover all values
            // whose half-words all match a background pattern, so at least
            // one instruction must have been emitted here.
            assert!(prev_result.is_some());

            insts
        }
    }

    /// Generic constructor for a load of a value of type `ty` from `mem`
    /// into `into_reg` (integer loads are zero-extending).
    pub fn gen_load(into_reg: Writable<Reg>, mem: AMode, ty: Type, flags: MemFlags) -> Inst {
        match ty {
            I8 => Inst::ULoad8 {
                rd: into_reg,
                mem,
                flags,
            },
            I16 => Inst::ULoad16 {
                rd: into_reg,
                mem,
                flags,
            },
            I32 => Inst::ULoad32 {
                rd: into_reg,
                mem,
                flags,
            },
            I64 => Inst::ULoad64 {
                rd: into_reg,
                mem,
                flags,
            },
            _ => {
                // Float and vector types go through the FPU load forms,
                // selected by bit width.
                if ty.is_vector() || ty.is_float() {
                    let bits = ty_bits(ty);
                    let rd = into_reg;

                    match bits {
                        128 => Inst::FpuLoad128 { rd, mem, flags },
                        64 => Inst::FpuLoad64 { rd, mem, flags },
                        32 => Inst::FpuLoad32 { rd, mem, flags },
                        16 => Inst::FpuLoad16 { rd, mem, flags },
                        _ => unimplemented!("gen_load({})", ty),
                    }
                } else {
                    unimplemented!("gen_load({})", ty);
                }
            }
        }
    }

    /// Generic constructor for a store of a value of type `ty` from
    /// `from_reg` to `mem`.
    pub fn gen_store(mem: AMode, from_reg: Reg, ty: Type, flags: MemFlags) -> Inst {
        match ty {
            I8 => Inst::Store8 {
                rd: from_reg,
                mem,
                flags,
            },
            I16 => Inst::Store16 {
                rd: from_reg,
                mem,
                flags,
            },
            I32 => Inst::Store32 {
                rd: from_reg,
                mem,
                flags,
            },
            I64 => Inst::Store64 {
                rd: from_reg,
                mem,
                flags,
            },
            _ => {
                // Float and vector types go through the FPU store forms,
                // selected by bit width.
                if ty.is_vector() || ty.is_float() {
                    let bits = ty_bits(ty);
                    let rd = from_reg;

                    match bits {
                        128 => Inst::FpuStore128 { rd, mem, flags },
                        64 => Inst::FpuStore64 { rd, mem, flags },
                        32 => Inst::FpuStore32 { rd, mem, flags },
                        16 => Inst::FpuStore16 { rd, mem, flags },
                        _ => unimplemented!("gen_store({})", ty),
                    }
                } else {
                    unimplemented!("gen_store({})", ty);
                }
            }
        }
    }

    /// If `self` is a simple load or store, the type of the value it
    /// accesses in memory; `None` otherwise. 128-bit accesses are reported
    /// as `I8X16`.
    pub fn mem_type(&self) -> Option<Type> {
        match self {
            Inst::ULoad8 { .. } => Some(I8),
            Inst::SLoad8 { .. } => Some(I8),
            Inst::ULoad16 { .. } => Some(I16),
            Inst::SLoad16 { .. } => Some(I16),
            Inst::ULoad32 { .. } => Some(I32),
            Inst::SLoad32 { .. } => Some(I32),
            Inst::ULoad64 { .. } => Some(I64),
            Inst::FpuLoad16 { .. } => Some(F16),
            Inst::FpuLoad32 { .. } => Some(F32),
            Inst::FpuLoad64 { .. } => Some(F64),
            Inst::FpuLoad128 { .. } => Some(I8X16),
            Inst::Store8 { .. } => Some(I8),
            Inst::Store16 { .. } => Some(I16),
            Inst::Store32 { .. } => Some(I32),
            Inst::Store64 { .. } => Some(I64),
            Inst::FpuStore16 { .. } => Some(F16),
            Inst::FpuStore32 { .. } => Some(F32),
            Inst::FpuStore64 { .. } => Some(F64),
            Inst::FpuStore128 { .. } => Some(I8X16),
            _ => None,
        }
    }
}
318
319fn memarg_operands(memarg: &mut AMode, collector: &mut impl OperandVisitor) {
323 match memarg {
324 AMode::Unscaled { rn, .. } | AMode::UnsignedOffset { rn, .. } => {
325 collector.reg_use(rn);
326 }
327 AMode::RegReg { rn, rm, .. }
328 | AMode::RegScaled { rn, rm, .. }
329 | AMode::RegScaledExtended { rn, rm, .. }
330 | AMode::RegExtended { rn, rm, .. } => {
331 collector.reg_use(rn);
332 collector.reg_use(rm);
333 }
334 AMode::Label { .. } => {}
335 AMode::SPPreIndexed { .. } | AMode::SPPostIndexed { .. } => {}
336 AMode::FPOffset { .. } | AMode::IncomingArg { .. } => {}
337 AMode::SPOffset { .. } | AMode::SlotOffset { .. } => {}
338 AMode::RegOffset { rn, .. } => {
339 collector.reg_use(rn);
340 }
341 AMode::Const { .. } => {}
342 }
343}
344
345fn pairmemarg_operands(pairmemarg: &mut PairAMode, collector: &mut impl OperandVisitor) {
346 match pairmemarg {
347 PairAMode::SignedOffset { reg, .. } => {
348 collector.reg_use(reg);
349 }
350 PairAMode::SPPreIndexed { .. } | PairAMode::SPPostIndexed { .. } => {}
351 }
352}
353
/// Walk `inst` and report every register operand (def, use, fixed-register
/// constraint, clobber) to `collector` for register allocation.
///
/// NOTE: the order of `collector` calls within each arm is significant:
/// `reg_reuse_def(rd, i)` constrains `rd` to reuse the register of the
/// operand at index `i` as collected in that arm, so do not reorder calls.
fn aarch64_get_operands(inst: &mut Inst, collector: &mut impl OperandVisitor) {
    match inst {
        Inst::AluRRR { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::AluRRRR { rd, rn, rm, ra, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
            collector.reg_use(ra);
        }
        Inst::AluRRImm12 { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::AluRRImmLogic { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::AluRRImmShift { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::AluRRRShift { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::AluRRRExtend { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::BitRR { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        // Integer loads: destination is defined, address registers are used.
        Inst::ULoad8 { rd, mem, .. }
        | Inst::SLoad8 { rd, mem, .. }
        | Inst::ULoad16 { rd, mem, .. }
        | Inst::SLoad16 { rd, mem, .. }
        | Inst::ULoad32 { rd, mem, .. }
        | Inst::SLoad32 { rd, mem, .. }
        | Inst::ULoad64 { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        // Integer stores: `rd` is the stored value, i.e. a use.
        Inst::Store8 { rd, mem, .. }
        | Inst::Store16 { rd, mem, .. }
        | Inst::Store32 { rd, mem, .. }
        | Inst::Store64 { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::StoreP64 { rt, rt2, mem, .. } => {
            collector.reg_use(rt);
            collector.reg_use(rt2);
            pairmemarg_operands(mem, collector);
        }
        Inst::LoadP64 { rt, rt2, mem, .. } => {
            collector.reg_def(rt);
            collector.reg_def(rt2);
            pairmemarg_operands(mem, collector);
        }
        Inst::Mov { rd, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rm);
        }
        Inst::MovFromPReg { rd, rm } => {
            // `rm` is a physical register outside the allocatable set.
            debug_assert!(rd.to_reg().is_virtual());
            collector.reg_def(rd);
            collector.reg_fixed_nonallocatable(*rm);
        }
        Inst::MovToPReg { rd, rm } => {
            debug_assert!(rm.is_virtual());
            collector.reg_fixed_nonallocatable(*rd);
            collector.reg_use(rm);
        }
        Inst::MovK { rd, rn, .. } => {
            // `movk` patches its destination in place: `rd` reuses
            // operand 0 (`rn`).
            collector.reg_use(rn);
            collector.reg_reuse_def(rd, 0);
        }
        Inst::MovWide { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::CSel { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::CSNeg { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::CSet { rd, .. } | Inst::CSetm { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::CCmp { rn, rm, .. } => {
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::CCmpImm { rn, .. } => {
            collector.reg_use(rn);
        }
        // Pseudo-instruction expanded to a loop at emit time; all its
        // registers are pinned to fixed physical registers.
        Inst::AtomicRMWLoop {
            op,
            addr,
            operand,
            oldval,
            scratch1,
            scratch2,
            ..
        } => {
            collector.reg_fixed_use(addr, xreg(25));
            collector.reg_fixed_use(operand, xreg(26));
            collector.reg_fixed_def(oldval, xreg(27));
            collector.reg_fixed_def(scratch1, xreg(24));
            // Xchg needs no second scratch register.
            if *op != AtomicRMWLoopOp::Xchg {
                collector.reg_fixed_def(scratch2, xreg(28));
            }
        }
        Inst::AtomicRMW { rs, rt, rn, .. } => {
            collector.reg_use(rs);
            collector.reg_def(rt);
            collector.reg_use(rn);
        }
        Inst::AtomicCAS { rd, rs, rt, rn, .. } => {
            // `cas` reads and writes the compare register: `rd` reuses
            // operand 1 (`rs`).
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(rs);
            collector.reg_use(rt);
            collector.reg_use(rn);
        }
        // CAS loop pseudo-instruction; registers pinned like AtomicRMWLoop.
        Inst::AtomicCASLoop {
            addr,
            expected,
            replacement,
            oldval,
            scratch,
            ..
        } => {
            collector.reg_fixed_use(addr, xreg(25));
            collector.reg_fixed_use(expected, xreg(26));
            collector.reg_fixed_use(replacement, xreg(28));
            collector.reg_fixed_def(oldval, xreg(27));
            collector.reg_fixed_def(scratch, xreg(24));
        }
        Inst::LoadAcquire { rt, rn, .. } => {
            collector.reg_use(rn);
            collector.reg_def(rt);
        }
        Inst::StoreRelease { rt, rn, .. } => {
            collector.reg_use(rn);
            collector.reg_use(rt);
        }
        Inst::Fence {} | Inst::Csdb {} => {}
        Inst::FpuMove32 { rd, rn } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuMove64 { rd, rn } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuMove128 { rd, rn } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuMoveFromVec { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuExtend { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuRR { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuRRR { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::FpuRRI { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuRRIMod { rd, ri, rn, .. } => {
            // Read-modify-write destination: `rd` reuses operand 1 (`ri`).
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rn);
        }
        Inst::FpuRRRR { rd, rn, rm, ra, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
            collector.reg_use(ra);
        }
        Inst::VecMisc { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }

        Inst::VecLanes { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecShiftImm { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecShiftImmMod { rd, ri, rn, .. } => {
            // Read-modify-write destination: `rd` reuses operand 1 (`ri`).
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rn);
        }
        Inst::VecExtract { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecTbl { rd, rn, rm } => {
            collector.reg_use(rn);
            collector.reg_use(rm);
            collector.reg_def(rd);
        }
        Inst::VecTblExt { rd, ri, rn, rm } => {
            collector.reg_use(rn);
            collector.reg_use(rm);
            // Read-modify-write destination: `rd` reuses operand 3 (`ri`).
            collector.reg_reuse_def(rd, 3);
            collector.reg_use(ri);
        }

        Inst::VecTbl2 { rd, rn, rn2, rm } => {
            // The two table registers are pinned to v30/v31.
            // NOTE(review): presumably because `tbl` with a two-register
            // table requires consecutive registers — confirm in emit code.
            collector.reg_fixed_use(rn, vreg(30));
            collector.reg_fixed_use(rn2, vreg(31));
            collector.reg_use(rm);
            collector.reg_def(rd);
        }
        Inst::VecTbl2Ext {
            rd,
            ri,
            rn,
            rn2,
            rm,
        } => {
            // Table registers pinned to v30/v31 as for VecTbl2.
            collector.reg_fixed_use(rn, vreg(30));
            collector.reg_fixed_use(rn2, vreg(31));
            collector.reg_use(rm);
            // Read-modify-write destination: `rd` reuses operand 4 (`ri`).
            collector.reg_reuse_def(rd, 4);
            collector.reg_use(ri);
        }
        Inst::VecLoadReplicate { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecCSel { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::FpuCmp { rn, rm, .. } => {
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::FpuLoad16 { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::FpuLoad32 { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::FpuLoad64 { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::FpuLoad128 { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::FpuStore16 { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::FpuStore32 { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::FpuStore64 { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::FpuStore128 { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::FpuLoadP64 { rt, rt2, mem, .. } => {
            collector.reg_def(rt);
            collector.reg_def(rt2);
            pairmemarg_operands(mem, collector);
        }
        Inst::FpuStoreP64 { rt, rt2, mem, .. } => {
            collector.reg_use(rt);
            collector.reg_use(rt2);
            pairmemarg_operands(mem, collector);
        }
        Inst::FpuLoadP128 { rt, rt2, mem, .. } => {
            collector.reg_def(rt);
            collector.reg_def(rt2);
            pairmemarg_operands(mem, collector);
        }
        Inst::FpuStoreP128 { rt, rt2, mem, .. } => {
            collector.reg_use(rt);
            collector.reg_use(rt2);
            pairmemarg_operands(mem, collector);
        }
        Inst::FpuToInt { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::IntToFpu { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuCSel16 { rd, rn, rm, .. }
        | Inst::FpuCSel32 { rd, rn, rm, .. }
        | Inst::FpuCSel64 { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::FpuRound { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::MovToFpu { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuMoveFPImm { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::MovToVec { rd, ri, rn, .. } => {
            // Lane insert modifies `rd` in place: reuses operand 1 (`ri`).
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rn);
        }
        Inst::MovFromVec { rd, rn, .. } | Inst::MovFromVecSigned { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecDup { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecDupFromFpu { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecDupFPImm { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::VecDupImm { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::VecExtend { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecMovElement { rd, ri, rn, .. } => {
            // Element move modifies `rd` in place: reuses operand 1 (`ri`).
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rn);
        }
        Inst::VecRRLong { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecRRNarrowLow { rd, rn, .. } => {
            collector.reg_use(rn);
            collector.reg_def(rd);
        }
        Inst::VecRRNarrowHigh { rd, ri, rn, .. } => {
            collector.reg_use(rn);
            // Writes the high half only: `rd` reuses operand 2 (`ri`).
            collector.reg_reuse_def(rd, 2);
            collector.reg_use(ri);
        }
        Inst::VecRRPair { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecRRRLong { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecRRRLongMod { rd, ri, rn, rm, .. } => {
            // Accumulating form: `rd` reuses operand 1 (`ri`).
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecRRPairLong { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecRRR { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecRRRMod { rd, ri, rn, rm, .. } | Inst::VecFmlaElem { rd, ri, rn, rm, .. } => {
            // Accumulating forms: `rd` reuses operand 1 (`ri`).
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::MovToNZCV { rn } => {
            collector.reg_use(rn);
        }
        Inst::MovFromNZCV { rd } => {
            collector.reg_def(rd);
        }
        Inst::Extend { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        // Function prologue pseudo-inst: each incoming arg is pinned to
        // its ABI-assigned physical register.
        Inst::Args { args } => {
            for ArgPair { vreg, preg } in args {
                collector.reg_fixed_def(vreg, *preg);
            }
        }
        Inst::Rets { rets } => {
            for RetPair { vreg, preg } in rets {
                collector.reg_fixed_use(vreg, *preg);
            }
        }
        Inst::Ret { .. } | Inst::AuthenticatedRet { .. } => {}
        Inst::Jump { .. } => {}
        Inst::Call { info, .. } | Inst::PatchableCall { info, .. } => {
            let CallInfo { uses, defs, .. } = &mut **info;
            // Arguments and return values are pinned per the call's ABI;
            // stack-returned values are unconstrained defs.
            for CallArgPair { vreg, preg } in uses {
                collector.reg_fixed_use(vreg, *preg);
            }
            for CallRetPair { vreg, location } in defs {
                match location {
                    RetLocation::Reg(preg, ..) => collector.reg_fixed_def(vreg, *preg),
                    RetLocation::Stack(..) => collector.any_def(vreg),
                }
            }
            collector.reg_clobbers(info.clobbers);
            if let Some(try_call_info) = &mut info.try_call_info {
                try_call_info.collect_operands(collector);
            }
        }
        Inst::CallInd { info, .. } => {
            let CallInfo {
                dest, uses, defs, ..
            } = &mut **info;
            collector.reg_use(dest);
            for CallArgPair { vreg, preg } in uses {
                collector.reg_fixed_use(vreg, *preg);
            }
            for CallRetPair { vreg, location } in defs {
                match location {
                    RetLocation::Reg(preg, ..) => collector.reg_fixed_def(vreg, *preg),
                    RetLocation::Stack(..) => collector.any_def(vreg),
                }
            }
            collector.reg_clobbers(info.clobbers);
            if let Some(try_call_info) = &mut info.try_call_info {
                try_call_info.collect_operands(collector);
            }
        }
        Inst::ReturnCall { info } => {
            for CallArgPair { vreg, preg } in &mut info.uses {
                collector.reg_fixed_use(vreg, *preg);
            }
        }
        Inst::ReturnCallInd { info } => {
            // The indirect destination is pinned to x1.
            // NOTE(review): presumably chosen to avoid clashing with the
            // argument registers in use — confirm against the tail-call ABI.
            collector.reg_fixed_use(&mut info.dest, xreg(1));
            for CallArgPair { vreg, preg } in &mut info.uses {
                collector.reg_fixed_use(vreg, *preg);
            }
        }
        Inst::CondBr { kind, .. } => match kind {
            CondBrKind::Zero(rt, _) | CondBrKind::NotZero(rt, _) => collector.reg_use(rt),
            CondBrKind::Cond(_) => {}
        },
        Inst::TestBitAndBranch { rn, .. } => {
            collector.reg_use(rn);
        }
        Inst::IndirectBr { rn, .. } => {
            collector.reg_use(rn);
        }
        Inst::Nop0 | Inst::Nop4 => {}
        Inst::Brk => {}
        Inst::Udf { .. } => {}
        Inst::TrapIf { kind, .. } => match kind {
            CondBrKind::Zero(rt, _) | CondBrKind::NotZero(rt, _) => collector.reg_use(rt),
            CondBrKind::Cond(_) => {}
        },
        Inst::Adr { rd, .. } | Inst::Adrp { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::Word4 { .. } | Inst::Word8 { .. } => {}
        Inst::JTSequence {
            ridx, rtmp1, rtmp2, ..
        } => {
            collector.reg_use(ridx);
            // Temps are early-defs: they must not share a register with
            // `ridx`, which is still live while they are written.
            collector.reg_early_def(rtmp1);
            collector.reg_early_def(rtmp2);
        }
        Inst::LoadExtNameGot { rd, .. }
        | Inst::LoadExtNameNear { rd, .. }
        | Inst::LoadExtNameFar { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::LoadAddr { rd, mem } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        // Pointer-auth instructions operate on fixed registers only.
        Inst::Paci { .. } | Inst::Xpaclri => {}
        Inst::Bti { .. } => {}

        Inst::ElfTlsGetAddr { rd, tmp, .. } => {
            // The TLS helper's result is returned in x0.
            collector.reg_fixed_def(rd, regs::xreg(0));
            collector.reg_early_def(tmp);
        }
        Inst::MachOTlsGetAddr { rd, .. } => {
            collector.reg_fixed_def(rd, regs::xreg(0));
            // This pseudo-inst calls out using the Apple ABI; everything
            // that call clobbers (except the x0 result) is clobbered here.
            let mut clobbers =
                AArch64MachineDeps::get_regs_clobbered_by_call(CallConv::AppleAarch64, false);
            clobbers.remove(regs::xreg_preg(0));
            collector.reg_clobbers(clobbers);
        }
        Inst::Unwind { .. } => {}
        Inst::EmitIsland { .. } => {}
        Inst::DummyUse { reg } => {
            collector.reg_use(reg);
        }
        Inst::LabelAddress { dst, .. } => {
            collector.reg_def(dst);
        }
        Inst::SequencePoint { .. } => {}
        Inst::StackProbeLoop { start, end, .. } => {
            // `start` is written while `end` is still live, so it must get
            // a distinct register (early def).
            collector.reg_early_def(start);
            collector.reg_use(end);
        }
    }
}
929
impl MachInst for Inst {
    type ABIMachineSpec = AArch64MachineDeps;
    type LabelUse = LabelUse;

    // Little-endian bytes of the 32-bit word 0x0000_c11f, i.e. the A64
    // `udf #0xc11f` encoding: a permanently-undefined instruction that
    // traps when executed.
    const TRAP_OPCODE: &'static [u8] = &0xc11f_u32.to_le_bytes();

    fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
        aarch64_get_operands(self, collector);
    }

    /// If this instruction is a full-register copy, return `(dst, src)` so
    /// the allocator can coalesce it. Only 64-bit integer and 64/128-bit
    /// FPU moves qualify (32-bit forms don't copy the whole register).
    fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
        match self {
            &Inst::Mov {
                size: OperandSize::Size64,
                rd,
                rm,
            } => Some((rd, rm)),
            &Inst::FpuMove64 { rd, rn } => Some((rd, rn)),
            &Inst::FpuMove128 { rd, rn } => Some((rd, rn)),
            _ => None,
        }
    }

    /// Whether this instruction should be considered when computing the
    /// function's clobbered-register set. Calls are excluded when the
    /// callee's clobbers are a subset of the caller's own clobber set,
    /// since they then add no new information.
    fn is_included_in_clobbers(&self) -> bool {
        let (caller, callee, is_exception) = match self {
            Inst::Args { .. } => return false,
            Inst::Call { info } => (
                info.caller_conv,
                info.callee_conv,
                info.try_call_info.is_some(),
            ),
            Inst::CallInd { info } => (
                info.caller_conv,
                info.callee_conv,
                info.try_call_info.is_some(),
            ),
            _ => return true,
        };

        let caller_clobbers = AArch64MachineDeps::get_regs_clobbered_by_call(caller, false);
        let callee_clobbers = AArch64MachineDeps::get_regs_clobbered_by_call(callee, is_exception);

        // Include the call only if the callee clobbers something the
        // caller's convention does not already clobber.
        let mut all_clobbers = caller_clobbers;
        all_clobbers.union_from(callee_clobbers);
        all_clobbers != caller_clobbers
    }

    fn is_trap(&self) -> bool {
        match self {
            Self::Udf { .. } => true,
            _ => false,
        }
    }

    fn is_args(&self) -> bool {
        match self {
            Self::Args { .. } => true,
            _ => false,
        }
    }

    /// Classify this instruction as a regular call, tail call, or non-call.
    /// The TLS pseudo-instructions count as regular calls because they
    /// call a helper under the hood.
    fn call_type(&self) -> CallType {
        match self {
            Inst::Call { .. }
            | Inst::CallInd { .. }
            | Inst::PatchableCall { .. }
            | Inst::ElfTlsGetAddr { .. }
            | Inst::MachOTlsGetAddr { .. } => CallType::Regular,

            Inst::ReturnCall { .. } | Inst::ReturnCallInd { .. } => CallType::TailCall,

            _ => CallType::None,
        }
    }

    /// Classify this instruction as a block terminator, if it is one.
    /// Calls with exception-handling (`try_call_info`) info terminate
    /// their block because control may continue at a handler.
    fn is_term(&self) -> MachTerminator {
        match self {
            &Inst::Rets { .. } => MachTerminator::Ret,
            &Inst::ReturnCall { .. } | &Inst::ReturnCallInd { .. } => MachTerminator::RetCall,
            &Inst::Jump { .. } => MachTerminator::Branch,
            &Inst::CondBr { .. } => MachTerminator::Branch,
            &Inst::TestBitAndBranch { .. } => MachTerminator::Branch,
            &Inst::IndirectBr { .. } => MachTerminator::Branch,
            &Inst::JTSequence { .. } => MachTerminator::Branch,
            &Inst::Call { ref info } if info.try_call_info.is_some() => MachTerminator::Branch,
            &Inst::CallInd { ref info } if info.try_call_info.is_some() => MachTerminator::Branch,
            _ => MachTerminator::None,
        }
    }

    fn is_mem_access(&self) -> bool {
        match self {
            &Inst::ULoad8 { .. }
            | &Inst::SLoad8 { .. }
            | &Inst::ULoad16 { .. }
            | &Inst::SLoad16 { .. }
            | &Inst::ULoad32 { .. }
            | &Inst::SLoad32 { .. }
            | &Inst::ULoad64 { .. }
            | &Inst::LoadP64 { .. }
            | &Inst::FpuLoad16 { .. }
            | &Inst::FpuLoad32 { .. }
            | &Inst::FpuLoad64 { .. }
            | &Inst::FpuLoad128 { .. }
            | &Inst::FpuLoadP64 { .. }
            | &Inst::FpuLoadP128 { .. }
            | &Inst::Store8 { .. }
            | &Inst::Store16 { .. }
            | &Inst::Store32 { .. }
            | &Inst::Store64 { .. }
            | &Inst::StoreP64 { .. }
            | &Inst::FpuStore16 { .. }
            | &Inst::FpuStore32 { .. }
            | &Inst::FpuStore64 { .. }
            | &Inst::FpuStore128 { .. } => true,
            _ => false,
        }
    }

    /// Generate a register-to-register move of type `ty`. Always moves the
    /// full register (64-bit int / 64- or 128-bit FPU form).
    fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Inst {
        let bits = ty.bits();

        assert!(bits <= 128);
        assert!(to_reg.to_reg().class() == from_reg.class());
        match from_reg.class() {
            RegClass::Int => Inst::Mov {
                size: OperandSize::Size64,
                rd: to_reg,
                rm: from_reg,
            },
            RegClass::Float => {
                if bits > 64 {
                    Inst::FpuMove128 {
                        rd: to_reg,
                        rn: from_reg,
                    }
                } else {
                    Inst::FpuMove64 {
                        rd: to_reg,
                        rn: from_reg,
                    }
                }
            }
            // This backend does not use the dedicated `Vector` class.
            RegClass::Vector => unreachable!(),
        }
    }

    fn is_safepoint(&self) -> bool {
        match self {
            Inst::Call { .. } | Inst::CallInd { .. } | Inst::PatchableCall { .. } => true,
            _ => false,
        }
    }

    fn gen_dummy_use(reg: Reg) -> Inst {
        Inst::DummyUse { reg }
    }

    /// Generate a nop of at most `preferred_size` bytes; A64 instructions
    /// are 4 bytes, so any nonzero request must allow at least 4.
    fn gen_nop(preferred_size: usize) -> Inst {
        if preferred_size == 0 {
            return Inst::Nop0;
        }
        assert!(preferred_size >= 4);
        Inst::Nop4
    }

    fn gen_nop_unit() -> SmallVec<[u8; 8]> {
        // Little-endian encoding of the A64 `nop` instruction (0xd503201f).
        smallvec![0x1f, 0x20, 0x03, 0xd5]
    }

    /// Map an SSA value type to the register class(es) and register
    /// type(s) used to hold it. `I128` takes two integer registers; all
    /// float/vector types live in the FPU register file.
    fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
        match ty {
            I8 => Ok((&[RegClass::Int], &[I8])),
            I16 => Ok((&[RegClass::Int], &[I16])),
            I32 => Ok((&[RegClass::Int], &[I32])),
            I64 => Ok((&[RegClass::Int], &[I64])),
            F16 => Ok((&[RegClass::Float], &[F16])),
            F32 => Ok((&[RegClass::Float], &[F32])),
            F64 => Ok((&[RegClass::Float], &[F64])),
            F128 => Ok((&[RegClass::Float], &[F128])),
            I128 => Ok((&[RegClass::Int, RegClass::Int], &[I64, I64])),
            _ if ty.is_vector() && ty.bits() <= 128 => {
                // Canonicalize by byte width: 2/4/8/16 bytes map to
                // I8X2/I8X4/I8X8/I8X16 via log2(bytes) - 1.
                let types = &[types::I8X2, types::I8X4, types::I8X8, types::I8X16];
                Ok((
                    &[RegClass::Float],
                    slice::from_ref(&types[ty.bytes().ilog2() as usize - 1]),
                ))
            }
            _ if ty.is_dynamic_vector() => Ok((&[RegClass::Float], &[I8X16])),
            _ => Err(CodegenError::Unsupported(format!(
                "Unexpected SSA-value type: {ty}"
            ))),
        }
    }

    fn canonical_type_for_rc(rc: RegClass) -> Type {
        match rc {
            RegClass::Float => types::I8X16,
            RegClass::Int => types::I64,
            RegClass::Vector => unreachable!(),
        }
    }

    fn gen_jump(target: MachLabel) -> Inst {
        Inst::Jump {
            dest: BranchTarget::Label(target),
        }
    }

    /// Conservative upper bound, in bytes, on the emitted size of any one
    /// `Inst`. NOTE(review): presumably sized for the largest
    /// multi-instruction pseudo-op — confirm against the emit code when
    /// adding new pseudo-instructions.
    fn worst_case_size() -> CodeOffset {
        44
    }

    fn ref_type_regclass(_: &settings::Flags) -> RegClass {
        RegClass::Int
    }

    /// Emit a `bti j` landing pad at indirect-branch targets when
    /// forward-edge CFI is enabled.
    fn gen_block_start(
        is_indirect_branch_target: bool,
        is_forward_edge_cfi_enabled: bool,
    ) -> Option<Self> {
        if is_indirect_branch_target && is_forward_edge_cfi_enabled {
            Some(Inst::Bti {
                targets: BranchTargetType::J,
            })
        } else {
            None
        }
    }

    fn function_alignment() -> FunctionAlignment {
        FunctionAlignment {
            minimum: 4,
            preferred: 32,
        }
    }
}
1194
1195fn mem_finalize_for_show(mem: &AMode, access_ty: Type, state: &EmitState) -> (String, String) {
1199 let (mem_insts, mem) = mem_finalize(None, mem, access_ty, state);
1200 let mut mem_str = mem_insts
1201 .into_iter()
1202 .map(|inst| inst.print_with_state(&mut EmitState::default()))
1203 .collect::<Vec<_>>()
1204 .join(" ; ");
1205 if !mem_str.is_empty() {
1206 mem_str += " ; ";
1207 }
1208
1209 let mem = mem.pretty_print(access_ty.bytes() as u8);
1210 (mem_str, mem)
1211}
1212
1213fn pretty_print_try_call(info: &TryCallInfo) -> String {
1214 format!(
1215 "; b {:?}; catch [{}]",
1216 info.continuation,
1217 info.pretty_print_dests()
1218 )
1219}
1220
1221impl Inst {
1222 fn print_with_state(&self, state: &mut EmitState) -> String {
1223 fn op_name(alu_op: ALUOp) -> &'static str {
1224 match alu_op {
1225 ALUOp::Add => "add",
1226 ALUOp::Sub => "sub",
1227 ALUOp::Orr => "orr",
1228 ALUOp::And => "and",
1229 ALUOp::AndS => "ands",
1230 ALUOp::Eor => "eor",
1231 ALUOp::AddS => "adds",
1232 ALUOp::SubS => "subs",
1233 ALUOp::SMulH => "smulh",
1234 ALUOp::UMulH => "umulh",
1235 ALUOp::SDiv => "sdiv",
1236 ALUOp::UDiv => "udiv",
1237 ALUOp::AndNot => "bic",
1238 ALUOp::OrrNot => "orn",
1239 ALUOp::EorNot => "eon",
1240 ALUOp::Extr => "extr",
1241 ALUOp::Lsr => "lsr",
1242 ALUOp::Asr => "asr",
1243 ALUOp::Lsl => "lsl",
1244 ALUOp::Adc => "adc",
1245 ALUOp::AdcS => "adcs",
1246 ALUOp::Sbc => "sbc",
1247 ALUOp::SbcS => "sbcs",
1248 }
1249 }
1250
1251 match self {
1252 &Inst::Nop0 => "nop-zero-len".to_string(),
1253 &Inst::Nop4 => "nop".to_string(),
1254 &Inst::AluRRR {
1255 alu_op,
1256 size,
1257 rd,
1258 rn,
1259 rm,
1260 } => {
1261 let op = op_name(alu_op);
1262 let rd = pretty_print_ireg(rd.to_reg(), size);
1263 let rn = pretty_print_ireg(rn, size);
1264 let rm = pretty_print_ireg(rm, size);
1265 format!("{op} {rd}, {rn}, {rm}")
1266 }
1267 &Inst::AluRRRR {
1268 alu_op,
1269 size,
1270 rd,
1271 rn,
1272 rm,
1273 ra,
1274 } => {
1275 let (op, da_size) = match alu_op {
1276 ALUOp3::MAdd => ("madd", size),
1277 ALUOp3::MSub => ("msub", size),
1278 ALUOp3::UMAddL => ("umaddl", OperandSize::Size64),
1279 ALUOp3::SMAddL => ("smaddl", OperandSize::Size64),
1280 };
1281 let rd = pretty_print_ireg(rd.to_reg(), da_size);
1282 let rn = pretty_print_ireg(rn, size);
1283 let rm = pretty_print_ireg(rm, size);
1284 let ra = pretty_print_ireg(ra, da_size);
1285
1286 format!("{op} {rd}, {rn}, {rm}, {ra}")
1287 }
1288 &Inst::AluRRImm12 {
1289 alu_op,
1290 size,
1291 rd,
1292 rn,
1293 ref imm12,
1294 } => {
1295 let op = op_name(alu_op);
1296 let rd = pretty_print_ireg(rd.to_reg(), size);
1297 let rn = pretty_print_ireg(rn, size);
1298
1299 if imm12.bits == 0 && alu_op == ALUOp::Add && size.is64() {
1300 format!("mov {rd}, {rn}")
1302 } else {
1303 let imm12 = imm12.pretty_print(0);
1304 format!("{op} {rd}, {rn}, {imm12}")
1305 }
1306 }
1307 &Inst::AluRRImmLogic {
1308 alu_op,
1309 size,
1310 rd,
1311 rn,
1312 ref imml,
1313 } => {
1314 let op = op_name(alu_op);
1315 let rd = pretty_print_ireg(rd.to_reg(), size);
1316 let rn = pretty_print_ireg(rn, size);
1317 let imml = imml.pretty_print(0);
1318 format!("{op} {rd}, {rn}, {imml}")
1319 }
1320 &Inst::AluRRImmShift {
1321 alu_op,
1322 size,
1323 rd,
1324 rn,
1325 ref immshift,
1326 } => {
1327 let op = op_name(alu_op);
1328 let rd = pretty_print_ireg(rd.to_reg(), size);
1329 let rn = pretty_print_ireg(rn, size);
1330 let immshift = immshift.pretty_print(0);
1331 format!("{op} {rd}, {rn}, {immshift}")
1332 }
1333 &Inst::AluRRRShift {
1334 alu_op,
1335 size,
1336 rd,
1337 rn,
1338 rm,
1339 ref shiftop,
1340 } => {
1341 let op = op_name(alu_op);
1342 let rd = pretty_print_ireg(rd.to_reg(), size);
1343 let rn = pretty_print_ireg(rn, size);
1344 let rm = pretty_print_ireg(rm, size);
1345 let shiftop = shiftop.pretty_print(0);
1346 format!("{op} {rd}, {rn}, {rm}, {shiftop}")
1347 }
1348 &Inst::AluRRRExtend {
1349 alu_op,
1350 size,
1351 rd,
1352 rn,
1353 rm,
1354 ref extendop,
1355 } => {
1356 let op = op_name(alu_op);
1357 let rd = pretty_print_ireg(rd.to_reg(), size);
1358 let rn = pretty_print_ireg(rn, size);
1359 let rm = pretty_print_ireg(rm, size);
1360 let extendop = extendop.pretty_print(0);
1361 format!("{op} {rd}, {rn}, {rm}, {extendop}")
1362 }
1363 &Inst::BitRR { op, size, rd, rn } => {
1364 let op = op.op_str();
1365 let rd = pretty_print_ireg(rd.to_reg(), size);
1366 let rn = pretty_print_ireg(rn, size);
1367 format!("{op} {rd}, {rn}")
1368 }
1369 &Inst::ULoad8 { rd, ref mem, .. }
1370 | &Inst::SLoad8 { rd, ref mem, .. }
1371 | &Inst::ULoad16 { rd, ref mem, .. }
1372 | &Inst::SLoad16 { rd, ref mem, .. }
1373 | &Inst::ULoad32 { rd, ref mem, .. }
1374 | &Inst::SLoad32 { rd, ref mem, .. }
1375 | &Inst::ULoad64 { rd, ref mem, .. } => {
1376 let is_unscaled = match &mem {
1377 &AMode::Unscaled { .. } => true,
1378 _ => false,
1379 };
1380 let (op, size) = match (self, is_unscaled) {
1381 (&Inst::ULoad8 { .. }, false) => ("ldrb", OperandSize::Size32),
1382 (&Inst::ULoad8 { .. }, true) => ("ldurb", OperandSize::Size32),
1383 (&Inst::SLoad8 { .. }, false) => ("ldrsb", OperandSize::Size64),
1384 (&Inst::SLoad8 { .. }, true) => ("ldursb", OperandSize::Size64),
1385 (&Inst::ULoad16 { .. }, false) => ("ldrh", OperandSize::Size32),
1386 (&Inst::ULoad16 { .. }, true) => ("ldurh", OperandSize::Size32),
1387 (&Inst::SLoad16 { .. }, false) => ("ldrsh", OperandSize::Size64),
1388 (&Inst::SLoad16 { .. }, true) => ("ldursh", OperandSize::Size64),
1389 (&Inst::ULoad32 { .. }, false) => ("ldr", OperandSize::Size32),
1390 (&Inst::ULoad32 { .. }, true) => ("ldur", OperandSize::Size32),
1391 (&Inst::SLoad32 { .. }, false) => ("ldrsw", OperandSize::Size64),
1392 (&Inst::SLoad32 { .. }, true) => ("ldursw", OperandSize::Size64),
1393 (&Inst::ULoad64 { .. }, false) => ("ldr", OperandSize::Size64),
1394 (&Inst::ULoad64 { .. }, true) => ("ldur", OperandSize::Size64),
1395 _ => unreachable!(),
1396 };
1397
1398 let rd = pretty_print_ireg(rd.to_reg(), size);
1399 let mem = mem.clone();
1400 let access_ty = self.mem_type().unwrap();
1401 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1402
1403 format!("{mem_str}{op} {rd}, {mem}")
1404 }
1405 &Inst::Store8 { rd, ref mem, .. }
1406 | &Inst::Store16 { rd, ref mem, .. }
1407 | &Inst::Store32 { rd, ref mem, .. }
1408 | &Inst::Store64 { rd, ref mem, .. } => {
1409 let is_unscaled = match &mem {
1410 &AMode::Unscaled { .. } => true,
1411 _ => false,
1412 };
1413 let (op, size) = match (self, is_unscaled) {
1414 (&Inst::Store8 { .. }, false) => ("strb", OperandSize::Size32),
1415 (&Inst::Store8 { .. }, true) => ("sturb", OperandSize::Size32),
1416 (&Inst::Store16 { .. }, false) => ("strh", OperandSize::Size32),
1417 (&Inst::Store16 { .. }, true) => ("sturh", OperandSize::Size32),
1418 (&Inst::Store32 { .. }, false) => ("str", OperandSize::Size32),
1419 (&Inst::Store32 { .. }, true) => ("stur", OperandSize::Size32),
1420 (&Inst::Store64 { .. }, false) => ("str", OperandSize::Size64),
1421 (&Inst::Store64 { .. }, true) => ("stur", OperandSize::Size64),
1422 _ => unreachable!(),
1423 };
1424
1425 let rd = pretty_print_ireg(rd, size);
1426 let mem = mem.clone();
1427 let access_ty = self.mem_type().unwrap();
1428 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1429
1430 format!("{mem_str}{op} {rd}, {mem}")
1431 }
1432 &Inst::StoreP64 {
1433 rt, rt2, ref mem, ..
1434 } => {
1435 let rt = pretty_print_ireg(rt, OperandSize::Size64);
1436 let rt2 = pretty_print_ireg(rt2, OperandSize::Size64);
1437 let mem = mem.clone();
1438 let mem = mem.pretty_print_default();
1439 format!("stp {rt}, {rt2}, {mem}")
1440 }
1441 &Inst::LoadP64 {
1442 rt, rt2, ref mem, ..
1443 } => {
1444 let rt = pretty_print_ireg(rt.to_reg(), OperandSize::Size64);
1445 let rt2 = pretty_print_ireg(rt2.to_reg(), OperandSize::Size64);
1446 let mem = mem.clone();
1447 let mem = mem.pretty_print_default();
1448 format!("ldp {rt}, {rt2}, {mem}")
1449 }
1450 &Inst::Mov { size, rd, rm } => {
1451 let rd = pretty_print_ireg(rd.to_reg(), size);
1452 let rm = pretty_print_ireg(rm, size);
1453 format!("mov {rd}, {rm}")
1454 }
1455 &Inst::MovFromPReg { rd, rm } => {
1456 let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1457 let rm = show_ireg_sized(rm.into(), OperandSize::Size64);
1458 format!("mov {rd}, {rm}")
1459 }
1460 &Inst::MovToPReg { rd, rm } => {
1461 let rd = show_ireg_sized(rd.into(), OperandSize::Size64);
1462 let rm = pretty_print_ireg(rm, OperandSize::Size64);
1463 format!("mov {rd}, {rm}")
1464 }
1465 &Inst::MovWide {
1466 op,
1467 rd,
1468 ref imm,
1469 size,
1470 } => {
1471 let op_str = match op {
1472 MoveWideOp::MovZ => "movz",
1473 MoveWideOp::MovN => "movn",
1474 };
1475 let rd = pretty_print_ireg(rd.to_reg(), size);
1476 let imm = imm.pretty_print(0);
1477 format!("{op_str} {rd}, {imm}")
1478 }
1479 &Inst::MovK {
1480 rd,
1481 rn,
1482 ref imm,
1483 size,
1484 } => {
1485 let rn = pretty_print_ireg(rn, size);
1486 let rd = pretty_print_ireg(rd.to_reg(), size);
1487 let imm = imm.pretty_print(0);
1488 format!("movk {rd}, {rn}, {imm}")
1489 }
1490 &Inst::CSel { rd, rn, rm, cond } => {
1491 let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1492 let rn = pretty_print_ireg(rn, OperandSize::Size64);
1493 let rm = pretty_print_ireg(rm, OperandSize::Size64);
1494 let cond = cond.pretty_print(0);
1495 format!("csel {rd}, {rn}, {rm}, {cond}")
1496 }
1497 &Inst::CSNeg { rd, rn, rm, cond } => {
1498 let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1499 let rn = pretty_print_ireg(rn, OperandSize::Size64);
1500 let rm = pretty_print_ireg(rm, OperandSize::Size64);
1501 let cond = cond.pretty_print(0);
1502 format!("csneg {rd}, {rn}, {rm}, {cond}")
1503 }
1504 &Inst::CSet { rd, cond } => {
1505 let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1506 let cond = cond.pretty_print(0);
1507 format!("cset {rd}, {cond}")
1508 }
1509 &Inst::CSetm { rd, cond } => {
1510 let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64);
1511 let cond = cond.pretty_print(0);
1512 format!("csetm {rd}, {cond}")
1513 }
1514 &Inst::CCmp {
1515 size,
1516 rn,
1517 rm,
1518 nzcv,
1519 cond,
1520 } => {
1521 let rn = pretty_print_ireg(rn, size);
1522 let rm = pretty_print_ireg(rm, size);
1523 let nzcv = nzcv.pretty_print(0);
1524 let cond = cond.pretty_print(0);
1525 format!("ccmp {rn}, {rm}, {nzcv}, {cond}")
1526 }
1527 &Inst::CCmpImm {
1528 size,
1529 rn,
1530 imm,
1531 nzcv,
1532 cond,
1533 } => {
1534 let rn = pretty_print_ireg(rn, size);
1535 let imm = imm.pretty_print(0);
1536 let nzcv = nzcv.pretty_print(0);
1537 let cond = cond.pretty_print(0);
1538 format!("ccmp {rn}, {imm}, {nzcv}, {cond}")
1539 }
1540 &Inst::AtomicRMW {
1541 rs, rt, rn, ty, op, ..
1542 } => {
1543 let op = match op {
1544 AtomicRMWOp::Add => "ldaddal",
1545 AtomicRMWOp::Clr => "ldclral",
1546 AtomicRMWOp::Eor => "ldeoral",
1547 AtomicRMWOp::Set => "ldsetal",
1548 AtomicRMWOp::Smax => "ldsmaxal",
1549 AtomicRMWOp::Umax => "ldumaxal",
1550 AtomicRMWOp::Smin => "ldsminal",
1551 AtomicRMWOp::Umin => "lduminal",
1552 AtomicRMWOp::Swp => "swpal",
1553 };
1554
1555 let size = OperandSize::from_ty(ty);
1556 let rs = pretty_print_ireg(rs, size);
1557 let rt = pretty_print_ireg(rt.to_reg(), size);
1558 let rn = pretty_print_ireg(rn, OperandSize::Size64);
1559
1560 let ty_suffix = match ty {
1561 I8 => "b",
1562 I16 => "h",
1563 _ => "",
1564 };
1565 format!("{op}{ty_suffix} {rs}, {rt}, [{rn}]")
1566 }
1567 &Inst::AtomicRMWLoop {
1568 ty,
1569 op,
1570 addr,
1571 operand,
1572 oldval,
1573 scratch1,
1574 scratch2,
1575 ..
1576 } => {
1577 let op = match op {
1578 AtomicRMWLoopOp::Add => "add",
1579 AtomicRMWLoopOp::Sub => "sub",
1580 AtomicRMWLoopOp::Eor => "eor",
1581 AtomicRMWLoopOp::Orr => "orr",
1582 AtomicRMWLoopOp::And => "and",
1583 AtomicRMWLoopOp::Nand => "nand",
1584 AtomicRMWLoopOp::Smin => "smin",
1585 AtomicRMWLoopOp::Smax => "smax",
1586 AtomicRMWLoopOp::Umin => "umin",
1587 AtomicRMWLoopOp::Umax => "umax",
1588 AtomicRMWLoopOp::Xchg => "xchg",
1589 };
1590 let addr = pretty_print_ireg(addr, OperandSize::Size64);
1591 let operand = pretty_print_ireg(operand, OperandSize::Size64);
1592 let oldval = pretty_print_ireg(oldval.to_reg(), OperandSize::Size64);
1593 let scratch1 = pretty_print_ireg(scratch1.to_reg(), OperandSize::Size64);
1594 let scratch2 = pretty_print_ireg(scratch2.to_reg(), OperandSize::Size64);
1595 format!(
1596 "atomic_rmw_loop_{}_{} addr={} operand={} oldval={} scratch1={} scratch2={}",
1597 op,
1598 ty.bits(),
1599 addr,
1600 operand,
1601 oldval,
1602 scratch1,
1603 scratch2,
1604 )
1605 }
1606 &Inst::AtomicCAS {
1607 rd, rs, rt, rn, ty, ..
1608 } => {
1609 let op = match ty {
1610 I8 => "casalb",
1611 I16 => "casalh",
1612 I32 | I64 => "casal",
1613 _ => panic!("Unsupported type: {ty}"),
1614 };
1615 let size = OperandSize::from_ty(ty);
1616 let rd = pretty_print_ireg(rd.to_reg(), size);
1617 let rs = pretty_print_ireg(rs, size);
1618 let rt = pretty_print_ireg(rt, size);
1619 let rn = pretty_print_ireg(rn, OperandSize::Size64);
1620
1621 format!("{op} {rd}, {rs}, {rt}, [{rn}]")
1622 }
1623 &Inst::AtomicCASLoop {
1624 ty,
1625 addr,
1626 expected,
1627 replacement,
1628 oldval,
1629 scratch,
1630 ..
1631 } => {
1632 let addr = pretty_print_ireg(addr, OperandSize::Size64);
1633 let expected = pretty_print_ireg(expected, OperandSize::Size64);
1634 let replacement = pretty_print_ireg(replacement, OperandSize::Size64);
1635 let oldval = pretty_print_ireg(oldval.to_reg(), OperandSize::Size64);
1636 let scratch = pretty_print_ireg(scratch.to_reg(), OperandSize::Size64);
1637 format!(
1638 "atomic_cas_loop_{} addr={}, expect={}, replacement={}, oldval={}, scratch={}",
1639 ty.bits(),
1640 addr,
1641 expected,
1642 replacement,
1643 oldval,
1644 scratch,
1645 )
1646 }
1647 &Inst::LoadAcquire {
1648 access_ty, rt, rn, ..
1649 } => {
1650 let (op, ty) = match access_ty {
1651 I8 => ("ldarb", I32),
1652 I16 => ("ldarh", I32),
1653 I32 => ("ldar", I32),
1654 I64 => ("ldar", I64),
1655 _ => panic!("Unsupported type: {access_ty}"),
1656 };
1657 let size = OperandSize::from_ty(ty);
1658 let rn = pretty_print_ireg(rn, OperandSize::Size64);
1659 let rt = pretty_print_ireg(rt.to_reg(), size);
1660 format!("{op} {rt}, [{rn}]")
1661 }
1662 &Inst::StoreRelease {
1663 access_ty, rt, rn, ..
1664 } => {
1665 let (op, ty) = match access_ty {
1666 I8 => ("stlrb", I32),
1667 I16 => ("stlrh", I32),
1668 I32 => ("stlr", I32),
1669 I64 => ("stlr", I64),
1670 _ => panic!("Unsupported type: {access_ty}"),
1671 };
1672 let size = OperandSize::from_ty(ty);
1673 let rn = pretty_print_ireg(rn, OperandSize::Size64);
1674 let rt = pretty_print_ireg(rt, size);
1675 format!("{op} {rt}, [{rn}]")
1676 }
1677 &Inst::Fence {} => {
1678 format!("dmb ish")
1679 }
1680 &Inst::Csdb {} => {
1681 format!("csdb")
1682 }
1683 &Inst::FpuMove32 { rd, rn } => {
1684 let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32);
1685 let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size32);
1686 format!("fmov {rd}, {rn}")
1687 }
1688 &Inst::FpuMove64 { rd, rn } => {
1689 let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
1690 let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size64);
1691 format!("fmov {rd}, {rn}")
1692 }
1693 &Inst::FpuMove128 { rd, rn } => {
1694 let rd = pretty_print_reg(rd.to_reg());
1695 let rn = pretty_print_reg(rn);
1696 format!("mov {rd}.16b, {rn}.16b")
1697 }
1698 &Inst::FpuMoveFromVec { rd, rn, idx, size } => {
1699 let rd = pretty_print_vreg_scalar(rd.to_reg(), size.lane_size());
1700 let rn = pretty_print_vreg_element(rn, idx as usize, size.lane_size());
1701 format!("mov {rd}, {rn}")
1702 }
1703 &Inst::FpuExtend { rd, rn, size } => {
1704 let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1705 let rn = pretty_print_vreg_scalar(rn, size);
1706 format!("fmov {rd}, {rn}")
1707 }
1708 &Inst::FpuRR {
1709 fpu_op,
1710 size,
1711 rd,
1712 rn,
1713 } => {
1714 let op = match fpu_op {
1715 FPUOp1::Abs => "fabs",
1716 FPUOp1::Neg => "fneg",
1717 FPUOp1::Sqrt => "fsqrt",
1718 FPUOp1::Cvt32To64 | FPUOp1::Cvt64To32 => "fcvt",
1719 };
1720 let dst_size = match fpu_op {
1721 FPUOp1::Cvt32To64 => ScalarSize::Size64,
1722 FPUOp1::Cvt64To32 => ScalarSize::Size32,
1723 _ => size,
1724 };
1725 let rd = pretty_print_vreg_scalar(rd.to_reg(), dst_size);
1726 let rn = pretty_print_vreg_scalar(rn, size);
1727 format!("{op} {rd}, {rn}")
1728 }
1729 &Inst::FpuRRR {
1730 fpu_op,
1731 size,
1732 rd,
1733 rn,
1734 rm,
1735 } => {
1736 let op = match fpu_op {
1737 FPUOp2::Add => "fadd",
1738 FPUOp2::Sub => "fsub",
1739 FPUOp2::Mul => "fmul",
1740 FPUOp2::Div => "fdiv",
1741 FPUOp2::Max => "fmax",
1742 FPUOp2::Min => "fmin",
1743 };
1744 let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1745 let rn = pretty_print_vreg_scalar(rn, size);
1746 let rm = pretty_print_vreg_scalar(rm, size);
1747 format!("{op} {rd}, {rn}, {rm}")
1748 }
1749 &Inst::FpuRRI { fpu_op, rd, rn } => {
1750 let (op, imm, vector) = match fpu_op {
1751 FPUOpRI::UShr32(imm) => ("ushr", imm.pretty_print(0), true),
1752 FPUOpRI::UShr64(imm) => ("ushr", imm.pretty_print(0), false),
1753 };
1754
1755 let (rd, rn) = if vector {
1756 (
1757 pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size32x2),
1758 pretty_print_vreg_vector(rn, VectorSize::Size32x2),
1759 )
1760 } else {
1761 (
1762 pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64),
1763 pretty_print_vreg_scalar(rn, ScalarSize::Size64),
1764 )
1765 };
1766 format!("{op} {rd}, {rn}, {imm}")
1767 }
1768 &Inst::FpuRRIMod { fpu_op, rd, ri, rn } => {
1769 let (op, imm, vector) = match fpu_op {
1770 FPUOpRIMod::Sli32(imm) => ("sli", imm.pretty_print(0), true),
1771 FPUOpRIMod::Sli64(imm) => ("sli", imm.pretty_print(0), false),
1772 };
1773
1774 let (rd, ri, rn) = if vector {
1775 (
1776 pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size32x2),
1777 pretty_print_vreg_vector(ri, VectorSize::Size32x2),
1778 pretty_print_vreg_vector(rn, VectorSize::Size32x2),
1779 )
1780 } else {
1781 (
1782 pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64),
1783 pretty_print_vreg_scalar(ri, ScalarSize::Size64),
1784 pretty_print_vreg_scalar(rn, ScalarSize::Size64),
1785 )
1786 };
1787 format!("{op} {rd}, {ri}, {rn}, {imm}")
1788 }
1789 &Inst::FpuRRRR {
1790 fpu_op,
1791 size,
1792 rd,
1793 rn,
1794 rm,
1795 ra,
1796 } => {
1797 let op = match fpu_op {
1798 FPUOp3::MAdd => "fmadd",
1799 FPUOp3::MSub => "fmsub",
1800 FPUOp3::NMAdd => "fnmadd",
1801 FPUOp3::NMSub => "fnmsub",
1802 };
1803 let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1804 let rn = pretty_print_vreg_scalar(rn, size);
1805 let rm = pretty_print_vreg_scalar(rm, size);
1806 let ra = pretty_print_vreg_scalar(ra, size);
1807 format!("{op} {rd}, {rn}, {rm}, {ra}")
1808 }
1809 &Inst::FpuCmp { size, rn, rm } => {
1810 let rn = pretty_print_vreg_scalar(rn, size);
1811 let rm = pretty_print_vreg_scalar(rm, size);
1812 format!("fcmp {rn}, {rm}")
1813 }
1814 &Inst::FpuLoad16 { rd, ref mem, .. } => {
1815 let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size16);
1816 let mem = mem.clone();
1817 let access_ty = self.mem_type().unwrap();
1818 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1819 format!("{mem_str}ldr {rd}, {mem}")
1820 }
1821 &Inst::FpuLoad32 { rd, ref mem, .. } => {
1822 let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32);
1823 let mem = mem.clone();
1824 let access_ty = self.mem_type().unwrap();
1825 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1826 format!("{mem_str}ldr {rd}, {mem}")
1827 }
1828 &Inst::FpuLoad64 { rd, ref mem, .. } => {
1829 let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
1830 let mem = mem.clone();
1831 let access_ty = self.mem_type().unwrap();
1832 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1833 format!("{mem_str}ldr {rd}, {mem}")
1834 }
1835 &Inst::FpuLoad128 { rd, ref mem, .. } => {
1836 let rd = pretty_print_reg(rd.to_reg());
1837 let rd = "q".to_string() + &rd[1..];
1838 let mem = mem.clone();
1839 let access_ty = self.mem_type().unwrap();
1840 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1841 format!("{mem_str}ldr {rd}, {mem}")
1842 }
1843 &Inst::FpuStore16 { rd, ref mem, .. } => {
1844 let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size16);
1845 let mem = mem.clone();
1846 let access_ty = self.mem_type().unwrap();
1847 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1848 format!("{mem_str}str {rd}, {mem}")
1849 }
1850 &Inst::FpuStore32 { rd, ref mem, .. } => {
1851 let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size32);
1852 let mem = mem.clone();
1853 let access_ty = self.mem_type().unwrap();
1854 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1855 format!("{mem_str}str {rd}, {mem}")
1856 }
1857 &Inst::FpuStore64 { rd, ref mem, .. } => {
1858 let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size64);
1859 let mem = mem.clone();
1860 let access_ty = self.mem_type().unwrap();
1861 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1862 format!("{mem_str}str {rd}, {mem}")
1863 }
1864 &Inst::FpuStore128 { rd, ref mem, .. } => {
1865 let rd = pretty_print_reg(rd);
1866 let rd = "q".to_string() + &rd[1..];
1867 let mem = mem.clone();
1868 let access_ty = self.mem_type().unwrap();
1869 let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state);
1870 format!("{mem_str}str {rd}, {mem}")
1871 }
1872 &Inst::FpuLoadP64 {
1873 rt, rt2, ref mem, ..
1874 } => {
1875 let rt = pretty_print_vreg_scalar(rt.to_reg(), ScalarSize::Size64);
1876 let rt2 = pretty_print_vreg_scalar(rt2.to_reg(), ScalarSize::Size64);
1877 let mem = mem.clone();
1878 let mem = mem.pretty_print_default();
1879
1880 format!("ldp {rt}, {rt2}, {mem}")
1881 }
1882 &Inst::FpuStoreP64 {
1883 rt, rt2, ref mem, ..
1884 } => {
1885 let rt = pretty_print_vreg_scalar(rt, ScalarSize::Size64);
1886 let rt2 = pretty_print_vreg_scalar(rt2, ScalarSize::Size64);
1887 let mem = mem.clone();
1888 let mem = mem.pretty_print_default();
1889
1890 format!("stp {rt}, {rt2}, {mem}")
1891 }
1892 &Inst::FpuLoadP128 {
1893 rt, rt2, ref mem, ..
1894 } => {
1895 let rt = pretty_print_vreg_scalar(rt.to_reg(), ScalarSize::Size128);
1896 let rt2 = pretty_print_vreg_scalar(rt2.to_reg(), ScalarSize::Size128);
1897 let mem = mem.clone();
1898 let mem = mem.pretty_print_default();
1899
1900 format!("ldp {rt}, {rt2}, {mem}")
1901 }
1902 &Inst::FpuStoreP128 {
1903 rt, rt2, ref mem, ..
1904 } => {
1905 let rt = pretty_print_vreg_scalar(rt, ScalarSize::Size128);
1906 let rt2 = pretty_print_vreg_scalar(rt2, ScalarSize::Size128);
1907 let mem = mem.clone();
1908 let mem = mem.pretty_print_default();
1909
1910 format!("stp {rt}, {rt2}, {mem}")
1911 }
1912 &Inst::FpuToInt { op, rd, rn } => {
1913 let (op, sizesrc, sizedest) = match op {
1914 FpuToIntOp::F32ToI32 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size32),
1915 FpuToIntOp::F32ToU32 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size32),
1916 FpuToIntOp::F32ToI64 => ("fcvtzs", ScalarSize::Size32, OperandSize::Size64),
1917 FpuToIntOp::F32ToU64 => ("fcvtzu", ScalarSize::Size32, OperandSize::Size64),
1918 FpuToIntOp::F64ToI32 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size32),
1919 FpuToIntOp::F64ToU32 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size32),
1920 FpuToIntOp::F64ToI64 => ("fcvtzs", ScalarSize::Size64, OperandSize::Size64),
1921 FpuToIntOp::F64ToU64 => ("fcvtzu", ScalarSize::Size64, OperandSize::Size64),
1922 };
1923 let rd = pretty_print_ireg(rd.to_reg(), sizedest);
1924 let rn = pretty_print_vreg_scalar(rn, sizesrc);
1925 format!("{op} {rd}, {rn}")
1926 }
1927 &Inst::IntToFpu { op, rd, rn } => {
1928 let (op, sizesrc, sizedest) = match op {
1929 IntToFpuOp::I32ToF32 => ("scvtf", OperandSize::Size32, ScalarSize::Size32),
1930 IntToFpuOp::U32ToF32 => ("ucvtf", OperandSize::Size32, ScalarSize::Size32),
1931 IntToFpuOp::I64ToF32 => ("scvtf", OperandSize::Size64, ScalarSize::Size32),
1932 IntToFpuOp::U64ToF32 => ("ucvtf", OperandSize::Size64, ScalarSize::Size32),
1933 IntToFpuOp::I32ToF64 => ("scvtf", OperandSize::Size32, ScalarSize::Size64),
1934 IntToFpuOp::U32ToF64 => ("ucvtf", OperandSize::Size32, ScalarSize::Size64),
1935 IntToFpuOp::I64ToF64 => ("scvtf", OperandSize::Size64, ScalarSize::Size64),
1936 IntToFpuOp::U64ToF64 => ("ucvtf", OperandSize::Size64, ScalarSize::Size64),
1937 };
1938 let rd = pretty_print_vreg_scalar(rd.to_reg(), sizedest);
1939 let rn = pretty_print_ireg(rn, sizesrc);
1940 format!("{op} {rd}, {rn}")
1941 }
1942 &Inst::FpuCSel16 { rd, rn, rm, cond } => {
1943 let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size16);
1944 let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size16);
1945 let rm = pretty_print_vreg_scalar(rm, ScalarSize::Size16);
1946 let cond = cond.pretty_print(0);
1947 format!("fcsel {rd}, {rn}, {rm}, {cond}")
1948 }
1949 &Inst::FpuCSel32 { rd, rn, rm, cond } => {
1950 let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32);
1951 let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size32);
1952 let rm = pretty_print_vreg_scalar(rm, ScalarSize::Size32);
1953 let cond = cond.pretty_print(0);
1954 format!("fcsel {rd}, {rn}, {rm}, {cond}")
1955 }
1956 &Inst::FpuCSel64 { rd, rn, rm, cond } => {
1957 let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
1958 let rn = pretty_print_vreg_scalar(rn, ScalarSize::Size64);
1959 let rm = pretty_print_vreg_scalar(rm, ScalarSize::Size64);
1960 let cond = cond.pretty_print(0);
1961 format!("fcsel {rd}, {rn}, {rm}, {cond}")
1962 }
1963 &Inst::FpuRound { op, rd, rn } => {
1964 let (inst, size) = match op {
1965 FpuRoundMode::Minus32 => ("frintm", ScalarSize::Size32),
1966 FpuRoundMode::Minus64 => ("frintm", ScalarSize::Size64),
1967 FpuRoundMode::Plus32 => ("frintp", ScalarSize::Size32),
1968 FpuRoundMode::Plus64 => ("frintp", ScalarSize::Size64),
1969 FpuRoundMode::Zero32 => ("frintz", ScalarSize::Size32),
1970 FpuRoundMode::Zero64 => ("frintz", ScalarSize::Size64),
1971 FpuRoundMode::Nearest32 => ("frintn", ScalarSize::Size32),
1972 FpuRoundMode::Nearest64 => ("frintn", ScalarSize::Size64),
1973 };
1974 let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1975 let rn = pretty_print_vreg_scalar(rn, size);
1976 format!("{inst} {rd}, {rn}")
1977 }
1978 &Inst::MovToFpu { rd, rn, size } => {
1979 let operand_size = size.operand_size();
1980 let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1981 let rn = pretty_print_ireg(rn, operand_size);
1982 format!("fmov {rd}, {rn}")
1983 }
1984 &Inst::FpuMoveFPImm { rd, imm, size } => {
1985 let imm = imm.pretty_print(0);
1986 let rd = pretty_print_vreg_scalar(rd.to_reg(), size);
1987
1988 format!("fmov {rd}, {imm}")
1989 }
1990 &Inst::MovToVec {
1991 rd,
1992 ri,
1993 rn,
1994 idx,
1995 size,
1996 } => {
1997 let rd = pretty_print_vreg_element(rd.to_reg(), idx as usize, size.lane_size());
1998 let ri = pretty_print_vreg_element(ri, idx as usize, size.lane_size());
1999 let rn = pretty_print_ireg(rn, size.operand_size());
2000 format!("mov {rd}, {ri}, {rn}")
2001 }
2002 &Inst::MovFromVec { rd, rn, idx, size } => {
2003 let op = match size {
2004 ScalarSize::Size8 => "umov",
2005 ScalarSize::Size16 => "umov",
2006 ScalarSize::Size32 => "mov",
2007 ScalarSize::Size64 => "mov",
2008 _ => unimplemented!(),
2009 };
2010 let rd = pretty_print_ireg(rd.to_reg(), size.operand_size());
2011 let rn = pretty_print_vreg_element(rn, idx as usize, size);
2012 format!("{op} {rd}, {rn}")
2013 }
2014 &Inst::MovFromVecSigned {
2015 rd,
2016 rn,
2017 idx,
2018 size,
2019 scalar_size,
2020 } => {
2021 let rd = pretty_print_ireg(rd.to_reg(), scalar_size);
2022 let rn = pretty_print_vreg_element(rn, idx as usize, size.lane_size());
2023 format!("smov {rd}, {rn}")
2024 }
2025 &Inst::VecDup { rd, rn, size } => {
2026 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2027 let rn = pretty_print_ireg(rn, size.operand_size());
2028 format!("dup {rd}, {rn}")
2029 }
2030 &Inst::VecDupFromFpu { rd, rn, size, lane } => {
2031 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2032 let rn = pretty_print_vreg_element(rn, lane.into(), size.lane_size());
2033 format!("dup {rd}, {rn}")
2034 }
2035 &Inst::VecDupFPImm { rd, imm, size } => {
2036 let imm = imm.pretty_print(0);
2037 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2038
2039 format!("fmov {rd}, {imm}")
2040 }
2041 &Inst::VecDupImm {
2042 rd,
2043 imm,
2044 invert,
2045 size,
2046 } => {
2047 let imm = imm.pretty_print(0);
2048 let op = if invert { "mvni" } else { "movi" };
2049 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2050
2051 format!("{op} {rd}, {imm}")
2052 }
2053 &Inst::VecExtend {
2054 t,
2055 rd,
2056 rn,
2057 high_half,
2058 lane_size,
2059 } => {
2060 let vec64 = VectorSize::from_lane_size(lane_size.narrow(), false);
2061 let vec128 = VectorSize::from_lane_size(lane_size.narrow(), true);
2062 let rd_size = VectorSize::from_lane_size(lane_size, true);
2063 let (op, rn_size) = match (t, high_half) {
2064 (VecExtendOp::Sxtl, false) => ("sxtl", vec64),
2065 (VecExtendOp::Sxtl, true) => ("sxtl2", vec128),
2066 (VecExtendOp::Uxtl, false) => ("uxtl", vec64),
2067 (VecExtendOp::Uxtl, true) => ("uxtl2", vec128),
2068 };
2069 let rd = pretty_print_vreg_vector(rd.to_reg(), rd_size);
2070 let rn = pretty_print_vreg_vector(rn, rn_size);
2071 format!("{op} {rd}, {rn}")
2072 }
2073 &Inst::VecMovElement {
2074 rd,
2075 ri,
2076 rn,
2077 dest_idx,
2078 src_idx,
2079 size,
2080 } => {
2081 let rd =
2082 pretty_print_vreg_element(rd.to_reg(), dest_idx as usize, size.lane_size());
2083 let ri = pretty_print_vreg_element(ri, dest_idx as usize, size.lane_size());
2084 let rn = pretty_print_vreg_element(rn, src_idx as usize, size.lane_size());
2085 format!("mov {rd}, {ri}, {rn}")
2086 }
2087 &Inst::VecRRLong {
2088 op,
2089 rd,
2090 rn,
2091 high_half,
2092 } => {
2093 let (op, rd_size, size, suffix) = match (op, high_half) {
2094 (VecRRLongOp::Fcvtl16, false) => {
2095 ("fcvtl", VectorSize::Size32x4, VectorSize::Size16x4, "")
2096 }
2097 (VecRRLongOp::Fcvtl16, true) => {
2098 ("fcvtl2", VectorSize::Size32x4, VectorSize::Size16x8, "")
2099 }
2100 (VecRRLongOp::Fcvtl32, false) => {
2101 ("fcvtl", VectorSize::Size64x2, VectorSize::Size32x2, "")
2102 }
2103 (VecRRLongOp::Fcvtl32, true) => {
2104 ("fcvtl2", VectorSize::Size64x2, VectorSize::Size32x4, "")
2105 }
2106 (VecRRLongOp::Shll8, false) => {
2107 ("shll", VectorSize::Size16x8, VectorSize::Size8x8, ", #8")
2108 }
2109 (VecRRLongOp::Shll8, true) => {
2110 ("shll2", VectorSize::Size16x8, VectorSize::Size8x16, ", #8")
2111 }
2112 (VecRRLongOp::Shll16, false) => {
2113 ("shll", VectorSize::Size32x4, VectorSize::Size16x4, ", #16")
2114 }
2115 (VecRRLongOp::Shll16, true) => {
2116 ("shll2", VectorSize::Size32x4, VectorSize::Size16x8, ", #16")
2117 }
2118 (VecRRLongOp::Shll32, false) => {
2119 ("shll", VectorSize::Size64x2, VectorSize::Size32x2, ", #32")
2120 }
2121 (VecRRLongOp::Shll32, true) => {
2122 ("shll2", VectorSize::Size64x2, VectorSize::Size32x4, ", #32")
2123 }
2124 };
2125 let rd = pretty_print_vreg_vector(rd.to_reg(), rd_size);
2126 let rn = pretty_print_vreg_vector(rn, size);
2127
2128 format!("{op} {rd}, {rn}{suffix}")
2129 }
2130 &Inst::VecRRNarrowLow {
2131 op,
2132 rd,
2133 rn,
2134 lane_size,
2135 ..
2136 }
2137 | &Inst::VecRRNarrowHigh {
2138 op,
2139 rd,
2140 rn,
2141 lane_size,
2142 ..
2143 } => {
2144 let vec64 = VectorSize::from_lane_size(lane_size, false);
2145 let vec128 = VectorSize::from_lane_size(lane_size, true);
2146 let rn_size = VectorSize::from_lane_size(lane_size.widen(), true);
2147 let high_half = match self {
2148 &Inst::VecRRNarrowLow { .. } => false,
2149 &Inst::VecRRNarrowHigh { .. } => true,
2150 _ => unreachable!(),
2151 };
2152 let (op, rd_size) = match (op, high_half) {
2153 (VecRRNarrowOp::Xtn, false) => ("xtn", vec64),
2154 (VecRRNarrowOp::Xtn, true) => ("xtn2", vec128),
2155 (VecRRNarrowOp::Sqxtn, false) => ("sqxtn", vec64),
2156 (VecRRNarrowOp::Sqxtn, true) => ("sqxtn2", vec128),
2157 (VecRRNarrowOp::Sqxtun, false) => ("sqxtun", vec64),
2158 (VecRRNarrowOp::Sqxtun, true) => ("sqxtun2", vec128),
2159 (VecRRNarrowOp::Uqxtn, false) => ("uqxtn", vec64),
2160 (VecRRNarrowOp::Uqxtn, true) => ("uqxtn2", vec128),
2161 (VecRRNarrowOp::Fcvtn, false) => ("fcvtn", vec64),
2162 (VecRRNarrowOp::Fcvtn, true) => ("fcvtn2", vec128),
2163 };
2164 let rn = pretty_print_vreg_vector(rn, rn_size);
2165 let rd = pretty_print_vreg_vector(rd.to_reg(), rd_size);
2166 let ri = match self {
2167 &Inst::VecRRNarrowLow { .. } => "".to_string(),
2168 &Inst::VecRRNarrowHigh { ri, .. } => {
2169 format!("{}, ", pretty_print_vreg_vector(ri, rd_size))
2170 }
2171 _ => unreachable!(),
2172 };
2173
2174 format!("{op} {rd}, {ri}{rn}")
2175 }
2176 &Inst::VecRRPair { op, rd, rn } => {
2177 let op = match op {
2178 VecPairOp::Addp => "addp",
2179 };
2180 let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64);
2181 let rn = pretty_print_vreg_vector(rn, VectorSize::Size64x2);
2182
2183 format!("{op} {rd}, {rn}")
2184 }
2185 &Inst::VecRRPairLong { op, rd, rn } => {
2186 let (op, dest, src) = match op {
2187 VecRRPairLongOp::Saddlp8 => {
2188 ("saddlp", VectorSize::Size16x8, VectorSize::Size8x16)
2189 }
2190 VecRRPairLongOp::Saddlp16 => {
2191 ("saddlp", VectorSize::Size32x4, VectorSize::Size16x8)
2192 }
2193 VecRRPairLongOp::Uaddlp8 => {
2194 ("uaddlp", VectorSize::Size16x8, VectorSize::Size8x16)
2195 }
2196 VecRRPairLongOp::Uaddlp16 => {
2197 ("uaddlp", VectorSize::Size32x4, VectorSize::Size16x8)
2198 }
2199 };
2200 let rd = pretty_print_vreg_vector(rd.to_reg(), dest);
2201 let rn = pretty_print_vreg_vector(rn, src);
2202
2203 format!("{op} {rd}, {rn}")
2204 }
2205 &Inst::VecRRR {
2206 rd,
2207 rn,
2208 rm,
2209 alu_op,
2210 size,
2211 } => {
2212 let (op, size) = match alu_op {
2213 VecALUOp::Sqadd => ("sqadd", size),
2214 VecALUOp::Uqadd => ("uqadd", size),
2215 VecALUOp::Sqsub => ("sqsub", size),
2216 VecALUOp::Uqsub => ("uqsub", size),
2217 VecALUOp::Cmeq => ("cmeq", size),
2218 VecALUOp::Cmge => ("cmge", size),
2219 VecALUOp::Cmgt => ("cmgt", size),
2220 VecALUOp::Cmhs => ("cmhs", size),
2221 VecALUOp::Cmhi => ("cmhi", size),
2222 VecALUOp::Fcmeq => ("fcmeq", size),
2223 VecALUOp::Fcmgt => ("fcmgt", size),
2224 VecALUOp::Fcmge => ("fcmge", size),
2225 VecALUOp::Umaxp => ("umaxp", size),
2226 VecALUOp::Add => ("add", size),
2227 VecALUOp::Sub => ("sub", size),
2228 VecALUOp::Mul => ("mul", size),
2229 VecALUOp::Sshl => ("sshl", size),
2230 VecALUOp::Ushl => ("ushl", size),
2231 VecALUOp::Umin => ("umin", size),
2232 VecALUOp::Smin => ("smin", size),
2233 VecALUOp::Umax => ("umax", size),
2234 VecALUOp::Smax => ("smax", size),
2235 VecALUOp::Urhadd => ("urhadd", size),
2236 VecALUOp::Fadd => ("fadd", size),
2237 VecALUOp::Fsub => ("fsub", size),
2238 VecALUOp::Fdiv => ("fdiv", size),
2239 VecALUOp::Fmax => ("fmax", size),
2240 VecALUOp::Fmin => ("fmin", size),
2241 VecALUOp::Fmul => ("fmul", size),
2242 VecALUOp::Addp => ("addp", size),
2243 VecALUOp::Zip1 => ("zip1", size),
2244 VecALUOp::Zip2 => ("zip2", size),
2245 VecALUOp::Sqrdmulh => ("sqrdmulh", size),
2246 VecALUOp::Uzp1 => ("uzp1", size),
2247 VecALUOp::Uzp2 => ("uzp2", size),
2248 VecALUOp::Trn1 => ("trn1", size),
2249 VecALUOp::Trn2 => ("trn2", size),
2250
2251 VecALUOp::And => ("and", size.as_scalar8_vector()),
2254 VecALUOp::Bic => ("bic", size.as_scalar8_vector()),
2255 VecALUOp::Orr => ("orr", size.as_scalar8_vector()),
2256 VecALUOp::Orn => ("orn", size.as_scalar8_vector()),
2257 VecALUOp::Eor => ("eor", size.as_scalar8_vector()),
2258 };
2259 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2260 let rn = pretty_print_vreg_vector(rn, size);
2261 let rm = pretty_print_vreg_vector(rm, size);
2262 format!("{op} {rd}, {rn}, {rm}")
2263 }
2264 &Inst::VecRRRMod {
2265 rd,
2266 ri,
2267 rn,
2268 rm,
2269 alu_op,
2270 size,
2271 } => {
2272 let (op, size) = match alu_op {
2273 VecALUModOp::Bsl => ("bsl", VectorSize::Size8x16),
2274 VecALUModOp::Fmla => ("fmla", size),
2275 VecALUModOp::Fmls => ("fmls", size),
2276 };
2277 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2278 let ri = pretty_print_vreg_vector(ri, size);
2279 let rn = pretty_print_vreg_vector(rn, size);
2280 let rm = pretty_print_vreg_vector(rm, size);
2281 format!("{op} {rd}, {ri}, {rn}, {rm}")
2282 }
2283 &Inst::VecFmlaElem {
2284 rd,
2285 ri,
2286 rn,
2287 rm,
2288 alu_op,
2289 size,
2290 idx,
2291 } => {
2292 let (op, size) = match alu_op {
2293 VecALUModOp::Fmla => ("fmla", size),
2294 VecALUModOp::Fmls => ("fmls", size),
2295 _ => unreachable!(),
2296 };
2297 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2298 let ri = pretty_print_vreg_vector(ri, size);
2299 let rn = pretty_print_vreg_vector(rn, size);
2300 let rm = pretty_print_vreg_element(rm, idx.into(), size.lane_size());
2301 format!("{op} {rd}, {ri}, {rn}, {rm}")
2302 }
2303 &Inst::VecRRRLong {
2304 rd,
2305 rn,
2306 rm,
2307 alu_op,
2308 high_half,
2309 } => {
2310 let (op, dest_size, src_size) = match (alu_op, high_half) {
2311 (VecRRRLongOp::Smull8, false) => {
2312 ("smull", VectorSize::Size16x8, VectorSize::Size8x8)
2313 }
2314 (VecRRRLongOp::Smull8, true) => {
2315 ("smull2", VectorSize::Size16x8, VectorSize::Size8x16)
2316 }
2317 (VecRRRLongOp::Smull16, false) => {
2318 ("smull", VectorSize::Size32x4, VectorSize::Size16x4)
2319 }
2320 (VecRRRLongOp::Smull16, true) => {
2321 ("smull2", VectorSize::Size32x4, VectorSize::Size16x8)
2322 }
2323 (VecRRRLongOp::Smull32, false) => {
2324 ("smull", VectorSize::Size64x2, VectorSize::Size32x2)
2325 }
2326 (VecRRRLongOp::Smull32, true) => {
2327 ("smull2", VectorSize::Size64x2, VectorSize::Size32x4)
2328 }
2329 (VecRRRLongOp::Umull8, false) => {
2330 ("umull", VectorSize::Size16x8, VectorSize::Size8x8)
2331 }
2332 (VecRRRLongOp::Umull8, true) => {
2333 ("umull2", VectorSize::Size16x8, VectorSize::Size8x16)
2334 }
2335 (VecRRRLongOp::Umull16, false) => {
2336 ("umull", VectorSize::Size32x4, VectorSize::Size16x4)
2337 }
2338 (VecRRRLongOp::Umull16, true) => {
2339 ("umull2", VectorSize::Size32x4, VectorSize::Size16x8)
2340 }
2341 (VecRRRLongOp::Umull32, false) => {
2342 ("umull", VectorSize::Size64x2, VectorSize::Size32x2)
2343 }
2344 (VecRRRLongOp::Umull32, true) => {
2345 ("umull2", VectorSize::Size64x2, VectorSize::Size32x4)
2346 }
2347 };
2348 let rd = pretty_print_vreg_vector(rd.to_reg(), dest_size);
2349 let rn = pretty_print_vreg_vector(rn, src_size);
2350 let rm = pretty_print_vreg_vector(rm, src_size);
2351 format!("{op} {rd}, {rn}, {rm}")
2352 }
2353 &Inst::VecRRRLongMod {
2354 rd,
2355 ri,
2356 rn,
2357 rm,
2358 alu_op,
2359 high_half,
2360 } => {
2361 let (op, dest_size, src_size) = match (alu_op, high_half) {
2362 (VecRRRLongModOp::Umlal8, false) => {
2363 ("umlal", VectorSize::Size16x8, VectorSize::Size8x8)
2364 }
2365 (VecRRRLongModOp::Umlal8, true) => {
2366 ("umlal2", VectorSize::Size16x8, VectorSize::Size8x16)
2367 }
2368 (VecRRRLongModOp::Umlal16, false) => {
2369 ("umlal", VectorSize::Size32x4, VectorSize::Size16x4)
2370 }
2371 (VecRRRLongModOp::Umlal16, true) => {
2372 ("umlal2", VectorSize::Size32x4, VectorSize::Size16x8)
2373 }
2374 (VecRRRLongModOp::Umlal32, false) => {
2375 ("umlal", VectorSize::Size64x2, VectorSize::Size32x2)
2376 }
2377 (VecRRRLongModOp::Umlal32, true) => {
2378 ("umlal2", VectorSize::Size64x2, VectorSize::Size32x4)
2379 }
2380 };
2381 let rd = pretty_print_vreg_vector(rd.to_reg(), dest_size);
2382 let ri = pretty_print_vreg_vector(ri, dest_size);
2383 let rn = pretty_print_vreg_vector(rn, src_size);
2384 let rm = pretty_print_vreg_vector(rm, src_size);
2385 format!("{op} {rd}, {ri}, {rn}, {rm}")
2386 }
2387 &Inst::VecMisc { op, rd, rn, size } => {
2388 let (op, size, suffix) = match op {
2389 VecMisc2::Neg => ("neg", size, ""),
2390 VecMisc2::Abs => ("abs", size, ""),
2391 VecMisc2::Fabs => ("fabs", size, ""),
2392 VecMisc2::Fneg => ("fneg", size, ""),
2393 VecMisc2::Fsqrt => ("fsqrt", size, ""),
2394 VecMisc2::Rev16 => ("rev16", size, ""),
2395 VecMisc2::Rev32 => ("rev32", size, ""),
2396 VecMisc2::Rev64 => ("rev64", size, ""),
2397 VecMisc2::Fcvtzs => ("fcvtzs", size, ""),
2398 VecMisc2::Fcvtzu => ("fcvtzu", size, ""),
2399 VecMisc2::Scvtf => ("scvtf", size, ""),
2400 VecMisc2::Ucvtf => ("ucvtf", size, ""),
2401 VecMisc2::Frintn => ("frintn", size, ""),
2402 VecMisc2::Frintz => ("frintz", size, ""),
2403 VecMisc2::Frintm => ("frintm", size, ""),
2404 VecMisc2::Frintp => ("frintp", size, ""),
2405 VecMisc2::Cnt => ("cnt", size, ""),
2406 VecMisc2::Cmeq0 => ("cmeq", size, ", #0"),
2407 VecMisc2::Cmge0 => ("cmge", size, ", #0"),
2408 VecMisc2::Cmgt0 => ("cmgt", size, ", #0"),
2409 VecMisc2::Cmle0 => ("cmle", size, ", #0"),
2410 VecMisc2::Cmlt0 => ("cmlt", size, ", #0"),
2411 VecMisc2::Fcmeq0 => ("fcmeq", size, ", #0.0"),
2412 VecMisc2::Fcmge0 => ("fcmge", size, ", #0.0"),
2413 VecMisc2::Fcmgt0 => ("fcmgt", size, ", #0.0"),
2414 VecMisc2::Fcmle0 => ("fcmle", size, ", #0.0"),
2415 VecMisc2::Fcmlt0 => ("fcmlt", size, ", #0.0"),
2416
2417 VecMisc2::Not => ("mvn", size.as_scalar8_vector(), ""),
2420 };
2421 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2422 let rn = pretty_print_vreg_vector(rn, size);
2423 format!("{op} {rd}, {rn}{suffix}")
2424 }
2425 &Inst::VecLanes { op, rd, rn, size } => {
2426 let op = match op {
2427 VecLanesOp::Uminv => "uminv",
2428 VecLanesOp::Addv => "addv",
2429 };
2430 let rd = pretty_print_vreg_scalar(rd.to_reg(), size.lane_size());
2431 let rn = pretty_print_vreg_vector(rn, size);
2432 format!("{op} {rd}, {rn}")
2433 }
2434 &Inst::VecShiftImm {
2435 op,
2436 rd,
2437 rn,
2438 size,
2439 imm,
2440 } => {
2441 let op = match op {
2442 VecShiftImmOp::Shl => "shl",
2443 VecShiftImmOp::Ushr => "ushr",
2444 VecShiftImmOp::Sshr => "sshr",
2445 };
2446 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2447 let rn = pretty_print_vreg_vector(rn, size);
2448 format!("{op} {rd}, {rn}, #{imm}")
2449 }
2450 &Inst::VecShiftImmMod {
2451 op,
2452 rd,
2453 ri,
2454 rn,
2455 size,
2456 imm,
2457 } => {
2458 let op = match op {
2459 VecShiftImmModOp::Sli => "sli",
2460 };
2461 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2462 let ri = pretty_print_vreg_vector(ri, size);
2463 let rn = pretty_print_vreg_vector(rn, size);
2464 format!("{op} {rd}, {ri}, {rn}, #{imm}")
2465 }
2466 &Inst::VecExtract { rd, rn, rm, imm4 } => {
2467 let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2468 let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2469 let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2470 format!("ext {rd}, {rn}, {rm}, #{imm4}")
2471 }
2472 &Inst::VecTbl { rd, rn, rm } => {
2473 let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2474 let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2475 let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2476 format!("tbl {rd}, {{ {rn} }}, {rm}")
2477 }
2478 &Inst::VecTblExt { rd, ri, rn, rm } => {
2479 let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2480 let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2481 let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2482 let ri = pretty_print_vreg_vector(ri, VectorSize::Size8x16);
2483 format!("tbx {rd}, {ri}, {{ {rn} }}, {rm}")
2484 }
2485 &Inst::VecTbl2 { rd, rn, rn2, rm } => {
2486 let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2487 let rn2 = pretty_print_vreg_vector(rn2, VectorSize::Size8x16);
2488 let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2489 let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2490 format!("tbl {rd}, {{ {rn}, {rn2} }}, {rm}")
2491 }
2492 &Inst::VecTbl2Ext {
2493 rd,
2494 ri,
2495 rn,
2496 rn2,
2497 rm,
2498 } => {
2499 let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2500 let rn2 = pretty_print_vreg_vector(rn2, VectorSize::Size8x16);
2501 let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2502 let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2503 let ri = pretty_print_vreg_vector(ri, VectorSize::Size8x16);
2504 format!("tbx {rd}, {ri}, {{ {rn}, {rn2} }}, {rm}")
2505 }
2506 &Inst::VecLoadReplicate { rd, rn, size, .. } => {
2507 let rd = pretty_print_vreg_vector(rd.to_reg(), size);
2508 let rn = pretty_print_reg(rn);
2509
2510 format!("ld1r {{ {rd} }}, [{rn}]")
2511 }
2512 &Inst::VecCSel { rd, rn, rm, cond } => {
2513 let rd = pretty_print_vreg_vector(rd.to_reg(), VectorSize::Size8x16);
2514 let rn = pretty_print_vreg_vector(rn, VectorSize::Size8x16);
2515 let rm = pretty_print_vreg_vector(rm, VectorSize::Size8x16);
2516 let cond = cond.pretty_print(0);
2517 format!("vcsel {rd}, {rn}, {rm}, {cond} (if-then-else diamond)")
2518 }
2519 &Inst::MovToNZCV { rn } => {
2520 let rn = pretty_print_reg(rn);
2521 format!("msr nzcv, {rn}")
2522 }
2523 &Inst::MovFromNZCV { rd } => {
2524 let rd = pretty_print_reg(rd.to_reg());
2525 format!("mrs {rd}, nzcv")
2526 }
2527 &Inst::Extend {
2528 rd,
2529 rn,
2530 signed: false,
2531 from_bits: 1,
2532 ..
2533 } => {
2534 let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size32);
2535 let rn = pretty_print_ireg(rn, OperandSize::Size32);
2536 format!("and {rd}, {rn}, #1")
2537 }
2538 &Inst::Extend {
2539 rd,
2540 rn,
2541 signed: false,
2542 from_bits: 32,
2543 to_bits: 64,
2544 } => {
2545 let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size32);
2549 let rn = pretty_print_ireg(rn, OperandSize::Size32);
2550 format!("mov {rd}, {rn}")
2551 }
2552 &Inst::Extend {
2553 rd,
2554 rn,
2555 signed,
2556 from_bits,
2557 to_bits,
2558 } => {
2559 assert!(from_bits <= to_bits);
2560 let op = match (signed, from_bits) {
2561 (false, 8) => "uxtb",
2562 (true, 8) => "sxtb",
2563 (false, 16) => "uxth",
2564 (true, 16) => "sxth",
2565 (true, 32) => "sxtw",
2566 (true, _) => "sbfx",
2567 (false, _) => "ubfx",
2568 };
2569 if op == "sbfx" || op == "ubfx" {
2570 let dest_size = OperandSize::from_bits(to_bits);
2571 let rd = pretty_print_ireg(rd.to_reg(), dest_size);
2572 let rn = pretty_print_ireg(rn, dest_size);
2573 format!("{op} {rd}, {rn}, #0, #{from_bits}")
2574 } else {
2575 let dest_size = if signed {
2576 OperandSize::from_bits(to_bits)
2577 } else {
2578 OperandSize::Size32
2579 };
2580 let rd = pretty_print_ireg(rd.to_reg(), dest_size);
2581 let rn = pretty_print_ireg(rn, OperandSize::from_bits(from_bits));
2582 format!("{op} {rd}, {rn}")
2583 }
2584 }
2585 &Inst::Call { ref info } => {
2586 let try_call = info
2587 .try_call_info
2588 .as_ref()
2589 .map(|tci| pretty_print_try_call(tci))
2590 .unwrap_or_default();
2591 format!("bl 0{try_call}")
2592 }
2593 &Inst::CallInd { ref info } => {
2594 let rn = pretty_print_reg(info.dest);
2595 let try_call = info
2596 .try_call_info
2597 .as_ref()
2598 .map(|tci| pretty_print_try_call(tci))
2599 .unwrap_or_default();
2600 format!("blr {rn}{try_call}")
2601 }
2602 &Inst::PatchableCall { .. } => {
2603 format!("bl 0 ; patchable")
2604 }
2605 &Inst::ReturnCall { ref info } => {
2606 let mut s = format!(
2607 "return_call {:?} new_stack_arg_size:{}",
2608 info.dest, info.new_stack_arg_size
2609 );
2610 for ret in &info.uses {
2611 let preg = pretty_print_reg(ret.preg);
2612 let vreg = pretty_print_reg(ret.vreg);
2613 write!(&mut s, " {vreg}={preg}").unwrap();
2614 }
2615 s
2616 }
2617 &Inst::ReturnCallInd { ref info } => {
2618 let callee = pretty_print_reg(info.dest);
2619 let mut s = format!(
2620 "return_call_ind {callee} new_stack_arg_size:{}",
2621 info.new_stack_arg_size
2622 );
2623 for ret in &info.uses {
2624 let preg = pretty_print_reg(ret.preg);
2625 let vreg = pretty_print_reg(ret.vreg);
2626 write!(&mut s, " {vreg}={preg}").unwrap();
2627 }
2628 s
2629 }
2630 &Inst::Args { ref args } => {
2631 let mut s = "args".to_string();
2632 for arg in args {
2633 let preg = pretty_print_reg(arg.preg);
2634 let def = pretty_print_reg(arg.vreg.to_reg());
2635 write!(&mut s, " {def}={preg}").unwrap();
2636 }
2637 s
2638 }
2639 &Inst::Rets { ref rets } => {
2640 let mut s = "rets".to_string();
2641 for ret in rets {
2642 let preg = pretty_print_reg(ret.preg);
2643 let vreg = pretty_print_reg(ret.vreg);
2644 write!(&mut s, " {vreg}={preg}").unwrap();
2645 }
2646 s
2647 }
2648 &Inst::Ret {} => "ret".to_string(),
2649 &Inst::AuthenticatedRet { key, is_hint } => {
2650 let key = match key {
2651 APIKey::AZ => "az",
2652 APIKey::BZ => "bz",
2653 APIKey::ASP => "asp",
2654 APIKey::BSP => "bsp",
2655 };
2656 match is_hint {
2657 false => format!("reta{key}"),
2658 true => format!("auti{key} ; ret"),
2659 }
2660 }
2661 &Inst::Jump { ref dest } => {
2662 let dest = dest.pretty_print(0);
2663 format!("b {dest}")
2664 }
2665 &Inst::CondBr {
2666 ref taken,
2667 ref not_taken,
2668 ref kind,
2669 } => {
2670 let taken = taken.pretty_print(0);
2671 let not_taken = not_taken.pretty_print(0);
2672 match kind {
2673 &CondBrKind::Zero(reg, size) => {
2674 let reg = pretty_print_reg_sized(reg, size);
2675 format!("cbz {reg}, {taken} ; b {not_taken}")
2676 }
2677 &CondBrKind::NotZero(reg, size) => {
2678 let reg = pretty_print_reg_sized(reg, size);
2679 format!("cbnz {reg}, {taken} ; b {not_taken}")
2680 }
2681 &CondBrKind::Cond(c) => {
2682 let c = c.pretty_print(0);
2683 format!("b.{c} {taken} ; b {not_taken}")
2684 }
2685 }
2686 }
2687 &Inst::TestBitAndBranch {
2688 kind,
2689 ref taken,
2690 ref not_taken,
2691 rn,
2692 bit,
2693 } => {
2694 let cond = match kind {
2695 TestBitAndBranchKind::Z => "z",
2696 TestBitAndBranchKind::NZ => "nz",
2697 };
2698 let taken = taken.pretty_print(0);
2699 let not_taken = not_taken.pretty_print(0);
2700 let rn = pretty_print_reg(rn);
2701 format!("tb{cond} {rn}, #{bit}, {taken} ; b {not_taken}")
2702 }
2703 &Inst::IndirectBr { rn, .. } => {
2704 let rn = pretty_print_reg(rn);
2705 format!("br {rn}")
2706 }
2707 &Inst::Brk => "brk #0xf000".to_string(),
2708 &Inst::Udf { .. } => "udf #0xc11f".to_string(),
2709 &Inst::TrapIf {
2710 ref kind,
2711 trap_code,
2712 } => match kind {
2713 &CondBrKind::Zero(reg, size) => {
2714 let reg = pretty_print_reg_sized(reg, size);
2715 format!("cbz {reg}, #trap={trap_code}")
2716 }
2717 &CondBrKind::NotZero(reg, size) => {
2718 let reg = pretty_print_reg_sized(reg, size);
2719 format!("cbnz {reg}, #trap={trap_code}")
2720 }
2721 &CondBrKind::Cond(c) => {
2722 let c = c.pretty_print(0);
2723 format!("b.{c} #trap={trap_code}")
2724 }
2725 },
2726 &Inst::Adr { rd, off } => {
2727 let rd = pretty_print_reg(rd.to_reg());
2728 format!("adr {rd}, pc+{off}")
2729 }
2730 &Inst::Adrp { rd, off } => {
2731 let rd = pretty_print_reg(rd.to_reg());
2732 let byte_offset = off * 4096;
2734 format!("adrp {rd}, pc+{byte_offset}")
2735 }
2736 &Inst::Word4 { data } => format!("data.i32 {data}"),
2737 &Inst::Word8 { data } => format!("data.i64 {data}"),
2738 &Inst::JTSequence {
2739 default,
2740 ref targets,
2741 ridx,
2742 rtmp1,
2743 rtmp2,
2744 ..
2745 } => {
2746 let ridx = pretty_print_reg(ridx);
2747 let rtmp1 = pretty_print_reg(rtmp1.to_reg());
2748 let rtmp2 = pretty_print_reg(rtmp2.to_reg());
2749 let default_target = BranchTarget::Label(default).pretty_print(0);
2750 format!(
2751 concat!(
2752 "b.hs {} ; ",
2753 "csel {}, xzr, {}, hs ; ",
2754 "csdb ; ",
2755 "adr {}, pc+16 ; ",
2756 "ldrsw {}, [{}, {}, uxtw #2] ; ",
2757 "add {}, {}, {} ; ",
2758 "br {} ; ",
2759 "jt_entries {:?}"
2760 ),
2761 default_target,
2762 rtmp2,
2763 ridx,
2764 rtmp1,
2765 rtmp2,
2766 rtmp1,
2767 rtmp2,
2768 rtmp1,
2769 rtmp1,
2770 rtmp2,
2771 rtmp1,
2772 targets
2773 )
2774 }
2775 &Inst::LoadExtNameGot { rd, ref name } => {
2776 let rd = pretty_print_reg(rd.to_reg());
2777 format!("load_ext_name_got {rd}, {name:?}")
2778 }
2779 &Inst::LoadExtNameNear {
2780 rd,
2781 ref name,
2782 offset,
2783 } => {
2784 let rd = pretty_print_reg(rd.to_reg());
2785 format!("load_ext_name_near {rd}, {name:?}+{offset}")
2786 }
2787 &Inst::LoadExtNameFar {
2788 rd,
2789 ref name,
2790 offset,
2791 } => {
2792 let rd = pretty_print_reg(rd.to_reg());
2793 format!("load_ext_name_far {rd}, {name:?}+{offset}")
2794 }
2795 &Inst::LoadAddr { rd, ref mem } => {
2796 let mem = mem.clone();
2801 let (mem_insts, mem) = mem_finalize(None, &mem, I8, state);
2802 let mut ret = String::new();
2803 for inst in mem_insts.into_iter() {
2804 ret.push_str(&inst.print_with_state(&mut EmitState::default()));
2805 }
2806 let (reg, index_reg, offset) = match mem {
2807 AMode::RegExtended { rn, rm, extendop } => (rn, Some((rm, extendop)), 0),
2808 AMode::Unscaled { rn, simm9 } => (rn, None, simm9.value()),
2809 AMode::UnsignedOffset { rn, uimm12 } => (rn, None, uimm12.value() as i32),
2810 _ => panic!("Unsupported case for LoadAddr: {mem:?}"),
2811 };
2812 let abs_offset = if offset < 0 {
2813 -offset as u64
2814 } else {
2815 offset as u64
2816 };
2817 let alu_op = if offset < 0 { ALUOp::Sub } else { ALUOp::Add };
2818
2819 if let Some((idx, extendop)) = index_reg {
2820 let add = Inst::AluRRRExtend {
2821 alu_op: ALUOp::Add,
2822 size: OperandSize::Size64,
2823 rd,
2824 rn: reg,
2825 rm: idx,
2826 extendop,
2827 };
2828
2829 ret.push_str(&add.print_with_state(&mut EmitState::default()));
2830 } else if offset == 0 {
2831 let mov = Inst::gen_move(rd, reg, I64);
2832 ret.push_str(&mov.print_with_state(&mut EmitState::default()));
2833 } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) {
2834 let add = Inst::AluRRImm12 {
2835 alu_op,
2836 size: OperandSize::Size64,
2837 rd,
2838 rn: reg,
2839 imm12,
2840 };
2841 ret.push_str(&add.print_with_state(&mut EmitState::default()));
2842 } else {
2843 let tmp = writable_spilltmp_reg();
2844 for inst in Inst::load_constant(tmp, abs_offset).into_iter() {
2845 ret.push_str(&inst.print_with_state(&mut EmitState::default()));
2846 }
2847 let add = Inst::AluRRR {
2848 alu_op,
2849 size: OperandSize::Size64,
2850 rd,
2851 rn: reg,
2852 rm: tmp.to_reg(),
2853 };
2854 ret.push_str(&add.print_with_state(&mut EmitState::default()));
2855 }
2856 ret
2857 }
2858 &Inst::Paci { key } => {
2859 let key = match key {
2860 APIKey::AZ => "az",
2861 APIKey::BZ => "bz",
2862 APIKey::ASP => "asp",
2863 APIKey::BSP => "bsp",
2864 };
2865
2866 "paci".to_string() + key
2867 }
2868 &Inst::Xpaclri => "xpaclri".to_string(),
2869 &Inst::Bti { targets } => {
2870 let targets = match targets {
2871 BranchTargetType::None => "",
2872 BranchTargetType::C => " c",
2873 BranchTargetType::J => " j",
2874 BranchTargetType::JC => " jc",
2875 };
2876
2877 "bti".to_string() + targets
2878 }
2879 &Inst::EmitIsland { needed_space } => format!("emit_island {needed_space}"),
2880
2881 &Inst::ElfTlsGetAddr {
2882 ref symbol,
2883 rd,
2884 tmp,
2885 } => {
2886 let rd = pretty_print_reg(rd.to_reg());
2887 let tmp = pretty_print_reg(tmp.to_reg());
2888 format!("elf_tls_get_addr {}, {}, {}", rd, tmp, symbol.display(None))
2889 }
2890 &Inst::MachOTlsGetAddr { ref symbol, rd } => {
2891 let rd = pretty_print_reg(rd.to_reg());
2892 format!("macho_tls_get_addr {}, {}", rd, symbol.display(None))
2893 }
2894 &Inst::Unwind { ref inst } => {
2895 format!("unwind {inst:?}")
2896 }
2897 &Inst::DummyUse { reg } => {
2898 let reg = pretty_print_reg(reg);
2899 format!("dummy_use {reg}")
2900 }
2901 &Inst::LabelAddress { dst, label } => {
2902 let dst = pretty_print_reg(dst.to_reg());
2903 format!("label_address {dst}, {label:?}")
2904 }
2905 &Inst::SequencePoint {} => {
2906 format!("sequence_point")
2907 }
2908 &Inst::StackProbeLoop { start, end, step } => {
2909 let start = pretty_print_reg(start.to_reg());
2910 let end = pretty_print_reg(end);
2911 let step = step.pretty_print(0);
2912 format!("stack_probe_loop {start}, {end}, {step}")
2913 }
2914 }
2915 }
2916}
2917
/// Kinds of PC-relative label references in an AArch64 instruction stream.
/// Each kind has its own immediate-field encoding and reachable range
/// (see `max_pos_range` / `patch` below for the exact field layouts).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LabelUse {
    /// 14-bit signed, 4-byte-scaled branch offset (test-bit branches
    /// `tbz`/`tbnz`); reaches roughly +/- 32 KiB.
    Branch14,
    /// 19-bit signed, 4-byte-scaled branch offset (conditional and
    /// compare-and-branch forms); reaches roughly +/- 1 MiB.
    Branch19,
    /// 26-bit signed, 4-byte-scaled branch offset (unconditional
    /// branch/call `b`/`bl`); reaches roughly +/- 128 MiB.
    Branch26,
    /// 19-bit signed, 4-byte-scaled offset in a load-literal instruction;
    /// same field layout as `Branch19`.
    Ldr19,
    /// 21-bit signed, byte-granular offset in an `adr` instruction;
    /// reaches roughly +/- 1 MiB.
    Adr21,
    /// A raw 32-bit PC-relative byte offset stored as a data word
    /// (used for veneer literals).
    PCRel32,
}
2943
impl MachInstLabelUse for LabelUse {
    /// Every reference patches a single 32-bit instruction word, so all
    /// use-sites are 4-byte aligned.
    const ALIGN: CodeOffset = 4;

    /// Maximum PC-relative distance, in bytes, to a forward
    /// (positive-offset) label target for this reference kind.
    fn max_pos_range(self) -> CodeOffset {
        match self {
            // 14-bit signed immediate, scaled by 4: +/- 32 KiB.
            LabelUse::Branch14 => (1 << 15) - 1,
            // 19-bit signed immediate, scaled by 4: +/- 1 MiB.
            LabelUse::Branch19 => (1 << 20) - 1,
            // 26-bit signed immediate, scaled by 4: +/- 128 MiB.
            LabelUse::Branch26 => (1 << 27) - 1,
            // Load-literal: same 19-bit scaled field as `Branch19`.
            LabelUse::Ldr19 => (1 << 20) - 1,
            // 21-bit signed immediate at byte granularity: +/- 1 MiB.
            LabelUse::Adr21 => (1 << 20) - 1,
            // A full 32-bit signed byte offset.
            LabelUse::PCRel32 => 0x7fffffff,
        }
    }

    /// Maximum PC-relative distance to a backward (negative-offset)
    /// target: one more than the positive range, as usual for
    /// two's-complement immediates.
    fn max_neg_range(self) -> CodeOffset {
        self.max_pos_range() + 1
    }

    /// Size of the region rewritten by `patch`: always one 32-bit
    /// instruction word.
    fn patch_size(self) -> CodeOffset {
        4
    }

    /// Resolve this reference in place: compute the PC-relative offset
    /// from `use_offset` to `label_offset` and insert it into the
    /// instruction word at `buffer[0..4]` (little-endian).
    fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
        let pc_rel = (label_offset as i64) - (use_offset as i64);
        // The caller must have range-checked the offset already.
        debug_assert!(pc_rel <= self.max_pos_range() as i64);
        debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
        let pc_rel = pc_rel as u32;
        let insn_word = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
        // Bits of the instruction word occupied by the offset immediate.
        let mask = match self {
            // imm14 in bits 5..=18 (tbz/tbnz).
            LabelUse::Branch14 => 0x0007ffe0,
            // imm19 in bits 5..=23.
            LabelUse::Branch19 => 0x00ffffe0,
            // imm26 in bits 0..=25.
            LabelUse::Branch26 => 0x03ffffff,
            // Load-literal: same imm19 field, bits 5..=23.
            LabelUse::Ldr19 => 0x00ffffe0,
            // adr: immlo in bits 29..=30, immhi in bits 5..=23.
            LabelUse::Adr21 => 0x60ffffe0,
            // The whole word holds the 32-bit offset.
            LabelUse::PCRel32 => 0xffffffff,
        };
        // Branch/load immediates are word-scaled (offset / 4); adr and raw
        // 32-bit offsets are byte-granular.
        let pc_rel_shifted = match self {
            LabelUse::Adr21 | LabelUse::PCRel32 => pc_rel,
            _ => {
                debug_assert!(pc_rel & 3 == 0);
                pc_rel >> 2
            }
        };
        // Scatter the (possibly shifted) offset into the immediate field(s).
        let pc_rel_inserted = match self {
            LabelUse::Branch14 => (pc_rel_shifted & 0x3fff) << 5,
            LabelUse::Branch19 | LabelUse::Ldr19 => (pc_rel_shifted & 0x7ffff) << 5,
            LabelUse::Branch26 => pc_rel_shifted & 0x3ffffff,
            // adr splits its 21-bit immediate: low 2 bits -> bits 29..=30
            // (immlo), high 19 bits -> bits 5..=23 (immhi).
            LabelUse::Adr21 => (pc_rel_shifted & 0x1ffffc) << 3 | (pc_rel_shifted & 3) << 29,
            LabelUse::PCRel32 => pc_rel_shifted,
        };
        // PCRel32 words are *added to* rather than masked in, so any
        // addend already present in the data word is preserved.
        let is_add = match self {
            LabelUse::PCRel32 => true,
            _ => false,
        };
        let insn_word = if is_add {
            insn_word.wrapping_add(pc_rel_inserted)
        } else {
            (insn_word & !mask) | pc_rel_inserted
        };
        buffer[0..4].clone_from_slice(&u32::to_le_bytes(insn_word));
    }

    /// Whether an out-of-range use of this kind can be redirected through
    /// a veneer (an emitted island with longer reach).
    fn supports_veneer(self) -> bool {
        match self {
            // Short conditional branches upgrade to an unconditional `b`.
            LabelUse::Branch14 | LabelUse::Branch19 => true,
            // Unconditional branches upgrade to a load + indirect branch.
            LabelUse::Branch26 => true,
            _ => false,
        }
    }

    /// Size in bytes of the veneer emitted for this reference kind.
    fn veneer_size(self) -> CodeOffset {
        match self {
            // A single unconditional branch instruction.
            LabelUse::Branch14 | LabelUse::Branch19 => 4,
            // Four instructions plus a 32-bit offset literal; see
            // `generate_veneer`.
            LabelUse::Branch26 => 20,
            _ => unreachable!(),
        }
    }

    // Largest veneer we can emit: the 20-byte `Branch26` island.
    fn worst_case_veneer_size() -> CodeOffset {
        20
    }

    /// Emit a veneer into `buffer` (exactly `veneer_size()` bytes, placed
    /// at `veneer_offset`), returning the offset and kind of the new
    /// label-use that must later be patched to reach the final target.
    fn generate_veneer(
        self,
        buffer: &mut [u8],
        veneer_offset: CodeOffset,
    ) -> (CodeOffset, LabelUse) {
        match self {
            LabelUse::Branch14 | LabelUse::Branch19 => {
                // Unconditional branch (`b`) with a zero offset; the
                // returned Branch26 use at the veneer's own offset fills
                // in the real displacement later.
                let insn_word = 0b000101 << 26;
                buffer[0..4].clone_from_slice(&u32::to_le_bytes(insn_word));
                (veneer_offset, LabelUse::Branch26)
            }

            LabelUse::Branch26 => {
                // 20-byte veneer: load the 32-bit offset literal stored at
                // veneer+16, materialize the address veneer+16 with `adr`,
                // add the two to form the absolute target, and branch
                // indirectly through it.
                let tmp1 = regs::spilltmp_reg();
                let tmp1_w = regs::writable_spilltmp_reg();
                let tmp2 = regs::tmp2_reg();
                let tmp2_w = regs::writable_tmp2_reg();
                // Load (sign-extending) the literal at pc+16 (imm19 is
                // word-scaled, hence 16 / 4).
                let ldr = emit::enc_ldst_imm19(0b1001_1000, 16 / 4, tmp1);
                // adr is at veneer+4, so +12 yields veneer+16 -- the same
                // address the literal offset is relative to.
                let adr = emit::enc_adr(12, tmp2_w);
                // tmp1 := loaded offset + veneer+16 = absolute target.
                let add = emit::enc_arith_rrr(0b10001011_000, 0, tmp1_w, tmp1, tmp2);
                let br = emit::enc_br(tmp1);
                buffer[0..4].clone_from_slice(&u32::to_le_bytes(ldr));
                buffer[4..8].clone_from_slice(&u32::to_le_bytes(adr));
                buffer[8..12].clone_from_slice(&u32::to_le_bytes(add));
                buffer[12..16].clone_from_slice(&u32::to_le_bytes(br));
                // The literal word at veneer+16 is patched as a PCRel32
                // use relative to its own address.
                (veneer_offset + 16, LabelUse::PCRel32)
            }

            _ => panic!("Unsupported label-reference type for veneer generation!"),
        }
    }

    /// Map an external relocation to the equivalent in-buffer label use,
    /// where one exists (only zero-addend `Arm64Call` is supported).
    fn from_reloc(reloc: Reloc, addend: Addend) -> Option<LabelUse> {
        match (reloc, addend) {
            (Reloc::Arm64Call, 0) => Some(LabelUse::Branch26),
            _ => None,
        }
    }
}
3102
#[cfg(test)]
mod tests {
    use super::*;

    /// Regression guard on the size of `Inst`: instructions are stored in
    /// bulk during compilation, so any growth of this type has a direct
    /// memory-footprint cost and should be a deliberate decision.
    #[test]
    fn inst_size_test() {
        // 28 bytes on 32-bit targets, except 32-bit ARM (presumably due to
        // different alignment of 64-bit fields there -- TODO confirm);
        // 32 bytes everywhere else.
        let compact_layout = cfg!(target_pointer_width = "32") && !cfg!(target_arch = "arm");
        let expected = if compact_layout { 28 } else { 32 };
        assert_eq!(expected, std::mem::size_of::<Inst>());
    }
}
3118}