1use crate::binemit::{Addend, CodeOffset, Reloc};
4use crate::ir::{types, ExternalName, Type};
5use crate::isa::s390x::abi::S390xMachineDeps;
6use crate::isa::{CallConv, FunctionAlignment};
7use crate::machinst::*;
8use crate::{settings, CodegenError, CodegenResult};
9use alloc::boxed::Box;
10use alloc::vec::Vec;
11use regalloc2::{PReg, PRegSet};
12use smallvec::SmallVec;
13use std::fmt::Write;
14use std::string::{String, ToString};
15pub mod regs;
16pub use self::regs::*;
17pub mod imms;
18pub use self::imms::*;
19pub mod args;
20pub use self::args::*;
21pub mod emit;
22pub use self::emit::*;
23pub mod unwind;
24
25#[cfg(test)]
26mod emit_tests;
27
28pub use crate::isa::s390x::lower::isle::generated_code::{
32 ALUOp, CmpOp, FPUOp1, FPUOp2, FPUOp3, FpuRoundMode, FpuRoundOp, LaneOrder, MInst as Inst,
33 RxSBGOp, ShiftOp, SymbolReloc, UnaryOp, VecBinaryOp, VecFloatCmpOp, VecIntCmpOp, VecShiftOp,
34 VecUnaryOp,
35};
36
37#[derive(Clone, Debug)]
40pub struct ReturnCallInfo<T> {
41 pub dest: T,
42 pub uses: CallArgList,
43 pub callee_pop_size: u32,
44}
45
46#[test]
47fn inst_size_test() {
48 assert_eq!(32, std::mem::size_of::<Inst>());
51}
52
53#[derive(Clone, Copy, Debug)]
55pub struct RegPair {
56 pub hi: Reg,
57 pub lo: Reg,
58}
59
60#[derive(Clone, Copy, Debug)]
62pub struct WritableRegPair {
63 pub hi: Writable<Reg>,
64 pub lo: Writable<Reg>,
65}
66
67impl WritableRegPair {
68 pub fn to_regpair(&self) -> RegPair {
69 RegPair {
70 hi: self.hi.to_reg(),
71 lo: self.lo.to_reg(),
72 }
73 }
74}
75
76#[allow(non_camel_case_types)]
78#[derive(Debug)]
79pub(crate) enum InstructionSet {
80 Base,
82 MIE2,
84 VXRS_EXT2,
86}
87
88impl Inst {
89 fn available_in_isa(&self) -> InstructionSet {
91 match self {
92 Inst::Nop0
94 | Inst::Nop2
95 | Inst::AluRRSImm16 { .. }
96 | Inst::AluRR { .. }
97 | Inst::AluRX { .. }
98 | Inst::AluRSImm16 { .. }
99 | Inst::AluRSImm32 { .. }
100 | Inst::AluRUImm32 { .. }
101 | Inst::AluRUImm16Shifted { .. }
102 | Inst::AluRUImm32Shifted { .. }
103 | Inst::ShiftRR { .. }
104 | Inst::RxSBG { .. }
105 | Inst::RxSBGTest { .. }
106 | Inst::SMulWide { .. }
107 | Inst::UMulWide { .. }
108 | Inst::SDivMod32 { .. }
109 | Inst::SDivMod64 { .. }
110 | Inst::UDivMod32 { .. }
111 | Inst::UDivMod64 { .. }
112 | Inst::Flogr { .. }
113 | Inst::CmpRR { .. }
114 | Inst::CmpRX { .. }
115 | Inst::CmpRSImm16 { .. }
116 | Inst::CmpRSImm32 { .. }
117 | Inst::CmpRUImm32 { .. }
118 | Inst::CmpTrapRR { .. }
119 | Inst::CmpTrapRSImm16 { .. }
120 | Inst::CmpTrapRUImm16 { .. }
121 | Inst::AtomicRmw { .. }
122 | Inst::AtomicCas32 { .. }
123 | Inst::AtomicCas64 { .. }
124 | Inst::Fence
125 | Inst::Load32 { .. }
126 | Inst::Load32ZExt8 { .. }
127 | Inst::Load32SExt8 { .. }
128 | Inst::Load32ZExt16 { .. }
129 | Inst::Load32SExt16 { .. }
130 | Inst::Load64 { .. }
131 | Inst::Load64ZExt8 { .. }
132 | Inst::Load64SExt8 { .. }
133 | Inst::Load64ZExt16 { .. }
134 | Inst::Load64SExt16 { .. }
135 | Inst::Load64ZExt32 { .. }
136 | Inst::Load64SExt32 { .. }
137 | Inst::LoadRev16 { .. }
138 | Inst::LoadRev32 { .. }
139 | Inst::LoadRev64 { .. }
140 | Inst::Store8 { .. }
141 | Inst::Store16 { .. }
142 | Inst::Store32 { .. }
143 | Inst::Store64 { .. }
144 | Inst::StoreImm8 { .. }
145 | Inst::StoreImm16 { .. }
146 | Inst::StoreImm32SExt16 { .. }
147 | Inst::StoreImm64SExt16 { .. }
148 | Inst::StoreRev16 { .. }
149 | Inst::StoreRev32 { .. }
150 | Inst::StoreRev64 { .. }
151 | Inst::LoadMultiple64 { .. }
152 | Inst::StoreMultiple64 { .. }
153 | Inst::Mov32 { .. }
154 | Inst::Mov64 { .. }
155 | Inst::MovPReg { .. }
156 | Inst::Mov32Imm { .. }
157 | Inst::Mov32SImm16 { .. }
158 | Inst::Mov64SImm16 { .. }
159 | Inst::Mov64SImm32 { .. }
160 | Inst::Mov64UImm16Shifted { .. }
161 | Inst::Mov64UImm32Shifted { .. }
162 | Inst::Insert64UImm16Shifted { .. }
163 | Inst::Insert64UImm32Shifted { .. }
164 | Inst::LoadAR { .. }
165 | Inst::InsertAR { .. }
166 | Inst::Extend { .. }
167 | Inst::CMov32 { .. }
168 | Inst::CMov64 { .. }
169 | Inst::CMov32SImm16 { .. }
170 | Inst::CMov64SImm16 { .. }
171 | Inst::FpuMove32 { .. }
172 | Inst::FpuMove64 { .. }
173 | Inst::FpuCMov32 { .. }
174 | Inst::FpuCMov64 { .. }
175 | Inst::FpuRR { .. }
176 | Inst::FpuRRR { .. }
177 | Inst::FpuRRRR { .. }
178 | Inst::FpuCmp32 { .. }
179 | Inst::FpuCmp64 { .. }
180 | Inst::LoadFpuConst32 { .. }
181 | Inst::LoadFpuConst64 { .. }
182 | Inst::VecRRR { .. }
183 | Inst::VecRR { .. }
184 | Inst::VecShiftRR { .. }
185 | Inst::VecSelect { .. }
186 | Inst::VecPermute { .. }
187 | Inst::VecPermuteDWImm { .. }
188 | Inst::VecIntCmp { .. }
189 | Inst::VecIntCmpS { .. }
190 | Inst::VecFloatCmp { .. }
191 | Inst::VecFloatCmpS { .. }
192 | Inst::VecInt128SCmpHi { .. }
193 | Inst::VecInt128UCmpHi { .. }
194 | Inst::VecLoad { .. }
195 | Inst::VecStore { .. }
196 | Inst::VecLoadReplicate { .. }
197 | Inst::VecMov { .. }
198 | Inst::VecCMov { .. }
199 | Inst::MovToVec128 { .. }
200 | Inst::VecLoadConst { .. }
201 | Inst::VecLoadConstReplicate { .. }
202 | Inst::VecImmByteMask { .. }
203 | Inst::VecImmBitMask { .. }
204 | Inst::VecImmReplicate { .. }
205 | Inst::VecLoadLane { .. }
206 | Inst::VecLoadLaneUndef { .. }
207 | Inst::VecStoreLane { .. }
208 | Inst::VecInsertLane { .. }
209 | Inst::VecInsertLaneUndef { .. }
210 | Inst::VecExtractLane { .. }
211 | Inst::VecInsertLaneImm { .. }
212 | Inst::VecReplicateLane { .. }
213 | Inst::AllocateArgs { .. }
214 | Inst::Call { .. }
215 | Inst::CallInd { .. }
216 | Inst::ReturnCall { .. }
217 | Inst::ReturnCallInd { .. }
218 | Inst::Args { .. }
219 | Inst::Rets { .. }
220 | Inst::Ret { .. }
221 | Inst::Jump { .. }
222 | Inst::CondBr { .. }
223 | Inst::TrapIf { .. }
224 | Inst::IndirectBr { .. }
225 | Inst::Debugtrap
226 | Inst::Trap { .. }
227 | Inst::JTSequence { .. }
228 | Inst::StackProbeLoop { .. }
229 | Inst::LoadSymbolReloc { .. }
230 | Inst::LoadAddr { .. }
231 | Inst::Loop { .. }
232 | Inst::CondBreak { .. }
233 | Inst::Unwind { .. }
234 | Inst::ElfTlsGetOffset { .. } => InstructionSet::Base,
235
236 Inst::AluRRR { alu_op, .. } => match alu_op {
238 ALUOp::NotAnd32 | ALUOp::NotAnd64 => InstructionSet::MIE2,
239 ALUOp::NotOrr32 | ALUOp::NotOrr64 => InstructionSet::MIE2,
240 ALUOp::NotXor32 | ALUOp::NotXor64 => InstructionSet::MIE2,
241 ALUOp::AndNot32 | ALUOp::AndNot64 => InstructionSet::MIE2,
242 ALUOp::OrrNot32 | ALUOp::OrrNot64 => InstructionSet::MIE2,
243 _ => InstructionSet::Base,
244 },
245 Inst::UnaryRR { op, .. } => match op {
246 UnaryOp::PopcntReg => InstructionSet::MIE2,
247 _ => InstructionSet::Base,
248 },
249 Inst::FpuRound { op, .. } => match op {
250 FpuRoundOp::ToSInt32 | FpuRoundOp::FromSInt32 => InstructionSet::VXRS_EXT2,
251 FpuRoundOp::ToUInt32 | FpuRoundOp::FromUInt32 => InstructionSet::VXRS_EXT2,
252 FpuRoundOp::ToSInt32x4 | FpuRoundOp::FromSInt32x4 => InstructionSet::VXRS_EXT2,
253 FpuRoundOp::ToUInt32x4 | FpuRoundOp::FromUInt32x4 => InstructionSet::VXRS_EXT2,
254 _ => InstructionSet::Base,
255 },
256
257 Inst::VecLoadRev { .. }
259 | Inst::VecLoadByte16Rev { .. }
260 | Inst::VecLoadByte32Rev { .. }
261 | Inst::VecLoadByte64Rev { .. }
262 | Inst::VecLoadElt16Rev { .. }
263 | Inst::VecLoadElt32Rev { .. }
264 | Inst::VecLoadElt64Rev { .. }
265 | Inst::VecStoreRev { .. }
266 | Inst::VecStoreByte16Rev { .. }
267 | Inst::VecStoreByte32Rev { .. }
268 | Inst::VecStoreByte64Rev { .. }
269 | Inst::VecStoreElt16Rev { .. }
270 | Inst::VecStoreElt32Rev { .. }
271 | Inst::VecStoreElt64Rev { .. }
272 | Inst::VecLoadReplicateRev { .. }
273 | Inst::VecLoadLaneRev { .. }
274 | Inst::VecLoadLaneRevUndef { .. }
275 | Inst::VecStoreLaneRev { .. } => InstructionSet::VXRS_EXT2,
276
277 Inst::DummyUse { .. } => InstructionSet::Base,
278 }
279 }
280
281 pub fn mov128(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
283 assert!(to_reg.to_reg().class() == RegClass::Float);
284 assert!(from_reg.class() == RegClass::Float);
285 Inst::VecMov {
286 rd: to_reg,
287 rn: from_reg,
288 }
289 }
290
291 pub fn mov64(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
293 assert!(to_reg.to_reg().class() == from_reg.class());
294 if from_reg.class() == RegClass::Int {
295 Inst::Mov64 {
296 rd: to_reg,
297 rm: from_reg,
298 }
299 } else {
300 Inst::FpuMove64 {
301 rd: to_reg,
302 rn: from_reg,
303 }
304 }
305 }
306
307 pub fn mov32(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
309 if from_reg.class() == RegClass::Int {
310 Inst::Mov32 {
311 rd: to_reg,
312 rm: from_reg,
313 }
314 } else {
315 Inst::FpuMove32 {
316 rd: to_reg,
317 rn: from_reg,
318 }
319 }
320 }
321
322 pub fn gen_load(into_reg: Writable<Reg>, mem: MemArg, ty: Type) -> Inst {
324 match ty {
325 types::I8 => Inst::Load64ZExt8 { rd: into_reg, mem },
326 types::I16 => Inst::Load64ZExt16 { rd: into_reg, mem },
327 types::I32 => Inst::Load64ZExt32 { rd: into_reg, mem },
328 types::I64 => Inst::Load64 { rd: into_reg, mem },
329 types::F32 => Inst::VecLoadLaneUndef {
330 size: 32,
331 rd: into_reg,
332 mem,
333 lane_imm: 0,
334 },
335 types::F64 => Inst::VecLoadLaneUndef {
336 size: 64,
337 rd: into_reg,
338 mem,
339 lane_imm: 0,
340 },
341 _ if ty.is_vector() && ty.bits() == 128 => Inst::VecLoad { rd: into_reg, mem },
342 types::I128 => Inst::VecLoad { rd: into_reg, mem },
343 _ => unimplemented!("gen_load({})", ty),
344 }
345 }
346
347 pub fn gen_store(mem: MemArg, from_reg: Reg, ty: Type) -> Inst {
349 match ty {
350 types::I8 => Inst::Store8 { rd: from_reg, mem },
351 types::I16 => Inst::Store16 { rd: from_reg, mem },
352 types::I32 => Inst::Store32 { rd: from_reg, mem },
353 types::I64 => Inst::Store64 { rd: from_reg, mem },
354 types::F32 => Inst::VecStoreLane {
355 size: 32,
356 rd: from_reg,
357 mem,
358 lane_imm: 0,
359 },
360 types::F64 => Inst::VecStoreLane {
361 size: 64,
362 rd: from_reg,
363 mem,
364 lane_imm: 0,
365 },
366 _ if ty.is_vector() && ty.bits() == 128 => Inst::VecStore { rd: from_reg, mem },
367 types::I128 => Inst::VecStore { rd: from_reg, mem },
368 _ => unimplemented!("gen_store({})", ty),
369 }
370 }
371}
372
373fn memarg_operands(memarg: &mut MemArg, collector: &mut impl OperandVisitor) {
377 match memarg {
378 MemArg::BXD12 { base, index, .. } | MemArg::BXD20 { base, index, .. } => {
379 collector.reg_use(base);
380 collector.reg_use(index);
381 }
382 MemArg::Label { .. } | MemArg::Symbol { .. } => {}
383 MemArg::RegOffset { reg, .. } => {
384 collector.reg_use(reg);
385 }
386 MemArg::InitialSPOffset { .. }
387 | MemArg::NominalSPOffset { .. }
388 | MemArg::SlotOffset { .. } => {}
389 }
390 collector.reg_fixed_nonallocatable(gpr_preg(1));
393}
394
395fn s390x_get_operands(inst: &mut Inst, collector: &mut DenyReuseVisitor<impl OperandVisitor>) {
396 match inst {
397 Inst::AluRRR { rd, rn, rm, .. } => {
398 collector.reg_def(rd);
399 collector.reg_use(rn);
400 collector.reg_use(rm);
401 }
402 Inst::AluRRSImm16 { rd, rn, .. } => {
403 collector.reg_def(rd);
404 collector.reg_use(rn);
405 }
406 Inst::AluRR { rd, ri, rm, .. } => {
407 collector.reg_reuse_def(rd, 1);
408 collector.reg_use(ri);
409 collector.reg_use(rm);
410 }
411 Inst::AluRX { rd, ri, mem, .. } => {
412 collector.reg_reuse_def(rd, 1);
413 collector.reg_use(ri);
414 memarg_operands(mem, collector);
415 }
416 Inst::AluRSImm16 { rd, ri, .. } => {
417 collector.reg_reuse_def(rd, 1);
418 collector.reg_use(ri);
419 }
420 Inst::AluRSImm32 { rd, ri, .. } => {
421 collector.reg_reuse_def(rd, 1);
422 collector.reg_use(ri);
423 }
424 Inst::AluRUImm32 { rd, ri, .. } => {
425 collector.reg_reuse_def(rd, 1);
426 collector.reg_use(ri);
427 }
428 Inst::AluRUImm16Shifted { rd, ri, .. } => {
429 collector.reg_reuse_def(rd, 1);
430 collector.reg_use(ri);
431 }
432 Inst::AluRUImm32Shifted { rd, ri, .. } => {
433 collector.reg_reuse_def(rd, 1);
434 collector.reg_use(ri);
435 }
436 Inst::SMulWide { rd, rn, rm } => {
437 collector.reg_use(rn);
438 collector.reg_use(rm);
439 collector.reg_fixed_def(&mut rd.hi, gpr(2));
442 collector.reg_fixed_def(&mut rd.lo, gpr(3));
443 }
444 Inst::UMulWide { rd, ri, rn } => {
445 collector.reg_use(rn);
446 collector.reg_fixed_def(&mut rd.hi, gpr(2));
447 collector.reg_fixed_def(&mut rd.lo, gpr(3));
448 collector.reg_fixed_use(ri, gpr(3));
449 }
450 Inst::SDivMod32 { rd, ri, rn } | Inst::SDivMod64 { rd, ri, rn } => {
451 collector.reg_use(rn);
452 collector.reg_fixed_def(&mut rd.hi, gpr(2));
453 collector.reg_fixed_def(&mut rd.lo, gpr(3));
454 collector.reg_fixed_use(ri, gpr(3));
455 }
456 Inst::UDivMod32 { rd, ri, rn } | Inst::UDivMod64 { rd, ri, rn } => {
457 collector.reg_use(rn);
458 collector.reg_fixed_def(&mut rd.hi, gpr(2));
459 collector.reg_fixed_def(&mut rd.lo, gpr(3));
460 collector.reg_fixed_use(&mut ri.hi, gpr(2));
461 collector.reg_fixed_use(&mut ri.lo, gpr(3));
462 }
463 Inst::Flogr { rd, rn } => {
464 collector.reg_use(rn);
465 collector.reg_fixed_def(&mut rd.hi, gpr(2));
466 collector.reg_fixed_def(&mut rd.lo, gpr(3));
467 }
468 Inst::ShiftRR {
469 rd, rn, shift_reg, ..
470 } => {
471 collector.reg_def(rd);
472 collector.reg_use(rn);
473 collector.reg_use(shift_reg);
474 }
475 Inst::RxSBG { rd, ri, rn, .. } => {
476 collector.reg_reuse_def(rd, 1);
477 collector.reg_use(ri);
478 collector.reg_use(rn);
479 }
480 Inst::RxSBGTest { rd, rn, .. } => {
481 collector.reg_use(rd);
482 collector.reg_use(rn);
483 }
484 Inst::UnaryRR { rd, rn, .. } => {
485 collector.reg_def(rd);
486 collector.reg_use(rn);
487 }
488 Inst::CmpRR { rn, rm, .. } => {
489 collector.reg_use(rn);
490 collector.reg_use(rm);
491 }
492 Inst::CmpRX { rn, mem, .. } => {
493 collector.reg_use(rn);
494 memarg_operands(mem, collector);
495 }
496 Inst::CmpRSImm16 { rn, .. } => {
497 collector.reg_use(rn);
498 }
499 Inst::CmpRSImm32 { rn, .. } => {
500 collector.reg_use(rn);
501 }
502 Inst::CmpRUImm32 { rn, .. } => {
503 collector.reg_use(rn);
504 }
505 Inst::CmpTrapRR { rn, rm, .. } => {
506 collector.reg_use(rn);
507 collector.reg_use(rm);
508 }
509 Inst::CmpTrapRSImm16 { rn, .. } => {
510 collector.reg_use(rn);
511 }
512 Inst::CmpTrapRUImm16 { rn, .. } => {
513 collector.reg_use(rn);
514 }
515 Inst::AtomicRmw { rd, rn, mem, .. } => {
516 collector.reg_def(rd);
517 collector.reg_use(rn);
518 memarg_operands(mem, collector);
519 }
520 Inst::AtomicCas32 {
521 rd, ri, rn, mem, ..
522 }
523 | Inst::AtomicCas64 {
524 rd, ri, rn, mem, ..
525 } => {
526 collector.reg_reuse_def(rd, 1);
527 collector.reg_use(ri);
528 collector.reg_use(rn);
529 memarg_operands(mem, collector);
530 }
531 Inst::Fence => {}
532 Inst::Load32 { rd, mem, .. }
533 | Inst::Load32ZExt8 { rd, mem, .. }
534 | Inst::Load32SExt8 { rd, mem, .. }
535 | Inst::Load32ZExt16 { rd, mem, .. }
536 | Inst::Load32SExt16 { rd, mem, .. }
537 | Inst::Load64 { rd, mem, .. }
538 | Inst::Load64ZExt8 { rd, mem, .. }
539 | Inst::Load64SExt8 { rd, mem, .. }
540 | Inst::Load64ZExt16 { rd, mem, .. }
541 | Inst::Load64SExt16 { rd, mem, .. }
542 | Inst::Load64ZExt32 { rd, mem, .. }
543 | Inst::Load64SExt32 { rd, mem, .. }
544 | Inst::LoadRev16 { rd, mem, .. }
545 | Inst::LoadRev32 { rd, mem, .. }
546 | Inst::LoadRev64 { rd, mem, .. } => {
547 collector.reg_def(rd);
548 memarg_operands(mem, collector);
549 }
550 Inst::Store8 { rd, mem, .. }
551 | Inst::Store16 { rd, mem, .. }
552 | Inst::Store32 { rd, mem, .. }
553 | Inst::Store64 { rd, mem, .. }
554 | Inst::StoreRev16 { rd, mem, .. }
555 | Inst::StoreRev32 { rd, mem, .. }
556 | Inst::StoreRev64 { rd, mem, .. } => {
557 collector.reg_use(rd);
558 memarg_operands(mem, collector);
559 }
560 Inst::StoreImm8 { mem, .. }
561 | Inst::StoreImm16 { mem, .. }
562 | Inst::StoreImm32SExt16 { mem, .. }
563 | Inst::StoreImm64SExt16 { mem, .. } => {
564 memarg_operands(mem, collector);
565 }
566 Inst::LoadMultiple64 { rt, rt2, mem, .. } => {
567 memarg_operands(mem, collector);
568 let first_regnum = rt.to_reg().to_real_reg().unwrap().hw_enc();
569 let last_regnum = rt2.to_reg().to_real_reg().unwrap().hw_enc();
570 for regnum in first_regnum..last_regnum + 1 {
571 collector.reg_fixed_nonallocatable(gpr_preg(regnum));
572 }
573 }
574 Inst::StoreMultiple64 { rt, rt2, mem, .. } => {
575 memarg_operands(mem, collector);
576 let first_regnum = rt.to_real_reg().unwrap().hw_enc();
577 let last_regnum = rt2.to_real_reg().unwrap().hw_enc();
578 for regnum in first_regnum..last_regnum + 1 {
579 collector.reg_fixed_nonallocatable(gpr_preg(regnum));
580 }
581 }
582 Inst::Mov64 { rd, rm } => {
583 collector.reg_def(rd);
584 collector.reg_use(rm);
585 }
586 Inst::MovPReg { rd, rm } => {
587 collector.reg_def(rd);
588 collector.reg_fixed_nonallocatable(*rm);
589 }
590 Inst::Mov32 { rd, rm } => {
591 collector.reg_def(rd);
592 collector.reg_use(rm);
593 }
594 Inst::Mov32Imm { rd, .. }
595 | Inst::Mov32SImm16 { rd, .. }
596 | Inst::Mov64SImm16 { rd, .. }
597 | Inst::Mov64SImm32 { rd, .. }
598 | Inst::Mov64UImm16Shifted { rd, .. }
599 | Inst::Mov64UImm32Shifted { rd, .. } => {
600 collector.reg_def(rd);
601 }
602 Inst::CMov32 { rd, ri, rm, .. } | Inst::CMov64 { rd, ri, rm, .. } => {
603 collector.reg_reuse_def(rd, 1);
604 collector.reg_use(ri);
605 collector.reg_use(rm);
606 }
607 Inst::CMov32SImm16 { rd, ri, .. } | Inst::CMov64SImm16 { rd, ri, .. } => {
608 collector.reg_reuse_def(rd, 1);
609 collector.reg_use(ri);
610 }
611 Inst::Insert64UImm16Shifted { rd, ri, .. } | Inst::Insert64UImm32Shifted { rd, ri, .. } => {
612 collector.reg_reuse_def(rd, 1);
613 collector.reg_use(ri);
614 }
615 Inst::LoadAR { rd, .. } => {
616 collector.reg_def(rd);
617 }
618 Inst::InsertAR { rd, ri, .. } => {
619 collector.reg_reuse_def(rd, 1);
620 collector.reg_use(ri);
621 }
622 Inst::FpuMove32 { rd, rn } | Inst::FpuMove64 { rd, rn } => {
623 collector.reg_def(rd);
624 collector.reg_use(rn);
625 }
626 Inst::FpuCMov32 { rd, ri, rm, .. } | Inst::FpuCMov64 { rd, ri, rm, .. } => {
627 collector.reg_reuse_def(rd, 1);
628 collector.reg_use(ri);
629 collector.reg_use(rm);
630 }
631 Inst::FpuRR { rd, rn, .. } => {
632 collector.reg_def(rd);
633 collector.reg_use(rn);
634 }
635 Inst::FpuRRR { rd, rn, rm, .. } => {
636 collector.reg_def(rd);
637 collector.reg_use(rn);
638 collector.reg_use(rm);
639 }
640 Inst::FpuRRRR { rd, rn, rm, ra, .. } => {
641 collector.reg_def(rd);
642 collector.reg_use(rn);
643 collector.reg_use(rm);
644 collector.reg_use(ra);
645 }
646 Inst::FpuCmp32 { rn, rm } | Inst::FpuCmp64 { rn, rm } => {
647 collector.reg_use(rn);
648 collector.reg_use(rm);
649 }
650 Inst::LoadFpuConst32 { rd, .. } | Inst::LoadFpuConst64 { rd, .. } => {
651 collector.reg_def(rd);
652 collector.reg_fixed_nonallocatable(gpr_preg(1));
653 }
654 Inst::FpuRound { rd, rn, .. } => {
655 collector.reg_def(rd);
656 collector.reg_use(rn);
657 }
658 Inst::VecRRR { rd, rn, rm, .. } => {
659 collector.reg_def(rd);
660 collector.reg_use(rn);
661 collector.reg_use(rm);
662 }
663 Inst::VecRR { rd, rn, .. } => {
664 collector.reg_def(rd);
665 collector.reg_use(rn);
666 }
667 Inst::VecShiftRR {
668 rd, rn, shift_reg, ..
669 } => {
670 collector.reg_def(rd);
671 collector.reg_use(rn);
672 collector.reg_use(shift_reg);
673 }
674 Inst::VecSelect { rd, rn, rm, ra, .. } => {
675 collector.reg_def(rd);
676 collector.reg_use(rn);
677 collector.reg_use(rm);
678 collector.reg_use(ra);
679 }
680 Inst::VecPermute { rd, rn, rm, ra, .. } => {
681 collector.reg_def(rd);
682 collector.reg_use(rn);
683 collector.reg_use(rm);
684 collector.reg_use(ra);
685 }
686 Inst::VecPermuteDWImm { rd, rn, rm, .. } => {
687 collector.reg_def(rd);
688 collector.reg_use(rn);
689 collector.reg_use(rm);
690 }
691 Inst::VecIntCmp { rd, rn, rm, .. } | Inst::VecIntCmpS { rd, rn, rm, .. } => {
692 collector.reg_def(rd);
693 collector.reg_use(rn);
694 collector.reg_use(rm);
695 }
696 Inst::VecFloatCmp { rd, rn, rm, .. } | Inst::VecFloatCmpS { rd, rn, rm, .. } => {
697 collector.reg_def(rd);
698 collector.reg_use(rn);
699 collector.reg_use(rm);
700 }
701 Inst::VecInt128SCmpHi { tmp, rn, rm, .. } | Inst::VecInt128UCmpHi { tmp, rn, rm, .. } => {
702 collector.reg_def(tmp);
703 collector.reg_use(rn);
704 collector.reg_use(rm);
705 }
706 Inst::VecLoad { rd, mem, .. } => {
707 collector.reg_def(rd);
708 memarg_operands(mem, collector);
709 }
710 Inst::VecLoadRev { rd, mem, .. } => {
711 collector.reg_def(rd);
712 memarg_operands(mem, collector);
713 }
714 Inst::VecLoadByte16Rev { rd, mem, .. } => {
715 collector.reg_def(rd);
716 memarg_operands(mem, collector);
717 }
718 Inst::VecLoadByte32Rev { rd, mem, .. } => {
719 collector.reg_def(rd);
720 memarg_operands(mem, collector);
721 }
722 Inst::VecLoadByte64Rev { rd, mem, .. } => {
723 collector.reg_def(rd);
724 memarg_operands(mem, collector);
725 }
726 Inst::VecLoadElt16Rev { rd, mem, .. } => {
727 collector.reg_def(rd);
728 memarg_operands(mem, collector);
729 }
730 Inst::VecLoadElt32Rev { rd, mem, .. } => {
731 collector.reg_def(rd);
732 memarg_operands(mem, collector);
733 }
734 Inst::VecLoadElt64Rev { rd, mem, .. } => {
735 collector.reg_def(rd);
736 memarg_operands(mem, collector);
737 }
738 Inst::VecStore { rd, mem, .. } => {
739 collector.reg_use(rd);
740 memarg_operands(mem, collector);
741 }
742 Inst::VecStoreRev { rd, mem, .. } => {
743 collector.reg_use(rd);
744 memarg_operands(mem, collector);
745 }
746 Inst::VecStoreByte16Rev { rd, mem, .. } => {
747 collector.reg_use(rd);
748 memarg_operands(mem, collector);
749 }
750 Inst::VecStoreByte32Rev { rd, mem, .. } => {
751 collector.reg_use(rd);
752 memarg_operands(mem, collector);
753 }
754 Inst::VecStoreByte64Rev { rd, mem, .. } => {
755 collector.reg_use(rd);
756 memarg_operands(mem, collector);
757 }
758 Inst::VecStoreElt16Rev { rd, mem, .. } => {
759 collector.reg_use(rd);
760 memarg_operands(mem, collector);
761 }
762 Inst::VecStoreElt32Rev { rd, mem, .. } => {
763 collector.reg_use(rd);
764 memarg_operands(mem, collector);
765 }
766 Inst::VecStoreElt64Rev { rd, mem, .. } => {
767 collector.reg_use(rd);
768 memarg_operands(mem, collector);
769 }
770 Inst::VecLoadReplicate { rd, mem, .. } => {
771 collector.reg_def(rd);
772 memarg_operands(mem, collector);
773 }
774 Inst::VecLoadReplicateRev { rd, mem, .. } => {
775 collector.reg_def(rd);
776 memarg_operands(mem, collector);
777 }
778 Inst::VecMov { rd, rn } => {
779 collector.reg_def(rd);
780 collector.reg_use(rn);
781 }
782 Inst::VecCMov { rd, ri, rm, .. } => {
783 collector.reg_reuse_def(rd, 1);
784 collector.reg_use(ri);
785 collector.reg_use(rm);
786 }
787 Inst::MovToVec128 { rd, rn, rm } => {
788 collector.reg_def(rd);
789 collector.reg_use(rn);
790 collector.reg_use(rm);
791 }
792 Inst::VecLoadConst { rd, .. } | Inst::VecLoadConstReplicate { rd, .. } => {
793 collector.reg_def(rd);
794 collector.reg_fixed_nonallocatable(gpr_preg(1));
795 }
796 Inst::VecImmByteMask { rd, .. } => {
797 collector.reg_def(rd);
798 }
799 Inst::VecImmBitMask { rd, .. } => {
800 collector.reg_def(rd);
801 }
802 Inst::VecImmReplicate { rd, .. } => {
803 collector.reg_def(rd);
804 }
805 Inst::VecLoadLane { rd, ri, mem, .. } => {
806 collector.reg_reuse_def(rd, 1);
807 collector.reg_use(ri);
808 memarg_operands(mem, collector);
809 }
810 Inst::VecLoadLaneUndef { rd, mem, .. } => {
811 collector.reg_def(rd);
812 memarg_operands(mem, collector);
813 }
814 Inst::VecStoreLaneRev { rd, mem, .. } => {
815 collector.reg_use(rd);
816 memarg_operands(mem, collector);
817 }
818 Inst::VecLoadLaneRevUndef { rd, mem, .. } => {
819 collector.reg_def(rd);
820 memarg_operands(mem, collector);
821 }
822 Inst::VecStoreLane { rd, mem, .. } => {
823 collector.reg_use(rd);
824 memarg_operands(mem, collector);
825 }
826 Inst::VecLoadLaneRev { rd, ri, mem, .. } => {
827 collector.reg_reuse_def(rd, 1);
828 collector.reg_use(ri);
829 memarg_operands(mem, collector);
830 }
831 Inst::VecInsertLane {
832 rd,
833 ri,
834 rn,
835 lane_reg,
836 ..
837 } => {
838 collector.reg_reuse_def(rd, 1);
839 collector.reg_use(ri);
840 collector.reg_use(rn);
841 collector.reg_use(lane_reg);
842 }
843 Inst::VecInsertLaneUndef {
844 rd, rn, lane_reg, ..
845 } => {
846 collector.reg_def(rd);
847 collector.reg_use(rn);
848 collector.reg_use(lane_reg);
849 }
850 Inst::VecExtractLane {
851 rd, rn, lane_reg, ..
852 } => {
853 collector.reg_def(rd);
854 collector.reg_use(rn);
855 collector.reg_use(lane_reg);
856 }
857 Inst::VecInsertLaneImm { rd, ri, .. } => {
858 collector.reg_reuse_def(rd, 1);
859 collector.reg_use(ri);
860 }
861 Inst::VecReplicateLane { rd, rn, .. } => {
862 collector.reg_def(rd);
863 collector.reg_use(rn);
864 }
865 Inst::Extend { rd, rn, .. } => {
866 collector.reg_def(rd);
867 collector.reg_use(rn);
868 }
869 Inst::AllocateArgs { .. } => {}
870 Inst::Call { link, info, .. } => {
871 let CallInfo {
872 uses,
873 defs,
874 clobbers,
875 ..
876 } = &mut **info;
877 for CallArgPair { vreg, preg } in uses {
878 collector.reg_fixed_use(vreg, *preg);
879 }
880 let mut clobbers = *clobbers;
881 clobbers.add(link.to_reg().to_real_reg().unwrap().into());
882 for CallRetPair { vreg, preg } in defs {
883 clobbers.remove(PReg::from(preg.to_real_reg().unwrap()));
884 collector.reg_fixed_def(vreg, *preg);
885 }
886 collector.reg_clobbers(clobbers);
887 }
888 Inst::CallInd { link, info } => {
889 let CallInfo {
890 dest,
891 uses,
892 defs,
893 clobbers,
894 ..
895 } = &mut **info;
896 collector.reg_use(dest);
897 for CallArgPair { vreg, preg } in uses {
898 collector.reg_fixed_use(vreg, *preg);
899 }
900 let mut clobbers = *clobbers;
901 clobbers.add(link.to_reg().to_real_reg().unwrap().into());
902 for CallRetPair { vreg, preg } in defs {
903 clobbers.remove(PReg::from(preg.to_real_reg().unwrap()));
904 collector.reg_fixed_def(vreg, *preg);
905 }
906 collector.reg_clobbers(clobbers);
907 }
908 Inst::ReturnCall { info } => {
909 let ReturnCallInfo { uses, .. } = &mut **info;
910 for CallArgPair { vreg, preg } in uses {
911 collector.reg_fixed_use(vreg, *preg);
912 }
913 }
914 Inst::ReturnCallInd { info } => {
915 let ReturnCallInfo { dest, uses, .. } = &mut **info;
916 collector.reg_use(dest);
917 for CallArgPair { vreg, preg } in uses {
918 collector.reg_fixed_use(vreg, *preg);
919 }
920 }
921 Inst::ElfTlsGetOffset {
922 tls_offset,
923 got,
924 got_offset,
925 ..
926 } => {
927 collector.reg_fixed_use(got, gpr(12));
928 collector.reg_fixed_use(got_offset, gpr(2));
929 collector.reg_fixed_def(tls_offset, gpr(2));
930
931 let mut clobbers = S390xMachineDeps::get_regs_clobbered_by_call(CallConv::SystemV);
932 clobbers.add(gpr_preg(14));
933 clobbers.remove(gpr_preg(2));
934 collector.reg_clobbers(clobbers);
935 }
936 Inst::Args { args } => {
937 for ArgPair { vreg, preg } in args {
938 collector.reg_fixed_def(vreg, *preg);
939 }
940 }
941 Inst::Rets { rets } => {
942 for RetPair { vreg, preg } in rets {
943 collector.reg_fixed_use(vreg, *preg);
944 }
945 }
946 Inst::Ret { .. } => {
947 }
950 Inst::Jump { .. } => {}
951 Inst::IndirectBr { rn, .. } => {
952 collector.reg_use(rn);
953 }
954 Inst::CondBr { .. } => {}
955 Inst::Nop0 | Inst::Nop2 => {}
956 Inst::Debugtrap => {}
957 Inst::Trap { .. } => {}
958 Inst::TrapIf { .. } => {}
959 Inst::JTSequence { ridx, .. } => {
960 collector.reg_use(ridx);
961 collector.reg_fixed_nonallocatable(gpr_preg(1));
962 }
963 Inst::LoadSymbolReloc { rd, .. } => {
964 collector.reg_def(rd);
965 collector.reg_fixed_nonallocatable(gpr_preg(1));
966 }
967 Inst::LoadAddr { rd, mem } => {
968 collector.reg_def(rd);
969 memarg_operands(mem, collector);
970 }
971 Inst::StackProbeLoop { probe_count, .. } => {
972 collector.reg_early_def(probe_count);
973 }
974 Inst::Loop { body, .. } => {
975 let mut collector = DenyReuseVisitor {
980 inner: collector.inner,
981 deny_reuse: true,
982 };
983 for inst in body {
984 s390x_get_operands(inst, &mut collector);
985 }
986 }
987 Inst::CondBreak { .. } => {}
988 Inst::Unwind { .. } => {}
989 Inst::DummyUse { reg } => {
990 collector.reg_use(reg);
991 }
992 }
993}
994
995struct DenyReuseVisitor<'a, T> {
996 inner: &'a mut T,
997 deny_reuse: bool,
998}
999
1000impl<T: OperandVisitor> OperandVisitor for DenyReuseVisitor<'_, T> {
1001 fn add_operand(
1002 &mut self,
1003 reg: &mut Reg,
1004 constraint: regalloc2::OperandConstraint,
1005 kind: regalloc2::OperandKind,
1006 pos: regalloc2::OperandPos,
1007 ) {
1008 debug_assert!(
1009 !self.deny_reuse || !matches!(constraint, regalloc2::OperandConstraint::Reuse(_))
1010 );
1011 self.inner.add_operand(reg, constraint, kind, pos);
1012 }
1013
1014 fn debug_assert_is_allocatable_preg(&self, reg: regalloc2::PReg, expected: bool) {
1015 self.inner.debug_assert_is_allocatable_preg(reg, expected);
1016 }
1017
1018 fn reg_clobbers(&mut self, regs: PRegSet) {
1019 self.inner.reg_clobbers(regs);
1020 }
1021}
1022
1023impl MachInst for Inst {
1027 type ABIMachineSpec = S390xMachineDeps;
1028 type LabelUse = LabelUse;
1029 const TRAP_OPCODE: &'static [u8] = &[0, 0];
1030
1031 fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
1032 s390x_get_operands(
1033 self,
1034 &mut DenyReuseVisitor {
1035 inner: collector,
1036 deny_reuse: false,
1037 },
1038 );
1039 }
1040
1041 fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
1042 match self {
1043 &Inst::Mov32 { rd, rm } => Some((rd, rm)),
1044 &Inst::Mov64 { rd, rm } => Some((rd, rm)),
1045 &Inst::FpuMove32 { rd, rn } => Some((rd, rn)),
1046 &Inst::FpuMove64 { rd, rn } => Some((rd, rn)),
1047 &Inst::VecMov { rd, rn } => Some((rd, rn)),
1048 _ => None,
1049 }
1050 }
1051
1052 fn is_included_in_clobbers(&self) -> bool {
1053 match self {
1061 &Inst::Args { .. } => false,
1062 &Inst::Call { ref info, .. } => info.caller_conv != info.callee_conv,
1063 &Inst::CallInd { ref info, .. } => info.caller_conv != info.callee_conv,
1064 &Inst::ElfTlsGetOffset { .. } => false,
1065 _ => true,
1066 }
1067 }
1068
1069 fn is_trap(&self) -> bool {
1070 match self {
1071 Self::Trap { .. } => true,
1072 _ => false,
1073 }
1074 }
1075
1076 fn is_args(&self) -> bool {
1077 match self {
1078 Self::Args { .. } => true,
1079 _ => false,
1080 }
1081 }
1082
1083 fn is_term(&self) -> MachTerminator {
1084 match self {
1085 &Inst::Rets { .. } => MachTerminator::Ret,
1086 &Inst::ReturnCall { .. } | &Inst::ReturnCallInd { .. } => MachTerminator::RetCall,
1087 &Inst::Jump { .. } => MachTerminator::Uncond,
1088 &Inst::CondBr { .. } => MachTerminator::Cond,
1089 &Inst::IndirectBr { .. } => MachTerminator::Indirect,
1090 &Inst::JTSequence { .. } => MachTerminator::Indirect,
1091 _ => MachTerminator::None,
1092 }
1093 }
1094
1095 fn is_mem_access(&self) -> bool {
1096 panic!("TODO FILL ME OUT")
1097 }
1098
1099 fn is_safepoint(&self) -> bool {
1100 match self {
1101 Inst::Call { .. } | Inst::CallInd { .. } => true,
1102 _ => false,
1103 }
1104 }
1105
1106 fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Inst {
1107 assert!(ty.bits() <= 128);
1108 if ty.bits() <= 32 {
1109 Inst::mov32(to_reg, from_reg)
1110 } else if ty.bits() <= 64 {
1111 Inst::mov64(to_reg, from_reg)
1112 } else {
1113 Inst::mov128(to_reg, from_reg)
1114 }
1115 }
1116
1117 fn gen_nop(preferred_size: usize) -> Inst {
1118 if preferred_size == 0 {
1119 Inst::Nop0
1120 } else {
1121 assert!(preferred_size >= 2);
1123 Inst::Nop2
1124 }
1125 }
1126
1127 fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
1128 match ty {
1129 types::I8 => Ok((&[RegClass::Int], &[types::I8])),
1130 types::I16 => Ok((&[RegClass::Int], &[types::I16])),
1131 types::I32 => Ok((&[RegClass::Int], &[types::I32])),
1132 types::I64 => Ok((&[RegClass::Int], &[types::I64])),
1133 types::F32 => Ok((&[RegClass::Float], &[types::F32])),
1134 types::F64 => Ok((&[RegClass::Float], &[types::F64])),
1135 types::I128 => Ok((&[RegClass::Float], &[types::I128])),
1136 _ if ty.is_vector() && ty.bits() == 128 => Ok((&[RegClass::Float], &[types::I8X16])),
1137 _ => Err(CodegenError::Unsupported(format!(
1138 "Unexpected SSA-value type: {ty}"
1139 ))),
1140 }
1141 }
1142
1143 fn canonical_type_for_rc(rc: RegClass) -> Type {
1144 match rc {
1145 RegClass::Int => types::I64,
1146 RegClass::Float => types::I8X16,
1147 RegClass::Vector => unreachable!(),
1148 }
1149 }
1150
1151 fn gen_jump(target: MachLabel) -> Inst {
1152 Inst::Jump { dest: target }
1153 }
1154
1155 fn worst_case_size() -> CodeOffset {
1156 44
1164 }
1165
1166 fn ref_type_regclass(_: &settings::Flags) -> RegClass {
1167 RegClass::Int
1168 }
1169
1170 fn gen_dummy_use(reg: Reg) -> Inst {
1171 Inst::DummyUse { reg }
1172 }
1173
1174 fn function_alignment() -> FunctionAlignment {
1175 FunctionAlignment {
1176 minimum: 4,
1177 preferred: 4,
1178 }
1179 }
1180}
1181
1182fn mem_finalize_for_show(mem: &MemArg, state: &EmitState, mi: MemInstType) -> (String, MemArg) {
1186 let (mem_insts, mem) = mem_finalize(mem, state, mi);
1187 let mut mem_str = mem_insts
1188 .into_iter()
1189 .map(|inst| inst.print_with_state(&mut EmitState::default()))
1190 .collect::<Vec<_>>()
1191 .join(" ; ");
1192 if !mem_str.is_empty() {
1193 mem_str += " ; ";
1194 }
1195
1196 (mem_str, mem)
1197}
1198
1199impl Inst {
1200 fn print_with_state(&self, state: &mut EmitState) -> String {
1201 match self {
1202 &Inst::Nop0 => "nop-zero-len".to_string(),
1203 &Inst::Nop2 => "nop".to_string(),
1204 &Inst::AluRRR { alu_op, rd, rn, rm } => {
1205 let (op, have_rr) = match alu_op {
1206 ALUOp::Add32 => ("ark", true),
1207 ALUOp::Add64 => ("agrk", true),
1208 ALUOp::AddLogical32 => ("alrk", true),
1209 ALUOp::AddLogical64 => ("algrk", true),
1210 ALUOp::Sub32 => ("srk", true),
1211 ALUOp::Sub64 => ("sgrk", true),
1212 ALUOp::SubLogical32 => ("slrk", true),
1213 ALUOp::SubLogical64 => ("slgrk", true),
1214 ALUOp::Mul32 => ("msrkc", true),
1215 ALUOp::Mul64 => ("msgrkc", true),
1216 ALUOp::And32 => ("nrk", true),
1217 ALUOp::And64 => ("ngrk", true),
1218 ALUOp::Orr32 => ("ork", true),
1219 ALUOp::Orr64 => ("ogrk", true),
1220 ALUOp::Xor32 => ("xrk", true),
1221 ALUOp::Xor64 => ("xgrk", true),
1222 ALUOp::NotAnd32 => ("nnrk", false),
1223 ALUOp::NotAnd64 => ("nngrk", false),
1224 ALUOp::NotOrr32 => ("nork", false),
1225 ALUOp::NotOrr64 => ("nogrk", false),
1226 ALUOp::NotXor32 => ("nxrk", false),
1227 ALUOp::NotXor64 => ("nxgrk", false),
1228 ALUOp::AndNot32 => ("ncrk", false),
1229 ALUOp::AndNot64 => ("ncgrk", false),
1230 ALUOp::OrrNot32 => ("ocrk", false),
1231 ALUOp::OrrNot64 => ("ocgrk", false),
1232 _ => unreachable!(),
1233 };
1234 if have_rr && rd.to_reg() == rn {
1235 let inst = Inst::AluRR {
1236 alu_op,
1237 rd,
1238 ri: rd.to_reg(),
1239 rm,
1240 };
1241 return inst.print_with_state(state);
1242 }
1243 let rd = pretty_print_reg(rd.to_reg());
1244 let rn = pretty_print_reg(rn);
1245 let rm = pretty_print_reg(rm);
1246 format!("{op} {rd}, {rn}, {rm}")
1247 }
1248 &Inst::AluRRSImm16 {
1249 alu_op,
1250 rd,
1251 rn,
1252 imm,
1253 } => {
1254 if rd.to_reg() == rn {
1255 let inst = Inst::AluRSImm16 {
1256 alu_op,
1257 rd,
1258 ri: rd.to_reg(),
1259 imm,
1260 };
1261 return inst.print_with_state(state);
1262 }
1263 let op = match alu_op {
1264 ALUOp::Add32 => "ahik",
1265 ALUOp::Add64 => "aghik",
1266 _ => unreachable!(),
1267 };
1268 let rd = pretty_print_reg(rd.to_reg());
1269 let rn = pretty_print_reg(rn);
1270 format!("{op} {rd}, {rn}, {imm}")
1271 }
1272 &Inst::AluRR { alu_op, rd, ri, rm } => {
1273 let op = match alu_op {
1274 ALUOp::Add32 => "ar",
1275 ALUOp::Add64 => "agr",
1276 ALUOp::Add64Ext32 => "agfr",
1277 ALUOp::AddLogical32 => "alr",
1278 ALUOp::AddLogical64 => "algr",
1279 ALUOp::AddLogical64Ext32 => "algfr",
1280 ALUOp::Sub32 => "sr",
1281 ALUOp::Sub64 => "sgr",
1282 ALUOp::Sub64Ext32 => "sgfr",
1283 ALUOp::SubLogical32 => "slr",
1284 ALUOp::SubLogical64 => "slgr",
1285 ALUOp::SubLogical64Ext32 => "slgfr",
1286 ALUOp::Mul32 => "msr",
1287 ALUOp::Mul64 => "msgr",
1288 ALUOp::Mul64Ext32 => "msgfr",
1289 ALUOp::And32 => "nr",
1290 ALUOp::And64 => "ngr",
1291 ALUOp::Orr32 => "or",
1292 ALUOp::Orr64 => "ogr",
1293 ALUOp::Xor32 => "xr",
1294 ALUOp::Xor64 => "xgr",
1295 _ => unreachable!(),
1296 };
1297 let rd = pretty_print_reg_mod(rd, ri);
1298 let rm = pretty_print_reg(rm);
1299 format!("{op} {rd}, {rm}")
1300 }
1301 &Inst::AluRX {
1302 alu_op,
1303 rd,
1304 ri,
1305 ref mem,
1306 } => {
1307 let (opcode_rx, opcode_rxy) = match alu_op {
1308 ALUOp::Add32 => (Some("a"), Some("ay")),
1309 ALUOp::Add32Ext16 => (Some("ah"), Some("ahy")),
1310 ALUOp::Add64 => (None, Some("ag")),
1311 ALUOp::Add64Ext16 => (None, Some("agh")),
1312 ALUOp::Add64Ext32 => (None, Some("agf")),
1313 ALUOp::AddLogical32 => (Some("al"), Some("aly")),
1314 ALUOp::AddLogical64 => (None, Some("alg")),
1315 ALUOp::AddLogical64Ext32 => (None, Some("algf")),
1316 ALUOp::Sub32 => (Some("s"), Some("sy")),
1317 ALUOp::Sub32Ext16 => (Some("sh"), Some("shy")),
1318 ALUOp::Sub64 => (None, Some("sg")),
1319 ALUOp::Sub64Ext16 => (None, Some("sgh")),
1320 ALUOp::Sub64Ext32 => (None, Some("sgf")),
1321 ALUOp::SubLogical32 => (Some("sl"), Some("sly")),
1322 ALUOp::SubLogical64 => (None, Some("slg")),
1323 ALUOp::SubLogical64Ext32 => (None, Some("slgf")),
1324 ALUOp::Mul32 => (Some("ms"), Some("msy")),
1325 ALUOp::Mul32Ext16 => (Some("mh"), Some("mhy")),
1326 ALUOp::Mul64 => (None, Some("msg")),
1327 ALUOp::Mul64Ext16 => (None, Some("mgh")),
1328 ALUOp::Mul64Ext32 => (None, Some("msgf")),
1329 ALUOp::And32 => (Some("n"), Some("ny")),
1330 ALUOp::And64 => (None, Some("ng")),
1331 ALUOp::Orr32 => (Some("o"), Some("oy")),
1332 ALUOp::Orr64 => (None, Some("og")),
1333 ALUOp::Xor32 => (Some("x"), Some("xy")),
1334 ALUOp::Xor64 => (None, Some("xg")),
1335 _ => unreachable!(),
1336 };
1337
1338 let rd = pretty_print_reg_mod(rd, ri);
1339 let mem = mem.clone();
1340 let (mem_str, mem) = mem_finalize_for_show(
1341 &mem,
1342 state,
1343 MemInstType {
1344 have_d12: opcode_rx.is_some(),
1345 have_d20: opcode_rxy.is_some(),
1346 have_pcrel: false,
1347 have_unaligned_pcrel: false,
1348 have_index: true,
1349 },
1350 );
1351 let op = match &mem {
1352 &MemArg::BXD12 { .. } => opcode_rx,
1353 &MemArg::BXD20 { .. } => opcode_rxy,
1354 _ => unreachable!(),
1355 };
1356 let mem = mem.pretty_print_default();
1357
1358 format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem)
1359 }
1360 &Inst::AluRSImm16 {
1361 alu_op,
1362 rd,
1363 ri,
1364 imm,
1365 } => {
1366 let op = match alu_op {
1367 ALUOp::Add32 => "ahi",
1368 ALUOp::Add64 => "aghi",
1369 ALUOp::Mul32 => "mhi",
1370 ALUOp::Mul64 => "mghi",
1371 _ => unreachable!(),
1372 };
1373 let rd = pretty_print_reg_mod(rd, ri);
1374 format!("{op} {rd}, {imm}")
1375 }
1376 &Inst::AluRSImm32 {
1377 alu_op,
1378 rd,
1379 ri,
1380 imm,
1381 } => {
1382 let op = match alu_op {
1383 ALUOp::Add32 => "afi",
1384 ALUOp::Add64 => "agfi",
1385 ALUOp::Mul32 => "msfi",
1386 ALUOp::Mul64 => "msgfi",
1387 _ => unreachable!(),
1388 };
1389 let rd = pretty_print_reg_mod(rd, ri);
1390 format!("{op} {rd}, {imm}")
1391 }
1392 &Inst::AluRUImm32 {
1393 alu_op,
1394 rd,
1395 ri,
1396 imm,
1397 } => {
1398 let op = match alu_op {
1399 ALUOp::AddLogical32 => "alfi",
1400 ALUOp::AddLogical64 => "algfi",
1401 ALUOp::SubLogical32 => "slfi",
1402 ALUOp::SubLogical64 => "slgfi",
1403 _ => unreachable!(),
1404 };
1405 let rd = pretty_print_reg_mod(rd, ri);
1406 format!("{op} {rd}, {imm}")
1407 }
1408 &Inst::AluRUImm16Shifted {
1409 alu_op,
1410 rd,
1411 ri,
1412 imm,
1413 } => {
1414 let op = match (alu_op, imm.shift) {
1415 (ALUOp::And32, 0) => "nill",
1416 (ALUOp::And32, 1) => "nilh",
1417 (ALUOp::And64, 0) => "nill",
1418 (ALUOp::And64, 1) => "nilh",
1419 (ALUOp::And64, 2) => "nihl",
1420 (ALUOp::And64, 3) => "nihh",
1421 (ALUOp::Orr32, 0) => "oill",
1422 (ALUOp::Orr32, 1) => "oilh",
1423 (ALUOp::Orr64, 0) => "oill",
1424 (ALUOp::Orr64, 1) => "oilh",
1425 (ALUOp::Orr64, 2) => "oihl",
1426 (ALUOp::Orr64, 3) => "oihh",
1427 _ => unreachable!(),
1428 };
1429 let rd = pretty_print_reg_mod(rd, ri);
1430 format!("{} {}, {}", op, rd, imm.bits)
1431 }
1432 &Inst::AluRUImm32Shifted {
1433 alu_op,
1434 rd,
1435 ri,
1436 imm,
1437 } => {
1438 let op = match (alu_op, imm.shift) {
1439 (ALUOp::And32, 0) => "nilf",
1440 (ALUOp::And64, 0) => "nilf",
1441 (ALUOp::And64, 1) => "nihf",
1442 (ALUOp::Orr32, 0) => "oilf",
1443 (ALUOp::Orr64, 0) => "oilf",
1444 (ALUOp::Orr64, 1) => "oihf",
1445 (ALUOp::Xor32, 0) => "xilf",
1446 (ALUOp::Xor64, 0) => "xilf",
1447 (ALUOp::Xor64, 1) => "xihf",
1448 _ => unreachable!(),
1449 };
1450 let rd = pretty_print_reg_mod(rd, ri);
1451 format!("{} {}, {}", op, rd, imm.bits)
1452 }
1453 &Inst::SMulWide { rd, rn, rm } => {
1454 let op = "mgrk";
1455 let rn = pretty_print_reg(rn);
1456 let rm = pretty_print_reg(rm);
1457 let rd = pretty_print_regpair(rd.to_regpair());
1458 format!("{op} {rd}, {rn}, {rm}")
1459 }
1460 &Inst::UMulWide { rd, ri, rn } => {
1461 let op = "mlgr";
1462 let rn = pretty_print_reg(rn);
1463 let rd = pretty_print_regpair_mod_lo(rd, ri);
1464 format!("{op} {rd}, {rn}")
1465 }
1466 &Inst::SDivMod32 { rd, ri, rn } => {
1467 let op = "dsgfr";
1468 let rn = pretty_print_reg(rn);
1469 let rd = pretty_print_regpair_mod_lo(rd, ri);
1470 format!("{op} {rd}, {rn}")
1471 }
1472 &Inst::SDivMod64 { rd, ri, rn } => {
1473 let op = "dsgr";
1474 let rn = pretty_print_reg(rn);
1475 let rd = pretty_print_regpair_mod_lo(rd, ri);
1476 format!("{op} {rd}, {rn}")
1477 }
1478 &Inst::UDivMod32 { rd, ri, rn } => {
1479 let op = "dlr";
1480 let rn = pretty_print_reg(rn);
1481 let rd = pretty_print_regpair_mod(rd, ri);
1482 format!("{op} {rd}, {rn}")
1483 }
1484 &Inst::UDivMod64 { rd, ri, rn } => {
1485 let op = "dlgr";
1486 let rn = pretty_print_reg(rn);
1487 let rd = pretty_print_regpair_mod(rd, ri);
1488 format!("{op} {rd}, {rn}")
1489 }
1490 &Inst::Flogr { rd, rn } => {
1491 let op = "flogr";
1492 let rn = pretty_print_reg(rn);
1493 let rd = pretty_print_regpair(rd.to_regpair());
1494 format!("{op} {rd}, {rn}")
1495 }
1496 &Inst::ShiftRR {
1497 shift_op,
1498 rd,
1499 rn,
1500 shift_imm,
1501 shift_reg,
1502 } => {
1503 let op = match shift_op {
1504 ShiftOp::RotL32 => "rll",
1505 ShiftOp::RotL64 => "rllg",
1506 ShiftOp::LShL32 => "sllk",
1507 ShiftOp::LShL64 => "sllg",
1508 ShiftOp::LShR32 => "srlk",
1509 ShiftOp::LShR64 => "srlg",
1510 ShiftOp::AShR32 => "srak",
1511 ShiftOp::AShR64 => "srag",
1512 };
1513 let rd = pretty_print_reg(rd.to_reg());
1514 let rn = pretty_print_reg(rn);
1515 let shift_reg = if shift_reg != zero_reg() {
1516 format!("({})", pretty_print_reg(shift_reg))
1517 } else {
1518 "".to_string()
1519 };
1520 format!("{op} {rd}, {rn}, {shift_imm}{shift_reg}")
1521 }
1522 &Inst::RxSBG {
1523 op,
1524 rd,
1525 ri,
1526 rn,
1527 start_bit,
1528 end_bit,
1529 rotate_amt,
1530 } => {
1531 let op = match op {
1532 RxSBGOp::Insert => "risbgn",
1533 RxSBGOp::And => "rnsbg",
1534 RxSBGOp::Or => "rosbg",
1535 RxSBGOp::Xor => "rxsbg",
1536 };
1537 let rd = pretty_print_reg_mod(rd, ri);
1538 let rn = pretty_print_reg(rn);
1539 format!(
1540 "{} {}, {}, {}, {}, {}",
1541 op,
1542 rd,
1543 rn,
1544 start_bit,
1545 end_bit,
1546 (rotate_amt as u8) & 63
1547 )
1548 }
1549 &Inst::RxSBGTest {
1550 op,
1551 rd,
1552 rn,
1553 start_bit,
1554 end_bit,
1555 rotate_amt,
1556 } => {
1557 let op = match op {
1558 RxSBGOp::And => "rnsbg",
1559 RxSBGOp::Or => "rosbg",
1560 RxSBGOp::Xor => "rxsbg",
1561 _ => unreachable!(),
1562 };
1563 let rd = pretty_print_reg(rd);
1564 let rn = pretty_print_reg(rn);
1565 format!(
1566 "{} {}, {}, {}, {}, {}",
1567 op,
1568 rd,
1569 rn,
1570 start_bit | 0x80,
1571 end_bit,
1572 (rotate_amt as u8) & 63
1573 )
1574 }
1575 &Inst::UnaryRR { op, rd, rn } => {
1576 let (op, extra) = match op {
1577 UnaryOp::Abs32 => ("lpr", ""),
1578 UnaryOp::Abs64 => ("lpgr", ""),
1579 UnaryOp::Abs64Ext32 => ("lpgfr", ""),
1580 UnaryOp::Neg32 => ("lcr", ""),
1581 UnaryOp::Neg64 => ("lcgr", ""),
1582 UnaryOp::Neg64Ext32 => ("lcgfr", ""),
1583 UnaryOp::PopcntByte => ("popcnt", ""),
1584 UnaryOp::PopcntReg => ("popcnt", ", 8"),
1585 UnaryOp::BSwap32 => ("lrvr", ""),
1586 UnaryOp::BSwap64 => ("lrvgr", ""),
1587 };
1588 let rd = pretty_print_reg(rd.to_reg());
1589 let rn = pretty_print_reg(rn);
1590 format!("{op} {rd}, {rn}{extra}")
1591 }
1592 &Inst::CmpRR { op, rn, rm } => {
1593 let op = match op {
1594 CmpOp::CmpS32 => "cr",
1595 CmpOp::CmpS64 => "cgr",
1596 CmpOp::CmpS64Ext32 => "cgfr",
1597 CmpOp::CmpL32 => "clr",
1598 CmpOp::CmpL64 => "clgr",
1599 CmpOp::CmpL64Ext32 => "clgfr",
1600 _ => unreachable!(),
1601 };
1602 let rn = pretty_print_reg(rn);
1603 let rm = pretty_print_reg(rm);
1604 format!("{op} {rn}, {rm}")
1605 }
1606 &Inst::CmpRX { op, rn, ref mem } => {
1607 let (opcode_rx, opcode_rxy, opcode_ril) = match op {
1608 CmpOp::CmpS32 => (Some("c"), Some("cy"), Some("crl")),
1609 CmpOp::CmpS32Ext16 => (Some("ch"), Some("chy"), Some("chrl")),
1610 CmpOp::CmpS64 => (None, Some("cg"), Some("cgrl")),
1611 CmpOp::CmpS64Ext16 => (None, Some("cgh"), Some("cghrl")),
1612 CmpOp::CmpS64Ext32 => (None, Some("cgf"), Some("cgfrl")),
1613 CmpOp::CmpL32 => (Some("cl"), Some("cly"), Some("clrl")),
1614 CmpOp::CmpL32Ext16 => (None, None, Some("clhrl")),
1615 CmpOp::CmpL64 => (None, Some("clg"), Some("clgrl")),
1616 CmpOp::CmpL64Ext16 => (None, None, Some("clghrl")),
1617 CmpOp::CmpL64Ext32 => (None, Some("clgf"), Some("clgfrl")),
1618 };
1619
1620 let rn = pretty_print_reg(rn);
1621 let mem = mem.clone();
1622 let (mem_str, mem) = mem_finalize_for_show(
1623 &mem,
1624 state,
1625 MemInstType {
1626 have_d12: opcode_rx.is_some(),
1627 have_d20: opcode_rxy.is_some(),
1628 have_pcrel: opcode_ril.is_some(),
1629 have_unaligned_pcrel: false,
1630 have_index: true,
1631 },
1632 );
1633 let op = match &mem {
1634 &MemArg::BXD12 { .. } => opcode_rx,
1635 &MemArg::BXD20 { .. } => opcode_rxy,
1636 &MemArg::Label { .. } | &MemArg::Symbol { .. } => opcode_ril,
1637 _ => unreachable!(),
1638 };
1639 let mem = mem.pretty_print_default();
1640
1641 format!("{}{} {}, {}", mem_str, op.unwrap(), rn, mem)
1642 }
1643 &Inst::CmpRSImm16 { op, rn, imm } => {
1644 let op = match op {
1645 CmpOp::CmpS32 => "chi",
1646 CmpOp::CmpS64 => "cghi",
1647 _ => unreachable!(),
1648 };
1649 let rn = pretty_print_reg(rn);
1650 format!("{op} {rn}, {imm}")
1651 }
1652 &Inst::CmpRSImm32 { op, rn, imm } => {
1653 let op = match op {
1654 CmpOp::CmpS32 => "cfi",
1655 CmpOp::CmpS64 => "cgfi",
1656 _ => unreachable!(),
1657 };
1658 let rn = pretty_print_reg(rn);
1659 format!("{op} {rn}, {imm}")
1660 }
1661 &Inst::CmpRUImm32 { op, rn, imm } => {
1662 let op = match op {
1663 CmpOp::CmpL32 => "clfi",
1664 CmpOp::CmpL64 => "clgfi",
1665 _ => unreachable!(),
1666 };
1667 let rn = pretty_print_reg(rn);
1668 format!("{op} {rn}, {imm}")
1669 }
1670 &Inst::CmpTrapRR {
1671 op, rn, rm, cond, ..
1672 } => {
1673 let op = match op {
1674 CmpOp::CmpS32 => "crt",
1675 CmpOp::CmpS64 => "cgrt",
1676 CmpOp::CmpL32 => "clrt",
1677 CmpOp::CmpL64 => "clgrt",
1678 _ => unreachable!(),
1679 };
1680 let rn = pretty_print_reg(rn);
1681 let rm = pretty_print_reg(rm);
1682 let cond = cond.pretty_print_default();
1683 format!("{op}{cond} {rn}, {rm}")
1684 }
1685 &Inst::CmpTrapRSImm16 {
1686 op, rn, imm, cond, ..
1687 } => {
1688 let op = match op {
1689 CmpOp::CmpS32 => "cit",
1690 CmpOp::CmpS64 => "cgit",
1691 _ => unreachable!(),
1692 };
1693 let rn = pretty_print_reg(rn);
1694 let cond = cond.pretty_print_default();
1695 format!("{op}{cond} {rn}, {imm}")
1696 }
1697 &Inst::CmpTrapRUImm16 {
1698 op, rn, imm, cond, ..
1699 } => {
1700 let op = match op {
1701 CmpOp::CmpL32 => "clfit",
1702 CmpOp::CmpL64 => "clgit",
1703 _ => unreachable!(),
1704 };
1705 let rn = pretty_print_reg(rn);
1706 let cond = cond.pretty_print_default();
1707 format!("{op}{cond} {rn}, {imm}")
1708 }
1709 &Inst::AtomicRmw {
1710 alu_op,
1711 rd,
1712 rn,
1713 ref mem,
1714 } => {
1715 let op = match alu_op {
1716 ALUOp::Add32 => "laa",
1717 ALUOp::Add64 => "laag",
1718 ALUOp::AddLogical32 => "laal",
1719 ALUOp::AddLogical64 => "laalg",
1720 ALUOp::And32 => "lan",
1721 ALUOp::And64 => "lang",
1722 ALUOp::Orr32 => "lao",
1723 ALUOp::Orr64 => "laog",
1724 ALUOp::Xor32 => "lax",
1725 ALUOp::Xor64 => "laxg",
1726 _ => unreachable!(),
1727 };
1728
1729 let rd = pretty_print_reg(rd.to_reg());
1730 let rn = pretty_print_reg(rn);
1731 let mem = mem.clone();
1732 let (mem_str, mem) = mem_finalize_for_show(
1733 &mem,
1734 state,
1735 MemInstType {
1736 have_d12: false,
1737 have_d20: true,
1738 have_pcrel: false,
1739 have_unaligned_pcrel: false,
1740 have_index: false,
1741 },
1742 );
1743 let mem = mem.pretty_print_default();
1744 format!("{mem_str}{op} {rd}, {rn}, {mem}")
1745 }
1746 &Inst::AtomicCas32 {
1747 rd,
1748 ri,
1749 rn,
1750 ref mem,
1751 }
1752 | &Inst::AtomicCas64 {
1753 rd,
1754 ri,
1755 rn,
1756 ref mem,
1757 } => {
1758 let (opcode_rs, opcode_rsy) = match self {
1759 &Inst::AtomicCas32 { .. } => (Some("cs"), Some("csy")),
1760 &Inst::AtomicCas64 { .. } => (None, Some("csg")),
1761 _ => unreachable!(),
1762 };
1763
1764 let rd = pretty_print_reg_mod(rd, ri);
1765 let rn = pretty_print_reg(rn);
1766 let mem = mem.clone();
1767 let (mem_str, mem) = mem_finalize_for_show(
1768 &mem,
1769 state,
1770 MemInstType {
1771 have_d12: opcode_rs.is_some(),
1772 have_d20: opcode_rsy.is_some(),
1773 have_pcrel: false,
1774 have_unaligned_pcrel: false,
1775 have_index: false,
1776 },
1777 );
1778 let op = match &mem {
1779 &MemArg::BXD12 { .. } => opcode_rs,
1780 &MemArg::BXD20 { .. } => opcode_rsy,
1781 _ => unreachable!(),
1782 };
1783 let mem = mem.pretty_print_default();
1784
1785 format!("{}{} {}, {}, {}", mem_str, op.unwrap(), rd, rn, mem)
1786 }
1787 &Inst::Fence => "bcr 14, 0".to_string(),
1788 &Inst::Load32 { rd, ref mem }
1789 | &Inst::Load32ZExt8 { rd, ref mem }
1790 | &Inst::Load32SExt8 { rd, ref mem }
1791 | &Inst::Load32ZExt16 { rd, ref mem }
1792 | &Inst::Load32SExt16 { rd, ref mem }
1793 | &Inst::Load64 { rd, ref mem }
1794 | &Inst::Load64ZExt8 { rd, ref mem }
1795 | &Inst::Load64SExt8 { rd, ref mem }
1796 | &Inst::Load64ZExt16 { rd, ref mem }
1797 | &Inst::Load64SExt16 { rd, ref mem }
1798 | &Inst::Load64ZExt32 { rd, ref mem }
1799 | &Inst::Load64SExt32 { rd, ref mem }
1800 | &Inst::LoadRev16 { rd, ref mem }
1801 | &Inst::LoadRev32 { rd, ref mem }
1802 | &Inst::LoadRev64 { rd, ref mem } => {
1803 let (opcode_rx, opcode_rxy, opcode_ril) = match self {
1804 &Inst::Load32 { .. } => (Some("l"), Some("ly"), Some("lrl")),
1805 &Inst::Load32ZExt8 { .. } => (None, Some("llc"), None),
1806 &Inst::Load32SExt8 { .. } => (None, Some("lb"), None),
1807 &Inst::Load32ZExt16 { .. } => (None, Some("llh"), Some("llhrl")),
1808 &Inst::Load32SExt16 { .. } => (Some("lh"), Some("lhy"), Some("lhrl")),
1809 &Inst::Load64 { .. } => (None, Some("lg"), Some("lgrl")),
1810 &Inst::Load64ZExt8 { .. } => (None, Some("llgc"), None),
1811 &Inst::Load64SExt8 { .. } => (None, Some("lgb"), None),
1812 &Inst::Load64ZExt16 { .. } => (None, Some("llgh"), Some("llghrl")),
1813 &Inst::Load64SExt16 { .. } => (None, Some("lgh"), Some("lghrl")),
1814 &Inst::Load64ZExt32 { .. } => (None, Some("llgf"), Some("llgfrl")),
1815 &Inst::Load64SExt32 { .. } => (None, Some("lgf"), Some("lgfrl")),
1816 &Inst::LoadRev16 { .. } => (None, Some("lrvh"), None),
1817 &Inst::LoadRev32 { .. } => (None, Some("lrv"), None),
1818 &Inst::LoadRev64 { .. } => (None, Some("lrvg"), None),
1819 _ => unreachable!(),
1820 };
1821
1822 let rd = pretty_print_reg(rd.to_reg());
1823 let mem = mem.clone();
1824 let (mem_str, mem) = mem_finalize_for_show(
1825 &mem,
1826 state,
1827 MemInstType {
1828 have_d12: opcode_rx.is_some(),
1829 have_d20: opcode_rxy.is_some(),
1830 have_pcrel: opcode_ril.is_some(),
1831 have_unaligned_pcrel: false,
1832 have_index: true,
1833 },
1834 );
1835 let op = match &mem {
1836 &MemArg::BXD12 { .. } => opcode_rx,
1837 &MemArg::BXD20 { .. } => opcode_rxy,
1838 &MemArg::Label { .. } | &MemArg::Symbol { .. } => opcode_ril,
1839 _ => unreachable!(),
1840 };
1841 let mem = mem.pretty_print_default();
1842 format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem)
1843 }
1844 &Inst::Store8 { rd, ref mem }
1845 | &Inst::Store16 { rd, ref mem }
1846 | &Inst::Store32 { rd, ref mem }
1847 | &Inst::Store64 { rd, ref mem }
1848 | &Inst::StoreRev16 { rd, ref mem }
1849 | &Inst::StoreRev32 { rd, ref mem }
1850 | &Inst::StoreRev64 { rd, ref mem } => {
1851 let (opcode_rx, opcode_rxy, opcode_ril) = match self {
1852 &Inst::Store8 { .. } => (Some("stc"), Some("stcy"), None),
1853 &Inst::Store16 { .. } => (Some("sth"), Some("sthy"), Some("sthrl")),
1854 &Inst::Store32 { .. } => (Some("st"), Some("sty"), Some("strl")),
1855 &Inst::Store64 { .. } => (None, Some("stg"), Some("stgrl")),
1856 &Inst::StoreRev16 { .. } => (None, Some("strvh"), None),
1857 &Inst::StoreRev32 { .. } => (None, Some("strv"), None),
1858 &Inst::StoreRev64 { .. } => (None, Some("strvg"), None),
1859 _ => unreachable!(),
1860 };
1861
1862 let rd = pretty_print_reg(rd);
1863 let mem = mem.clone();
1864 let (mem_str, mem) = mem_finalize_for_show(
1865 &mem,
1866 state,
1867 MemInstType {
1868 have_d12: opcode_rx.is_some(),
1869 have_d20: opcode_rxy.is_some(),
1870 have_pcrel: opcode_ril.is_some(),
1871 have_unaligned_pcrel: false,
1872 have_index: true,
1873 },
1874 );
1875 let op = match &mem {
1876 &MemArg::BXD12 { .. } => opcode_rx,
1877 &MemArg::BXD20 { .. } => opcode_rxy,
1878 &MemArg::Label { .. } | &MemArg::Symbol { .. } => opcode_ril,
1879 _ => unreachable!(),
1880 };
1881 let mem = mem.pretty_print_default();
1882
1883 format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem)
1884 }
1885 &Inst::StoreImm8 { imm, ref mem } => {
1886 let mem = mem.clone();
1887 let (mem_str, mem) = mem_finalize_for_show(
1888 &mem,
1889 state,
1890 MemInstType {
1891 have_d12: true,
1892 have_d20: true,
1893 have_pcrel: false,
1894 have_unaligned_pcrel: false,
1895 have_index: false,
1896 },
1897 );
1898 let op = match &mem {
1899 &MemArg::BXD12 { .. } => "mvi",
1900 &MemArg::BXD20 { .. } => "mviy",
1901 _ => unreachable!(),
1902 };
1903 let mem = mem.pretty_print_default();
1904
1905 format!("{mem_str}{op} {mem}, {imm}")
1906 }
1907 &Inst::StoreImm16 { imm, ref mem }
1908 | &Inst::StoreImm32SExt16 { imm, ref mem }
1909 | &Inst::StoreImm64SExt16 { imm, ref mem } => {
1910 let mem = mem.clone();
1911 let (mem_str, mem) = mem_finalize_for_show(
1912 &mem,
1913 state,
1914 MemInstType {
1915 have_d12: false,
1916 have_d20: true,
1917 have_pcrel: false,
1918 have_unaligned_pcrel: false,
1919 have_index: false,
1920 },
1921 );
1922 let op = match self {
1923 &Inst::StoreImm16 { .. } => "mvhhi",
1924 &Inst::StoreImm32SExt16 { .. } => "mvhi",
1925 &Inst::StoreImm64SExt16 { .. } => "mvghi",
1926 _ => unreachable!(),
1927 };
1928 let mem = mem.pretty_print_default();
1929
1930 format!("{mem_str}{op} {mem}, {imm}")
1931 }
1932 &Inst::LoadMultiple64 { rt, rt2, ref mem } => {
1933 let mem = mem.clone();
1934 let (mem_str, mem) = mem_finalize_for_show(
1935 &mem,
1936 state,
1937 MemInstType {
1938 have_d12: false,
1939 have_d20: true,
1940 have_pcrel: false,
1941 have_unaligned_pcrel: false,
1942 have_index: false,
1943 },
1944 );
1945 let rt = pretty_print_reg(rt.to_reg());
1946 let rt2 = pretty_print_reg(rt2.to_reg());
1947 let mem = mem.pretty_print_default();
1948 format!("{mem_str}lmg {rt}, {rt2}, {mem}")
1949 }
1950 &Inst::StoreMultiple64 { rt, rt2, ref mem } => {
1951 let mem = mem.clone();
1952 let (mem_str, mem) = mem_finalize_for_show(
1953 &mem,
1954 state,
1955 MemInstType {
1956 have_d12: false,
1957 have_d20: true,
1958 have_pcrel: false,
1959 have_unaligned_pcrel: false,
1960 have_index: false,
1961 },
1962 );
1963 let rt = pretty_print_reg(rt);
1964 let rt2 = pretty_print_reg(rt2);
1965 let mem = mem.pretty_print_default();
1966 format!("{mem_str}stmg {rt}, {rt2}, {mem}")
1967 }
1968 &Inst::Mov64 { rd, rm } => {
1969 let rd = pretty_print_reg(rd.to_reg());
1970 let rm = pretty_print_reg(rm);
1971 format!("lgr {rd}, {rm}")
1972 }
1973 &Inst::MovPReg { rd, rm } => {
1974 let rd = pretty_print_reg(rd.to_reg());
1975 let rm = show_reg(rm.into());
1976 format!("lgr {rd}, {rm}")
1977 }
1978 &Inst::Mov32 { rd, rm } => {
1979 let rd = pretty_print_reg(rd.to_reg());
1980 let rm = pretty_print_reg(rm);
1981 format!("lr {rd}, {rm}")
1982 }
1983 &Inst::Mov32Imm { rd, ref imm } => {
1984 let rd = pretty_print_reg(rd.to_reg());
1985 format!("iilf {rd}, {imm}")
1986 }
1987 &Inst::Mov32SImm16 { rd, ref imm } => {
1988 let rd = pretty_print_reg(rd.to_reg());
1989 format!("lhi {rd}, {imm}")
1990 }
1991 &Inst::Mov64SImm16 { rd, ref imm } => {
1992 let rd = pretty_print_reg(rd.to_reg());
1993 format!("lghi {rd}, {imm}")
1994 }
1995 &Inst::Mov64SImm32 { rd, ref imm } => {
1996 let rd = pretty_print_reg(rd.to_reg());
1997 format!("lgfi {rd}, {imm}")
1998 }
1999 &Inst::Mov64UImm16Shifted { rd, ref imm } => {
2000 let rd = pretty_print_reg(rd.to_reg());
2001 let op = match imm.shift {
2002 0 => "llill",
2003 1 => "llilh",
2004 2 => "llihl",
2005 3 => "llihh",
2006 _ => unreachable!(),
2007 };
2008 format!("{} {}, {}", op, rd, imm.bits)
2009 }
2010 &Inst::Mov64UImm32Shifted { rd, ref imm } => {
2011 let rd = pretty_print_reg(rd.to_reg());
2012 let op = match imm.shift {
2013 0 => "llilf",
2014 1 => "llihf",
2015 _ => unreachable!(),
2016 };
2017 format!("{} {}, {}", op, rd, imm.bits)
2018 }
2019 &Inst::Insert64UImm16Shifted { rd, ri, ref imm } => {
2020 let rd = pretty_print_reg_mod(rd, ri);
2021 let op = match imm.shift {
2022 0 => "iill",
2023 1 => "iilh",
2024 2 => "iihl",
2025 3 => "iihh",
2026 _ => unreachable!(),
2027 };
2028 format!("{} {}, {}", op, rd, imm.bits)
2029 }
2030 &Inst::Insert64UImm32Shifted { rd, ri, ref imm } => {
2031 let rd = pretty_print_reg_mod(rd, ri);
2032 let op = match imm.shift {
2033 0 => "iilf",
2034 1 => "iihf",
2035 _ => unreachable!(),
2036 };
2037 format!("{} {}, {}", op, rd, imm.bits)
2038 }
2039 &Inst::LoadAR { rd, ar } => {
2040 let rd = pretty_print_reg(rd.to_reg());
2041 format!("ear {rd}, %a{ar}")
2042 }
2043 &Inst::InsertAR { rd, ri, ar } => {
2044 let rd = pretty_print_reg_mod(rd, ri);
2045 format!("ear {rd}, %a{ar}")
2046 }
2047 &Inst::CMov32 { rd, cond, ri, rm } => {
2048 let rd = pretty_print_reg_mod(rd, ri);
2049 let rm = pretty_print_reg(rm);
2050 let cond = cond.pretty_print_default();
2051 format!("locr{cond} {rd}, {rm}")
2052 }
2053 &Inst::CMov64 { rd, cond, ri, rm } => {
2054 let rd = pretty_print_reg_mod(rd, ri);
2055 let rm = pretty_print_reg(rm);
2056 let cond = cond.pretty_print_default();
2057 format!("locgr{cond} {rd}, {rm}")
2058 }
2059 &Inst::CMov32SImm16 {
2060 rd,
2061 cond,
2062 ri,
2063 ref imm,
2064 } => {
2065 let rd = pretty_print_reg_mod(rd, ri);
2066 let cond = cond.pretty_print_default();
2067 format!("lochi{cond} {rd}, {imm}")
2068 }
2069 &Inst::CMov64SImm16 {
2070 rd,
2071 cond,
2072 ri,
2073 ref imm,
2074 } => {
2075 let rd = pretty_print_reg_mod(rd, ri);
2076 let cond = cond.pretty_print_default();
2077 format!("locghi{cond} {rd}, {imm}")
2078 }
2079 &Inst::FpuMove32 { rd, rn } => {
2080 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2081 let (rn, rn_fpr) = pretty_print_fpr(rn);
2082 if rd_fpr.is_some() && rn_fpr.is_some() {
2083 format!("ler {}, {}", rd_fpr.unwrap(), rn_fpr.unwrap())
2084 } else {
2085 format!("vlr {rd}, {rn}")
2086 }
2087 }
2088 &Inst::FpuMove64 { rd, rn } => {
2089 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2090 let (rn, rn_fpr) = pretty_print_fpr(rn);
2091 if rd_fpr.is_some() && rn_fpr.is_some() {
2092 format!("ldr {}, {}", rd_fpr.unwrap(), rn_fpr.unwrap())
2093 } else {
2094 format!("vlr {rd}, {rn}")
2095 }
2096 }
2097 &Inst::FpuCMov32 { rd, cond, rm, .. } => {
2098 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2099 let (rm, rm_fpr) = pretty_print_fpr(rm);
2100 if rd_fpr.is_some() && rm_fpr.is_some() {
2101 let cond = cond.invert().pretty_print_default();
2102 format!("j{} 6 ; ler {}, {}", cond, rd_fpr.unwrap(), rm_fpr.unwrap())
2103 } else {
2104 let cond = cond.invert().pretty_print_default();
2105 format!("j{cond} 10 ; vlr {rd}, {rm}")
2106 }
2107 }
2108 &Inst::FpuCMov64 { rd, cond, rm, .. } => {
2109 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2110 let (rm, rm_fpr) = pretty_print_fpr(rm);
2111 if rd_fpr.is_some() && rm_fpr.is_some() {
2112 let cond = cond.invert().pretty_print_default();
2113 format!("j{} 6 ; ldr {}, {}", cond, rd_fpr.unwrap(), rm_fpr.unwrap())
2114 } else {
2115 let cond = cond.invert().pretty_print_default();
2116 format!("j{cond} 10 ; vlr {rd}, {rm}")
2117 }
2118 }
2119 &Inst::FpuRR { fpu_op, rd, rn } => {
2120 let (op, op_fpr) = match fpu_op {
2121 FPUOp1::Abs32 => ("wflpsb", Some("lpebr")),
2122 FPUOp1::Abs64 => ("wflpdb", Some("lpdbr")),
2123 FPUOp1::Abs32x4 => ("vflpsb", None),
2124 FPUOp1::Abs64x2 => ("vflpdb", None),
2125 FPUOp1::Neg32 => ("wflcsb", Some("lcebr")),
2126 FPUOp1::Neg64 => ("wflcdb", Some("lcdbr")),
2127 FPUOp1::Neg32x4 => ("vflcsb", None),
2128 FPUOp1::Neg64x2 => ("vflcdb", None),
2129 FPUOp1::NegAbs32 => ("wflnsb", Some("lnebr")),
2130 FPUOp1::NegAbs64 => ("wflndb", Some("lndbr")),
2131 FPUOp1::NegAbs32x4 => ("vflnsb", None),
2132 FPUOp1::NegAbs64x2 => ("vflndb", None),
2133 FPUOp1::Sqrt32 => ("wfsqsb", Some("sqebr")),
2134 FPUOp1::Sqrt64 => ("wfsqdb", Some("sqdbr")),
2135 FPUOp1::Sqrt32x4 => ("vfsqsb", None),
2136 FPUOp1::Sqrt64x2 => ("vfsqdb", None),
2137 FPUOp1::Cvt32To64 => ("wldeb", Some("ldebr")),
2138 FPUOp1::Cvt32x4To64x2 => ("vldeb", None),
2139 };
2140
2141 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2142 let (rn, rn_fpr) = pretty_print_fpr(rn);
2143 if op_fpr.is_some() && rd_fpr.is_some() && rn_fpr.is_some() {
2144 format!(
2145 "{} {}, {}",
2146 op_fpr.unwrap(),
2147 rd_fpr.unwrap(),
2148 rn_fpr.unwrap()
2149 )
2150 } else if op.starts_with('w') {
2151 format!("{} {}, {}", op, rd_fpr.unwrap_or(rd), rn_fpr.unwrap_or(rn))
2152 } else {
2153 format!("{op} {rd}, {rn}")
2154 }
2155 }
2156 &Inst::FpuRRR { fpu_op, rd, rn, rm } => {
2157 let (op, opt_m6, op_fpr) = match fpu_op {
2158 FPUOp2::Add32 => ("wfasb", "", Some("aebr")),
2159 FPUOp2::Add64 => ("wfadb", "", Some("adbr")),
2160 FPUOp2::Add32x4 => ("vfasb", "", None),
2161 FPUOp2::Add64x2 => ("vfadb", "", None),
2162 FPUOp2::Sub32 => ("wfssb", "", Some("sebr")),
2163 FPUOp2::Sub64 => ("wfsdb", "", Some("sdbr")),
2164 FPUOp2::Sub32x4 => ("vfssb", "", None),
2165 FPUOp2::Sub64x2 => ("vfsdb", "", None),
2166 FPUOp2::Mul32 => ("wfmsb", "", Some("meebr")),
2167 FPUOp2::Mul64 => ("wfmdb", "", Some("mdbr")),
2168 FPUOp2::Mul32x4 => ("vfmsb", "", None),
2169 FPUOp2::Mul64x2 => ("vfmdb", "", None),
2170 FPUOp2::Div32 => ("wfdsb", "", Some("debr")),
2171 FPUOp2::Div64 => ("wfddb", "", Some("ddbr")),
2172 FPUOp2::Div32x4 => ("vfdsb", "", None),
2173 FPUOp2::Div64x2 => ("vfddb", "", None),
2174 FPUOp2::Max32 => ("wfmaxsb", ", 1", None),
2175 FPUOp2::Max64 => ("wfmaxdb", ", 1", None),
2176 FPUOp2::Max32x4 => ("vfmaxsb", ", 1", None),
2177 FPUOp2::Max64x2 => ("vfmaxdb", ", 1", None),
2178 FPUOp2::Min32 => ("wfminsb", ", 1", None),
2179 FPUOp2::Min64 => ("wfmindb", ", 1", None),
2180 FPUOp2::Min32x4 => ("vfminsb", ", 1", None),
2181 FPUOp2::Min64x2 => ("vfmindb", ", 1", None),
2182 FPUOp2::MaxPseudo32 => ("wfmaxsb", ", 3", None),
2183 FPUOp2::MaxPseudo64 => ("wfmaxdb", ", 3", None),
2184 FPUOp2::MaxPseudo32x4 => ("vfmaxsb", ", 3", None),
2185 FPUOp2::MaxPseudo64x2 => ("vfmaxdb", ", 3", None),
2186 FPUOp2::MinPseudo32 => ("wfminsb", ", 3", None),
2187 FPUOp2::MinPseudo64 => ("wfmindb", ", 3", None),
2188 FPUOp2::MinPseudo32x4 => ("vfminsb", ", 3", None),
2189 FPUOp2::MinPseudo64x2 => ("vfmindb", ", 3", None),
2190 };
2191
2192 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2193 let (rn, rn_fpr) = pretty_print_fpr(rn);
2194 let (rm, rm_fpr) = pretty_print_fpr(rm);
2195 if op_fpr.is_some() && rd == rn && rd_fpr.is_some() && rm_fpr.is_some() {
2196 format!(
2197 "{} {}, {}",
2198 op_fpr.unwrap(),
2199 rd_fpr.unwrap(),
2200 rm_fpr.unwrap()
2201 )
2202 } else if op.starts_with('w') {
2203 format!(
2204 "{} {}, {}, {}{}",
2205 op,
2206 rd_fpr.unwrap_or(rd),
2207 rn_fpr.unwrap_or(rn),
2208 rm_fpr.unwrap_or(rm),
2209 opt_m6
2210 )
2211 } else {
2212 format!("{op} {rd}, {rn}, {rm}{opt_m6}")
2213 }
2214 }
2215 &Inst::FpuRRRR {
2216 fpu_op,
2217 rd,
2218 rn,
2219 rm,
2220 ra,
2221 } => {
2222 let (op, op_fpr) = match fpu_op {
2223 FPUOp3::MAdd32 => ("wfmasb", Some("maebr")),
2224 FPUOp3::MAdd64 => ("wfmadb", Some("madbr")),
2225 FPUOp3::MAdd32x4 => ("vfmasb", None),
2226 FPUOp3::MAdd64x2 => ("vfmadb", None),
2227 FPUOp3::MSub32 => ("wfmssb", Some("msebr")),
2228 FPUOp3::MSub64 => ("wfmsdb", Some("msdbr")),
2229 FPUOp3::MSub32x4 => ("vfmssb", None),
2230 FPUOp3::MSub64x2 => ("vfmsdb", None),
2231 };
2232
2233 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2234 let (rn, rn_fpr) = pretty_print_fpr(rn);
2235 let (rm, rm_fpr) = pretty_print_fpr(rm);
2236 let (ra, ra_fpr) = pretty_print_fpr(ra);
2237 if op_fpr.is_some()
2238 && rd == ra
2239 && rd_fpr.is_some()
2240 && rn_fpr.is_some()
2241 && rm_fpr.is_some()
2242 {
2243 format!(
2244 "{} {}, {}, {}",
2245 op_fpr.unwrap(),
2246 rd_fpr.unwrap(),
2247 rn_fpr.unwrap(),
2248 rm_fpr.unwrap()
2249 )
2250 } else if op.starts_with('w') {
2251 format!(
2252 "{} {}, {}, {}, {}",
2253 op,
2254 rd_fpr.unwrap_or(rd),
2255 rn_fpr.unwrap_or(rn),
2256 rm_fpr.unwrap_or(rm),
2257 ra_fpr.unwrap_or(ra)
2258 )
2259 } else {
2260 format!("{op} {rd}, {rn}, {rm}, {ra}")
2261 }
2262 }
2263 &Inst::FpuCmp32 { rn, rm } => {
2264 let (rn, rn_fpr) = pretty_print_fpr(rn);
2265 let (rm, rm_fpr) = pretty_print_fpr(rm);
2266 if rn_fpr.is_some() && rm_fpr.is_some() {
2267 format!("cebr {}, {}", rn_fpr.unwrap(), rm_fpr.unwrap())
2268 } else {
2269 format!("wfcsb {}, {}", rn_fpr.unwrap_or(rn), rm_fpr.unwrap_or(rm))
2270 }
2271 }
2272 &Inst::FpuCmp64 { rn, rm } => {
2273 let (rn, rn_fpr) = pretty_print_fpr(rn);
2274 let (rm, rm_fpr) = pretty_print_fpr(rm);
2275 if rn_fpr.is_some() && rm_fpr.is_some() {
2276 format!("cdbr {}, {}", rn_fpr.unwrap(), rm_fpr.unwrap())
2277 } else {
2278 format!("wfcdb {}, {}", rn_fpr.unwrap_or(rn), rm_fpr.unwrap_or(rm))
2279 }
2280 }
2281 &Inst::LoadFpuConst32 { rd, const_data } => {
2282 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2283 let tmp = pretty_print_reg(writable_spilltmp_reg().to_reg());
2284 if rd_fpr.is_some() {
2285 format!(
2286 "bras {}, 8 ; data.f32 {} ; le {}, 0({})",
2287 tmp,
2288 f32::from_bits(const_data),
2289 rd_fpr.unwrap(),
2290 tmp
2291 )
2292 } else {
2293 format!(
2294 "bras {}, 8 ; data.f32 {} ; vlef {}, 0({}), 0",
2295 tmp,
2296 f32::from_bits(const_data),
2297 rd,
2298 tmp
2299 )
2300 }
2301 }
2302 &Inst::LoadFpuConst64 { rd, const_data } => {
2303 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2304 let tmp = pretty_print_reg(writable_spilltmp_reg().to_reg());
2305 if rd_fpr.is_some() {
2306 format!(
2307 "bras {}, 12 ; data.f64 {} ; ld {}, 0({})",
2308 tmp,
2309 f64::from_bits(const_data),
2310 rd_fpr.unwrap(),
2311 tmp
2312 )
2313 } else {
2314 format!(
2315 "bras {}, 12 ; data.f64 {} ; vleg {}, 0({}), 0",
2316 tmp,
2317 f64::from_bits(const_data),
2318 rd,
2319 tmp
2320 )
2321 }
2322 }
2323 &Inst::FpuRound { op, mode, rd, rn } => {
2324 let mode = match mode {
2325 FpuRoundMode::Current => 0,
2326 FpuRoundMode::ToNearest => 1,
2327 FpuRoundMode::ShorterPrecision => 3,
2328 FpuRoundMode::ToNearestTiesToEven => 4,
2329 FpuRoundMode::ToZero => 5,
2330 FpuRoundMode::ToPosInfinity => 6,
2331 FpuRoundMode::ToNegInfinity => 7,
2332 };
2333 let (opcode, opcode_fpr) = match op {
2334 FpuRoundOp::Cvt64To32 => ("wledb", Some("ledbra")),
2335 FpuRoundOp::Cvt64x2To32x4 => ("vledb", None),
2336 FpuRoundOp::Round32 => ("wfisb", Some("fiebr")),
2337 FpuRoundOp::Round64 => ("wfidb", Some("fidbr")),
2338 FpuRoundOp::Round32x4 => ("vfisb", None),
2339 FpuRoundOp::Round64x2 => ("vfidb", None),
2340 FpuRoundOp::ToSInt32 => ("wcfeb", None),
2341 FpuRoundOp::ToSInt64 => ("wcgdb", None),
2342 FpuRoundOp::ToUInt32 => ("wclfeb", None),
2343 FpuRoundOp::ToUInt64 => ("wclgdb", None),
2344 FpuRoundOp::ToSInt32x4 => ("vcfeb", None),
2345 FpuRoundOp::ToSInt64x2 => ("vcgdb", None),
2346 FpuRoundOp::ToUInt32x4 => ("vclfeb", None),
2347 FpuRoundOp::ToUInt64x2 => ("vclgdb", None),
2348 FpuRoundOp::FromSInt32 => ("wcefb", None),
2349 FpuRoundOp::FromSInt64 => ("wcdgb", None),
2350 FpuRoundOp::FromUInt32 => ("wcelfb", None),
2351 FpuRoundOp::FromUInt64 => ("wcdlgb", None),
2352 FpuRoundOp::FromSInt32x4 => ("vcefb", None),
2353 FpuRoundOp::FromSInt64x2 => ("vcdgb", None),
2354 FpuRoundOp::FromUInt32x4 => ("vcelfb", None),
2355 FpuRoundOp::FromUInt64x2 => ("vcdlgb", None),
2356 };
2357
2358 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2359 let (rn, rn_fpr) = pretty_print_fpr(rn);
2360 if opcode_fpr.is_some() && rd_fpr.is_some() && rn_fpr.is_some() {
2361 format!(
2362 "{} {}, {}, {}{}",
2363 opcode_fpr.unwrap(),
2364 rd_fpr.unwrap(),
2365 mode,
2366 rn_fpr.unwrap(),
2367 if opcode_fpr.unwrap().ends_with('a') {
2368 ", 0"
2369 } else {
2370 ""
2371 }
2372 )
2373 } else if opcode.starts_with('w') {
2374 format!(
2375 "{} {}, {}, 0, {}",
2376 opcode,
2377 rd_fpr.unwrap_or(rd),
2378 rn_fpr.unwrap_or(rn),
2379 mode
2380 )
2381 } else {
2382 format!("{opcode} {rd}, {rn}, 0, {mode}")
2383 }
2384 }
2385 &Inst::VecRRR { op, rd, rn, rm } => {
2386 let op = match op {
2387 VecBinaryOp::Add8x16 => "vab",
2388 VecBinaryOp::Add16x8 => "vah",
2389 VecBinaryOp::Add32x4 => "vaf",
2390 VecBinaryOp::Add64x2 => "vag",
2391 VecBinaryOp::Add128 => "vaq",
2392 VecBinaryOp::Sub8x16 => "vsb",
2393 VecBinaryOp::Sub16x8 => "vsh",
2394 VecBinaryOp::Sub32x4 => "vsf",
2395 VecBinaryOp::Sub64x2 => "vsg",
2396 VecBinaryOp::Sub128 => "vsq",
2397 VecBinaryOp::Mul8x16 => "vmlb",
2398 VecBinaryOp::Mul16x8 => "vmlhw",
2399 VecBinaryOp::Mul32x4 => "vmlf",
2400 VecBinaryOp::UMulHi8x16 => "vmlhb",
2401 VecBinaryOp::UMulHi16x8 => "vmlhh",
2402 VecBinaryOp::UMulHi32x4 => "vmlhf",
2403 VecBinaryOp::SMulHi8x16 => "vmhb",
2404 VecBinaryOp::SMulHi16x8 => "vmhh",
2405 VecBinaryOp::SMulHi32x4 => "vmhf",
2406 VecBinaryOp::UMulEven8x16 => "vmleb",
2407 VecBinaryOp::UMulEven16x8 => "vmleh",
2408 VecBinaryOp::UMulEven32x4 => "vmlef",
2409 VecBinaryOp::SMulEven8x16 => "vmeb",
2410 VecBinaryOp::SMulEven16x8 => "vmeh",
2411 VecBinaryOp::SMulEven32x4 => "vmef",
2412 VecBinaryOp::UMulOdd8x16 => "vmlob",
2413 VecBinaryOp::UMulOdd16x8 => "vmloh",
2414 VecBinaryOp::UMulOdd32x4 => "vmlof",
2415 VecBinaryOp::SMulOdd8x16 => "vmob",
2416 VecBinaryOp::SMulOdd16x8 => "vmoh",
2417 VecBinaryOp::SMulOdd32x4 => "vmof",
2418 VecBinaryOp::UMax8x16 => "vmxlb",
2419 VecBinaryOp::UMax16x8 => "vmxlh",
2420 VecBinaryOp::UMax32x4 => "vmxlf",
2421 VecBinaryOp::UMax64x2 => "vmxlg",
2422 VecBinaryOp::SMax8x16 => "vmxb",
2423 VecBinaryOp::SMax16x8 => "vmxh",
2424 VecBinaryOp::SMax32x4 => "vmxf",
2425 VecBinaryOp::SMax64x2 => "vmxg",
2426 VecBinaryOp::UMin8x16 => "vmnlb",
2427 VecBinaryOp::UMin16x8 => "vmnlh",
2428 VecBinaryOp::UMin32x4 => "vmnlf",
2429 VecBinaryOp::UMin64x2 => "vmnlg",
2430 VecBinaryOp::SMin8x16 => "vmnb",
2431 VecBinaryOp::SMin16x8 => "vmnh",
2432 VecBinaryOp::SMin32x4 => "vmnf",
2433 VecBinaryOp::SMin64x2 => "vmng",
2434 VecBinaryOp::UAvg8x16 => "vavglb",
2435 VecBinaryOp::UAvg16x8 => "vavglh",
2436 VecBinaryOp::UAvg32x4 => "vavglf",
2437 VecBinaryOp::UAvg64x2 => "vavglg",
2438 VecBinaryOp::SAvg8x16 => "vavgb",
2439 VecBinaryOp::SAvg16x8 => "vavgh",
2440 VecBinaryOp::SAvg32x4 => "vavgf",
2441 VecBinaryOp::SAvg64x2 => "vavgg",
2442 VecBinaryOp::And128 => "vn",
2443 VecBinaryOp::Orr128 => "vo",
2444 VecBinaryOp::Xor128 => "vx",
2445 VecBinaryOp::NotAnd128 => "vnn",
2446 VecBinaryOp::NotOrr128 => "vno",
2447 VecBinaryOp::NotXor128 => "vnx",
2448 VecBinaryOp::AndNot128 => "vnc",
2449 VecBinaryOp::OrrNot128 => "voc",
2450 VecBinaryOp::BitPermute128 => "vbperm",
2451 VecBinaryOp::LShLByByte128 => "vslb",
2452 VecBinaryOp::LShRByByte128 => "vsrlb",
2453 VecBinaryOp::AShRByByte128 => "vsrab",
2454 VecBinaryOp::LShLByBit128 => "vsl",
2455 VecBinaryOp::LShRByBit128 => "vsrl",
2456 VecBinaryOp::AShRByBit128 => "vsra",
2457 VecBinaryOp::Pack16x8 => "vpkh",
2458 VecBinaryOp::Pack32x4 => "vpkf",
2459 VecBinaryOp::Pack64x2 => "vpkg",
2460 VecBinaryOp::PackUSat16x8 => "vpklsh",
2461 VecBinaryOp::PackUSat32x4 => "vpklsf",
2462 VecBinaryOp::PackUSat64x2 => "vpklsg",
2463 VecBinaryOp::PackSSat16x8 => "vpksh",
2464 VecBinaryOp::PackSSat32x4 => "vpksf",
2465 VecBinaryOp::PackSSat64x2 => "vpksg",
2466 VecBinaryOp::MergeLow8x16 => "vmrlb",
2467 VecBinaryOp::MergeLow16x8 => "vmrlh",
2468 VecBinaryOp::MergeLow32x4 => "vmrlf",
2469 VecBinaryOp::MergeLow64x2 => "vmrlg",
2470 VecBinaryOp::MergeHigh8x16 => "vmrhb",
2471 VecBinaryOp::MergeHigh16x8 => "vmrhh",
2472 VecBinaryOp::MergeHigh32x4 => "vmrhf",
2473 VecBinaryOp::MergeHigh64x2 => "vmrhg",
2474 };
2475 let rd = pretty_print_reg(rd.to_reg());
2476 let rn = pretty_print_reg(rn);
2477 let rm = pretty_print_reg(rm);
2478 format!("{op} {rd}, {rn}, {rm}")
2479 }
2480 &Inst::VecRR { op, rd, rn } => {
2481 let op = match op {
2482 VecUnaryOp::Abs8x16 => "vlpb",
2483 VecUnaryOp::Abs16x8 => "vlph",
2484 VecUnaryOp::Abs32x4 => "vlpf",
2485 VecUnaryOp::Abs64x2 => "vlpg",
2486 VecUnaryOp::Neg8x16 => "vlcb",
2487 VecUnaryOp::Neg16x8 => "vlch",
2488 VecUnaryOp::Neg32x4 => "vlcf",
2489 VecUnaryOp::Neg64x2 => "vlcg",
2490 VecUnaryOp::Popcnt8x16 => "vpopctb",
2491 VecUnaryOp::Popcnt16x8 => "vpopcth",
2492 VecUnaryOp::Popcnt32x4 => "vpopctf",
2493 VecUnaryOp::Popcnt64x2 => "vpopctg",
2494 VecUnaryOp::Clz8x16 => "vclzb",
2495 VecUnaryOp::Clz16x8 => "vclzh",
2496 VecUnaryOp::Clz32x4 => "vclzf",
2497 VecUnaryOp::Clz64x2 => "vclzg",
2498 VecUnaryOp::Ctz8x16 => "vctzb",
2499 VecUnaryOp::Ctz16x8 => "vctzh",
2500 VecUnaryOp::Ctz32x4 => "vctzf",
2501 VecUnaryOp::Ctz64x2 => "vctzg",
2502 VecUnaryOp::UnpackULow8x16 => "vupllb",
2503 VecUnaryOp::UnpackULow16x8 => "vupllh",
2504 VecUnaryOp::UnpackULow32x4 => "vupllf",
2505 VecUnaryOp::UnpackUHigh8x16 => "vuplhb",
2506 VecUnaryOp::UnpackUHigh16x8 => "vuplhh",
2507 VecUnaryOp::UnpackUHigh32x4 => "vuplhf",
2508 VecUnaryOp::UnpackSLow8x16 => "vuplb",
2509 VecUnaryOp::UnpackSLow16x8 => "vuplh",
2510 VecUnaryOp::UnpackSLow32x4 => "vuplf",
2511 VecUnaryOp::UnpackSHigh8x16 => "vuphb",
2512 VecUnaryOp::UnpackSHigh16x8 => "vuphh",
2513 VecUnaryOp::UnpackSHigh32x4 => "vuphf",
2514 };
2515 let rd = pretty_print_reg(rd.to_reg());
2516 let rn = pretty_print_reg(rn);
2517 format!("{op} {rd}, {rn}")
2518 }
2519 &Inst::VecShiftRR {
2520 shift_op,
2521 rd,
2522 rn,
2523 shift_imm,
2524 shift_reg,
2525 } => {
2526 let op = match shift_op {
2527 VecShiftOp::RotL8x16 => "verllb",
2528 VecShiftOp::RotL16x8 => "verllh",
2529 VecShiftOp::RotL32x4 => "verllf",
2530 VecShiftOp::RotL64x2 => "verllg",
2531 VecShiftOp::LShL8x16 => "veslb",
2532 VecShiftOp::LShL16x8 => "veslh",
2533 VecShiftOp::LShL32x4 => "veslf",
2534 VecShiftOp::LShL64x2 => "veslg",
2535 VecShiftOp::LShR8x16 => "vesrlb",
2536 VecShiftOp::LShR16x8 => "vesrlh",
2537 VecShiftOp::LShR32x4 => "vesrlf",
2538 VecShiftOp::LShR64x2 => "vesrlg",
2539 VecShiftOp::AShR8x16 => "vesrab",
2540 VecShiftOp::AShR16x8 => "vesrah",
2541 VecShiftOp::AShR32x4 => "vesraf",
2542 VecShiftOp::AShR64x2 => "vesrag",
2543 };
2544 let rd = pretty_print_reg(rd.to_reg());
2545 let rn = pretty_print_reg(rn);
2546 let shift_reg = if shift_reg != zero_reg() {
2547 format!("({})", pretty_print_reg(shift_reg))
2548 } else {
2549 "".to_string()
2550 };
2551 format!("{op} {rd}, {rn}, {shift_imm}{shift_reg}")
2552 }
2553 &Inst::VecSelect { rd, rn, rm, ra } => {
2554 let rd = pretty_print_reg(rd.to_reg());
2555 let rn = pretty_print_reg(rn);
2556 let rm = pretty_print_reg(rm);
2557 let ra = pretty_print_reg(ra);
2558 format!("vsel {rd}, {rn}, {rm}, {ra}")
2559 }
2560 &Inst::VecPermute { rd, rn, rm, ra } => {
2561 let rd = pretty_print_reg(rd.to_reg());
2562 let rn = pretty_print_reg(rn);
2563 let rm = pretty_print_reg(rm);
2564 let ra = pretty_print_reg(ra);
2565 format!("vperm {rd}, {rn}, {rm}, {ra}")
2566 }
2567 &Inst::VecPermuteDWImm {
2568 rd,
2569 rn,
2570 rm,
2571 idx1,
2572 idx2,
2573 } => {
2574 let rd = pretty_print_reg(rd.to_reg());
2575 let rn = pretty_print_reg(rn);
2576 let rm = pretty_print_reg(rm);
2577 let m4 = (idx1 & 1) * 4 + (idx2 & 1);
2578 format!("vpdi {rd}, {rn}, {rm}, {m4}")
2579 }
2580 &Inst::VecIntCmp { op, rd, rn, rm } | &Inst::VecIntCmpS { op, rd, rn, rm } => {
2581 let op = match op {
2582 VecIntCmpOp::CmpEq8x16 => "vceqb",
2583 VecIntCmpOp::CmpEq16x8 => "vceqh",
2584 VecIntCmpOp::CmpEq32x4 => "vceqf",
2585 VecIntCmpOp::CmpEq64x2 => "vceqg",
2586 VecIntCmpOp::SCmpHi8x16 => "vchb",
2587 VecIntCmpOp::SCmpHi16x8 => "vchh",
2588 VecIntCmpOp::SCmpHi32x4 => "vchf",
2589 VecIntCmpOp::SCmpHi64x2 => "vchg",
2590 VecIntCmpOp::UCmpHi8x16 => "vchlb",
2591 VecIntCmpOp::UCmpHi16x8 => "vchlh",
2592 VecIntCmpOp::UCmpHi32x4 => "vchlf",
2593 VecIntCmpOp::UCmpHi64x2 => "vchlg",
2594 };
2595 let s = match self {
2596 &Inst::VecIntCmp { .. } => "",
2597 &Inst::VecIntCmpS { .. } => "s",
2598 _ => unreachable!(),
2599 };
2600 let rd = pretty_print_reg(rd.to_reg());
2601 let rn = pretty_print_reg(rn);
2602 let rm = pretty_print_reg(rm);
2603 format!("{op}{s} {rd}, {rn}, {rm}")
2604 }
2605 &Inst::VecFloatCmp { op, rd, rn, rm } | &Inst::VecFloatCmpS { op, rd, rn, rm } => {
2606 let op = match op {
2607 VecFloatCmpOp::CmpEq32x4 => "vfcesb",
2608 VecFloatCmpOp::CmpEq64x2 => "vfcedb",
2609 VecFloatCmpOp::CmpHi32x4 => "vfchsb",
2610 VecFloatCmpOp::CmpHi64x2 => "vfchdb",
2611 VecFloatCmpOp::CmpHiEq32x4 => "vfchesb",
2612 VecFloatCmpOp::CmpHiEq64x2 => "vfchedb",
2613 };
2614 let s = match self {
2615 &Inst::VecFloatCmp { .. } => "",
2616 &Inst::VecFloatCmpS { .. } => "s",
2617 _ => unreachable!(),
2618 };
2619 let rd = pretty_print_reg(rd.to_reg());
2620 let rn = pretty_print_reg(rn);
2621 let rm = pretty_print_reg(rm);
2622 format!("{op}{s} {rd}, {rn}, {rm}")
2623 }
2624 &Inst::VecInt128SCmpHi { tmp, rn, rm } | &Inst::VecInt128UCmpHi { tmp, rn, rm } => {
2625 let op = match self {
2626 &Inst::VecInt128SCmpHi { .. } => "vecg",
2627 &Inst::VecInt128UCmpHi { .. } => "veclg",
2628 _ => unreachable!(),
2629 };
2630 let tmp = pretty_print_reg(tmp.to_reg());
2631 let rn = pretty_print_reg(rn);
2632 let rm = pretty_print_reg(rm);
2633 format!("{op} {rm}, {rn} ; jne 10 ; vchlgs {tmp}, {rn}, {rm}")
2634 }
2635 &Inst::VecLoad { rd, ref mem }
2636 | &Inst::VecLoadRev { rd, ref mem }
2637 | &Inst::VecLoadByte16Rev { rd, ref mem }
2638 | &Inst::VecLoadByte32Rev { rd, ref mem }
2639 | &Inst::VecLoadByte64Rev { rd, ref mem }
2640 | &Inst::VecLoadElt16Rev { rd, ref mem }
2641 | &Inst::VecLoadElt32Rev { rd, ref mem }
2642 | &Inst::VecLoadElt64Rev { rd, ref mem } => {
2643 let opcode = match self {
2644 &Inst::VecLoad { .. } => "vl",
2645 &Inst::VecLoadRev { .. } => "vlbrq",
2646 &Inst::VecLoadByte16Rev { .. } => "vlbrh",
2647 &Inst::VecLoadByte32Rev { .. } => "vlbrf",
2648 &Inst::VecLoadByte64Rev { .. } => "vlbrg",
2649 &Inst::VecLoadElt16Rev { .. } => "vlerh",
2650 &Inst::VecLoadElt32Rev { .. } => "vlerf",
2651 &Inst::VecLoadElt64Rev { .. } => "vlerg",
2652 _ => unreachable!(),
2653 };
2654
2655 let rd = pretty_print_reg(rd.to_reg());
2656 let mem = mem.clone();
2657 let (mem_str, mem) = mem_finalize_for_show(
2658 &mem,
2659 state,
2660 MemInstType {
2661 have_d12: true,
2662 have_d20: false,
2663 have_pcrel: false,
2664 have_unaligned_pcrel: false,
2665 have_index: true,
2666 },
2667 );
2668 let mem = mem.pretty_print_default();
2669 format!("{mem_str}{opcode} {rd}, {mem}")
2670 }
2671 &Inst::VecStore { rd, ref mem }
2672 | &Inst::VecStoreRev { rd, ref mem }
2673 | &Inst::VecStoreByte16Rev { rd, ref mem }
2674 | &Inst::VecStoreByte32Rev { rd, ref mem }
2675 | &Inst::VecStoreByte64Rev { rd, ref mem }
2676 | &Inst::VecStoreElt16Rev { rd, ref mem }
2677 | &Inst::VecStoreElt32Rev { rd, ref mem }
2678 | &Inst::VecStoreElt64Rev { rd, ref mem } => {
2679 let opcode = match self {
2680 &Inst::VecStore { .. } => "vst",
2681 &Inst::VecStoreRev { .. } => "vstbrq",
2682 &Inst::VecStoreByte16Rev { .. } => "vstbrh",
2683 &Inst::VecStoreByte32Rev { .. } => "vstbrf",
2684 &Inst::VecStoreByte64Rev { .. } => "vstbrg",
2685 &Inst::VecStoreElt16Rev { .. } => "vsterh",
2686 &Inst::VecStoreElt32Rev { .. } => "vsterf",
2687 &Inst::VecStoreElt64Rev { .. } => "vsterg",
2688 _ => unreachable!(),
2689 };
2690
2691 let rd = pretty_print_reg(rd);
2692 let mem = mem.clone();
2693 let (mem_str, mem) = mem_finalize_for_show(
2694 &mem,
2695 state,
2696 MemInstType {
2697 have_d12: true,
2698 have_d20: false,
2699 have_pcrel: false,
2700 have_unaligned_pcrel: false,
2701 have_index: true,
2702 },
2703 );
2704 let mem = mem.pretty_print_default();
2705 format!("{mem_str}{opcode} {rd}, {mem}")
2706 }
2707 &Inst::VecLoadReplicate { size, rd, ref mem }
2708 | &Inst::VecLoadReplicateRev { size, rd, ref mem } => {
2709 let opcode = match (self, size) {
2710 (&Inst::VecLoadReplicate { .. }, 8) => "vlrepb",
2711 (&Inst::VecLoadReplicate { .. }, 16) => "vlreph",
2712 (&Inst::VecLoadReplicate { .. }, 32) => "vlrepf",
2713 (&Inst::VecLoadReplicate { .. }, 64) => "vlrepg",
2714 (&Inst::VecLoadReplicateRev { .. }, 16) => "vlbrreph",
2715 (&Inst::VecLoadReplicateRev { .. }, 32) => "vlbrrepf",
2716 (&Inst::VecLoadReplicateRev { .. }, 64) => "vlbrrepg",
2717 _ => unreachable!(),
2718 };
2719
2720 let rd = pretty_print_reg(rd.to_reg());
2721 let mem = mem.clone();
2722 let (mem_str, mem) = mem_finalize_for_show(
2723 &mem,
2724 state,
2725 MemInstType {
2726 have_d12: true,
2727 have_d20: false,
2728 have_pcrel: false,
2729 have_unaligned_pcrel: false,
2730 have_index: true,
2731 },
2732 );
2733 let mem = mem.pretty_print_default();
2734 format!("{mem_str}{opcode} {rd}, {mem}")
2735 }
2736 &Inst::VecMov { rd, rn } => {
2737 let rd = pretty_print_reg(rd.to_reg());
2738 let rn = pretty_print_reg(rn);
2739 format!("vlr {rd}, {rn}")
2740 }
2741 &Inst::VecCMov { rd, cond, ri, rm } => {
2742 let rd = pretty_print_reg_mod(rd, ri);
2743 let rm = pretty_print_reg(rm);
2744 let cond = cond.invert().pretty_print_default();
2745 format!("j{cond} 10 ; vlr {rd}, {rm}")
2746 }
2747 &Inst::MovToVec128 { rd, rn, rm } => {
2748 let rd = pretty_print_reg(rd.to_reg());
2749 let rn = pretty_print_reg(rn);
2750 let rm = pretty_print_reg(rm);
2751 format!("vlvgp {rd}, {rn}, {rm}")
2752 }
2753 &Inst::VecLoadConst { rd, const_data } => {
2754 let rd = pretty_print_reg(rd.to_reg());
2755 let tmp = pretty_print_reg(writable_spilltmp_reg().to_reg());
2756 format!("bras {tmp}, 20 ; data.u128 0x{const_data:032x} ; vl {rd}, 0({tmp})")
2757 }
2758 &Inst::VecLoadConstReplicate {
2759 size,
2760 rd,
2761 const_data,
2762 } => {
2763 let rd = pretty_print_reg(rd.to_reg());
2764 let tmp = pretty_print_reg(writable_spilltmp_reg().to_reg());
2765 let (opcode, data) = match size {
2766 32 => ("vlrepf", format!("0x{:08x}", const_data as u32)),
2767 64 => ("vlrepg", format!("0x{const_data:016x}")),
2768 _ => unreachable!(),
2769 };
2770 format!(
2771 "bras {}, {} ; data.u{} {} ; {} {}, 0({})",
2772 tmp,
2773 4 + size / 8,
2774 size,
2775 data,
2776 opcode,
2777 rd,
2778 tmp
2779 )
2780 }
2781 &Inst::VecImmByteMask { rd, mask } => {
2782 let rd = pretty_print_reg(rd.to_reg());
2783 format!("vgbm {rd}, {mask}")
2784 }
2785 &Inst::VecImmBitMask {
2786 size,
2787 rd,
2788 start_bit,
2789 end_bit,
2790 } => {
2791 let rd = pretty_print_reg(rd.to_reg());
2792 let op = match size {
2793 8 => "vgmb",
2794 16 => "vgmh",
2795 32 => "vgmf",
2796 64 => "vgmg",
2797 _ => unreachable!(),
2798 };
2799 format!("{op} {rd}, {start_bit}, {end_bit}")
2800 }
2801 &Inst::VecImmReplicate { size, rd, imm } => {
2802 let rd = pretty_print_reg(rd.to_reg());
2803 let op = match size {
2804 8 => "vrepib",
2805 16 => "vrepih",
2806 32 => "vrepif",
2807 64 => "vrepig",
2808 _ => unreachable!(),
2809 };
2810 format!("{op} {rd}, {imm}")
2811 }
2812 &Inst::VecLoadLane {
2813 size,
2814 rd,
2815 ref mem,
2816 lane_imm,
2817 ..
2818 }
2819 | &Inst::VecLoadLaneRev {
2820 size,
2821 rd,
2822 ref mem,
2823 lane_imm,
2824 ..
2825 } => {
2826 let opcode_vrx = match (self, size) {
2827 (&Inst::VecLoadLane { .. }, 8) => "vleb",
2828 (&Inst::VecLoadLane { .. }, 16) => "vleh",
2829 (&Inst::VecLoadLane { .. }, 32) => "vlef",
2830 (&Inst::VecLoadLane { .. }, 64) => "vleg",
2831 (&Inst::VecLoadLaneRev { .. }, 16) => "vlebrh",
2832 (&Inst::VecLoadLaneRev { .. }, 32) => "vlebrf",
2833 (&Inst::VecLoadLaneRev { .. }, 64) => "vlebrg",
2834 _ => unreachable!(),
2835 };
2836
2837 let (rd, _) = pretty_print_fpr(rd.to_reg());
2838 let mem = mem.clone();
2839 let (mem_str, mem) = mem_finalize_for_show(
2840 &mem,
2841 state,
2842 MemInstType {
2843 have_d12: true,
2844 have_d20: false,
2845 have_pcrel: false,
2846 have_unaligned_pcrel: false,
2847 have_index: true,
2848 },
2849 );
2850 let mem = mem.pretty_print_default();
2851 format!("{mem_str}{opcode_vrx} {rd}, {mem}, {lane_imm}")
2852 }
2853 &Inst::VecLoadLaneUndef {
2854 size,
2855 rd,
2856 ref mem,
2857 lane_imm,
2858 }
2859 | &Inst::VecLoadLaneRevUndef {
2860 size,
2861 rd,
2862 ref mem,
2863 lane_imm,
2864 } => {
2865 let (opcode_vrx, opcode_rx, opcode_rxy) = match (self, size) {
2866 (&Inst::VecLoadLaneUndef { .. }, 8) => ("vleb", None, None),
2867 (&Inst::VecLoadLaneUndef { .. }, 16) => ("vleh", None, None),
2868 (&Inst::VecLoadLaneUndef { .. }, 32) => ("vlef", Some("le"), Some("ley")),
2869 (&Inst::VecLoadLaneUndef { .. }, 64) => ("vleg", Some("ld"), Some("ldy")),
2870 (&Inst::VecLoadLaneRevUndef { .. }, 16) => ("vlebrh", None, None),
2871 (&Inst::VecLoadLaneRevUndef { .. }, 32) => ("vlebrf", None, None),
2872 (&Inst::VecLoadLaneRevUndef { .. }, 64) => ("vlebrg", None, None),
2873 _ => unreachable!(),
2874 };
2875
2876 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2877 let mem = mem.clone();
2878 if lane_imm == 0 && rd_fpr.is_some() && opcode_rx.is_some() {
2879 let (mem_str, mem) = mem_finalize_for_show(
2880 &mem,
2881 state,
2882 MemInstType {
2883 have_d12: true,
2884 have_d20: true,
2885 have_pcrel: false,
2886 have_unaligned_pcrel: false,
2887 have_index: true,
2888 },
2889 );
2890 let op = match &mem {
2891 &MemArg::BXD12 { .. } => opcode_rx,
2892 &MemArg::BXD20 { .. } => opcode_rxy,
2893 _ => unreachable!(),
2894 };
2895 let mem = mem.pretty_print_default();
2896 format!("{}{} {}, {}", mem_str, op.unwrap(), rd_fpr.unwrap(), mem)
2897 } else {
2898 let (mem_str, mem) = mem_finalize_for_show(
2899 &mem,
2900 state,
2901 MemInstType {
2902 have_d12: true,
2903 have_d20: false,
2904 have_pcrel: false,
2905 have_unaligned_pcrel: false,
2906 have_index: true,
2907 },
2908 );
2909 let mem = mem.pretty_print_default();
2910 format!("{mem_str}{opcode_vrx} {rd}, {mem}, {lane_imm}")
2911 }
2912 }
2913 &Inst::VecStoreLane {
2914 size,
2915 rd,
2916 ref mem,
2917 lane_imm,
2918 }
2919 | &Inst::VecStoreLaneRev {
2920 size,
2921 rd,
2922 ref mem,
2923 lane_imm,
2924 } => {
2925 let (opcode_vrx, opcode_rx, opcode_rxy) = match (self, size) {
2926 (&Inst::VecStoreLane { .. }, 8) => ("vsteb", None, None),
2927 (&Inst::VecStoreLane { .. }, 16) => ("vsteh", None, None),
2928 (&Inst::VecStoreLane { .. }, 32) => ("vstef", Some("ste"), Some("stey")),
2929 (&Inst::VecStoreLane { .. }, 64) => ("vsteg", Some("std"), Some("stdy")),
2930 (&Inst::VecStoreLaneRev { .. }, 16) => ("vstebrh", None, None),
2931 (&Inst::VecStoreLaneRev { .. }, 32) => ("vstebrf", None, None),
2932 (&Inst::VecStoreLaneRev { .. }, 64) => ("vstebrg", None, None),
2933 _ => unreachable!(),
2934 };
2935
2936 let (rd, rd_fpr) = pretty_print_fpr(rd);
2937 let mem = mem.clone();
2938 if lane_imm == 0 && rd_fpr.is_some() && opcode_rx.is_some() {
2939 let (mem_str, mem) = mem_finalize_for_show(
2940 &mem,
2941 state,
2942 MemInstType {
2943 have_d12: true,
2944 have_d20: true,
2945 have_pcrel: false,
2946 have_unaligned_pcrel: false,
2947 have_index: true,
2948 },
2949 );
2950 let op = match &mem {
2951 &MemArg::BXD12 { .. } => opcode_rx,
2952 &MemArg::BXD20 { .. } => opcode_rxy,
2953 _ => unreachable!(),
2954 };
2955 let mem = mem.pretty_print_default();
2956 format!("{}{} {}, {}", mem_str, op.unwrap(), rd_fpr.unwrap(), mem)
2957 } else {
2958 let (mem_str, mem) = mem_finalize_for_show(
2959 &mem,
2960 state,
2961 MemInstType {
2962 have_d12: true,
2963 have_d20: false,
2964 have_pcrel: false,
2965 have_unaligned_pcrel: false,
2966 have_index: true,
2967 },
2968 );
2969 let mem = mem.pretty_print_default();
2970 format!("{mem_str}{opcode_vrx} {rd}, {mem}, {lane_imm}",)
2971 }
2972 }
2973 &Inst::VecInsertLane {
2974 size,
2975 rd,
2976 ri,
2977 rn,
2978 lane_imm,
2979 lane_reg,
2980 } => {
2981 let op = match size {
2982 8 => "vlvgb",
2983 16 => "vlvgh",
2984 32 => "vlvgf",
2985 64 => "vlvgg",
2986 _ => unreachable!(),
2987 };
2988 let rd = pretty_print_reg_mod(rd, ri);
2989 let rn = pretty_print_reg(rn);
2990 let lane_reg = if lane_reg != zero_reg() {
2991 format!("({})", pretty_print_reg(lane_reg))
2992 } else {
2993 "".to_string()
2994 };
2995 format!("{op} {rd}, {rn}, {lane_imm}{lane_reg}")
2996 }
2997 &Inst::VecInsertLaneUndef {
2998 size,
2999 rd,
3000 rn,
3001 lane_imm,
3002 lane_reg,
3003 } => {
3004 let (opcode_vrs, opcode_rre) = match size {
3005 8 => ("vlvgb", None),
3006 16 => ("vlvgh", None),
3007 32 => ("vlvgf", None),
3008 64 => ("vlvgg", Some("ldgr")),
3009 _ => unreachable!(),
3010 };
3011 let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
3012 let rn = pretty_print_reg(rn);
3013 let lane_reg = if lane_reg != zero_reg() {
3014 format!("({})", pretty_print_reg(lane_reg))
3015 } else {
3016 "".to_string()
3017 };
3018 if opcode_rre.is_some() && lane_imm == 0 && lane_reg.is_empty() && rd_fpr.is_some()
3019 {
3020 format!("{} {}, {}", opcode_rre.unwrap(), rd_fpr.unwrap(), rn)
3021 } else {
3022 format!("{opcode_vrs} {rd}, {rn}, {lane_imm}{lane_reg}")
3023 }
3024 }
3025 &Inst::VecExtractLane {
3026 size,
3027 rd,
3028 rn,
3029 lane_imm,
3030 lane_reg,
3031 } => {
3032 let (opcode_vrs, opcode_rre) = match size {
3033 8 => ("vlgvb", None),
3034 16 => ("vlgvh", None),
3035 32 => ("vlgvf", None),
3036 64 => ("vlgvg", Some("lgdr")),
3037 _ => unreachable!(),
3038 };
3039 let rd = pretty_print_reg(rd.to_reg());
3040 let (rn, rn_fpr) = pretty_print_fpr(rn);
3041 let lane_reg = if lane_reg != zero_reg() {
3042 format!("({})", pretty_print_reg(lane_reg))
3043 } else {
3044 "".to_string()
3045 };
3046 if opcode_rre.is_some() && lane_imm == 0 && lane_reg.is_empty() && rn_fpr.is_some()
3047 {
3048 format!("{} {}, {}", opcode_rre.unwrap(), rd, rn_fpr.unwrap())
3049 } else {
3050 format!("{opcode_vrs} {rd}, {rn}, {lane_imm}{lane_reg}")
3051 }
3052 }
3053 &Inst::VecInsertLaneImm {
3054 size,
3055 rd,
3056 ri,
3057 imm,
3058 lane_imm,
3059 } => {
3060 let op = match size {
3061 8 => "vleib",
3062 16 => "vleih",
3063 32 => "vleif",
3064 64 => "vleig",
3065 _ => unreachable!(),
3066 };
3067 let rd = pretty_print_reg_mod(rd, ri);
3068 format!("{op} {rd}, {imm}, {lane_imm}")
3069 }
3070 &Inst::VecReplicateLane {
3071 size,
3072 rd,
3073 rn,
3074 lane_imm,
3075 } => {
3076 let op = match size {
3077 8 => "vrepb",
3078 16 => "vreph",
3079 32 => "vrepf",
3080 64 => "vrepg",
3081 _ => unreachable!(),
3082 };
3083 let rd = pretty_print_reg(rd.to_reg());
3084 let rn = pretty_print_reg(rn);
3085 format!("{op} {rd}, {rn}, {lane_imm}")
3086 }
3087 &Inst::Extend {
3088 rd,
3089 rn,
3090 signed,
3091 from_bits,
3092 to_bits,
3093 } => {
3094 let rd = pretty_print_reg(rd.to_reg());
3095 let rn = pretty_print_reg(rn);
3096 let op = match (signed, from_bits, to_bits) {
3097 (_, 1, 32) => "llcr",
3098 (_, 1, 64) => "llgcr",
3099 (false, 8, 32) => "llcr",
3100 (false, 8, 64) => "llgcr",
3101 (true, 8, 32) => "lbr",
3102 (true, 8, 64) => "lgbr",
3103 (false, 16, 32) => "llhr",
3104 (false, 16, 64) => "llghr",
3105 (true, 16, 32) => "lhr",
3106 (true, 16, 64) => "lghr",
3107 (false, 32, 64) => "llgfr",
3108 (true, 32, 64) => "lgfr",
3109 _ => panic!("Unsupported Extend case: {self:?}"),
3110 };
3111 format!("{op} {rd}, {rn}")
3112 }
3113 &Inst::AllocateArgs { size } => {
3114 if let Ok(size) = i16::try_from(size) {
3115 format!("aghi {}, {}", show_reg(stack_reg()), -size)
3116 } else {
3117 format!("slgfi {}, {}", show_reg(stack_reg()), size)
3118 }
3119 }
3120 &Inst::Call { link, ref info } => {
3121 let link = link.to_reg();
3122 let callee_pop_size = if info.callee_pop_size > 0 {
3123 format!(" ; callee_pop_size {}", info.callee_pop_size)
3124 } else {
3125 "".to_string()
3126 };
3127 format!(
3128 "brasl {}, {}{}",
3129 show_reg(link),
3130 info.dest.display(None),
3131 callee_pop_size
3132 )
3133 }
3134 &Inst::CallInd { link, ref info, .. } => {
3135 let link = link.to_reg();
3136 let rn = pretty_print_reg(info.dest);
3137 let callee_pop_size = if info.callee_pop_size > 0 {
3138 format!(" ; callee_pop_size {}", info.callee_pop_size)
3139 } else {
3140 "".to_string()
3141 };
3142 format!("basr {}, {}{}", show_reg(link), rn, callee_pop_size)
3143 }
3144 &Inst::ReturnCall { ref info } => {
3145 let callee_pop_size = if info.callee_pop_size > 0 {
3146 format!(" ; callee_pop_size {}", info.callee_pop_size)
3147 } else {
3148 "".to_string()
3149 };
3150 format!("return_call {}{}", info.dest.display(None), callee_pop_size)
3151 }
3152 &Inst::ReturnCallInd { ref info } => {
3153 let rn = pretty_print_reg(info.dest);
3154 let callee_pop_size = if info.callee_pop_size > 0 {
3155 format!(" ; callee_pop_size {}", info.callee_pop_size)
3156 } else {
3157 "".to_string()
3158 };
3159 format!("return_call_ind {rn}{callee_pop_size}")
3160 }
3161 &Inst::ElfTlsGetOffset { ref symbol, .. } => {
3162 let dest = match &**symbol {
3163 SymbolReloc::TlsGd { name } => {
3164 format!("tls_gdcall:{}", name.display(None))
3165 }
3166 _ => unreachable!(),
3167 };
3168 format!("brasl {}, {}", show_reg(gpr(14)), dest)
3169 }
3170 &Inst::Args { ref args } => {
3171 let mut s = "args".to_string();
3172 for arg in args {
3173 let preg = pretty_print_reg(arg.preg);
3174 let def = pretty_print_reg(arg.vreg.to_reg());
3175 write!(&mut s, " {def}={preg}").unwrap();
3176 }
3177 s
3178 }
3179 &Inst::Rets { ref rets } => {
3180 let mut s = "rets".to_string();
3181 for ret in rets {
3182 let preg = pretty_print_reg(ret.preg);
3183 let vreg = pretty_print_reg(ret.vreg);
3184 write!(&mut s, " {vreg}={preg}").unwrap();
3185 }
3186 s
3187 }
3188 &Inst::Ret { link } => {
3189 let link = show_reg(link);
3190 format!("br {link}")
3191 }
3192 &Inst::Jump { dest } => {
3193 let dest = dest.to_string();
3194 format!("jg {dest}")
3195 }
3196 &Inst::IndirectBr { rn, .. } => {
3197 let rn = pretty_print_reg(rn);
3198 format!("br {rn}")
3199 }
3200 &Inst::CondBr {
3201 taken,
3202 not_taken,
3203 cond,
3204 } => {
3205 let taken = taken.to_string();
3206 let not_taken = not_taken.to_string();
3207 let cond = cond.pretty_print_default();
3208 format!("jg{cond} {taken} ; jg {not_taken}")
3209 }
3210 &Inst::Debugtrap => ".word 0x0001 # debugtrap".to_string(),
3211 &Inst::Trap { trap_code } => {
3212 format!(".word 0x0000 # trap={trap_code}")
3213 }
3214 &Inst::TrapIf { cond, trap_code } => {
3215 let cond = cond.pretty_print_default();
3216 format!("jg{cond} .+2 # trap={trap_code}")
3217 }
3218 &Inst::JTSequence {
3219 ridx,
3220 default,
3221 default_cond,
3222 ref targets,
3223 } => {
3224 let ridx = pretty_print_reg(ridx);
3225 let rtmp = pretty_print_reg(writable_spilltmp_reg().to_reg());
3226 let jt_entries: String = targets
3227 .iter()
3228 .map(|label| format!(" {}", label.to_string()))
3229 .collect();
3230 format!(
3231 concat!(
3232 "jg{} {} ; ",
3233 "larl {}, 14 ; ",
3234 "agf {}, 0({}, {}) ; ",
3235 "br {} ; ",
3236 "jt_entries{}"
3237 ),
3238 default_cond.pretty_print_default(),
3239 default.to_string(),
3240 rtmp,
3241 rtmp,
3242 rtmp,
3243 ridx,
3244 rtmp,
3245 jt_entries,
3246 )
3247 }
3248 &Inst::LoadSymbolReloc {
3249 rd,
3250 ref symbol_reloc,
3251 } => {
3252 let rd = pretty_print_reg(rd.to_reg());
3253 let tmp = pretty_print_reg(writable_spilltmp_reg().to_reg());
3254 let symbol = match &**symbol_reloc {
3255 SymbolReloc::Absolute { name, offset } => {
3256 format!("{} + {}", name.display(None), offset)
3257 }
3258 SymbolReloc::TlsGd { name } => format!("{}@tlsgd", name.display(None)),
3259 };
3260 format!("bras {tmp}, 12 ; data {symbol} ; lg {rd}, 0({tmp})")
3261 }
3262 &Inst::LoadAddr { rd, ref mem } => {
3263 let rd = pretty_print_reg(rd.to_reg());
3264 let mem = mem.clone();
3265 let (mem_str, mem) = mem_finalize_for_show(
3266 &mem,
3267 state,
3268 MemInstType {
3269 have_d12: true,
3270 have_d20: true,
3271 have_pcrel: true,
3272 have_unaligned_pcrel: true,
3273 have_index: true,
3274 },
3275 );
3276 let op = match &mem {
3277 &MemArg::BXD12 { .. } => "la",
3278 &MemArg::BXD20 { .. } => "lay",
3279 &MemArg::Label { .. } | &MemArg::Symbol { .. } => "larl",
3280 _ => unreachable!(),
3281 };
3282 let mem = mem.pretty_print_default();
3283
3284 format!("{mem_str}{op} {rd}, {mem}")
3285 }
3286 &Inst::StackProbeLoop {
3287 probe_count,
3288 guard_size,
3289 } => {
3290 let probe_count = pretty_print_reg(probe_count.to_reg());
3291 let stack_reg = pretty_print_reg(stack_reg());
3292 format!("0: aghi {stack_reg}, -{guard_size} ; mvi 0({stack_reg}), 0 ; brct {probe_count}, 0b")
3293 }
3294 &Inst::Loop { ref body, cond } => {
3295 let body = body
3296 .into_iter()
3297 .map(|inst| inst.print_with_state(state))
3298 .collect::<Vec<_>>()
3299 .join(" ; ");
3300 let cond = cond.pretty_print_default();
3301 format!("0: {body} ; jg{cond} 0b ; 1:")
3302 }
3303 &Inst::CondBreak { cond } => {
3304 let cond = cond.pretty_print_default();
3305 format!("jg{cond} 1f")
3306 }
3307 &Inst::Unwind { ref inst } => {
3308 format!("unwind {inst:?}")
3309 }
3310 &Inst::DummyUse { reg } => {
3311 let reg = pretty_print_reg(reg);
3312 format!("dummy_use {reg}")
3313 }
3314 }
3315 }
3316}
3317
3318#[derive(Clone, Copy, Debug, PartialEq, Eq)]
3323pub enum LabelUse {
3324 #[allow(dead_code)]
3325 BranchRI,
3327 BranchRIL,
3329 PCRel32,
3332 PCRel32Dbl,
3335}
3336
3337impl MachInstLabelUse for LabelUse {
3338 const ALIGN: CodeOffset = 2;
3340
3341 fn max_pos_range(self) -> CodeOffset {
3343 match self {
3344 LabelUse::BranchRI => ((1 << 15) - 1) << 1,
3346 LabelUse::BranchRIL => 0xffff_fffe,
3348 LabelUse::PCRel32 => 0x7fff_ffff,
3350 LabelUse::PCRel32Dbl => 0xffff_fffc,
3352 }
3353 }
3354
3355 fn max_neg_range(self) -> CodeOffset {
3357 match self {
3358 LabelUse::BranchRI => (1 << 15) << 1,
3360 LabelUse::BranchRIL => 0xffff_ffff,
3363 LabelUse::PCRel32 => 0x8000_0000,
3365 LabelUse::PCRel32Dbl => 0xffff_ffff,
3368 }
3369 }
3370
3371 fn patch_size(self) -> CodeOffset {
3373 match self {
3374 LabelUse::BranchRI => 4,
3375 LabelUse::BranchRIL => 6,
3376 LabelUse::PCRel32 => 4,
3377 LabelUse::PCRel32Dbl => 4,
3378 }
3379 }
3380
3381 fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
3383 let pc_rel = (label_offset as i64) - (use_offset as i64);
3384 debug_assert!(pc_rel <= self.max_pos_range() as i64);
3385 debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
3386 debug_assert!(pc_rel & 1 == 0);
3387 let pc_rel_shifted = pc_rel >> 1;
3388
3389 match self {
3390 LabelUse::BranchRI => {
3391 buffer[2..4].clone_from_slice(&u16::to_be_bytes(pc_rel_shifted as u16));
3392 }
3393 LabelUse::BranchRIL => {
3394 buffer[2..6].clone_from_slice(&u32::to_be_bytes(pc_rel_shifted as u32));
3395 }
3396 LabelUse::PCRel32 => {
3397 let insn_word = u32::from_be_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
3398 let insn_word = insn_word.wrapping_add(pc_rel as u32);
3399 buffer[0..4].clone_from_slice(&u32::to_be_bytes(insn_word));
3400 }
3401 LabelUse::PCRel32Dbl => {
3402 let insn_word = u32::from_be_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
3403 let insn_word = insn_word.wrapping_add((pc_rel_shifted + 1) as u32);
3404 buffer[0..4].clone_from_slice(&u32::to_be_bytes(insn_word));
3405 }
3406 }
3407 }
3408
3409 fn supports_veneer(self) -> bool {
3411 false
3412 }
3413
3414 fn veneer_size(self) -> CodeOffset {
3416 0
3417 }
3418
3419 fn worst_case_veneer_size() -> CodeOffset {
3420 0
3421 }
3422
3423 fn generate_veneer(
3426 self,
3427 _buffer: &mut [u8],
3428 _veneer_offset: CodeOffset,
3429 ) -> (CodeOffset, LabelUse) {
3430 unreachable!();
3431 }
3432
3433 fn from_reloc(reloc: Reloc, addend: Addend) -> Option<Self> {
3434 match (reloc, addend) {
3435 (Reloc::S390xPCRel32Dbl, 2) => Some(LabelUse::PCRel32Dbl),
3436 (Reloc::S390xPLTRel32Dbl, 2) => Some(LabelUse::PCRel32Dbl),
3437 _ => None,
3438 }
3439 }
3440}