pub use emit_state::EmitState;

use crate::binemit::{Addend, CodeOffset, Reloc};
use crate::ir::{types, ExternalName, LibCall, TrapCode, Type};
use crate::isa::x64::abi::X64ABIMachineSpec;
use crate::isa::x64::inst::regs::{pretty_print_reg, show_ireg_sized};
use crate::isa::x64::settings as x64_settings;
use crate::isa::{CallConv, FunctionAlignment};
use crate::{machinst::*, trace};
use crate::{settings, CodegenError, CodegenResult};
use alloc::boxed::Box;
use alloc::vec::Vec;
use smallvec::{smallvec, SmallVec};
use std::fmt::{self, Write};
use std::string::{String, ToString};

pub mod args;
mod emit;
mod emit_state;
#[cfg(test)]
mod emit_tests;
pub mod external;
pub mod regs;
mod stack_switch;
pub mod unwind;

use args::*;

pub use super::lower::isle::generated_code::AtomicRmwSeqOp;
pub use super::lower::isle::generated_code::MInst as Inst;

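/// Out-of-line information for a return-call (tail call): the call target, the
/// size of the new stack-argument area, the argument uses, and a temporary
/// register. Carried behind a `Box` by the return-call instruction variants.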
#[derive(Clone, Debug)]
pub struct ReturnCallInfo<T> {
    pub dest: T,

    pub new_stack_arg_size: u32,

    pub uses: CallArgList,

    pub tmp: WritableGpr,
}

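// `Inst` values are created and moved around constantly during lowering, so the
// struct should stay small; this assertion pins the current size and flags any
// accidental growth.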
#[test]
#[cfg(target_pointer_width = "64")]
fn inst_size_test() {
    assert_eq!(56, std::mem::size_of::<Inst>());
}

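/// Returns true if `x` can be represented as a sign-extended 32-bit immediate,
/// i.e. bits 32..64 of `x` are a sign-extension of bit 31.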
pub(crate) fn low32_will_sign_extend_to_64(x: u64) -> bool {
    let xs = x as i64;
    xs == ((xs << 32) >> 32)
}

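// Minimal illustrative cases for `low32_will_sign_extend_to_64` (example values
// chosen here, not tied to any particular lowering):
#[test]
fn low32_sign_extend_examples() {
    assert!(low32_will_sign_extend_to_64(0x7fff_ffff));
    assert!(!low32_will_sign_extend_to_64(0x8000_0000));
    assert!(low32_will_sign_extend_to_64(0xffff_ffff_8000_0000));
}
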
68impl Inst {
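    // Returns the extra ISA feature sets (if any) required to emit this
    // instruction; an empty vector means the instruction is available in the
    // baseline x86_64 feature set.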
69 fn available_in_any_isa(&self) -> SmallVec<[InstructionSet; 2]> {
74 match self {
75 Inst::AluRmiR { .. }
78 | Inst::AtomicRmwSeq { .. }
79 | Inst::Bswap { .. }
80 | Inst::CallKnown { .. }
81 | Inst::CallUnknown { .. }
82 | Inst::ReturnCallKnown { .. }
83 | Inst::ReturnCallUnknown { .. }
84 | Inst::CheckedSRemSeq { .. }
85 | Inst::CheckedSRemSeq8 { .. }
86 | Inst::Cmove { .. }
87 | Inst::CmpRmiR { .. }
88 | Inst::CvtFloatToSintSeq { .. }
89 | Inst::CvtFloatToUintSeq { .. }
90 | Inst::CvtUint64ToFloatSeq { .. }
91 | Inst::Div { .. }
92 | Inst::Div8 { .. }
93 | Inst::Fence { .. }
94 | Inst::Hlt
95 | Inst::Imm { .. }
96 | Inst::JmpCond { .. }
97 | Inst::JmpCondOr { .. }
98 | Inst::WinchJmpIf { .. }
99 | Inst::JmpKnown { .. }
100 | Inst::JmpTableSeq { .. }
101 | Inst::JmpUnknown { .. }
102 | Inst::LoadEffectiveAddress { .. }
103 | Inst::LoadExtName { .. }
104 | Inst::LockCmpxchg { .. }
105 | Inst::LockXadd { .. }
106 | Inst::Xchg { .. }
107 | Inst::Mov64MR { .. }
108 | Inst::MovImmM { .. }
109 | Inst::MovRM { .. }
110 | Inst::MovRR { .. }
111 | Inst::MovFromPReg { .. }
112 | Inst::MovToPReg { .. }
113 | Inst::MovsxRmR { .. }
114 | Inst::MovzxRmR { .. }
115 | Inst::Mul { .. }
116 | Inst::Mul8 { .. }
117 | Inst::IMul { .. }
118 | Inst::IMulImm { .. }
119 | Inst::Neg { .. }
120 | Inst::Not { .. }
121 | Inst::Nop { .. }
122 | Inst::Pop64 { .. }
123 | Inst::Push64 { .. }
124 | Inst::StackProbeLoop { .. }
125 | Inst::Args { .. }
126 | Inst::Rets { .. }
127 | Inst::Ret { .. }
128 | Inst::Setcc { .. }
129 | Inst::ShiftR { .. }
130 | Inst::SignExtendData { .. }
131 | Inst::StackSwitchBasic { .. }
132 | Inst::TrapIf { .. }
133 | Inst::TrapIfAnd { .. }
134 | Inst::TrapIfOr { .. }
135 | Inst::Ud2 { .. }
136 | Inst::XmmCmove { .. }
137 | Inst::XmmCmpRmR { .. }
138 | Inst::XmmMinMaxSeq { .. }
139 | Inst::XmmUninitializedValue { .. }
140 | Inst::ElfTlsGetAddr { .. }
141 | Inst::MachOTlsGetAddr { .. }
142 | Inst::CoffTlsGetAddr { .. }
143 | Inst::Unwind { .. }
144 | Inst::DummyUse { .. }
145 | Inst::AluConstOp { .. } => smallvec![],
146
147 Inst::LockCmpxchg16b { .. }
148 | Inst::Atomic128RmwSeq { .. }
149 | Inst::Atomic128XchgSeq { .. } => smallvec![InstructionSet::CMPXCHG16b],
150
151 Inst::AluRmRVex { op, .. } => op.available_from(),
152 Inst::UnaryRmR { op, .. } => op.available_from(),
153 Inst::UnaryRmRVex { op, .. } => op.available_from(),
154 Inst::UnaryRmRImmVex { op, .. } => op.available_from(),
155
156 Inst::GprToXmm { op, .. }
158 | Inst::XmmMovRM { op, .. }
159 | Inst::XmmMovRMImm { op, .. }
160 | Inst::XmmRmiReg { opcode: op, .. }
161 | Inst::XmmRmR { op, .. }
162 | Inst::XmmRmRUnaligned { op, .. }
163 | Inst::XmmRmRBlend { op, .. }
164 | Inst::XmmRmRImm { op, .. }
165 | Inst::XmmToGpr { op, .. }
166 | Inst::XmmToGprImm { op, .. }
167 | Inst::XmmUnaryRmRImm { op, .. }
168 | Inst::XmmUnaryRmRUnaligned { op, .. }
169 | Inst::XmmUnaryRmR { op, .. }
170 | Inst::CvtIntToFloat { op, .. } => smallvec![op.available_from()],
171
172 Inst::XmmUnaryRmREvex { op, .. }
173 | Inst::XmmRmREvex { op, .. }
174 | Inst::XmmRmREvex3 { op, .. }
175 | Inst::XmmUnaryRmRImmEvex { op, .. } => op.available_from(),
176
177 Inst::XmmRmiRVex { op, .. }
178 | Inst::XmmRmRVex3 { op, .. }
179 | Inst::XmmRmRImmVex { op, .. }
180 | Inst::XmmRmRBlendVex { op, .. }
181 | Inst::XmmVexPinsr { op, .. }
182 | Inst::XmmUnaryRmRVex { op, .. }
183 | Inst::XmmUnaryRmRImmVex { op, .. }
184 | Inst::XmmMovRMVex { op, .. }
185 | Inst::XmmMovRMImmVex { op, .. }
186 | Inst::XmmToGprImmVex { op, .. }
187 | Inst::XmmToGprVex { op, .. }
188 | Inst::GprToXmmVex { op, .. }
189 | Inst::CvtIntToFloatVex { op, .. }
190 | Inst::XmmCmpRmRVex { op, .. } => op.available_from(),
191
192 Inst::MulX { .. } => smallvec![InstructionSet::BMI2],
193
194 Inst::External { inst } => {
195 use cranelift_assembler_x64::Feature::*;
196 let mut features = smallvec![];
197 for f in inst.features() {
198 match f {
199 _64b | compat => {}
200 sse => features.push(InstructionSet::SSE),
201 }
202 }
203 features
204 }
205 }
206 }
207}
208
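// Smart constructors: small helpers that debug-assert register classes and
// operand sizes and then build the corresponding `Inst` variant.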
209impl Inst {
212 pub(crate) fn nop(len: u8) -> Self {
213 debug_assert!(len <= 15);
214 Self::Nop { len }
215 }
216
217 pub(crate) fn alu_rmi_r(
218 size: OperandSize,
219 op: AluRmiROpcode,
220 src: RegMemImm,
221 dst: Writable<Reg>,
222 ) -> Self {
223 src.assert_regclass_is(RegClass::Int);
224 debug_assert!(dst.to_reg().class() == RegClass::Int);
225 Self::AluRmiR {
226 size,
227 op,
228 src1: Gpr::unwrap_new(dst.to_reg()),
229 src2: GprMemImm::unwrap_new(src),
230 dst: WritableGpr::from_writable_reg(dst).unwrap(),
231 }
232 }
233
234 #[allow(dead_code)]
235 pub(crate) fn unary_rm_r(
236 size: OperandSize,
237 op: UnaryRmROpcode,
238 src: RegMem,
239 dst: Writable<Reg>,
240 ) -> Self {
241 src.assert_regclass_is(RegClass::Int);
242 debug_assert!(dst.to_reg().class() == RegClass::Int);
243 debug_assert!(size.is_one_of(&[
244 OperandSize::Size16,
245 OperandSize::Size32,
246 OperandSize::Size64
247 ]));
248 Self::UnaryRmR {
249 size,
250 op,
251 src: GprMem::unwrap_new(src),
252 dst: WritableGpr::from_writable_reg(dst).unwrap(),
253 }
254 }
255
256 pub(crate) fn not(size: OperandSize, src: Writable<Reg>) -> Inst {
257 debug_assert_eq!(src.to_reg().class(), RegClass::Int);
258 Inst::Not {
259 size,
260 src: Gpr::unwrap_new(src.to_reg()),
261 dst: WritableGpr::from_writable_reg(src).unwrap(),
262 }
263 }
264
265 pub(crate) fn div(
266 size: OperandSize,
267 sign: DivSignedness,
268 trap: TrapCode,
269 divisor: RegMem,
270 dividend_lo: Gpr,
271 dividend_hi: Gpr,
272 dst_quotient: WritableGpr,
273 dst_remainder: WritableGpr,
274 ) -> Inst {
275 divisor.assert_regclass_is(RegClass::Int);
276 Inst::Div {
277 size,
278 sign,
279 trap,
280 divisor: GprMem::unwrap_new(divisor),
281 dividend_lo,
282 dividend_hi,
283 dst_quotient,
284 dst_remainder,
285 }
286 }
287
288 pub(crate) fn div8(
289 sign: DivSignedness,
290 trap: TrapCode,
291 divisor: RegMem,
292 dividend: Gpr,
293 dst: WritableGpr,
294 ) -> Inst {
295 divisor.assert_regclass_is(RegClass::Int);
296 Inst::Div8 {
297 sign,
298 trap,
299 divisor: GprMem::unwrap_new(divisor),
300 dividend,
301 dst,
302 }
303 }
304
305 pub(crate) fn imm(dst_size: OperandSize, simm64: u64, dst: Writable<Reg>) -> Inst {
306 debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
307 debug_assert!(dst.to_reg().class() == RegClass::Int);
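        // When the value also fits in 32 bits, prefer a 32-bit move: a 32-bit
        // immediate written with `movl` is zero-extended to 64 bits by the
        // hardware, so the result is the same with a shorter encoding.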
308 let dst_size = match dst_size {
311 OperandSize::Size64 if simm64 > u32::max_value() as u64 => OperandSize::Size64,
312 _ => OperandSize::Size32,
313 };
314 Inst::Imm {
315 dst_size,
316 simm64,
317 dst: WritableGpr::from_writable_reg(dst).unwrap(),
318 }
319 }
320
321 pub(crate) fn mov_r_r(size: OperandSize, src: Reg, dst: Writable<Reg>) -> Inst {
322 debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
323 debug_assert!(src.class() == RegClass::Int);
324 debug_assert!(dst.to_reg().class() == RegClass::Int);
325 let src = Gpr::unwrap_new(src);
326 let dst = WritableGpr::from_writable_reg(dst).unwrap();
327 Inst::MovRR { size, src, dst }
328 }
329
330 pub(crate) fn xmm_unary_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Inst {
332 src.assert_regclass_is(RegClass::Float);
333 debug_assert!(dst.to_reg().class() == RegClass::Float);
334 Inst::XmmUnaryRmR {
335 op,
336 src: XmmMemAligned::unwrap_new(src),
337 dst: WritableXmm::from_writable_reg(dst).unwrap(),
338 }
339 }
340
341 pub(crate) fn xmm_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Self {
342 src.assert_regclass_is(RegClass::Float);
343 debug_assert!(dst.to_reg().class() == RegClass::Float);
344 Inst::XmmRmR {
345 op,
346 src1: Xmm::unwrap_new(dst.to_reg()),
347 src2: XmmMemAligned::unwrap_new(src),
348 dst: WritableXmm::from_writable_reg(dst).unwrap(),
349 }
350 }
351
352 #[cfg(test)]
353 pub(crate) fn xmm_rmr_vex3(op: AvxOpcode, src3: RegMem, src2: Reg, dst: Writable<Reg>) -> Self {
354 src3.assert_regclass_is(RegClass::Float);
355 debug_assert!(src2.class() == RegClass::Float);
356 debug_assert!(dst.to_reg().class() == RegClass::Float);
357 Inst::XmmRmRVex3 {
358 op,
359 src3: XmmMem::unwrap_new(src3),
360 src2: Xmm::unwrap_new(src2),
361 src1: Xmm::unwrap_new(dst.to_reg()),
362 dst: WritableXmm::from_writable_reg(dst).unwrap(),
363 }
364 }
365
366 pub(crate) fn xmm_mov_r_m(op: SseOpcode, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
367 debug_assert!(src.class() == RegClass::Float);
368 Inst::XmmMovRM {
369 op,
370 src: Xmm::unwrap_new(src),
371 dst: dst.into(),
372 }
373 }
374
375 pub(crate) fn xmm_to_gpr(
376 op: SseOpcode,
377 src: Reg,
378 dst: Writable<Reg>,
379 dst_size: OperandSize,
380 ) -> Inst {
381 debug_assert!(src.class() == RegClass::Float);
382 debug_assert!(dst.to_reg().class() == RegClass::Int);
383 debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
384 Inst::XmmToGpr {
385 op,
386 src: Xmm::unwrap_new(src),
387 dst: WritableGpr::from_writable_reg(dst).unwrap(),
388 dst_size,
389 }
390 }
391
392 pub(crate) fn gpr_to_xmm(
393 op: SseOpcode,
394 src: RegMem,
395 src_size: OperandSize,
396 dst: Writable<Reg>,
397 ) -> Inst {
398 src.assert_regclass_is(RegClass::Int);
399 debug_assert!(src_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
400 debug_assert!(dst.to_reg().class() == RegClass::Float);
401 Inst::GprToXmm {
402 op,
403 src: GprMem::unwrap_new(src),
404 dst: WritableXmm::from_writable_reg(dst).unwrap(),
405 src_size,
406 }
407 }
408
409 pub(crate) fn xmm_cmp_rm_r(op: SseOpcode, src1: Reg, src2: RegMem) -> Inst {
410 src2.assert_regclass_is(RegClass::Float);
411 debug_assert!(src1.class() == RegClass::Float);
412 let src2 = XmmMemAligned::unwrap_new(src2);
413 let src1 = Xmm::unwrap_new(src1);
414 Inst::XmmCmpRmR { op, src1, src2 }
415 }
416
417 #[allow(dead_code)]
418 pub(crate) fn xmm_min_max_seq(
419 size: OperandSize,
420 is_min: bool,
421 lhs: Reg,
422 rhs: Reg,
423 dst: Writable<Reg>,
424 ) -> Inst {
425 debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
426 debug_assert_eq!(lhs.class(), RegClass::Float);
427 debug_assert_eq!(rhs.class(), RegClass::Float);
428 debug_assert_eq!(dst.to_reg().class(), RegClass::Float);
429 Inst::XmmMinMaxSeq {
430 size,
431 is_min,
432 lhs: Xmm::unwrap_new(lhs),
433 rhs: Xmm::unwrap_new(rhs),
434 dst: WritableXmm::from_writable_reg(dst).unwrap(),
435 }
436 }
437
438 pub(crate) fn movzx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
439 src.assert_regclass_is(RegClass::Int);
440 debug_assert!(dst.to_reg().class() == RegClass::Int);
441 let src = GprMem::unwrap_new(src);
442 let dst = WritableGpr::from_writable_reg(dst).unwrap();
443 Inst::MovzxRmR { ext_mode, src, dst }
444 }
445
446 pub(crate) fn movsx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
447 src.assert_regclass_is(RegClass::Int);
448 debug_assert!(dst.to_reg().class() == RegClass::Int);
449 let src = GprMem::unwrap_new(src);
450 let dst = WritableGpr::from_writable_reg(dst).unwrap();
451 Inst::MovsxRmR { ext_mode, src, dst }
452 }
453
454 pub(crate) fn mov64_m_r(src: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
455 debug_assert!(dst.to_reg().class() == RegClass::Int);
456 Inst::Mov64MR {
457 src: src.into(),
458 dst: WritableGpr::from_writable_reg(dst).unwrap(),
459 }
460 }
461
462 pub(crate) fn mov_r_m(size: OperandSize, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
463 debug_assert!(src.class() == RegClass::Int);
464 Inst::MovRM {
465 size,
466 src: Gpr::unwrap_new(src),
467 dst: dst.into(),
468 }
469 }
470
471 pub(crate) fn lea(addr: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
472 debug_assert!(dst.to_reg().class() == RegClass::Int);
473 Inst::LoadEffectiveAddress {
474 addr: addr.into(),
475 dst: WritableGpr::from_writable_reg(dst).unwrap(),
476 size: OperandSize::Size64,
477 }
478 }
479
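    /// Build a shift instruction. When the shift amount is an immediate it must
    /// be strictly smaller than the operand width, as asserted below.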
480 pub(crate) fn shift_r(
481 size: OperandSize,
482 kind: ShiftKind,
483 num_bits: Imm8Gpr,
484 src: Reg,
485 dst: Writable<Reg>,
486 ) -> Inst {
487 if let &Imm8Reg::Imm8 { imm: num_bits } = num_bits.as_imm8_reg() {
488 debug_assert!(num_bits < size.to_bits());
489 }
490 debug_assert!(dst.to_reg().class() == RegClass::Int);
491 Inst::ShiftR {
492 size,
493 kind,
494 src: Gpr::unwrap_new(src),
495 num_bits,
496 dst: WritableGpr::from_writable_reg(dst).unwrap(),
497 }
498 }
499
500 pub(crate) fn cmp_rmi_r(size: OperandSize, src1: Reg, src2: RegMemImm) -> Inst {
503 src2.assert_regclass_is(RegClass::Int);
504 debug_assert_eq!(src1.class(), RegClass::Int);
505 Inst::CmpRmiR {
506 size,
507 src1: Gpr::unwrap_new(src1),
508 src2: GprMemImm::unwrap_new(src2),
509 opcode: CmpOpcode::Cmp,
510 }
511 }
512
513 pub(crate) fn trap(trap_code: TrapCode) -> Inst {
514 Inst::Ud2 { trap_code }
515 }
516
517 pub(crate) fn trap_if(cc: CC, trap_code: TrapCode) -> Inst {
518 Inst::TrapIf { cc, trap_code }
519 }
520
521 pub(crate) fn cmove(size: OperandSize, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst {
522 debug_assert!(size.is_one_of(&[
523 OperandSize::Size16,
524 OperandSize::Size32,
525 OperandSize::Size64
526 ]));
527 debug_assert!(dst.to_reg().class() == RegClass::Int);
528 Inst::Cmove {
529 size,
530 cc,
531 consequent: GprMem::unwrap_new(src),
532 alternative: Gpr::unwrap_new(dst.to_reg()),
533 dst: WritableGpr::from_writable_reg(dst).unwrap(),
534 }
535 }
536
537 pub(crate) fn push64(src: RegMemImm) -> Inst {
538 src.assert_regclass_is(RegClass::Int);
539 let src = GprMemImm::unwrap_new(src);
540 Inst::Push64 { src }
541 }
542
543 pub(crate) fn pop64(dst: Writable<Reg>) -> Inst {
544 debug_assert!(dst.to_reg().class() == RegClass::Int);
545 let dst = WritableGpr::from_writable_reg(dst).unwrap();
546 Inst::Pop64 { dst }
547 }
548
549 pub(crate) fn call_known(info: Box<CallInfo<ExternalName>>) -> Inst {
550 Inst::CallKnown { info }
551 }
552
553 pub(crate) fn call_unknown(info: Box<CallInfo<RegMem>>) -> Inst {
554 info.dest.assert_regclass_is(RegClass::Int);
555 Inst::CallUnknown { info }
556 }
557
558 pub(crate) fn ret(stack_bytes_to_pop: u32) -> Inst {
559 Inst::Ret { stack_bytes_to_pop }
560 }
561
562 pub(crate) fn jmp_known(dst: MachLabel) -> Inst {
563 Inst::JmpKnown { dst }
564 }
565
566 pub(crate) fn jmp_unknown(target: RegMem) -> Inst {
567 target.assert_regclass_is(RegClass::Int);
568 Inst::JmpUnknown { target }
569 }
570
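    /// Choose the appropriate load for a value of type `ty`: integer loads
    /// narrower than 64 bits require an extension kind (`movzx`/`movsx`),
    /// 64-bit integer loads use a plain `movq`, and float/vector loads pick the
    /// matching SSE move.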
571 pub(crate) fn load(
575 ty: Type,
576 from_addr: impl Into<SyntheticAmode>,
577 to_reg: Writable<Reg>,
578 ext_kind: ExtKind,
579 ) -> Inst {
580 let rc = to_reg.to_reg().class();
581 match rc {
582 RegClass::Int => {
583 let ext_mode = match ty.bytes() {
584 1 => Some(ExtMode::BQ),
585 2 => Some(ExtMode::WQ),
586 4 => Some(ExtMode::LQ),
587 8 => None,
588 _ => unreachable!("the type should never use a scalar load: {}", ty),
589 };
590 if let Some(ext_mode) = ext_mode {
591 match ext_kind {
593 ExtKind::SignExtend => {
594 Inst::movsx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
595 }
596 ExtKind::ZeroExtend => {
597 Inst::movzx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
598 }
599 ExtKind::None => {
600 panic!("expected an extension kind for extension mode: {ext_mode:?}")
601 }
602 }
603 } else {
604 Inst::mov64_m_r(from_addr, to_reg)
606 }
607 }
608 RegClass::Float => {
609 let opcode = match ty {
610 types::F16 => panic!("loading a f16 requires multiple instructions"),
611 types::F32 => SseOpcode::Movss,
612 types::F64 => SseOpcode::Movsd,
613 types::F32X4 => SseOpcode::Movups,
614 types::F64X2 => SseOpcode::Movupd,
615 _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqu,
616 _ => unimplemented!("unable to load type: {}", ty),
617 };
618 Inst::xmm_unary_rm_r(opcode, RegMem::mem(from_addr), to_reg)
619 }
620 RegClass::Vector => unreachable!(),
621 }
622 }
623
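    /// Choose the appropriate store for a value of type `ty`, mirroring `load`
    /// above: integer stores use a `mov` of the right width, float/vector
    /// stores pick the matching SSE move.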
624 pub(crate) fn store(ty: Type, from_reg: Reg, to_addr: impl Into<SyntheticAmode>) -> Inst {
626 let rc = from_reg.class();
627 match rc {
628 RegClass::Int => Inst::mov_r_m(OperandSize::from_ty(ty), from_reg, to_addr),
629 RegClass::Float => {
630 let opcode = match ty {
631 types::F16 => panic!("storing a f16 requires multiple instructions"),
632 types::F32 => SseOpcode::Movss,
633 types::F64 => SseOpcode::Movsd,
634 types::F32X4 => SseOpcode::Movups,
635 types::F64X2 => SseOpcode::Movupd,
636 _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqu,
637 _ => unimplemented!("unable to store type: {}", ty),
638 };
639 Inst::xmm_mov_r_m(opcode, from_reg, to_addr)
640 }
641 RegClass::Vector => unreachable!(),
642 }
643 }
644}
645
646impl PrettyPrint for Inst {
650 fn pretty_print(&self, _size: u8) -> String {
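        // Left-justify a mnemonic into a fixed 7-character column so operands
        // line up across lines, e.g. `ljustify("movq".to_string())` yields
        // "movq   ".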
651 fn ljustify(s: String) -> String {
652 let w = 7;
653 if s.len() >= w {
654 s
655 } else {
656 let need = usize::min(w, w - s.len());
657 s + &format!("{nil: <width$}", nil = "", width = need)
658 }
659 }
660
661 fn ljustify2(s1: String, s2: String) -> String {
662 ljustify(s1 + &s2)
663 }
664
665 fn suffix_lq(size: OperandSize) -> String {
666 match size {
667 OperandSize::Size32 => "l",
668 OperandSize::Size64 => "q",
669 _ => unreachable!(),
670 }
671 .to_string()
672 }
673
674 #[allow(dead_code)]
675 fn suffix_lqb(size: OperandSize) -> String {
676 match size {
677 OperandSize::Size32 => "l",
678 OperandSize::Size64 => "q",
679 _ => unreachable!(),
680 }
681 .to_string()
682 }
683
684 fn suffix_bwlq(size: OperandSize) -> String {
685 match size {
686 OperandSize::Size8 => "b".to_string(),
687 OperandSize::Size16 => "w".to_string(),
688 OperandSize::Size32 => "l".to_string(),
689 OperandSize::Size64 => "q".to_string(),
690 }
691 }
692
693 match self {
694 Inst::Nop { len } => format!("{} len={}", ljustify("nop".to_string()), len),
695
696 Inst::AluRmiR {
697 size,
698 op,
699 src1,
700 src2,
701 dst,
702 } => {
703 let size_bytes = size.to_bytes();
704 let src1 = pretty_print_reg(src1.to_reg(), size_bytes);
705 let dst = pretty_print_reg(dst.to_reg().to_reg(), size_bytes);
706 let src2 = src2.pretty_print(size_bytes);
707 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
708 format!("{op} {src1}, {src2}, {dst}")
709 }
710 Inst::AluConstOp { op, dst, size } => {
711 let size_bytes = size.to_bytes();
712 let dst = pretty_print_reg(dst.to_reg().to_reg(), size_bytes);
713 let op = ljustify2(op.to_string(), suffix_lqb(*size));
714 format!("{op} {dst}, {dst}, {dst}")
715 }
716 Inst::AluRmRVex {
717 size,
718 op,
719 src1,
720 src2,
721 dst,
722 } => {
723 let size_bytes = size.to_bytes();
724 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
725 let src1 = pretty_print_reg(src1.to_reg(), size_bytes);
726 let src2 = src2.pretty_print(size_bytes);
727 let op = ljustify2(op.to_string(), String::new());
728 format!("{op} {src2}, {src1}, {dst}")
729 }
730 Inst::UnaryRmR { src, dst, op, size } => {
731 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
732 let src = src.pretty_print(size.to_bytes());
733 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
734 format!("{op} {src}, {dst}")
735 }
736
737 Inst::UnaryRmRVex { src, dst, op, size } => {
738 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
739 let src = src.pretty_print(size.to_bytes());
740 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
741 format!("{op} {src}, {dst}")
742 }
743
744 Inst::UnaryRmRImmVex {
745 src,
746 dst,
747 op,
748 size,
749 imm,
750 } => {
751 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
752 let src = src.pretty_print(size.to_bytes());
753 format!(
754 "{} ${imm}, {src}, {dst}",
755 ljustify2(op.to_string(), suffix_bwlq(*size))
756 )
757 }
758
759 Inst::Not { size, src, dst } => {
760 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
761 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
762 let op = ljustify2("not".to_string(), suffix_bwlq(*size));
763 format!("{op} {src}, {dst}")
764 }
765
766 Inst::Neg { size, src, dst } => {
767 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
768 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
769 let op = ljustify2("neg".to_string(), suffix_bwlq(*size));
770 format!("{op} {src}, {dst}")
771 }
772
773 Inst::Div {
774 size,
775 sign,
776 trap,
777 divisor,
778 dividend_lo,
779 dividend_hi,
780 dst_quotient,
781 dst_remainder,
782 } => {
783 let divisor = divisor.pretty_print(size.to_bytes());
784 let dividend_lo = pretty_print_reg(dividend_lo.to_reg(), size.to_bytes());
785 let dividend_hi = pretty_print_reg(dividend_hi.to_reg(), size.to_bytes());
786 let dst_quotient =
787 pretty_print_reg(dst_quotient.to_reg().to_reg(), size.to_bytes());
788 let dst_remainder =
789 pretty_print_reg(dst_remainder.to_reg().to_reg(), size.to_bytes());
790 let op = ljustify(match sign {
791 DivSignedness::Signed => "idiv".to_string(),
792 DivSignedness::Unsigned => "div".to_string(),
793 });
794 format!(
795 "{op} {dividend_lo}, {dividend_hi}, {divisor}, {dst_quotient}, {dst_remainder} ; trap={trap}"
796 )
797 }
798
799 Inst::Div8 {
800 sign,
801 trap,
802 divisor,
803 dividend,
804 dst,
805 } => {
806 let divisor = divisor.pretty_print(1);
807 let dividend = pretty_print_reg(dividend.to_reg(), 1);
808 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
809 let op = ljustify(match sign {
810 DivSignedness::Signed => "idiv".to_string(),
811 DivSignedness::Unsigned => "div".to_string(),
812 });
813 format!("{op} {dividend}, {divisor}, {dst} ; trap={trap}")
814 }
815
816 Inst::Mul {
817 size,
818 signed,
819 src1,
820 src2,
821 dst_lo,
822 dst_hi,
823 } => {
824 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
825 let dst_lo = pretty_print_reg(dst_lo.to_reg().to_reg(), size.to_bytes());
826 let dst_hi = pretty_print_reg(dst_hi.to_reg().to_reg(), size.to_bytes());
827 let src2 = src2.pretty_print(size.to_bytes());
828 let suffix = suffix_bwlq(*size);
829 let op = ljustify(if *signed {
830 format!("imul{suffix}")
831 } else {
832 format!("mul{suffix}")
833 });
834 format!("{op} {src1}, {src2}, {dst_lo}, {dst_hi}")
835 }
836
837 Inst::MulX {
838 size,
839 src1,
840 src2,
841 dst_lo,
842 dst_hi,
843 } => {
844 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
845 let dst_hi = pretty_print_reg(dst_hi.to_reg().to_reg(), size.to_bytes());
846 let dst_lo = if dst_lo.to_reg().is_invalid_sentinel() {
847 dst_hi.clone()
848 } else {
849 pretty_print_reg(dst_lo.to_reg().to_reg(), size.to_bytes())
850 };
851 let src2 = src2.pretty_print(size.to_bytes());
852 let suffix = suffix_bwlq(*size);
853 let op = ljustify(format!("mulx{suffix}"));
854 format!("{op} {src1}, {src2}, {dst_lo}, {dst_hi}")
855 }
856
857 Inst::Mul8 {
858 signed,
859 src1,
860 src2,
861 dst,
862 } => {
863 let src1 = pretty_print_reg(src1.to_reg(), 1);
864 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
865 let src2 = src2.pretty_print(1);
866 let op = ljustify(if *signed {
867 "imulb".to_string()
868 } else {
869 "mulb".to_string()
870 });
871 format!("{op} {src1}, {src2}, {dst}")
872 }
873
874 Inst::IMul {
875 size,
876 src1,
877 src2,
878 dst,
879 } => {
880 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
881 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
882 let src2 = src2.pretty_print(size.to_bytes());
883 let suffix = suffix_bwlq(*size);
884 let op = ljustify(format!("imul{suffix}"));
885 format!("{op} {src1}, {src2}, {dst}")
886 }
887
888 Inst::IMulImm {
889 size,
890 src1,
891 src2,
892 dst,
893 } => {
894 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
895 let src1 = src1.pretty_print(size.to_bytes());
896 let suffix = suffix_bwlq(*size);
897 let op = ljustify(format!("imul{suffix}"));
898 format!("{op} {src1}, {src2:#x}, {dst}")
899 }
900
901 Inst::CheckedSRemSeq {
902 size,
903 divisor,
904 dividend_lo,
905 dividend_hi,
906 dst_quotient,
907 dst_remainder,
908 } => {
909 let divisor = pretty_print_reg(divisor.to_reg(), size.to_bytes());
910 let dividend_lo = pretty_print_reg(dividend_lo.to_reg(), size.to_bytes());
911 let dividend_hi = pretty_print_reg(dividend_hi.to_reg(), size.to_bytes());
912 let dst_quotient =
913 pretty_print_reg(dst_quotient.to_reg().to_reg(), size.to_bytes());
914 let dst_remainder =
915 pretty_print_reg(dst_remainder.to_reg().to_reg(), size.to_bytes());
916 format!(
917 "checked_srem_seq {dividend_lo}, {dividend_hi}, \
918 {divisor}, {dst_quotient}, {dst_remainder}",
919 )
920 }
921
922 Inst::CheckedSRemSeq8 {
923 divisor,
924 dividend,
925 dst,
926 } => {
927 let divisor = pretty_print_reg(divisor.to_reg(), 1);
928 let dividend = pretty_print_reg(dividend.to_reg(), 1);
929 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
930 format!("checked_srem_seq {dividend}, {divisor}, {dst}")
931 }
932
933 Inst::SignExtendData { size, src, dst } => {
934 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
935 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
936 let op = match size {
937 OperandSize::Size8 => "cbw",
938 OperandSize::Size16 => "cwd",
939 OperandSize::Size32 => "cdq",
940 OperandSize::Size64 => "cqo",
941 };
942 format!("{op} {src}, {dst}")
943 }
944
945 Inst::XmmUnaryRmR { op, src, dst, .. } => {
946 let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
947 let src = src.pretty_print(op.src_size());
948 let op = ljustify(op.to_string());
949 format!("{op} {src}, {dst}")
950 }
951
952 Inst::XmmUnaryRmRUnaligned { op, src, dst, .. } => {
953 let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
954 let src = src.pretty_print(op.src_size());
955 let op = ljustify(op.to_string());
956 format!("{op} {src}, {dst}")
957 }
958
959 Inst::XmmUnaryRmRImm {
960 op, src, dst, imm, ..
961 } => {
962 let dst = pretty_print_reg(dst.to_reg().to_reg(), op.src_size());
963 let src = src.pretty_print(op.src_size());
964 let op = ljustify(op.to_string());
965 format!("{op} ${imm}, {src}, {dst}")
966 }
967
968 Inst::XmmUnaryRmRVex { op, src, dst, .. } => {
969 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
970 let src = src.pretty_print(8);
971 let op = ljustify(op.to_string());
972 format!("{op} {src}, {dst}")
973 }
974
975 Inst::XmmUnaryRmRImmVex {
976 op, src, dst, imm, ..
977 } => {
978 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
979 let src = src.pretty_print(8);
980 let op = ljustify(op.to_string());
981 format!("{op} ${imm}, {src}, {dst}")
982 }
983
984 Inst::XmmUnaryRmREvex { op, src, dst, .. } => {
985 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
986 let src = src.pretty_print(8);
987 let op = ljustify(op.to_string());
988 format!("{op} {src}, {dst}")
989 }
990
991 Inst::XmmUnaryRmRImmEvex {
992 op, src, dst, imm, ..
993 } => {
994 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
995 let src = src.pretty_print(8);
996 let op = ljustify(op.to_string());
997 format!("{op} ${imm}, {src}, {dst}")
998 }
999
1000 Inst::XmmMovRM { op, src, dst, .. } => {
1001 let src = pretty_print_reg(src.to_reg(), 8);
1002 let dst = dst.pretty_print(8);
1003 let op = ljustify(op.to_string());
1004 format!("{op} {src}, {dst}")
1005 }
1006
1007 Inst::XmmMovRMVex { op, src, dst, .. } => {
1008 let src = pretty_print_reg(src.to_reg(), 8);
1009 let dst = dst.pretty_print(8);
1010 let op = ljustify(op.to_string());
1011 format!("{op} {src}, {dst}")
1012 }
1013
1014 Inst::XmmMovRMImm {
1015 op, src, dst, imm, ..
1016 } => {
1017 let src = pretty_print_reg(src.to_reg(), 8);
1018 let dst = dst.pretty_print(8);
1019 let op = ljustify(op.to_string());
1020 format!("{op} ${imm}, {src}, {dst}")
1021 }
1022
1023 Inst::XmmMovRMImmVex {
1024 op, src, dst, imm, ..
1025 } => {
1026 let src = pretty_print_reg(src.to_reg(), 8);
1027 let dst = dst.pretty_print(8);
1028 let op = ljustify(op.to_string());
1029 format!("{op} ${imm}, {src}, {dst}")
1030 }
1031
1032 Inst::XmmRmR {
1033 op,
1034 src1,
1035 src2,
1036 dst,
1037 ..
1038 } => {
1039 let src1 = pretty_print_reg(src1.to_reg(), 8);
1040 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1041 let src2 = src2.pretty_print(8);
1042 let op = ljustify(op.to_string());
1043 format!("{op} {src1}, {src2}, {dst}")
1044 }
1045
1046 Inst::XmmRmRUnaligned {
1047 op,
1048 src1,
1049 src2,
1050 dst,
1051 ..
1052 } => {
1053 let src1 = pretty_print_reg(src1.to_reg(), 8);
1054 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1055 let src2 = src2.pretty_print(8);
1056 let op = ljustify(op.to_string());
1057 format!("{op} {src1}, {src2}, {dst}")
1058 }
1059
1060 Inst::XmmRmRBlend {
1061 op,
1062 src1,
1063 src2,
1064 mask,
1065 dst,
1066 } => {
1067 let src1 = pretty_print_reg(src1.to_reg(), 8);
1068 let mask = mask.to_reg();
1069 let mask = if mask.is_virtual() {
1070 format!(" <{}>", show_ireg_sized(mask, 8))
1071 } else {
1072 debug_assert_eq!(mask, regs::xmm0());
1073 String::new()
1074 };
1075 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1076 let src2 = src2.pretty_print(8);
1077 let op = ljustify(op.to_string());
1078 format!("{op} {src1}, {src2}, {dst}{mask}")
1079 }
1080
1081 Inst::XmmRmiRVex {
1082 op,
1083 src1,
1084 src2,
1085 dst,
1086 ..
1087 } => {
1088 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1089 let src1 = pretty_print_reg(src1.to_reg(), 8);
1090 let src2 = src2.pretty_print(8);
1091 let op = ljustify(op.to_string());
1092 format!("{op} {src1}, {src2}, {dst}")
1093 }
1094
1095 Inst::XmmRmRImmVex {
1096 op,
1097 src1,
1098 src2,
1099 dst,
1100 imm,
1101 ..
1102 } => {
1103 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1104 let src1 = pretty_print_reg(src1.to_reg(), 8);
1105 let src2 = src2.pretty_print(8);
1106 let op = ljustify(op.to_string());
1107 format!("{op} ${imm}, {src1}, {src2}, {dst}")
1108 }
1109
1110 Inst::XmmVexPinsr {
1111 op,
1112 src1,
1113 src2,
1114 dst,
1115 imm,
1116 ..
1117 } => {
1118 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1119 let src1 = pretty_print_reg(src1.to_reg(), 8);
1120 let src2 = src2.pretty_print(8);
1121 let op = ljustify(op.to_string());
1122 format!("{op} ${imm}, {src1}, {src2}, {dst}")
1123 }
1124
1125 Inst::XmmRmRVex3 {
1126 op,
1127 src1,
1128 src2,
1129 src3,
1130 dst,
1131 ..
1132 } => {
1133 let src1 = pretty_print_reg(src1.to_reg(), 8);
1134 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1135 let src2 = pretty_print_reg(src2.to_reg(), 8);
1136 let src3 = src3.pretty_print(8);
1137 let op = ljustify(op.to_string());
1138 format!("{op} {src1}, {src2}, {src3}, {dst}")
1139 }
1140
1141 Inst::XmmRmRBlendVex {
1142 op,
1143 src1,
1144 src2,
1145 mask,
1146 dst,
1147 ..
1148 } => {
1149 let src1 = pretty_print_reg(src1.to_reg(), 8);
1150 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1151 let src2 = src2.pretty_print(8);
1152 let mask = pretty_print_reg(mask.to_reg(), 8);
1153 let op = ljustify(op.to_string());
1154 format!("{op} {src1}, {src2}, {mask}, {dst}")
1155 }
1156
1157 Inst::XmmRmREvex {
1158 op,
1159 src1,
1160 src2,
1161 dst,
1162 ..
1163 } => {
1164 let src1 = pretty_print_reg(src1.to_reg(), 8);
1165 let src2 = src2.pretty_print(8);
1166 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1167 let op = ljustify(op.to_string());
1168 format!("{op} {src2}, {src1}, {dst}")
1169 }
1170
1171 Inst::XmmRmREvex3 {
1172 op,
1173 src1,
1174 src2,
1175 src3,
1176 dst,
1177 ..
1178 } => {
1179 let src1 = pretty_print_reg(src1.to_reg(), 8);
1180 let src2 = pretty_print_reg(src2.to_reg(), 8);
1181 let src3 = src3.pretty_print(8);
1182 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1183 let op = ljustify(op.to_string());
1184 format!("{op} {src3}, {src2}, {src1}, {dst}")
1185 }
1186
1187 Inst::XmmMinMaxSeq {
1188 lhs,
1189 rhs,
1190 dst,
1191 is_min,
1192 size,
1193 } => {
1194 let rhs = pretty_print_reg(rhs.to_reg(), 8);
1195 let lhs = pretty_print_reg(lhs.to_reg(), 8);
1196 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1197 let op = ljustify2(
1198 if *is_min {
1199 "xmm min seq ".to_string()
1200 } else {
1201 "xmm max seq ".to_string()
1202 },
1203 format!("f{}", size.to_bits()),
1204 );
1205 format!("{op} {lhs}, {rhs}, {dst}")
1206 }
1207
1208 Inst::XmmRmRImm {
1209 op,
1210 src1,
1211 src2,
1212 dst,
1213 imm,
1214 size,
1215 ..
1216 } => {
1217 let src1 = pretty_print_reg(*src1, 8);
1218 let dst = pretty_print_reg(dst.to_reg(), 8);
1219 let src2 = src2.pretty_print(8);
1220 let op = ljustify(format!(
1221 "{}{}",
1222 op.to_string(),
1223 if *size == OperandSize::Size64 {
1224 ".w"
1225 } else {
1226 ""
1227 }
1228 ));
1229 format!("{op} ${imm}, {src1}, {src2}, {dst}")
1230 }
1231
1232 Inst::XmmUninitializedValue { dst } => {
1233 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1234 let op = ljustify("uninit".into());
1235 format!("{op} {dst}")
1236 }
1237
1238 Inst::XmmToGpr {
1239 op,
1240 src,
1241 dst,
1242 dst_size,
1243 } => {
1244 let dst_size = dst_size.to_bytes();
1245 let src = pretty_print_reg(src.to_reg(), 8);
1246 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
1247 let op = ljustify(op.to_string());
1248 format!("{op} {src}, {dst}")
1249 }
1250
1251 Inst::XmmToGprVex {
1252 op,
1253 src,
1254 dst,
1255 dst_size,
1256 } => {
1257 let dst_size = dst_size.to_bytes();
1258 let src = pretty_print_reg(src.to_reg(), 8);
1259 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
1260 let op = ljustify(op.to_string());
1261 format!("{op} {src}, {dst}")
1262 }
1263
1264 Inst::XmmToGprImm { op, src, dst, imm } => {
1265 let src = pretty_print_reg(src.to_reg(), 8);
1266 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1267 let op = ljustify(op.to_string());
1268 format!("{op} ${imm}, {src}, {dst}")
1269 }
1270
1271 Inst::XmmToGprImmVex { op, src, dst, imm } => {
1272 let src = pretty_print_reg(src.to_reg(), 8);
1273 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1274 let op = ljustify(op.to_string());
1275 format!("{op} ${imm}, {src}, {dst}")
1276 }
1277
1278 Inst::GprToXmm {
1279 op,
1280 src,
1281 src_size,
1282 dst,
1283 } => {
1284 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1285 let src = src.pretty_print(src_size.to_bytes());
1286 let op = ljustify(op.to_string());
1287 format!("{op} {src}, {dst}")
1288 }
1289
1290 Inst::GprToXmmVex {
1291 op,
1292 src,
1293 src_size,
1294 dst,
1295 } => {
1296 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1297 let src = src.pretty_print(src_size.to_bytes());
1298 let op = ljustify(op.to_string());
1299 format!("{op} {src}, {dst}")
1300 }
1301
1302 Inst::XmmCmpRmR { op, src1, src2 } => {
1303 let src1 = pretty_print_reg(src1.to_reg(), 8);
1304 let src2 = src2.pretty_print(8);
1305 let op = ljustify(op.to_string());
1306 format!("{op} {src2}, {src1}")
1307 }
1308
1309 Inst::CvtIntToFloat {
1310 op,
1311 src1,
1312 src2,
1313 dst,
1314 src2_size,
1315 } => {
1316 let src1 = pretty_print_reg(src1.to_reg(), 8);
1317 let dst = pretty_print_reg(*dst.to_reg(), 8);
1318 let src2 = src2.pretty_print(src2_size.to_bytes());
1319 let op = ljustify(op.to_string());
1320 format!("{op} {src1}, {src2}, {dst}")
1321 }
1322
1323 Inst::CvtIntToFloatVex {
1324 op,
1325 src1,
1326 src2,
1327 dst,
1328 src2_size,
1329 } => {
1330 let dst = pretty_print_reg(*dst.to_reg(), 8);
1331 let src1 = pretty_print_reg(src1.to_reg(), 8);
1332 let src2 = src2.pretty_print(src2_size.to_bytes());
1333 let op = ljustify(op.to_string());
1334 format!("{op} {src1}, {src2}, {dst}")
1335 }
1336
1337 Inst::XmmCmpRmRVex { op, src1, src2 } => {
1338 let src1 = pretty_print_reg(src1.to_reg(), 8);
1339 let src2 = src2.pretty_print(8);
1340 format!("{} {src2}, {src1}", ljustify(op.to_string()))
1341 }
1342
1343 Inst::CvtUint64ToFloatSeq {
1344 src,
1345 dst,
1346 dst_size,
1347 tmp_gpr1,
1348 tmp_gpr2,
1349 ..
1350 } => {
1351 let src = pretty_print_reg(src.to_reg(), 8);
1352 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1353 let tmp_gpr1 = pretty_print_reg(tmp_gpr1.to_reg().to_reg(), 8);
1354 let tmp_gpr2 = pretty_print_reg(tmp_gpr2.to_reg().to_reg(), 8);
1355 let op = ljustify(format!(
1356 "u64_to_{}_seq",
1357 if *dst_size == OperandSize::Size64 {
1358 "f64"
1359 } else {
1360 "f32"
1361 }
1362 ));
1363 format!("{op} {src}, {dst}, {tmp_gpr1}, {tmp_gpr2}")
1364 }
1365
1366 Inst::CvtFloatToSintSeq {
1367 src,
1368 dst,
1369 src_size,
1370 dst_size,
1371 tmp_xmm,
1372 tmp_gpr,
1373 is_saturating,
1374 } => {
1375 let src = pretty_print_reg(src.to_reg(), src_size.to_bytes());
1376 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1377 let tmp_gpr = pretty_print_reg(tmp_gpr.to_reg().to_reg(), 8);
1378 let tmp_xmm = pretty_print_reg(tmp_xmm.to_reg().to_reg(), 8);
1379 let op = ljustify(format!(
1380 "cvt_float{}_to_sint{}{}_seq",
1381 src_size.to_bits(),
1382 dst_size.to_bits(),
1383 if *is_saturating { "_sat" } else { "" },
1384 ));
1385 format!("{op} {src}, {dst}, {tmp_gpr}, {tmp_xmm}")
1386 }
1387
1388 Inst::CvtFloatToUintSeq {
1389 src,
1390 dst,
1391 src_size,
1392 dst_size,
1393 tmp_gpr,
1394 tmp_xmm,
1395 tmp_xmm2,
1396 is_saturating,
1397 } => {
1398 let src = pretty_print_reg(src.to_reg(), src_size.to_bytes());
1399 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1400 let tmp_gpr = pretty_print_reg(tmp_gpr.to_reg().to_reg(), 8);
1401 let tmp_xmm = pretty_print_reg(tmp_xmm.to_reg().to_reg(), 8);
1402 let tmp_xmm2 = pretty_print_reg(tmp_xmm2.to_reg().to_reg(), 8);
1403 let op = ljustify(format!(
1404 "cvt_float{}_to_uint{}{}_seq",
1405 src_size.to_bits(),
1406 dst_size.to_bits(),
1407 if *is_saturating { "_sat" } else { "" },
1408 ));
1409 format!("{op} {src}, {dst}, {tmp_gpr}, {tmp_xmm}, {tmp_xmm2}")
1410 }
1411
1412 Inst::Imm {
1413 dst_size,
1414 simm64,
1415 dst,
1416 } => {
1417 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size.to_bytes());
1418 if *dst_size == OperandSize::Size64 {
1419 let op = ljustify("movabsq".to_string());
1420 let imm = *simm64 as i64;
1421 format!("{op} ${imm}, {dst}")
1422 } else {
1423 let op = ljustify("movl".to_string());
1424 let imm = (*simm64 as u32) as i32;
1425 format!("{op} ${imm}, {dst}")
1426 }
1427 }
1428
1429 Inst::MovImmM { size, simm32, dst } => {
1430 let dst = dst.pretty_print(size.to_bytes());
1431 let suffix = suffix_bwlq(*size);
1432 let imm = match *size {
1433 OperandSize::Size8 => ((*simm32 as u8) as i8).to_string(),
1434 OperandSize::Size16 => ((*simm32 as u16) as i16).to_string(),
1435 OperandSize::Size32 => simm32.to_string(),
1436 OperandSize::Size64 => (*simm32 as i64).to_string(),
1437 };
1438 let op = ljustify2("mov".to_string(), suffix);
1439 format!("{op} ${imm}, {dst}")
1440 }
1441
1442 Inst::MovRR { size, src, dst } => {
1443 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1444 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1445 let op = ljustify2("mov".to_string(), suffix_lq(*size));
1446 format!("{op} {src}, {dst}")
1447 }
1448
1449 Inst::MovFromPReg { src, dst } => {
1450 let src: Reg = (*src).into();
1451 let src = regs::show_ireg_sized(src, 8);
1452 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1453 let op = ljustify("movq".to_string());
1454 format!("{op} {src}, {dst}")
1455 }
1456
1457 Inst::MovToPReg { src, dst } => {
1458 let src = pretty_print_reg(src.to_reg(), 8);
1459 let dst: Reg = (*dst).into();
1460 let dst = regs::show_ireg_sized(dst, 8);
1461 let op = ljustify("movq".to_string());
1462 format!("{op} {src}, {dst}")
1463 }
1464
1465 Inst::MovzxRmR {
1466 ext_mode, src, dst, ..
1467 } => {
1468 let dst_size = if *ext_mode == ExtMode::LQ {
1469 4
1470 } else {
1471 ext_mode.dst_size()
1472 };
1473 let dst = pretty_print_reg(dst.to_reg().to_reg(), dst_size);
1474 let src = src.pretty_print(ext_mode.src_size());
1475
1476 if *ext_mode == ExtMode::LQ {
1477 let op = ljustify("movl".to_string());
1478 format!("{op} {src}, {dst}")
1479 } else {
1480 let op = ljustify2("movz".to_string(), ext_mode.to_string());
1481 format!("{op} {src}, {dst}")
1482 }
1483 }
1484
1485 Inst::Mov64MR { src, dst, .. } => {
1486 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1487 let src = src.pretty_print(8);
1488 let op = ljustify("movq".to_string());
1489 format!("{op} {src}, {dst}")
1490 }
1491
1492 Inst::LoadEffectiveAddress { addr, dst, size } => {
1493 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1494 let addr = addr.pretty_print(8);
1495 let op = ljustify("lea".to_string());
1496 format!("{op} {addr}, {dst}")
1497 }
1498
1499 Inst::MovsxRmR {
1500 ext_mode, src, dst, ..
1501 } => {
1502 let dst = pretty_print_reg(dst.to_reg().to_reg(), ext_mode.dst_size());
1503 let src = src.pretty_print(ext_mode.src_size());
1504 let op = ljustify2("movs".to_string(), ext_mode.to_string());
1505 format!("{op} {src}, {dst}")
1506 }
1507
1508 Inst::MovRM { size, src, dst, .. } => {
1509 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1510 let dst = dst.pretty_print(size.to_bytes());
1511 let op = ljustify2("mov".to_string(), suffix_bwlq(*size));
1512 format!("{op} {src}, {dst}")
1513 }
1514
1515 Inst::ShiftR {
1516 size,
1517 kind,
1518 num_bits,
1519 src,
1520 dst,
1521 ..
1522 } => {
1523 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1524 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1525 match num_bits.as_imm8_reg() {
1526 &Imm8Reg::Reg { reg } => {
1527 let reg = pretty_print_reg(reg, 1);
1528 let op = ljustify2(kind.to_string(), suffix_bwlq(*size));
1529 format!("{op} {reg}, {src}, {dst}")
1530 }
1531
1532 &Imm8Reg::Imm8 { imm: num_bits } => {
1533 let op = ljustify2(kind.to_string(), suffix_bwlq(*size));
1534 format!("{op} ${num_bits}, {src}, {dst}")
1535 }
1536 }
1537 }
1538
1539 Inst::XmmRmiReg {
1540 opcode,
1541 src1,
1542 src2,
1543 dst,
1544 ..
1545 } => {
1546 let src1 = pretty_print_reg(src1.to_reg(), 8);
1547 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1548 let src2 = src2.pretty_print(8);
1549 let op = ljustify(opcode.to_string());
1550 format!("{op} {src1}, {src2}, {dst}")
1551 }
1552
1553 Inst::CmpRmiR {
1554 size,
1555 src1,
1556 src2,
1557 opcode,
1558 } => {
1559 let src1 = pretty_print_reg(src1.to_reg(), size.to_bytes());
1560 let src2 = src2.pretty_print(size.to_bytes());
1561 let op = match opcode {
1562 CmpOpcode::Cmp => "cmp",
1563 CmpOpcode::Test => "test",
1564 };
1565 let op = ljustify2(op.to_string(), suffix_bwlq(*size));
1566 format!("{op} {src2}, {src1}")
1567 }
1568
1569 Inst::Setcc { cc, dst } => {
1570 let dst = pretty_print_reg(dst.to_reg().to_reg(), 1);
1571 let op = ljustify2("set".to_string(), cc.to_string());
1572 format!("{op} {dst}")
1573 }
1574
1575 Inst::Bswap { size, src, dst } => {
1576 let src = pretty_print_reg(src.to_reg(), size.to_bytes());
1577 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1578 let op = ljustify2("bswap".to_string(), suffix_bwlq(*size));
1579 format!("{op} {src}, {dst}")
1580 }
1581
1582 Inst::Cmove {
1583 size,
1584 cc,
1585 consequent,
1586 alternative,
1587 dst,
1588 } => {
1589 let alternative = pretty_print_reg(alternative.to_reg(), size.to_bytes());
1590 let dst = pretty_print_reg(dst.to_reg().to_reg(), size.to_bytes());
1591 let consequent = consequent.pretty_print(size.to_bytes());
1592 let op = ljustify(format!("cmov{}{}", cc.to_string(), suffix_bwlq(*size)));
1593 format!("{op} {consequent}, {alternative}, {dst}")
1594 }
1595
1596 Inst::XmmCmove {
1597 ty,
1598 cc,
1599 consequent,
1600 alternative,
1601 dst,
1602 ..
1603 } => {
1604 let size = u8::try_from(ty.bytes()).unwrap();
1605 let alternative = pretty_print_reg(alternative.to_reg(), size);
1606 let dst = pretty_print_reg(dst.to_reg().to_reg(), size);
1607 let consequent = pretty_print_reg(consequent.to_reg(), size);
1608 let suffix = match *ty {
1609 types::F64 => "sd",
1610 types::F32 => "ss",
1611 types::F16 => "ss",
1612 types::F32X4 => "aps",
1613 types::F64X2 => "apd",
1614 _ => "dqa",
1615 };
1616 let cc = cc.invert();
1617 format!(
1618 "mov{suffix} {alternative}, {dst}; \
1619 j{cc} $next; \
1620 mov{suffix} {consequent}, {dst}; \
1621 $next:"
1622 )
1623 }
1624
1625 Inst::Push64 { src } => {
1626 let src = src.pretty_print(8);
1627 let op = ljustify("pushq".to_string());
1628 format!("{op} {src}")
1629 }
1630
1631 Inst::StackProbeLoop {
1632 tmp,
1633 frame_size,
1634 guard_size,
1635 } => {
1636 let tmp = pretty_print_reg(tmp.to_reg(), 8);
1637 let op = ljustify("stack_probe_loop".to_string());
1638 format!("{op} {tmp}, frame_size={frame_size}, guard_size={guard_size}")
1639 }
1640
1641 Inst::Pop64 { dst } => {
1642 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1643 let op = ljustify("popq".to_string());
1644 format!("{op} {dst}")
1645 }
1646
1647 Inst::CallKnown { info } => {
1648 let op = ljustify("call".to_string());
1649 let try_call = info
1650 .try_call_info
1651 .as_ref()
1652 .map(|tci| pretty_print_try_call(tci))
1653 .unwrap_or_default();
1654 format!("{op} {:?}{try_call}", info.dest)
1655 }
1656
1657 Inst::CallUnknown { info } => {
1658 let dest = info.dest.pretty_print(8);
1659 let op = ljustify("call".to_string());
1660 let try_call = info
1661 .try_call_info
1662 .as_ref()
1663 .map(|tci| pretty_print_try_call(tci))
1664 .unwrap_or_default();
1665 format!("{op} *{dest}{try_call}")
1666 }
1667
1668 Inst::ReturnCallKnown { info } => {
1669 let ReturnCallInfo {
1670 uses,
1671 new_stack_arg_size,
1672 tmp,
1673 dest,
1674 } = &**info;
1675 let tmp = pretty_print_reg(tmp.to_reg().to_reg(), 8);
1676 let mut s = format!("return_call_known {dest:?} ({new_stack_arg_size}) tmp={tmp}");
1677 for ret in uses {
1678 let preg = regs::show_reg(ret.preg);
1679 let vreg = pretty_print_reg(ret.vreg, 8);
1680 write!(&mut s, " {vreg}={preg}").unwrap();
1681 }
1682 s
1683 }
1684
1685 Inst::ReturnCallUnknown { info } => {
1686 let ReturnCallInfo {
1687 uses,
1688 new_stack_arg_size,
1689 tmp,
1690 dest,
1691 } = &**info;
1692 let callee = pretty_print_reg(*dest, 8);
1693 let tmp = pretty_print_reg(tmp.to_reg().to_reg(), 8);
1694 let mut s =
1695 format!("return_call_unknown {callee} ({new_stack_arg_size}) tmp={tmp}");
1696 for ret in uses {
1697 let preg = regs::show_reg(ret.preg);
1698 let vreg = pretty_print_reg(ret.vreg, 8);
1699 write!(&mut s, " {vreg}={preg}").unwrap();
1700 }
1701 s
1702 }
1703
1704 Inst::Args { args } => {
1705 let mut s = "args".to_string();
1706 for arg in args {
1707 let preg = regs::show_reg(arg.preg);
1708 let def = pretty_print_reg(arg.vreg.to_reg(), 8);
1709 write!(&mut s, " {def}={preg}").unwrap();
1710 }
1711 s
1712 }
1713
1714 Inst::Rets { rets } => {
1715 let mut s = "rets".to_string();
1716 for ret in rets {
1717 let preg = regs::show_reg(ret.preg);
1718 let vreg = pretty_print_reg(ret.vreg, 8);
1719 write!(&mut s, " {vreg}={preg}").unwrap();
1720 }
1721 s
1722 }
1723
1724 Inst::Ret { stack_bytes_to_pop } => {
1725 let mut s = "ret".to_string();
1726 if *stack_bytes_to_pop != 0 {
1727 write!(&mut s, " {stack_bytes_to_pop}").unwrap();
1728 }
1729 s
1730 }
1731
1732 Inst::StackSwitchBasic {
1733 store_context_ptr,
1734 load_context_ptr,
1735 in_payload0,
1736 out_payload0,
1737 } => {
1738 let store_context_ptr = pretty_print_reg(**store_context_ptr, 8);
1739 let load_context_ptr = pretty_print_reg(**load_context_ptr, 8);
1740 let in_payload0 = pretty_print_reg(**in_payload0, 8);
1741 let out_payload0 = pretty_print_reg(*out_payload0.to_reg(), 8);
1742 format!("{out_payload0} = stack_switch_basic {store_context_ptr}, {load_context_ptr}, {in_payload0}")
1743 }
1744
1745 Inst::JmpKnown { dst } => {
1746 let op = ljustify("jmp".to_string());
1747 let dst = dst.to_string();
1748 format!("{op} {dst}")
1749 }
1750
1751 Inst::WinchJmpIf { cc, taken } => {
1752 let taken = taken.to_string();
1753 let op = ljustify2("j".to_string(), cc.to_string());
1754 format!("{op} {taken}")
1755 }
1756
1757 Inst::JmpCondOr {
1758 cc1,
1759 cc2,
1760 taken,
1761 not_taken,
1762 } => {
1763 let taken = taken.to_string();
1764 let not_taken = not_taken.to_string();
1765 let op = ljustify(format!("j{cc1},{cc2}"));
1766 format!("{op} {taken}; j {not_taken}")
1767 }
1768
1769 Inst::JmpCond {
1770 cc,
1771 taken,
1772 not_taken,
1773 } => {
1774 let taken = taken.to_string();
1775 let not_taken = not_taken.to_string();
1776 let op = ljustify2("j".to_string(), cc.to_string());
1777 format!("{op} {taken}; j {not_taken}")
1778 }
1779
1780 Inst::JmpTableSeq {
1781 idx, tmp1, tmp2, ..
1782 } => {
1783 let idx = pretty_print_reg(*idx, 8);
1784 let tmp1 = pretty_print_reg(tmp1.to_reg(), 8);
1785 let tmp2 = pretty_print_reg(tmp2.to_reg(), 8);
1786 let op = ljustify("br_table".into());
1787 format!("{op} {idx}, {tmp1}, {tmp2}")
1788 }
1789
1790 Inst::JmpUnknown { target } => {
1791 let target = target.pretty_print(8);
1792 let op = ljustify("jmp".to_string());
1793 format!("{op} *{target}")
1794 }
1795
1796 Inst::TrapIf { cc, trap_code, .. } => {
1797 format!("j{cc} #trap={trap_code}")
1798 }
1799
1800 Inst::TrapIfAnd {
1801 cc1,
1802 cc2,
1803 trap_code,
1804 ..
1805 } => {
1806 let cc1 = cc1.invert();
1807 let cc2 = cc2.invert();
1808 format!("trap_if_and {cc1}, {cc2}, {trap_code}")
1809 }
1810
1811 Inst::TrapIfOr {
1812 cc1,
1813 cc2,
1814 trap_code,
1815 ..
1816 } => {
1817 let cc2 = cc2.invert();
1818 format!("trap_if_or {cc1}, {cc2}, {trap_code}")
1819 }
1820
1821 Inst::LoadExtName {
1822 dst, name, offset, ..
1823 } => {
1824 let dst = pretty_print_reg(dst.to_reg(), 8);
1825 let name = name.display(None);
1826 let op = ljustify("load_ext_name".into());
1827 format!("{op} {name}+{offset}, {dst}")
1828 }
1829
1830 Inst::LockCmpxchg {
1831 ty,
1832 replacement,
1833 expected,
1834 mem,
1835 dst_old,
1836 ..
1837 } => {
1838 let size = ty.bytes() as u8;
1839 let replacement = pretty_print_reg(*replacement, size);
1840 let expected = pretty_print_reg(*expected, size);
1841 let dst_old = pretty_print_reg(dst_old.to_reg(), size);
1842 let mem = mem.pretty_print(size);
1843 let suffix = suffix_bwlq(OperandSize::from_bytes(size as u32));
1844 format!(
1845 "lock cmpxchg{suffix} {replacement}, {mem}, expected={expected}, dst_old={dst_old}"
1846 )
1847 }
1848
1849 Inst::LockCmpxchg16b {
1850 replacement_low,
1851 replacement_high,
1852 expected_low,
1853 expected_high,
1854 mem,
1855 dst_old_low,
1856 dst_old_high,
1857 ..
1858 } => {
1859 let replacement_low = pretty_print_reg(*replacement_low, 8);
1860 let replacement_high = pretty_print_reg(*replacement_high, 8);
1861 let expected_low = pretty_print_reg(*expected_low, 8);
1862 let expected_high = pretty_print_reg(*expected_high, 8);
1863 let dst_old_low = pretty_print_reg(dst_old_low.to_reg(), 8);
1864 let dst_old_high = pretty_print_reg(dst_old_high.to_reg(), 8);
1865 let mem = mem.pretty_print(16);
1866 format!(
1867 "lock cmpxchg16b {mem}, replacement={replacement_high}:{replacement_low}, expected={expected_high}:{expected_low}, dst_old={dst_old_high}:{dst_old_low}"
1868 )
1869 }
1870
1871 Inst::LockXadd {
1872 size,
1873 operand,
1874 mem,
1875 dst_old,
1876 } => {
1877 let operand = pretty_print_reg(*operand, size.to_bytes());
1878 let dst_old = pretty_print_reg(dst_old.to_reg(), size.to_bytes());
1879 let mem = mem.pretty_print(size.to_bytes());
1880 let suffix = suffix_bwlq(*size);
1881 format!("lock xadd{suffix} {operand}, {mem}, dst_old={dst_old}")
1882 }
1883
1884 Inst::Xchg {
1885 size,
1886 operand,
1887 mem,
1888 dst_old,
1889 } => {
1890 let operand = pretty_print_reg(*operand, size.to_bytes());
1891 let dst_old = pretty_print_reg(dst_old.to_reg(), size.to_bytes());
1892 let mem = mem.pretty_print(size.to_bytes());
1893 let suffix = suffix_bwlq(*size);
1894 format!("xchg{suffix} {operand}, {mem}, dst_old={dst_old}")
1895 }
1896
1897 Inst::AtomicRmwSeq { ty, op, .. } => {
1898 let ty = ty.bits();
1899 format!(
1900 "atomically {{ {ty}_bits_at_[%r9] {op:?}= %r10; %rax = old_value_at_[%r9]; %r11, %rflags = trash }}"
1901 )
1902 }
1903
1904 Inst::Atomic128RmwSeq {
1905 op,
1906 mem,
1907 operand_low,
1908 operand_high,
1909 temp_low,
1910 temp_high,
1911 dst_old_low,
1912 dst_old_high,
1913 } => {
1914 let operand_low = pretty_print_reg(*operand_low, 8);
1915 let operand_high = pretty_print_reg(*operand_high, 8);
1916 let temp_low = pretty_print_reg(temp_low.to_reg(), 8);
1917 let temp_high = pretty_print_reg(temp_high.to_reg(), 8);
1918 let dst_old_low = pretty_print_reg(dst_old_low.to_reg(), 8);
1919 let dst_old_high = pretty_print_reg(dst_old_high.to_reg(), 8);
1920 let mem = mem.pretty_print(16);
1921 format!("atomically {{ {dst_old_high}:{dst_old_low} = {mem}; {temp_high}:{temp_low} = {dst_old_high}:{dst_old_low} {op:?} {operand_high}:{operand_low}; {mem} = {temp_high}:{temp_low} }}")
1922 }
1923
1924 Inst::Atomic128XchgSeq {
1925 mem,
1926 operand_low,
1927 operand_high,
1928 dst_old_low,
1929 dst_old_high,
1930 } => {
1931 let operand_low = pretty_print_reg(*operand_low, 8);
1932 let operand_high = pretty_print_reg(*operand_high, 8);
1933 let dst_old_low = pretty_print_reg(dst_old_low.to_reg(), 8);
1934 let dst_old_high = pretty_print_reg(dst_old_high.to_reg(), 8);
1935 let mem = mem.pretty_print(16);
1936 format!("atomically {{ {dst_old_high}:{dst_old_low} = {mem}; {mem} = {operand_high}:{operand_low} }}")
1937 }
1938
1939 Inst::Fence { kind } => match kind {
1940 FenceKind::MFence => "mfence".to_string(),
1941 FenceKind::LFence => "lfence".to_string(),
1942 FenceKind::SFence => "sfence".to_string(),
1943 },
1944
1945 Inst::Hlt => "hlt".into(),
1946
1947 Inst::Ud2 { trap_code } => format!("ud2 {trap_code}"),
1948
1949 Inst::ElfTlsGetAddr { symbol, dst } => {
1950 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1951 format!("{dst} = elf_tls_get_addr {symbol:?}")
1952 }
1953
1954 Inst::MachOTlsGetAddr { symbol, dst } => {
1955 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1956 format!("{dst} = macho_tls_get_addr {symbol:?}")
1957 }
1958
1959 Inst::CoffTlsGetAddr { symbol, dst, tmp } => {
1960 let dst = pretty_print_reg(dst.to_reg().to_reg(), 8);
1961 let tmp = tmp.to_reg().to_reg();
1962
1963 let mut s = format!("{dst} = coff_tls_get_addr {symbol:?}");
1964 if tmp.is_virtual() {
1965 let tmp = show_ireg_sized(tmp, 8);
1966 write!(&mut s, ", {tmp}").unwrap();
1967 };
1968
1969 s
1970 }
1971
1972 Inst::Unwind { inst } => format!("unwind {inst:?}"),
1973
1974 Inst::DummyUse { reg } => {
1975 let reg = pretty_print_reg(*reg, 8);
1976 format!("dummy_use {reg}")
1977 }
1978
1979 Inst::External { inst } => {
1980 format!("{inst}")
1981 }
1982 }
1983 }
1984}
1985
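// Renders the exception-table suffix of a try-call: the continuation label
// followed by the per-tag exception destinations.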
1986fn pretty_print_try_call(info: &TryCallInfo) -> String {
1987 let dests = info
1988 .exception_dests
1989 .iter()
1990 .map(|(tag, label)| format!("{tag:?}: {label:?}"))
1991 .collect::<Vec<_>>()
1992 .join(", ");
1993 format!("; jmp {:?}; catch [{dests}]", info.continuation)
1994}
1995
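// `Debug` output for `Inst` delegates to the pretty-printer above so that
// `{:?}` shows the same text as the pretty-printed form.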
1996impl fmt::Debug for Inst {
1997 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
1998 write!(fmt, "{}", self.pretty_print_inst(&mut Default::default()))
1999 }
2000}
2001
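// Report every register operand of `inst` to the register allocator: plain uses
// and defs, fixed-register constraints (e.g. implicit `rax`/`rdx` operands), and
// reuse constraints where the destination must share a register with a source.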
2002fn x64_get_operands(inst: &mut Inst, collector: &mut impl OperandVisitor) {
2003 match inst {
2013 Inst::AluRmiR {
2014 src1, src2, dst, ..
2015 } => {
2016 collector.reg_use(src1);
2017 collector.reg_reuse_def(dst, 0);
2018 src2.get_operands(collector);
2019 }
2020 Inst::AluConstOp { dst, .. } => collector.reg_def(dst),
2021 Inst::AluRmRVex {
2022 src1, src2, dst, ..
2023 } => {
2024 collector.reg_def(dst);
2025 collector.reg_use(src1);
2026 src2.get_operands(collector);
2027 }
2028 Inst::Not { src, dst, .. } => {
2029 collector.reg_use(src);
2030 collector.reg_reuse_def(dst, 0);
2031 }
2032 Inst::Neg { src, dst, .. } => {
2033 collector.reg_use(src);
2034 collector.reg_reuse_def(dst, 0);
2035 }
2036 Inst::Div {
2037 divisor,
2038 dividend_lo,
2039 dividend_hi,
2040 dst_quotient,
2041 dst_remainder,
2042 ..
2043 } => {
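            // div/idiv implicitly take the dividend in rdx:rax and leave the
            // quotient in rax and the remainder in rdx, hence the fixed
            // register constraints here.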
2044 divisor.get_operands(collector);
2045 collector.reg_fixed_use(dividend_lo, regs::rax());
2046 collector.reg_fixed_use(dividend_hi, regs::rdx());
2047 collector.reg_fixed_def(dst_quotient, regs::rax());
2048 collector.reg_fixed_def(dst_remainder, regs::rdx());
2049 }
2050 Inst::CheckedSRemSeq {
2051 divisor,
2052 dividend_lo,
2053 dividend_hi,
2054 dst_quotient,
2055 dst_remainder,
2056 ..
2057 } => {
2058 collector.reg_use(divisor);
2059 collector.reg_fixed_use(dividend_lo, regs::rax());
2060 collector.reg_fixed_use(dividend_hi, regs::rdx());
2061 collector.reg_fixed_def(dst_quotient, regs::rax());
2062 collector.reg_fixed_def(dst_remainder, regs::rdx());
2063 }
2064 Inst::Div8 {
2065 divisor,
2066 dividend,
2067 dst,
2068 ..
2069 } => {
2070 divisor.get_operands(collector);
2071 collector.reg_fixed_use(dividend, regs::rax());
2072 collector.reg_fixed_def(dst, regs::rax());
2073 }
2074 Inst::CheckedSRemSeq8 {
2075 divisor,
2076 dividend,
2077 dst,
2078 ..
2079 } => {
2080 collector.reg_use(divisor);
2081 collector.reg_fixed_use(dividend, regs::rax());
2082 collector.reg_fixed_def(dst, regs::rax());
2083 }
2084 Inst::Mul {
2085 src1,
2086 src2,
2087 dst_lo,
2088 dst_hi,
2089 ..
2090 } => {
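// One-operand mul/imul reads one factor from rax and writes the
// double-width product to rdx:rax.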
2091 collector.reg_fixed_use(src1, regs::rax());
2092 collector.reg_fixed_def(dst_lo, regs::rax());
2093 collector.reg_fixed_def(dst_hi, regs::rdx());
2094 src2.get_operands(collector);
2095 }
2096 Inst::Mul8 {
2097 src1, src2, dst, ..
2098 } => {
2099 collector.reg_fixed_use(src1, regs::rax());
2100 collector.reg_fixed_def(dst, regs::rax());
2101 src2.get_operands(collector);
2102 }
2103 Inst::IMul {
2104 src1, src2, dst, ..
2105 } => {
2106 collector.reg_use(src1);
2107 collector.reg_reuse_def(dst, 0);
2108 src2.get_operands(collector);
2109 }
2110 Inst::IMulImm { src1, dst, .. } => {
2111 collector.reg_def(dst);
2112 src1.get_operands(collector);
2113 }
2114 Inst::MulX {
2115 src1,
2116 src2,
2117 dst_lo,
2118 dst_hi,
2119 ..
2120 } => {
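// mulx always produces the high half in `dst_hi`; `dst_lo` may be an
// invalid sentinel when the low half of the product is unused.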
2121 if !dst_lo.to_reg().is_invalid_sentinel() {
2122 collector.reg_def(dst_lo);
2123 }
2124 collector.reg_def(dst_hi);
2125 collector.reg_fixed_use(src1, regs::rdx());
2126 src2.get_operands(collector);
2127 }
2128 Inst::SignExtendData { size, src, dst } => {
2129 match size {
2130 OperandSize::Size8 => {
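// cbw sign-extends al into ax, so both the source and the destination
// are pinned to rax.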
2131 collector.reg_fixed_use(src, regs::rax());
2134 collector.reg_fixed_def(dst, regs::rax());
2135 }
2136 _ => {
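// cwd/cdq/cqo sign-extend ax/eax/rax into dx/edx/rdx.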
2137 collector.reg_fixed_use(src, regs::rax());
2140 collector.reg_fixed_def(dst, regs::rdx());
2141 }
2142 }
2143 }
2144 Inst::UnaryRmR { src, dst, .. }
2145 | Inst::UnaryRmRVex { src, dst, .. }
2146 | Inst::UnaryRmRImmVex { src, dst, .. } => {
2147 collector.reg_def(dst);
2148 src.get_operands(collector);
2149 }
2150 Inst::XmmUnaryRmR { src, dst, .. } | Inst::XmmUnaryRmRImm { src, dst, .. } => {
2151 collector.reg_def(dst);
2152 src.get_operands(collector);
2153 }
2154 Inst::XmmUnaryRmREvex { src, dst, .. }
2155 | Inst::XmmUnaryRmRImmEvex { src, dst, .. }
2156 | Inst::XmmUnaryRmRUnaligned { src, dst, .. }
2157 | Inst::XmmUnaryRmRVex { src, dst, .. }
2158 | Inst::XmmUnaryRmRImmVex { src, dst, .. } => {
2159 collector.reg_def(dst);
2160 src.get_operands(collector);
2161 }
2162 Inst::XmmRmR {
2163 src1, src2, dst, ..
2164 } => {
2165 collector.reg_use(src1);
2166 collector.reg_reuse_def(dst, 0);
2167 src2.get_operands(collector);
2168 }
2169 Inst::XmmRmRUnaligned {
2170 src1, src2, dst, ..
2171 } => {
2172 collector.reg_use(src1);
2173 collector.reg_reuse_def(dst, 0);
2174 src2.get_operands(collector);
2175 }
2176 Inst::XmmRmRBlend {
2177 src1,
2178 src2,
2179 mask,
2180 dst,
2181 op,
2182 } => {
2183 assert!(matches!(
2184 op,
2185 SseOpcode::Blendvpd | SseOpcode::Blendvps | SseOpcode::Pblendvb
2186 ));
2187 collector.reg_use(src1);
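// SSE4.1 variable blend instructions read their selector mask implicitly from xmm0.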
2188 collector.reg_fixed_use(mask, regs::xmm0());
2189 collector.reg_reuse_def(dst, 0);
2190 src2.get_operands(collector);
2191 }
2192 Inst::XmmRmiRVex {
2193 src1, src2, dst, ..
2194 } => {
2195 collector.reg_def(dst);
2196 collector.reg_use(src1);
2197 src2.get_operands(collector);
2198 }
2199 Inst::XmmRmRImmVex {
2200 src1, src2, dst, ..
2201 } => {
2202 collector.reg_def(dst);
2203 collector.reg_use(src1);
2204 src2.get_operands(collector);
2205 }
2206 Inst::XmmVexPinsr {
2207 src1, src2, dst, ..
2208 } => {
2209 collector.reg_def(dst);
2210 collector.reg_use(src1);
2211 src2.get_operands(collector);
2212 }
2213 Inst::XmmRmRVex3 {
2214 src1,
2215 src2,
2216 src3,
2217 dst,
2218 ..
2219 } => {
2220 collector.reg_use(src1);
2221 collector.reg_reuse_def(dst, 0);
2222 collector.reg_use(src2);
2223 src3.get_operands(collector);
2224 }
2225 Inst::XmmRmRBlendVex {
2226 src1,
2227 src2,
2228 mask,
2229 dst,
2230 ..
2231 } => {
2232 collector.reg_def(dst);
2233 collector.reg_use(src1);
2234 src2.get_operands(collector);
2235 collector.reg_use(mask);
2236 }
2237 Inst::XmmRmREvex {
2238 op,
2239 src1,
2240 src2,
2241 dst,
2242 ..
2243 } => {
2244 assert_ne!(*op, Avx512Opcode::Vpermi2b);
2245 collector.reg_use(src1);
2246 src2.get_operands(collector);
2247 collector.reg_def(dst);
2248 }
2249 Inst::XmmRmREvex3 {
2250 op,
2251 src1,
2252 src2,
2253 src3,
2254 dst,
2255 ..
2256 } => {
2257 assert_eq!(*op, Avx512Opcode::Vpermi2b);
2258 collector.reg_use(src1);
2259 collector.reg_use(src2);
2260 src3.get_operands(collector);
2261 collector.reg_reuse_def(dst, 0);
2262 }
2263 Inst::XmmRmRImm {
2264 src1, src2, dst, ..
2265 } => {
2266 collector.reg_use(src1);
2267 collector.reg_reuse_def(dst, 0);
2268 src2.get_operands(collector);
2269 }
2270 Inst::XmmUninitializedValue { dst } => collector.reg_def(dst),
2271 Inst::XmmMinMaxSeq { lhs, rhs, dst, .. } => {
2272 collector.reg_use(rhs);
2273 collector.reg_use(lhs);
2274 collector.reg_reuse_def(dst, 0);
2275 }
2276 Inst::XmmRmiReg {
2277 src1, src2, dst, ..
2278 } => {
2279 collector.reg_use(src1);
2280 collector.reg_reuse_def(dst, 0);
2281 src2.get_operands(collector);
2282 }
2283 Inst::XmmMovRM { src, dst, .. }
2284 | Inst::XmmMovRMVex { src, dst, .. }
2285 | Inst::XmmMovRMImm { src, dst, .. }
2286 | Inst::XmmMovRMImmVex { src, dst, .. } => {
2287 collector.reg_use(src);
2288 dst.get_operands(collector);
2289 }
2290 Inst::XmmCmpRmR { src1, src2, .. } => {
2291 collector.reg_use(src1);
2292 src2.get_operands(collector);
2293 }
2294 Inst::XmmCmpRmRVex { src1, src2, .. } => {
2295 collector.reg_use(src1);
2296 src2.get_operands(collector);
2297 }
2298 Inst::Imm { dst, .. } => {
2299 collector.reg_def(dst);
2300 }
2301 Inst::MovRR { src, dst, .. } => {
2302 collector.reg_use(src);
2303 collector.reg_def(dst);
2304 }
2305 Inst::MovFromPReg { dst, src } => {
2306 debug_assert!(dst.to_reg().to_reg().is_virtual());
2307 collector.reg_fixed_nonallocatable(*src);
2308 collector.reg_def(dst);
2309 }
2310 Inst::MovToPReg { dst, src } => {
2311 debug_assert!(src.to_reg().is_virtual());
2312 collector.reg_use(src);
2313 collector.reg_fixed_nonallocatable(*dst);
2314 }
2315 Inst::XmmToGpr { src, dst, .. }
2316 | Inst::XmmToGprVex { src, dst, .. }
2317 | Inst::XmmToGprImm { src, dst, .. }
2318 | Inst::XmmToGprImmVex { src, dst, .. } => {
2319 collector.reg_use(src);
2320 collector.reg_def(dst);
2321 }
2322 Inst::GprToXmm { src, dst, .. } | Inst::GprToXmmVex { src, dst, .. } => {
2323 collector.reg_def(dst);
2324 src.get_operands(collector);
2325 }
2326 Inst::CvtIntToFloat {
2327 src1, src2, dst, ..
2328 } => {
2329 collector.reg_use(src1);
2330 collector.reg_reuse_def(dst, 0);
2331 src2.get_operands(collector);
2332 }
2333 Inst::CvtIntToFloatVex {
2334 src1, src2, dst, ..
2335 } => {
2336 collector.reg_def(dst);
2337 collector.reg_use(src1);
2338 src2.get_operands(collector);
2339 }
2340 Inst::CvtUint64ToFloatSeq {
2341 src,
2342 dst,
2343 tmp_gpr1,
2344 tmp_gpr2,
2345 ..
2346 } => {
2347 collector.reg_use(src);
2348 collector.reg_early_def(dst);
2349 collector.reg_early_def(tmp_gpr1);
2350 collector.reg_early_def(tmp_gpr2);
2351 }
2352 Inst::CvtFloatToSintSeq {
2353 src,
2354 dst,
2355 tmp_xmm,
2356 tmp_gpr,
2357 ..
2358 } => {
2359 collector.reg_use(src);
2360 collector.reg_early_def(dst);
2361 collector.reg_early_def(tmp_gpr);
2362 collector.reg_early_def(tmp_xmm);
2363 }
2364 Inst::CvtFloatToUintSeq {
2365 src,
2366 dst,
2367 tmp_gpr,
2368 tmp_xmm,
2369 tmp_xmm2,
2370 ..
2371 } => {
2372 collector.reg_use(src);
2373 collector.reg_early_def(dst);
2374 collector.reg_early_def(tmp_gpr);
2375 collector.reg_early_def(tmp_xmm);
2376 collector.reg_early_def(tmp_xmm2);
2377 }
2378
2379 Inst::MovImmM { dst, .. } => {
2380 dst.get_operands(collector);
2381 }
2382
2383 Inst::MovzxRmR { src, dst, .. } => {
2384 collector.reg_def(dst);
2385 src.get_operands(collector);
2386 }
2387 Inst::Mov64MR { src, dst, .. } => {
2388 collector.reg_def(dst);
2389 src.get_operands(collector);
2390 }
2391 Inst::LoadEffectiveAddress { addr: src, dst, .. } => {
2392 collector.reg_def(dst);
2393 src.get_operands(collector);
2394 }
2395 Inst::MovsxRmR { src, dst, .. } => {
2396 collector.reg_def(dst);
2397 src.get_operands(collector);
2398 }
2399 Inst::MovRM { src, dst, .. } => {
2400 collector.reg_use(src);
2401 dst.get_operands(collector);
2402 }
2403 Inst::ShiftR {
2404 num_bits, src, dst, ..
2405 } => {
2406 collector.reg_use(src);
2407 collector.reg_reuse_def(dst, 0);
2408 if let Imm8Reg::Reg { reg } = num_bits.as_imm8_reg_mut() {
2409 collector.reg_fixed_use(reg, regs::rcx());
2410 }
2411 }
2412 Inst::CmpRmiR { src1, src2, .. } => {
2413 collector.reg_use(src1);
2414 src2.get_operands(collector);
2415 }
2416 Inst::Setcc { dst, .. } => {
2417 collector.reg_def(dst);
2418 }
2419 Inst::Bswap { src, dst, .. } => {
2420 collector.reg_use(src);
2421 collector.reg_reuse_def(dst, 0);
2422 }
2423 Inst::Cmove {
2424 consequent,
2425 alternative,
2426 dst,
2427 ..
2428 } => {
2429 collector.reg_use(alternative);
2430 collector.reg_reuse_def(dst, 0);
2431 consequent.get_operands(collector);
2432 }
2433 Inst::XmmCmove {
2434 consequent,
2435 alternative,
2436 dst,
2437 ..
2438 } => {
2439 collector.reg_use(alternative);
2440 collector.reg_reuse_def(dst, 0);
2441 collector.reg_use(consequent);
2442 }
2443 Inst::Push64 { src } => {
2444 src.get_operands(collector);
2445 }
2446 Inst::Pop64 { dst } => {
2447 collector.reg_def(dst);
2448 }
2449 Inst::StackProbeLoop { tmp, .. } => {
2450 collector.reg_early_def(tmp);
2451 }
2452
2453 Inst::CallKnown { info } => {
2454 let CallInfo {
2459 uses,
2460 defs,
2461 clobbers,
2462 dest,
2463 ..
2464 } = &mut **info;
2465 debug_assert_ne!(*dest, ExternalName::LibCall(LibCall::Probestack));
2466 for CallArgPair { vreg, preg } in uses {
2467 collector.reg_fixed_use(vreg, *preg);
2468 }
2469 for CallRetPair { vreg, location } in defs {
2470 match location {
2471 RetLocation::Reg(preg, ..) => collector.reg_fixed_def(vreg, *preg),
2472 RetLocation::Stack(..) => collector.any_def(vreg),
2473 }
2474 }
2475 collector.reg_clobbers(*clobbers);
2476 }
2477
2478 Inst::CallUnknown { info } => {
2479 let CallInfo {
2480 uses,
2481 defs,
2482 clobbers,
2483 callee_conv,
2484 dest,
2485 ..
2486 } = &mut **info;
2487 match dest {
2488 RegMem::Reg { reg } if *callee_conv == CallConv::Winch => {
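// The Winch calling convention places indirect-call targets in r10.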
2489 collector.reg_fixed_use(reg, regs::r10());
2493 }
2494 _ => dest.get_operands(collector),
2495 }
2496 for CallArgPair { vreg, preg } in uses {
2497 collector.reg_fixed_use(vreg, *preg);
2498 }
2499 for CallRetPair { vreg, location } in defs {
2500 match location {
2501 RetLocation::Reg(preg, ..) => collector.reg_fixed_def(vreg, *preg),
2502 RetLocation::Stack(..) => collector.any_def(vreg),
2503 }
2504 }
2505 collector.reg_clobbers(*clobbers);
2506 }
2507 Inst::StackSwitchBasic {
2508 store_context_ptr,
2509 load_context_ptr,
2510 in_payload0,
2511 out_payload0,
2512 } => {
2513 collector.reg_use(load_context_ptr);
2514 collector.reg_use(store_context_ptr);
2515 collector.reg_fixed_use(in_payload0, stack_switch::payload_register());
2516 collector.reg_fixed_def(out_payload0, stack_switch::payload_register());
2517
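// A stack switch clobbers all allocatable registers except the payload
// register, which carries a value across the switch, so drop it from the
// clobber set.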
2518 let mut clobbers = crate::isa::x64::abi::ALL_CLOBBERS;
2519 clobbers.remove(
2521 stack_switch::payload_register()
2522 .to_real_reg()
2523 .unwrap()
2524 .into(),
2525 );
2526 collector.reg_clobbers(clobbers);
2527 }
2528
2529 Inst::ReturnCallKnown { info } => {
2530 let ReturnCallInfo {
2531 dest, uses, tmp, ..
2532 } = &mut **info;
2533 collector.reg_fixed_def(tmp, regs::r11());
2534 debug_assert_ne!(*dest, ExternalName::LibCall(LibCall::Probestack));
2536 for CallArgPair { vreg, preg } in uses {
2537 collector.reg_fixed_use(vreg, *preg);
2538 }
2539 }
2540
2541 Inst::ReturnCallUnknown { info } => {
2542 let ReturnCallInfo {
2543 dest, uses, tmp, ..
2544 } = &mut **info;
2545
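// The tail-call sequence tears down the frame before jumping, so the callee
// pointer is pinned to r10, a caller-saved register that is not used to pass
// arguments.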
2546 collector.reg_fixed_use(dest, regs::r10());
2552
2553 collector.reg_fixed_def(tmp, regs::r11());
2554 for CallArgPair { vreg, preg } in uses {
2555 collector.reg_fixed_use(vreg, *preg);
2556 }
2557 }
2558
2559 Inst::JmpTableSeq {
2560 idx, tmp1, tmp2, ..
2561 } => {
2562 collector.reg_use(idx);
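// tmp1 is written while idx is still needed by the emitted sequence, so it
// gets an early def to keep regalloc from assigning it the same register as idx.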
2563 collector.reg_early_def(tmp1);
2564 collector.reg_def(tmp2);
2568 }
2569
2570 Inst::JmpUnknown { target } => {
2571 target.get_operands(collector);
2572 }
2573
2574 Inst::LoadExtName { dst, .. } => {
2575 collector.reg_def(dst);
2576 }
2577
2578 Inst::LockCmpxchg {
2579 replacement,
2580 expected,
2581 mem,
2582 dst_old,
2583 ..
2584 } => {
2585 collector.reg_use(replacement);
2586 collector.reg_fixed_use(expected, regs::rax());
2587 collector.reg_fixed_def(dst_old, regs::rax());
2588 mem.get_operands(collector);
2589 }
2590
2591 Inst::LockCmpxchg16b {
2592 replacement_low,
2593 replacement_high,
2594 expected_low,
2595 expected_high,
2596 mem,
2597 dst_old_low,
2598 dst_old_high,
2599 ..
2600 } => {
2601 collector.reg_fixed_use(replacement_low, regs::rbx());
2602 collector.reg_fixed_use(replacement_high, regs::rcx());
2603 collector.reg_fixed_use(expected_low, regs::rax());
2604 collector.reg_fixed_use(expected_high, regs::rdx());
2605 collector.reg_fixed_def(dst_old_low, regs::rax());
2606 collector.reg_fixed_def(dst_old_high, regs::rdx());
2607 mem.get_operands(collector);
2608 }
2609
2610 Inst::LockXadd {
2611 operand,
2612 mem,
2613 dst_old,
2614 ..
2615 } => {
2616 collector.reg_use(operand);
2617 collector.reg_reuse_def(dst_old, 0);
2618 mem.get_operands(collector);
2619 }
2620
2621 Inst::Xchg {
2622 operand,
2623 mem,
2624 dst_old,
2625 ..
2626 } => {
2627 collector.reg_use(operand);
2628 collector.reg_reuse_def(dst_old, 0);
2629 mem.get_operands(collector);
2630 }
2631
2632 Inst::AtomicRmwSeq {
2633 operand,
2634 temp,
2635 dst_old,
2636 mem,
2637 ..
2638 } => {
2639 collector.reg_late_use(operand);
2640 collector.reg_early_def(temp);
2641 collector.reg_fixed_def(dst_old, regs::rax());
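// The address registers are still read after the defs above take effect
// (inside the retry loop), so collect them as late uses.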
2644 mem.get_operands_late(collector)
2645 }
2646
2647 Inst::Atomic128RmwSeq {
2648 operand_low,
2649 operand_high,
2650 temp_low,
2651 temp_high,
2652 dst_old_low,
2653 dst_old_high,
2654 mem,
2655 ..
2656 } => {
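// cmpxchg16b implicitly takes the replacement value in rcx:rbx and the
// expected/old value in rdx:rax.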
2657 collector.reg_late_use(operand_low);
2659 collector.reg_late_use(operand_high);
2660 collector.reg_fixed_def(temp_low, regs::rbx());
2661 collector.reg_fixed_def(temp_high, regs::rcx());
2662 collector.reg_fixed_def(dst_old_low, regs::rax());
2663 collector.reg_fixed_def(dst_old_high, regs::rdx());
2664 mem.get_operands_late(collector)
2665 }
2666
2667 Inst::Atomic128XchgSeq {
2668 operand_low,
2669 operand_high,
2670 dst_old_low,
2671 dst_old_high,
2672 mem,
2673 ..
2674 } => {
2675 collector.reg_fixed_late_use(operand_low, regs::rbx());
2677 collector.reg_fixed_late_use(operand_high, regs::rcx());
2678 collector.reg_fixed_def(dst_old_low, regs::rax());
2679 collector.reg_fixed_def(dst_old_high, regs::rdx());
2680 mem.get_operands_late(collector)
2681 }
2682
2683 Inst::Args { args } => {
2684 for ArgPair { vreg, preg } in args {
2685 collector.reg_fixed_def(vreg, *preg);
2686 }
2687 }
2688
2689 Inst::Rets { rets } => {
2690 for RetPair { vreg, preg } in rets {
2693 collector.reg_fixed_use(vreg, *preg);
2694 }
2695 }
2696
2697 Inst::JmpKnown { .. }
2698 | Inst::WinchJmpIf { .. }
2699 | Inst::JmpCond { .. }
2700 | Inst::JmpCondOr { .. }
2701 | Inst::Ret { .. }
2702 | Inst::Nop { .. }
2703 | Inst::TrapIf { .. }
2704 | Inst::TrapIfAnd { .. }
2705 | Inst::TrapIfOr { .. }
2706 | Inst::Hlt
2707 | Inst::Ud2 { .. }
2708 | Inst::Fence { .. } => {
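// These instructions neither define nor use any allocatable registers.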
2709 }
2711
2712 Inst::ElfTlsGetAddr { dst, .. } | Inst::MachOTlsGetAddr { dst, .. } => {
2713 collector.reg_fixed_def(dst, regs::rax());
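// Both TLS sequences perform a SystemV call internally, so every SysV
// caller-saved register other than rax (which carries the result) is clobbered.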
2714 let mut clobbers =
2721 X64ABIMachineSpec::get_regs_clobbered_by_call(CallConv::SystemV, false);
2722 clobbers.remove(regs::gpr_preg(regs::ENC_RAX));
2723 collector.reg_clobbers(clobbers);
2724 }
2725
2726 Inst::CoffTlsGetAddr { dst, tmp, .. } => {
2727 collector.reg_fixed_def(dst, regs::rax());
2732
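// The emitted sequence needs a scratch register, pinned here to rcx.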
2733 collector.reg_fixed_def(tmp, regs::rcx());
2735 }
2736
2737 Inst::Unwind { .. } => {}
2738
2739 Inst::DummyUse { reg } => {
2740 collector.reg_use(reg);
2741 }
2742
2743 Inst::External { inst } => {
2744 inst.visit(&mut external::RegallocVisitor { collector });
2745 }
2746 }
2747}
2748
2749impl MachInst for Inst {
2753 type ABIMachineSpec = X64ABIMachineSpec;
2754
2755 fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
2756 x64_get_operands(self, collector)
2757 }
2758
2759 fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
2760 match self {
2761 Self::MovRR { size, src, dst, .. } if *size == OperandSize::Size64 => {
2766 Some((dst.to_writable_reg(), src.to_reg()))
2767 }
2768 Self::XmmUnaryRmR { op, src, dst, .. }
2773 if *op == SseOpcode::Movss
2774 || *op == SseOpcode::Movsd
2775 || *op == SseOpcode::Movaps
2776 || *op == SseOpcode::Movapd
2777 || *op == SseOpcode::Movups
2778 || *op == SseOpcode::Movupd
2779 || *op == SseOpcode::Movdqa
2780 || *op == SseOpcode::Movdqu =>
2781 {
2782 if let RegMem::Reg { reg } = src.clone().to_reg_mem() {
2783 Some((dst.to_writable_reg(), reg))
2784 } else {
2785 None
2786 }
2787 }
2788 _ => None,
2789 }
2790 }
2791
2792 fn is_included_in_clobbers(&self) -> bool {
2793 match self {
2794 &Inst::Args { .. } => false,
2795 _ => true,
2796 }
2797 }
2798
2799 fn is_trap(&self) -> bool {
2800 match self {
2801 Self::Ud2 { .. } => true,
2802 _ => false,
2803 }
2804 }
2805
2806 fn is_args(&self) -> bool {
2807 match self {
2808 Self::Args { .. } => true,
2809 _ => false,
2810 }
2811 }
2812
2813 fn is_term(&self) -> MachTerminator {
2814 match self {
2815 &Self::Rets { .. } => MachTerminator::Ret,
2817 &Self::ReturnCallKnown { .. } | &Self::ReturnCallUnknown { .. } => {
2818 MachTerminator::RetCall
2819 }
2820 &Self::JmpKnown { .. } => MachTerminator::Branch,
2821 &Self::JmpCond { .. } => MachTerminator::Branch,
2822 &Self::JmpCondOr { .. } => MachTerminator::Branch,
2823 &Self::JmpTableSeq { .. } => MachTerminator::Branch,
2824 &Self::CallKnown { ref info } if info.try_call_info.is_some() => MachTerminator::Branch,
2825 &Self::CallUnknown { ref info } if info.try_call_info.is_some() => {
2826 MachTerminator::Branch
2827 }
2828 _ => MachTerminator::None,
2830 }
2831 }
2832
2833 fn is_low_level_branch(&self) -> bool {
2834 match self {
2835 &Self::WinchJmpIf { .. } => true,
2836 _ => false,
2837 }
2838 }
2839
2840 fn is_mem_access(&self) -> bool {
2841 panic!("TODO FILL ME OUT")
2842 }
2843
2844 fn gen_move(dst_reg: Writable<Reg>, src_reg: Reg, ty: Type) -> Inst {
2845 trace!(
2846 "Inst::gen_move {:?} -> {:?} (type: {:?})",
2847 src_reg,
2848 dst_reg.to_reg(),
2849 ty
2850 );
2851 let rc_dst = dst_reg.to_reg().class();
2852 let rc_src = src_reg.class();
2853 debug_assert!(rc_dst == rc_src);
2855 match rc_dst {
2856 RegClass::Int => Inst::mov_r_r(OperandSize::Size64, src_reg, dst_reg),
2857 RegClass::Float => {
2858 let opcode = match ty {
2863 types::F16 | types::F32 | types::F64 | types::F32X4 => SseOpcode::Movaps,
2864 types::F64X2 => SseOpcode::Movapd,
2865 _ if (ty.is_float() || ty.is_vector()) && ty.bits() == 128 => SseOpcode::Movdqa,
2866 _ => unimplemented!("unable to move type: {}", ty),
2867 };
2868 Inst::xmm_unary_rm_r(opcode, RegMem::reg(src_reg), dst_reg)
2869 }
2870 RegClass::Vector => unreachable!(),
2871 }
2872 }
2873
2874 fn gen_nop(preferred_size: usize) -> Inst {
2875 Inst::nop(std::cmp::min(preferred_size, 15) as u8)
2876 }
2877
2878 fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
2879 match ty {
2880 types::I8 => Ok((&[RegClass::Int], &[types::I8])),
2881 types::I16 => Ok((&[RegClass::Int], &[types::I16])),
2882 types::I32 => Ok((&[RegClass::Int], &[types::I32])),
2883 types::I64 => Ok((&[RegClass::Int], &[types::I64])),
2884 types::F16 => Ok((&[RegClass::Float], &[types::F16])),
2885 types::F32 => Ok((&[RegClass::Float], &[types::F32])),
2886 types::F64 => Ok((&[RegClass::Float], &[types::F64])),
2887 types::F128 => Ok((&[RegClass::Float], &[types::F128])),
2888 types::I128 => Ok((&[RegClass::Int, RegClass::Int], &[types::I64, types::I64])),
2889 _ if ty.is_vector() => {
2890 assert!(ty.bits() <= 128);
2891 Ok((&[RegClass::Float], &[types::I8X16]))
2892 }
2893 _ => Err(CodegenError::Unsupported(format!(
2894 "Unexpected SSA-value type: {ty}"
2895 ))),
2896 }
2897 }
2898
2899 fn canonical_type_for_rc(rc: RegClass) -> Type {
2900 match rc {
2901 RegClass::Float => types::I8X16,
2902 RegClass::Int => types::I64,
2903 RegClass::Vector => unreachable!(),
2904 }
2905 }
2906
2907 fn gen_jump(label: MachLabel) -> Inst {
2908 Inst::jmp_known(label)
2909 }
2910
2911 fn gen_imm_u64(value: u64, dst: Writable<Reg>) -> Option<Self> {
2912 Some(Inst::imm(OperandSize::Size64, value, dst))
2913 }
2914
2915 fn gen_imm_f64(value: f64, tmp: Writable<Reg>, dst: Writable<Reg>) -> SmallVec<[Self; 2]> {
2916 let imm_to_gpr = Inst::imm(OperandSize::Size64, value.to_bits(), tmp);
2917 let gpr_to_xmm = Self::gpr_to_xmm(
2918 SseOpcode::Movd,
2919 tmp.to_reg().into(),
2920 OperandSize::Size64,
2921 dst,
2922 );
2923 smallvec![imm_to_gpr, gpr_to_xmm]
2924 }
2925
2926 fn gen_dummy_use(reg: Reg) -> Self {
2927 Inst::DummyUse { reg }
2928 }
2929
2930 fn worst_case_size() -> CodeOffset {
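// An x86-64 instruction encoding is at most 15 bytes long.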
2931 15
2932 }
2933
2934 fn ref_type_regclass(_: &settings::Flags) -> RegClass {
2935 RegClass::Int
2936 }
2937
2938 fn is_safepoint(&self) -> bool {
2939 match self {
2940 Inst::CallKnown { .. } | Inst::CallUnknown { .. } => true,
2941 _ => false,
2942 }
2943 }
2944
2945 fn function_alignment() -> FunctionAlignment {
2946 FunctionAlignment {
2947 minimum: 1,
2948 preferred: 32,
2951 }
2952 }
2953
2954 type LabelUse = LabelUse;
2955
2956 const TRAP_OPCODE: &'static [u8] = &[0x0f, 0x0b];
2957}
2958
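/// Constant information needed during emission: the shared compiler flags
/// plus the x64-specific ISA flags.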
2959pub struct EmitInfo {
2961 pub(super) flags: settings::Flags,
2962 isa_flags: x64_settings::Flags,
2963}
2964
2965impl EmitInfo {
2966 pub fn new(flags: settings::Flags, isa_flags: x64_settings::Flags) -> Self {
2968 Self { flags, isa_flags }
2969 }
2970}
2971
2972impl MachInstEmit for Inst {
2973 type State = EmitState;
2974 type Info = EmitInfo;
2975
2976 fn emit(&self, sink: &mut MachBuffer<Inst>, info: &Self::Info, state: &mut Self::State) {
2977 emit::emit(self, sink, info, state);
2978 }
2979
2980 fn pretty_print_inst(&self, _: &mut Self::State) -> String {
2981 PrettyPrint::pretty_print(self, 0)
2982 }
2983}
2984
2985#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2987pub enum LabelUse {
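/// A 32-bit PC-relative field for jump/call instructions: the patched value
/// is measured from the end of the 4-byte field, preserving any addend
/// already stored there.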
2988 JmpRel32,
2992
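/// A generic 32-bit PC-relative field: the patched value is measured from the
/// start of the field, preserving the existing addend.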
2993 PCRel32,
2996}
2997
2998impl MachInstLabelUse for LabelUse {
2999 const ALIGN: CodeOffset = 1;
3000
3001 fn max_pos_range(self) -> CodeOffset {
3002 match self {
3003 LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x7fff_ffff,
3004 }
3005 }
3006
3007 fn max_neg_range(self) -> CodeOffset {
3008 match self {
3009 LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x8000_0000,
3010 }
3011 }
3012
3013 fn patch_size(self) -> CodeOffset {
3014 match self {
3015 LabelUse::JmpRel32 | LabelUse::PCRel32 => 4,
3016 }
3017 }
3018
3019 fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
3020 let pc_rel = (label_offset as i64) - (use_offset as i64);
3021 debug_assert!(pc_rel <= self.max_pos_range() as i64);
3022 debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
3023 let pc_rel = pc_rel as u32;
3024 match self {
3025 LabelUse::JmpRel32 => {
3026 let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
3027 let value = pc_rel.wrapping_add(addend).wrapping_sub(4);
3028 buffer.copy_from_slice(&value.to_le_bytes()[..]);
3029 }
3030 LabelUse::PCRel32 => {
3031 let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
3032 let value = pc_rel.wrapping_add(addend);
3033 buffer.copy_from_slice(&value.to_le_bytes()[..]);
3034 }
3035 }
3036 }
3037
3038 fn supports_veneer(self) -> bool {
3039 match self {
3040 LabelUse::JmpRel32 | LabelUse::PCRel32 => false,
3041 }
3042 }
3043
3044 fn veneer_size(self) -> CodeOffset {
3045 match self {
3046 LabelUse::JmpRel32 | LabelUse::PCRel32 => 0,
3047 }
3048 }
3049
3050 fn worst_case_veneer_size() -> CodeOffset {
3051 0
3052 }
3053
3054 fn generate_veneer(self, _: &mut [u8], _: CodeOffset) -> (CodeOffset, LabelUse) {
3055 match self {
3056 LabelUse::JmpRel32 | LabelUse::PCRel32 => {
3057 panic!("Veneer not supported for JumpRel32 label-use.");
3058 }
3059 }
3060 }
3061
3062 fn from_reloc(reloc: Reloc, addend: Addend) -> Option<Self> {
3063 match (reloc, addend) {
3064 (Reloc::X86CallPCRel4, -4) => Some(LabelUse::JmpRel32),
3065 _ => None,
3066 }
3067 }
3068}