pub mod generated_code;

use crate::ir::ExternalName;
use crate::isa::s390x::S390xBackend;
use crate::isa::s390x::abi::REG_SAVE_AREA_SIZE;
use crate::isa::s390x::inst::{
    CallInstDest, Cond, Inst as MInst, LaneOrder, MemArg, RegPair, ReturnCallInfo, SImm20,
    SymbolReloc, UImm12, UImm16Shifted, UImm32Shifted, WritableRegPair, gpr, stack_reg,
    writable_gpr, zero_reg,
};
use crate::machinst::isle::*;
use crate::machinst::{CallInfo, MachLabel, Reg, TryCallInfo, non_writable_value_regs};
use crate::{
    ir::{
        AtomicRmwOp, BlockCall, Endianness, Inst, InstructionData, KnownSymbol, MemFlags, Opcode,
        TrapCode, Value, ValueList, condcodes::*, immediates::*, types::*,
    },
    isa::CallConv,
    machinst::{
        ArgPair, CallArgList, CallRetList, InstOutput, MachInst, VCodeConstant, VCodeConstantData,
    },
};
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use regalloc2::PReg;

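// Type aliases for the container and box types referenced by the
// ISLE-generated code.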
type BoxCallInfo = Box<CallInfo<CallInstDest>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<CallInstDest>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type BoxSymbolReloc = Box<SymbolReloc>;
type VecMInst = Vec<MInst>;
type VecMInstBuilder = Cell<Vec<MInst>>;
type VecArgPair = Vec<ArgPair>;

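/// Entry point for lowering a CLIF instruction to s390x machine instructions
/// through the ISLE-generated constructor.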
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    inst: Inst,
) -> Option<InstOutput> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

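/// Entry point for lowering a CLIF branch, with `targets` giving the
/// machine-code labels of the successor blocks.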
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

impl generated_code::Context for IsleContext<'_, '_, MInst, S390xBackend> {
    isle_lower_prelude_methods!();

    #[inline]
    fn call_inst_dest_direct(&mut self, name: ExternalName) -> CallInstDest {
        CallInstDest::Direct { name }
    }

    #[inline]
    fn call_inst_dest_indirect(&mut self, reg: Reg) -> CallInstDest {
        CallInstDest::Indirect { reg }
    }

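    // With the tail-call convention, the caller allocates the stack argument
    // area immediately before the call; other calling conventions reserve it
    // as part of the caller's fixed frame instead.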
    fn abi_emit_call_adjust_stack(&mut self, abi: Sig) -> Unit {
        let sig_data = &self.lower_ctx.sigs()[abi];
        if sig_data.call_conv() == CallConv::Tail {
            let arg_space = sig_data.sized_stack_arg_space();
            if arg_space > 0 {
                if self.backend.flags.preserve_frame_pointers() {
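                    // Keep the backchain intact: load the backchain word at
                    // the current stack pointer, then store it at the new
                    // stack pointer once `AllocateArgs` has adjusted SP.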
89 let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
90 let src_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
91 let dst_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
92 self.emit(&MInst::Load64 {
93 rd: tmp,
94 mem: src_mem,
95 });
96 self.emit(&MInst::AllocateArgs { size: arg_space });
97 self.emit(&MInst::Store64 {
98 rd: tmp.to_reg(),
99 mem: dst_mem,
100 });
101 } else {
102 self.emit(&MInst::AllocateArgs { size: arg_space });
103 }
104 }
105 }
106 }
107
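    // For return (tail) calls, the argument area extends the current frame
    // downward, so when frame pointers are preserved the backchain word is
    // copied from the initial stack pointer to the new bottom of the frame.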
    fn abi_emit_return_call_adjust_stack(&mut self, abi: Sig) -> Unit {
        let sig_data = &self.lower_ctx.sigs()[abi];
        let arg_space = sig_data.sized_stack_arg_space();
        if arg_space > 0 && self.backend.flags.preserve_frame_pointers() {
            let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
            let src_mem = MemArg::InitialSPOffset { off: 0 };
            let dst_mem = MemArg::InitialSPOffset {
                off: -(arg_space as i64),
            };
            self.emit(&MInst::Load64 {
                rd: tmp,
                mem: src_mem,
            });
            self.emit(&MInst::Store64 {
                rd: tmp.to_reg(),
                mem: dst_mem,
            });
        }
    }

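    // Put all call arguments into registers, reversing vector lanes where the
    // callee's calling convention uses the opposite lane order from ours.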
    fn abi_prepare_args(&mut self, abi: Sig, (list, off): ValueSlice) -> ValueRegsVec {
        let lane_order = LaneOrder::from(self.lower_ctx.sigs()[abi].call_conv());
        let lane_swap_needed = self.lane_order() != lane_order;

        (off..list.len(&self.lower_ctx.dfg().value_lists))
            .map(|ix| {
                let val = list.get(ix, &self.lower_ctx.dfg().value_lists).unwrap();
                let ty = self.lower_ctx.dfg().value_type(val);
                let regs = self.put_in_regs(val);

                if lane_swap_needed && ty.is_vector() && ty.lane_count() >= 2 {
                    let tmp_regs = self.lower_ctx.alloc_tmp(ty);
                    self.emit(&MInst::VecEltRev {
                        lane_count: ty.lane_count(),
                        rd: tmp_regs.only_reg().unwrap(),
                        rn: regs.only_reg().unwrap(),
                    });
                    non_writable_value_regs(tmp_regs)
                } else {
                    regs
                }
            })
            .collect()
    }

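    // Build the boxed `CallInfo` for a call, first accumulating the outgoing
    // stack space it needs: the register save area plus the return-value
    // area, plus the argument area except under the tail-call convention,
    // where the caller allocates arguments separately (see
    // `abi_emit_call_adjust_stack` above).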
    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        let total_space = if self.lower_ctx.sigs()[sig].call_conv() != CallConv::Tail {
            REG_SAVE_AREA_SIZE + stack_arg_space + stack_ret_space
        } else {
            REG_SAVE_AREA_SIZE + stack_ret_space
        };
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(total_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, patchable),
        )
    }

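    // Build the boxed `ReturnCallInfo` for a tail call; the stack argument
    // area is popped by the callee, so its size is tracked via
    // `accumulate_tail_args_size`.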
    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let callee_pop_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(callee_pop_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            callee_pop_size,
        })
    }

    fn abi_for_elf_tls_get_offset(&mut self) {
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(REG_SAVE_AREA_SIZE);
    }

    #[inline]
    fn box_symbol_reloc(&mut self, symbol_reloc: &SymbolReloc) -> BoxSymbolReloc {
        Box::new(symbol_reloc.clone())
    }

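    // ISA-feature predicates used as ISLE matchers: each returns `Some(())`
    // exactly when the corresponding facility flag is (or is not) enabled.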
    #[inline]
    fn mie3_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_mie3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie3_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_mie3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie4_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_mie4() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie4_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_mie4() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext3_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_vxrs_ext3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext3_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_vxrs_ext3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn writable_gpr(&mut self, regno: u8) -> WritableReg {
        writable_gpr(regno)
    }

    #[inline]
    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    #[inline]
    fn gpr32_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn gpr64_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I64 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn vr128_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 | F128 => Some(ty),
            _ if ty.is_vector() && ty.bits() == 128 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn uimm32shifted(&mut self, n: u32, shift: u8) -> UImm32Shifted {
        UImm32Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn uimm16shifted(&mut self, n: u16, shift: u8) -> UImm16Shifted {
        UImm16Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn i64_nonequal(&mut self, val: i64, cmp: i64) -> Option<i64> {
        if val != cmp { Some(val) } else { None }
    }

    #[inline]
    fn u64_pair_split(&mut self, n: u128) -> (u64, u64) {
        ((n >> 64) as u64, n as u64)
    }

    #[inline]
    fn u64_pair_concat(&mut self, hi: u64, lo: u64) -> u128 {
        (hi as u128) << 64 | (lo as u128)
    }

    #[inline]
    fn u32_pair_split(&mut self, n: u64) -> (u32, u32) {
        ((n >> 32) as u32, n as u32)
    }

    #[inline]
    fn u32_pair_concat(&mut self, hi: u32, lo: u32) -> u64 {
        (hi as u64) << 32 | (lo as u64)
    }

    #[inline]
    fn u16_pair_split(&mut self, n: u32) -> (u16, u16) {
        ((n >> 16) as u16, n as u16)
    }

    #[inline]
    fn u16_pair_concat(&mut self, hi: u16, lo: u16) -> u32 {
        (hi as u32) << 16 | (lo as u32)
    }

    #[inline]
    fn u8_pair_split(&mut self, n: u16) -> (u8, u8) {
        ((n >> 8) as u8, n as u8)
    }

    #[inline]
    fn u8_pair_concat(&mut self, hi: u8, lo: u8) -> u16 {
        (hi as u16) << 8 | (lo as u16)
    }

    #[inline]
    fn u64_nonzero_hipart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0xffff_ffff_0000_0000;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn u64_nonzero_lopart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0x0000_0000_ffff_ffff;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn uimm32shifted_from_u64(&mut self, n: u64) -> Option<UImm32Shifted> {
        UImm32Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn uimm16shifted_from_u64(&mut self, n: u64) -> Option<UImm16Shifted> {
        UImm16Shifted::maybe_from_u64(n)
    }

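    // Lane order used for vector values in the current function, derived
    // from its calling convention.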
    #[inline]
    fn lane_order(&mut self) -> LaneOrder {
        LaneOrder::from(self.lower_ctx.abi().call_conv())
    }

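    // Map a CLIF lane index to a big-endian (hardware) lane index: with
    // little-endian lane order the index is mirrored across the vector;
    // with big-endian lane order it is unchanged.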
    #[inline]
    fn be_lane_idx(&mut self, ty: Type, idx: u8) -> u8 {
        match self.lane_order() {
            LaneOrder::LittleEndian => ty.lane_count() as u8 - 1 - idx,
            LaneOrder::BigEndian => idx,
        }
    }

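    // Reorder a 128-bit vector constant to match the current lane order by
    // reversing its lanes when big-endian lane order is in effect.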
    #[inline]
    fn be_vec_const(&mut self, ty: Type, n: u128) -> u128 {
        match self.lane_order() {
            LaneOrder::LittleEndian => n,
            LaneOrder::BigEndian if ty.lane_count() == 1 => n,
            LaneOrder::BigEndian => {
                let lane_count = ty.lane_count();
                let lane_bits = ty.lane_bits();
                let lane_mask = (1u128 << lane_bits) - 1;
                let mut n_le = n;
                let mut n_be = 0u128;
                for _ in 0..lane_count {
                    n_be = (n_be << lane_bits) | (n_le & lane_mask);
                    n_le = n_le >> lane_bits;
                }
                n_be
            }
        }
    }

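    // Byte mask (one bit per vector byte, most-significant bit first)
    // covering the bytes of lane `idx`; e.g. lane 1 of an I32X4 yields
    // 0x0f00.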
    #[inline]
    fn lane_byte_mask(&mut self, ty: Type, idx: u8) -> u16 {
        let lane_bytes = (ty.lane_bits() / 8) as u8;
        let lane_mask = (1u16 << lane_bytes) - 1;
        lane_mask << (16 - ((idx + 1) * lane_bytes))
    }

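    // Translate a CLIF `shuffle` mask into a hardware permute mask plus an
    // AND mask: indices that select a source byte are remapped to the
    // machine byte numbering, while out-of-range indices (>= 32, which must
    // produce zero) become 128 in the permute mask and are cleared by the
    // AND mask.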
    #[inline]
    fn shuffle_mask_from_u128(&mut self, idx: u128) -> (u128, u16) {
        let bytes = match self.lane_order() {
            LaneOrder::LittleEndian => idx.to_be_bytes().map(|x| {
                if x < 16 {
                    15 - x
                } else if x < 32 {
                    47 - x
                } else {
                    128
                }
            }),
            LaneOrder::BigEndian => idx.to_le_bytes().map(|x| if x < 32 { x } else { 128 }),
        };
        let and_mask = bytes.iter().fold(0, |acc, &x| (acc << 1) | (x < 32) as u16);
        let permute_mask = u128::from_be_bytes(bytes);
        (permute_mask, and_mask)
    }

    #[inline]
    fn u64_from_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u64_from_inverted_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(!constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u32_from_value(&mut self, val: Value) -> Option<u32> {
        let constant = self.u64_from_value(val)?;
        let imm = u32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u8_from_value(&mut self, val: Value) -> Option<u8> {
        let constant = self.u64_from_value(val)?;
        let imm = u8::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u64_from_signed_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(sign_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn i64_from_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        Some(constant)
    }

    #[inline]
    fn i32_from_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_swapped_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm.swap_bytes())
    }

    #[inline]
    fn i64_from_negated_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = constant.wrapping_neg();
        Some(imm)
    }

    #[inline]
    fn i32_from_negated_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_negated_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn uimm16shifted_from_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm16Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm32shifted_from_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm32Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm16shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm16Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn uimm32shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm32Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn len_minus_one(&mut self, len: u64) -> Option<u8> {
        if len > 0 && len <= 256 {
            Some((len - 1) as u8)
        } else {
            None
        }
    }

    #[inline]
    fn mask_amt_imm(&mut self, ty: Type, amt: i64) -> u8 {
        let mask = ty.lane_bits() - 1;
        (amt as u8) & (mask as u8)
    }

    #[inline]
    fn mask_as_cond(&mut self, mask: u8) -> Cond {
        Cond::from_mask(mask)
    }

    #[inline]
    fn intcc_as_cond(&mut self, cc: &IntCC) -> Cond {
        Cond::from_intcc(*cc)
    }

    #[inline]
    fn floatcc_as_cond(&mut self, cc: &FloatCC) -> Cond {
        Cond::from_floatcc(*cc)
    }

    #[inline]
    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        Cond::invert(*cond)
    }

    #[inline]
    fn signed(&mut self, cc: &IntCC) -> Option<()> {
        if condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn unsigned(&mut self, cc: &IntCC) -> Option<()> {
        if !condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn zero_offset(&mut self) -> Offset32 {
        Offset32::new(0)
    }

    #[inline]
    fn i64_from_offset(&mut self, off: Offset32) -> i64 {
        i64::from(off)
    }

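    // Bounds for float-to-integer range checks, returned as IEEE bit
    // patterns: a conversion to an N-bit unsigned integer is in range for
    // inputs strictly between -1.0 and 2^N; the signed variants below bound
    // the range with 2^(N-1) and the largest float strictly below -2^(N-1).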
    #[inline]
    fn fcvt_to_uint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi(size.into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_lb32(&mut self) -> u64 {
        (-1.0_f32).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi(size.into()).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_lb64(&mut self) -> u64 {
        (-1.0_f64).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size).bits()
    }

    #[inline]
    fn fcvt_to_uint_lb128(&mut self) -> u128 {
        (-Ieee128::pow2(0)).bits()
    }

    #[inline]
    fn fcvt_to_sint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi((size - 1).into()).to_bits() as u64
    }

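    // The signed lower bound is the largest float strictly below
    // -2^(size-1). Taking the max of the two candidate bit patterns handles
    // both cases: when -2^(size-1) - 1 is exactly representable it wins;
    // otherwise the next float toward negative infinity does. (For negative
    // floats, a larger bit pattern means a more negative value.)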
    #[inline]
    fn fcvt_to_sint_lb32(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f32).powi((size - 1).into());
        core::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits()) as u64
    }

    #[inline]
    fn fcvt_to_sint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi((size - 1).into()).to_bits()
    }

    #[inline]
    fn fcvt_to_sint_lb64(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f64).powi((size - 1).into());
        core::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits())
    }

    #[inline]
    fn fcvt_to_sint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size - 1).bits()
    }

    #[inline]
    fn fcvt_to_sint_lb128(&mut self, size: u8) -> u128 {
        Ieee128::fcvt_to_sint_negative_overflow(size).bits()
    }

    #[inline]
    fn littleendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Little {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn bigendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Big {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn memflags_trusted(&mut self) -> MemFlags {
        MemFlags::trusted()
    }

    #[inline]
    fn memarg_imm_from_offset(&mut self, imm: Offset32) -> Option<SImm20> {
        SImm20::maybe_from_i64(i64::from(imm))
    }

    #[inline]
    fn memarg_imm_from_offset_plus_bias(&mut self, imm: Offset32, bias: u8) -> Option<SImm20> {
        let final_offset = i64::from(imm) + bias as i64;
        SImm20::maybe_from_i64(final_offset)
    }

    #[inline]
    fn memarg_reg_plus_reg(&mut self, x: Reg, y: Reg, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::BXD12 {
            base: x,
            index: y,
            disp: UImm12::maybe_from_u64(bias as u64).unwrap(),
            flags,
        }
    }

    #[inline]
    fn memarg_reg_plus_reg_plus_off(
        &mut self,
        x: Reg,
        y: Reg,
        offset: &SImm20,
        flags: MemFlags,
    ) -> MemArg {
        if let Some(imm) = UImm12::maybe_from_simm20(*offset) {
            MemArg::BXD12 {
                base: x,
                index: y,
                disp: imm,
                flags,
            }
        } else {
            MemArg::BXD20 {
                base: x,
                index: y,
                disp: *offset,
                flags,
            }
        }
    }

    #[inline]
    fn memarg_reg_plus_off(&mut self, reg: Reg, off: i64, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::reg_plus_off(reg, off + (bias as i64), flags)
    }

    #[inline]
    fn memarg_symbol(&mut self, name: ExternalName, offset: i32, flags: MemFlags) -> MemArg {
        MemArg::Symbol {
            name: Box::new(name),
            offset,
            flags,
        }
    }

    #[inline]
    fn memarg_got(&mut self) -> MemArg {
        MemArg::Symbol {
            name: Box::new(ExternalName::KnownSymbol(KnownSymbol::ElfGlobalOffsetTable)),
            offset: 0,
            flags: MemFlags::trusted(),
        }
    }

    #[inline]
    fn memarg_const(&mut self, constant: VCodeConstant) -> MemArg {
        MemArg::Constant { constant }
    }

    #[inline]
    fn memarg_symbol_offset_sum(&mut self, off1: i64, off2: i64) -> Option<i32> {
        let off = i32::try_from(off1 + off2).ok()?;
        if off & 1 == 0 { Some(off) } else { None }
    }

    #[inline]
    fn memarg_frame_pointer_offset(&mut self) -> MemArg {
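        // The frame pointer (backchain) is stored at offset 0 from the
        // current stack pointer.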
        MemArg::reg(stack_reg(), MemFlags::trusted())
    }

    #[inline]
    fn memarg_return_address_offset(&mut self) -> MemArg {
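        // The return address register %r14 is saved at offset 14 * 8 in the
        // register save area, relative to the initial stack pointer.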
        MemArg::InitialSPOffset { off: 14 * 8 }
    }

    #[inline]
    fn inst_builder_new(&mut self) -> VecMInstBuilder {
        Cell::new(Vec::<MInst>::new())
    }

    #[inline]
    fn inst_builder_push(&mut self, builder: &VecMInstBuilder, inst: &MInst) -> Unit {
        let mut vec = builder.take();
        vec.push(inst.clone());
        builder.set(vec);
    }

    #[inline]
    fn inst_builder_finish(&mut self, builder: &VecMInstBuilder) -> Vec<MInst> {
        builder.take()
    }

    #[inline]
    fn real_reg(&mut self, reg: WritableReg) -> Option<WritableReg> {
        if reg.to_reg().is_real() {
            Some(reg)
        } else {
            None
        }
    }

    #[inline]
    fn same_reg(&mut self, dst: WritableReg, src: Reg) -> Option<Reg> {
        if dst.to_reg() == src { Some(src) } else { None }
    }

    #[inline]
    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    #[inline]
    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    #[inline]
    fn preg_stack(&mut self) -> PReg {
        stack_reg().to_real_reg().unwrap().into()
    }

    #[inline]
    fn preg_gpr_0(&mut self) -> PReg {
        gpr(0).to_real_reg().unwrap().into()
    }

    #[inline]
    fn writable_regpair(&mut self, hi: WritableReg, lo: WritableReg) -> WritableRegPair {
        WritableRegPair { hi, lo }
    }

    #[inline]
    fn writable_regpair_hi(&mut self, w: WritableRegPair) -> WritableReg {
        w.hi
    }

    #[inline]
    fn writable_regpair_lo(&mut self, w: WritableRegPair) -> WritableReg {
        w.lo
    }

    #[inline]
    fn regpair(&mut self, hi: Reg, lo: Reg) -> RegPair {
        RegPair { hi, lo }
    }

    #[inline]
    fn regpair_hi(&mut self, w: RegPair) -> Reg {
        w.hi
    }

    #[inline]
    fn regpair_lo(&mut self, w: RegPair) -> Reg {
        w.lo
    }
}

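/// Zero-extend the low `from_bits` bits of `value` to a full `u64`,
/// e.g. `zero_extend_to_u64(0xff, 4) == 0xf`.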
#[inline]
fn zero_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        value & ((1u64 << from_bits) - 1)
    }
}

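/// Sign-extend the low `from_bits` bits of `value` to a full `u64`,
/// e.g. `sign_extend_to_u64(0x80, 8) == 0xffff_ffff_ffff_ff80`.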
#[inline]
fn sign_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        (((value << (64 - from_bits)) as i64) >> (64 - from_bits)) as u64
    }
}

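/// Whether the given condition code interprets its operands as signed
/// integers. The match is exhaustive so that any new condition code must be
/// classified explicitly.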
#[inline]
fn condcode_is_signed(cc: IntCC) -> bool {
    match cc {
        IntCC::Equal => false,
        IntCC::NotEqual => false,
        IntCC::SignedGreaterThanOrEqual => true,
        IntCC::SignedGreaterThan => true,
        IntCC::SignedLessThanOrEqual => true,
        IntCC::SignedLessThan => true,
        IntCC::UnsignedGreaterThanOrEqual => false,
        IntCC::UnsignedGreaterThan => false,
        IntCC::UnsignedLessThanOrEqual => false,
        IntCC::UnsignedLessThan => false,
    }
}
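
// Illustrative sanity checks for the standalone helpers above (a minimal
// sketch; the test names and values are our own additions).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn zero_extend_masks_high_bits() {
        assert_eq!(zero_extend_to_u64(0xff, 4), 0xf);
        assert_eq!(zero_extend_to_u64(u64::MAX, 64), u64::MAX);
    }

    #[test]
    fn sign_extend_propagates_sign_bit() {
        assert_eq!(sign_extend_to_u64(0x80, 8), 0xffff_ffff_ffff_ff80);
        assert_eq!(sign_extend_to_u64(0x7f, 8), 0x7f);
    }

    #[test]
    fn only_signed_comparisons_are_signed() {
        assert!(condcode_is_signed(IntCC::SignedLessThan));
        assert!(!condcode_is_signed(IntCC::UnsignedLessThan));
        assert!(!condcode_is_signed(IntCC::Equal));
    }
}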