pub mod generated_code;

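// Types that the generated ISLE code uses via `use super::*`.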
use crate::ir::ExternalName;
use crate::isa::s390x::abi::{S390xMachineDeps, REG_SAVE_AREA_SIZE};
use crate::isa::s390x::inst::{
    gpr, stack_reg, writable_gpr, zero_reg, Cond, Inst as MInst, LaneOrder, MemArg, RegPair,
    ReturnCallInfo, SymbolReloc, UImm12, UImm16Shifted, UImm32Shifted, WritableRegPair,
};
use crate::isa::s390x::S390xBackend;
use crate::machinst::isle::*;
use crate::machinst::{CallInfo, MachLabel, Reg};
use crate::{
    ir::{
        condcodes::*, immediates::*, types::*, ArgumentExtension, ArgumentPurpose, AtomicRmwOp,
        BlockCall, Endianness, Inst, InstructionData, KnownSymbol, MemFlags, Opcode, TrapCode,
        Value, ValueList,
    },
    isa::CallConv,
    machinst::abi::ABIMachineSpec,
    machinst::{
        ArgPair, CallArgList, CallArgPair, CallRetList, CallRetPair, InstOutput, MachInst,
        VCodeConstant, VCodeConstantData,
    },
};
use regalloc2::PReg;
use smallvec::smallvec;
use std::boxed::Box;
use std::cell::Cell;
use std::vec::Vec;

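// Type aliases referenced by the generated ISLE code.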
type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type BoxSymbolReloc = Box<SymbolReloc>;
type VecMInst = Vec<MInst>;
type VecMInstBuilder = Cell<Vec<MInst>>;
type VecArgPair = Vec<ArgPair>;
type CallArgListBuilder = Cell<CallArgList>;

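/// The main entry point for lowering with ISLE.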
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    inst: Inst,
) -> Option<InstOutput> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

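/// The main entry point for branch lowering with ISLE.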
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

impl generated_code::Context for IsleContext<'_, '_, MInst, S390xBackend> {
    isle_lower_prelude_methods!();

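    // Return-call hooks required by the shared ISLE prelude; left unimplemented here.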
    fn gen_return_call(
        &mut self,
        callee_sig: SigRef,
        callee: ExternalName,
        distance: RelocDistance,
        args: ValueSlice,
    ) -> InstOutput {
        let _ = (callee_sig, callee, distance, args);
        todo!()
    }

    fn gen_return_call_indirect(
        &mut self,
        callee_sig: SigRef,
        callee: Value,
        args: ValueSlice,
    ) -> InstOutput {
        let _ = (callee_sig, callee, args);
        todo!()
    }

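    // Builder for the list of (vreg, preg) argument pairs of a call. A `Cell` is used since
    // ISLE passes the builder around by shared reference.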
    #[inline]
    fn args_builder_new(&mut self) -> CallArgListBuilder {
        Cell::new(CallArgList::new())
    }

    #[inline]
    fn args_builder_push(
        &mut self,
        builder: &CallArgListBuilder,
        vreg: Reg,
        preg: RealReg,
    ) -> Unit {
        let mut args = builder.take();
        args.push(CallArgPair {
            vreg,
            preg: preg.into(),
        });
        builder.set(args);
    }

    #[inline]
    fn args_builder_finish(&mut self, builder: &CallArgListBuilder) -> CallArgList {
        builder.take()
    }

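    // Allocate a temporary vreg for each register return value of the signature, skipping the
    // implicit StructReturn pointer; values returned on the stack are not covered here.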
    fn defs_init(&mut self, abi: Sig) -> CallRetList {
        let mut defs = smallvec![];
        for i in 0..self.lower_ctx.sigs().num_rets(abi) {
            if let &ABIArg::Slots {
                ref slots, purpose, ..
            } = &self.lower_ctx.sigs().get_ret(abi, i)
            {
                if purpose == ArgumentPurpose::StructReturn {
                    continue;
                }
                for slot in slots {
                    match slot {
                        &ABIArgSlot::Reg { reg, ty, .. } => {
                            let value_regs = self.lower_ctx.alloc_tmp(ty);
                            defs.push(CallRetPair {
                                vreg: value_regs.only_reg().unwrap(),
                                preg: reg.into(),
                            });
                        }
                        _ => {}
                    }
                }
            }
        }
        defs
    }

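    // Find the temporary vreg that `defs_init` assigned to the given physical return register.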
    fn defs_lookup(&mut self, defs: &CallRetList, reg: RealReg) -> Reg {
        let reg = Reg::from(reg);
        for def in defs {
            if def.preg == reg {
                return def.vreg.to_reg();
            }
        }
        unreachable!()
    }

    fn abi_sig(&mut self, sig_ref: SigRef) -> Sig {
        self.lower_ctx.sigs().abi_sig_for_sig_ref(sig_ref)
    }

    fn abi_first_ret(&mut self, sig_ref: SigRef, abi: Sig) -> usize {
        // Any extra return values added by the ABI precede the IR-level returns, which are the
        // trailing `sig.returns.len()` entries.
        let sig = &self.lower_ctx.dfg().signatures[sig_ref];
        self.lower_ctx.sigs().num_rets(abi) - sig.returns.len()
    }

    fn abi_lane_order(&mut self, abi: Sig) -> LaneOrder {
        lane_order_for_call_conv(self.lower_ctx.sigs()[abi].call_conv())
    }

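    // Location of the stack argument area for an outgoing call.
    //
    // With the platform ABIs, outgoing arguments live at the bottom of the caller's frame, so
    // their size is simply accumulated into this function's outgoing-argument area. With the
    // `tail` calling convention, the argument area is allocated explicitly before the call;
    // if frame pointers are preserved, the word at the stack bottom (the back chain) is
    // re-stored at the new stack bottom after the allocation.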
    fn abi_call_stack_args(&mut self, abi: Sig) -> MemArg {
        let sig_data = &self.lower_ctx.sigs()[abi];
        if sig_data.call_conv() != CallConv::Tail {
            let arg_space = sig_data.sized_stack_arg_space() as u32;
            self.lower_ctx
                .abi_mut()
                .accumulate_outgoing_args_size(arg_space);
            MemArg::reg_plus_off(stack_reg(), 0, MemFlags::trusted())
        } else {
            let arg_space = sig_data.sized_stack_arg_space() as u32;
            if arg_space > 0 {
                if self.backend.flags.preserve_frame_pointers() {
                    let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
                    let src_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
                    let dst_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
                    self.emit(&MInst::Load64 {
                        rd: tmp,
                        mem: src_mem,
                    });
                    self.emit(&MInst::AllocateArgs { size: arg_space });
                    self.emit(&MInst::Store64 {
                        rd: tmp.to_reg(),
                        mem: dst_mem,
                    });
                } else {
                    self.emit(&MInst::AllocateArgs { size: arg_space });
                }
            }
            MemArg::reg_plus_off(stack_reg(), arg_space.into(), MemFlags::trusted())
        }
    }

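    // Location of the stack return-value area for an outgoing call: just above the stack
    // argument area with the platform ABIs, or above the register save area with the `tail`
    // calling convention.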
    fn abi_call_stack_rets(&mut self, abi: Sig) -> MemArg {
        let sig_data = &self.lower_ctx.sigs()[abi];
        if sig_data.call_conv() != CallConv::Tail {
            let arg_space = sig_data.sized_stack_arg_space() as u32;
            let ret_space = sig_data.sized_stack_ret_space() as u32;
            self.lower_ctx
                .abi_mut()
                .accumulate_outgoing_args_size(arg_space + ret_space);
            MemArg::reg_plus_off(stack_reg(), arg_space.into(), MemFlags::trusted())
        } else {
            let ret_space = sig_data.sized_stack_ret_space() as u32;
            self.lower_ctx
                .abi_mut()
                .accumulate_outgoing_args_size(REG_SAVE_AREA_SIZE + ret_space);
            MemArg::NominalSPOffset {
                off: REG_SAVE_AREA_SIZE as i64,
            }
        }
    }

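    // Stack argument area for a return (tail) call. Tail-call arguments are addressed
    // relative to the SP at function entry; if there are any stack arguments, the 8-byte word
    // at the incoming SP is first copied below the new argument area.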
    fn abi_return_call_stack_args(&mut self, abi: Sig) -> MemArg {
        let sig_data = &self.lower_ctx.sigs()[abi];
        let arg_space = sig_data.sized_stack_arg_space() as u32;
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(arg_space);
        if arg_space > 0 {
            let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
            let src_mem = MemArg::InitialSPOffset { off: 0 };
            let dst_mem = MemArg::InitialSPOffset {
                off: -(arg_space as i64),
            };
            self.emit(&MInst::Load64 {
                rd: tmp,
                mem: src_mem,
            });
            self.emit(&MInst::Store64 {
                rd: tmp.to_reg(),
                mem: dst_mem,
            });
        }
        MemArg::InitialSPOffset { off: 0 }
    }

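    // Package up the boxed `CallInfo` / `ReturnCallInfo` payloads carried by the call and
    // return-call pseudo-instructions.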
    fn abi_call_info(
        &mut self,
        abi: Sig,
        dest: ExternalName,
        uses: &CallArgList,
        defs: &CallRetList,
    ) -> BoxCallInfo {
        Box::new(self.abi_call_info_no_dest(abi, uses, defs).map(|()| dest))
    }

    fn abi_call_ind_info(
        &mut self,
        abi: Sig,
        dest: Reg,
        uses: &CallArgList,
        defs: &CallRetList,
    ) -> BoxCallIndInfo {
        Box::new(self.abi_call_info_no_dest(abi, uses, defs).map(|()| dest))
    }

    fn abi_return_call_info(
        &mut self,
        abi: Sig,
        name: ExternalName,
        uses: &CallArgList,
    ) -> BoxReturnCallInfo {
        let sig_data = &self.lower_ctx.sigs()[abi];
        let callee_pop_size = sig_data.sized_stack_arg_space() as u32;
        Box::new(ReturnCallInfo {
            dest: name.clone(),
            uses: uses.clone(),
            callee_pop_size,
        })
    }

    fn abi_return_call_ind_info(
        &mut self,
        abi: Sig,
        target: Reg,
        uses: &CallArgList,
    ) -> BoxReturnCallIndInfo {
        let sig_data = &self.lower_ctx.sigs()[abi];
        let callee_pop_size = sig_data.sized_stack_arg_space() as u32;
        Box::new(ReturnCallInfo {
            dest: target,
            uses: uses.clone(),
            callee_pop_size,
        })
    }

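    // The ELF TLS model performs a call to `__tls_get_offset`, so account for that call's
    // register save area in the outgoing-argument space.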
    fn abi_for_elf_tls_get_offset(&mut self) {
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(REG_SAVE_AREA_SIZE);
    }

    #[inline]
    fn box_symbol_reloc(&mut self, symbol_reloc: &SymbolReloc) -> BoxSymbolReloc {
        Box::new(symbol_reloc.clone())
    }

    #[inline]
    fn mie2_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_mie2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie2_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_mie2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn writable_gpr(&mut self, regno: u8) -> WritableReg {
        writable_gpr(regno)
    }

    #[inline]
    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    #[inline]
    fn gpr32_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn gpr64_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I64 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn vr128_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 => Some(ty),
            _ if ty.is_vector() && ty.bits() == 128 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn uimm32shifted(&mut self, n: u32, shift: u8) -> UImm32Shifted {
        UImm32Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn uimm16shifted(&mut self, n: u16, shift: u8) -> UImm16Shifted {
        UImm16Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn i64_nonequal(&mut self, val: i64, cmp: i64) -> Option<i64> {
        if val != cmp {
            Some(val)
        } else {
            None
        }
    }

    #[inline]
    fn u64_pair_split(&mut self, n: u128) -> (u64, u64) {
        ((n >> 64) as u64, n as u64)
    }

    #[inline]
    fn u64_pair_concat(&mut self, hi: u64, lo: u64) -> u128 {
        (hi as u128) << 64 | (lo as u128)
    }

    #[inline]
    fn u32_pair_split(&mut self, n: u64) -> (u32, u32) {
        ((n >> 32) as u32, n as u32)
    }

    #[inline]
    fn u32_pair_concat(&mut self, hi: u32, lo: u32) -> u64 {
        (hi as u64) << 32 | (lo as u64)
    }

    #[inline]
    fn u16_pair_split(&mut self, n: u32) -> (u16, u16) {
        ((n >> 16) as u16, n as u16)
    }

    #[inline]
    fn u16_pair_concat(&mut self, hi: u16, lo: u16) -> u32 {
        (hi as u32) << 16 | (lo as u32)
    }

    #[inline]
    fn u8_pair_split(&mut self, n: u16) -> (u8, u8) {
        ((n >> 8) as u8, n as u8)
    }

    #[inline]
    fn u8_pair_concat(&mut self, hi: u8, lo: u8) -> u16 {
        (hi as u16) << 8 | (lo as u16)
    }

    #[inline]
    fn u8_as_u16(&mut self, n: u8) -> u16 {
        n as u16
    }

    #[inline]
    fn u64_truncate_to_u32(&mut self, n: u64) -> u32 {
        n as u32
    }

    #[inline]
    fn u64_as_i16(&mut self, n: u64) -> i16 {
        n as i16
    }

    #[inline]
    fn u64_nonzero_hipart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0xffff_ffff_0000_0000;
        if part != 0 {
            Some(part)
        } else {
            None
        }
    }

    #[inline]
    fn u64_nonzero_lopart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0x0000_0000_ffff_ffff;
        if part != 0 {
            Some(part)
        } else {
            None
        }
    }

    #[inline]
    fn i32_from_u64(&mut self, n: u64) -> Option<i32> {
        if let Ok(imm) = i32::try_from(n as i64) {
            Some(imm)
        } else {
            None
        }
    }

    #[inline]
    fn i16_from_u64(&mut self, n: u64) -> Option<i16> {
        if let Ok(imm) = i16::try_from(n as i64) {
            Some(imm)
        } else {
            None
        }
    }

    #[inline]
    fn i16_from_u32(&mut self, n: u32) -> Option<i16> {
        if let Ok(imm) = i16::try_from(n as i32) {
            Some(imm)
        } else {
            None
        }
    }

    #[inline]
    fn uimm32shifted_from_u64(&mut self, n: u64) -> Option<UImm32Shifted> {
        UImm32Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn uimm16shifted_from_u64(&mut self, n: u64) -> Option<UImm16Shifted> {
        UImm16Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn lane_order(&mut self) -> LaneOrder {
        lane_order_for_call_conv(self.lower_ctx.abi().call_conv(self.lower_ctx.sigs()))
    }

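    // Map an IR lane index to the corresponding big-endian (hardware) lane index under the
    // current lane order.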
    #[inline]
    fn be_lane_idx(&mut self, ty: Type, idx: u8) -> u8 {
        match self.lane_order() {
            LaneOrder::LittleEndian => ty.lane_count() as u8 - 1 - idx,
            LaneOrder::BigEndian => idx,
        }
    }

    #[inline]
    fn be_vec_const(&mut self, ty: Type, n: u128) -> u128 {
        match self.lane_order() {
            LaneOrder::LittleEndian => n,
            LaneOrder::BigEndian => {
                let lane_count = ty.lane_count();
                let lane_bits = ty.lane_bits();
                let lane_mask = (1u128 << lane_bits) - 1;
                let mut n_le = n;
                let mut n_be = 0u128;
                for _ in 0..lane_count {
                    n_be = (n_be << lane_bits) | (n_le & lane_mask);
                    n_le = n_le >> lane_bits;
                }
                n_be
            }
        }
    }

    #[inline]
    fn lane_byte_mask(&mut self, ty: Type, idx: u8) -> u16 {
        let lane_bytes = (ty.lane_bits() / 8) as u8;
        let lane_mask = (1u16 << lane_bytes) - 1;
        lane_mask << (16 - ((idx + 1) * lane_bytes))
    }

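    // Convert a CLIF shuffle immediate into a byte-permute mask (selecting from two
    // concatenated input registers) plus an AND mask that zeroes any byte whose source index
    // is out of range (>= 32).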
    #[inline]
    fn shuffle_mask_from_u128(&mut self, idx: u128) -> (u128, u16) {
        let bytes = match self.lane_order() {
            LaneOrder::LittleEndian => idx.to_be_bytes().map(|x| {
                if x < 16 {
                    15 - x
                } else if x < 32 {
                    47 - x
                } else {
                    128
                }
            }),
            LaneOrder::BigEndian => idx.to_le_bytes().map(|x| if x < 32 { x } else { 128 }),
        };
        let and_mask = bytes.iter().fold(0, |acc, &x| (acc << 1) | (x < 32) as u16);
        let permute_mask = u128::from_be_bytes(bytes);
        (permute_mask, and_mask)
    }

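    // Helpers to match constant values, and simple transformations of them, directly on the
    // defining instruction of an IR value.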
    #[inline]
    fn u64_from_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u64_from_inverted_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(!constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u32_from_value(&mut self, val: Value) -> Option<u32> {
        let constant = self.u64_from_value(val)?;
        let imm = u32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u8_from_value(&mut self, val: Value) -> Option<u8> {
        let constant = self.u64_from_value(val)?;
        let imm = u8::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u64_from_signed_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(sign_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn i64_from_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        Some(constant)
    }

    #[inline]
    fn i32_from_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_swapped_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm.swap_bytes())
    }

    #[inline]
    fn i64_from_negated_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = constant.wrapping_neg();
        Some(imm)
    }

    #[inline]
    fn i32_from_negated_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_negated_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn uimm16shifted_from_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm16Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm32shifted_from_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm32Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm16shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm16Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn uimm32shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm32Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn len_minus_one(&mut self, len: u64) -> Option<u8> {
        if len > 0 && len <= 256 {
            Some((len - 1) as u8)
        } else {
            None
        }
    }

    #[inline]
    fn mask_amt_imm(&mut self, ty: Type, amt: i64) -> u8 {
        let mask = ty.lane_bits() - 1;
        (amt as u8) & (mask as u8)
    }

    #[inline]
    fn mask_as_cond(&mut self, mask: u8) -> Cond {
        Cond::from_mask(mask)
    }

    #[inline]
    fn intcc_as_cond(&mut self, cc: &IntCC) -> Cond {
        Cond::from_intcc(*cc)
    }

    #[inline]
    fn floatcc_as_cond(&mut self, cc: &FloatCC) -> Cond {
        Cond::from_floatcc(*cc)
    }

    #[inline]
    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        Cond::invert(*cond)
    }

    #[inline]
    fn signed(&mut self, cc: &IntCC) -> Option<()> {
        if condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn unsigned(&mut self, cc: &IntCC) -> Option<()> {
        if !condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn zero_offset(&mut self) -> Offset32 {
        Offset32::new(0)
    }

    #[inline]
    fn i64_from_offset(&mut self, off: Offset32) -> i64 {
        i64::from(off)
    }

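    // Bit patterns of the floating-point bounds used to detect out-of-range inputs when
    // lowering `fcvt_to_uint` / `fcvt_to_sint` to a `size`-bit integer result.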
    #[inline]
    fn fcvt_to_uint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi(size.into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_lb32(&mut self) -> u64 {
        (-1.0_f32).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi(size.into()).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_lb64(&mut self) -> u64 {
        (-1.0_f64).to_bits()
    }

    #[inline]
    fn fcvt_to_sint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi((size - 1).into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_sint_lb32(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f32).powi((size - 1).into());
        std::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits()) as u64
    }

    #[inline]
    fn fcvt_to_sint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi((size - 1).into()).to_bits()
    }

    #[inline]
    fn fcvt_to_sint_lb64(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f64).powi((size - 1).into());
        std::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits())
    }

    #[inline]
    fn littleendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Little {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn bigendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Big {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn memflags_trusted(&mut self) -> MemFlags {
        MemFlags::trusted()
    }

    #[inline]
    fn memarg_flags(&mut self, mem: &MemArg) -> MemFlags {
        mem.get_flags()
    }

    #[inline]
    fn memarg_offset(&mut self, base: &MemArg, offset: i64) -> MemArg {
        MemArg::offset(base, offset)
    }

    #[inline]
    fn memarg_reg_plus_reg(&mut self, x: Reg, y: Reg, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::BXD12 {
            base: x,
            index: y,
            disp: UImm12::maybe_from_u64(bias as u64).unwrap(),
            flags,
        }
    }

    #[inline]
    fn memarg_reg_plus_off(&mut self, reg: Reg, off: i64, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::reg_plus_off(reg, off + (bias as i64), flags)
    }

    #[inline]
    fn memarg_symbol(&mut self, name: ExternalName, offset: i32, flags: MemFlags) -> MemArg {
        MemArg::Symbol {
            name: Box::new(name),
            offset,
            flags,
        }
    }

    #[inline]
    fn memarg_got(&mut self) -> MemArg {
        MemArg::Symbol {
            name: Box::new(ExternalName::KnownSymbol(KnownSymbol::ElfGlobalOffsetTable)),
            offset: 0,
            flags: MemFlags::trusted(),
        }
    }

    #[inline]
    fn memarg_symbol_offset_sum(&mut self, off1: i64, off2: i64) -> Option<i32> {
        let off = i32::try_from(off1 + off2).ok()?;
        if off & 1 == 0 {
            Some(off)
        } else {
            None
        }
    }

    #[inline]
    fn memarg_frame_pointer_offset(&mut self) -> MemArg {
        MemArg::NominalSPOffset { off: 0 }
    }

    #[inline]
    fn memarg_return_address_offset(&mut self) -> MemArg {
        MemArg::InitialSPOffset { off: 14 * 8 }
    }

    #[inline]
    fn inst_builder_new(&mut self) -> VecMInstBuilder {
        Cell::new(Vec::<MInst>::new())
    }

    #[inline]
    fn inst_builder_push(&mut self, builder: &VecMInstBuilder, inst: &MInst) -> Unit {
        let mut vec = builder.take();
        vec.push(inst.clone());
        builder.set(vec);
    }

    #[inline]
    fn inst_builder_finish(&mut self, builder: &VecMInstBuilder) -> Vec<MInst> {
        builder.take()
    }

    #[inline]
    fn real_reg(&mut self, reg: WritableReg) -> Option<WritableReg> {
        if reg.to_reg().is_real() {
            Some(reg)
        } else {
            None
        }
    }

    #[inline]
    fn same_reg(&mut self, dst: WritableReg, src: Reg) -> Option<Reg> {
        if dst.to_reg() == src {
            Some(src)
        } else {
            None
        }
    }

    #[inline]
    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    #[inline]
    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    #[inline]
    fn preg_stack(&mut self) -> PReg {
        stack_reg().to_real_reg().unwrap().into()
    }

    #[inline]
    fn preg_gpr_0(&mut self) -> PReg {
        gpr(0).to_real_reg().unwrap().into()
    }

    #[inline]
    fn writable_regpair(&mut self, hi: WritableReg, lo: WritableReg) -> WritableRegPair {
        WritableRegPair { hi, lo }
    }

    #[inline]
    fn writable_regpair_hi(&mut self, w: WritableRegPair) -> WritableReg {
        w.hi
    }

    #[inline]
    fn writable_regpair_lo(&mut self, w: WritableRegPair) -> WritableReg {
        w.lo
    }

    #[inline]
    fn regpair(&mut self, hi: Reg, lo: Reg) -> RegPair {
        RegPair { hi, lo }
    }

    #[inline]
    fn regpair_hi(&mut self, w: RegPair) -> Reg {
        w.hi
    }

    #[inline]
    fn regpair_lo(&mut self, w: RegPair) -> Reg {
        w.lo
    }
}

impl IsleContext<'_, '_, MInst, S390xBackend> {
    fn abi_call_info_no_dest(
        &mut self,
        abi: Sig,
        uses: &CallArgList,
        defs: &CallRetList,
    ) -> CallInfo<()> {
        let sig_data = &self.lower_ctx.sigs()[abi];
        let clobbers = S390xMachineDeps::get_regs_clobbered_by_call(sig_data.call_conv());
        // With the `tail` calling convention, the callee pops its incoming stack arguments.
        let callee_pop_size = if sig_data.call_conv() == CallConv::Tail {
            sig_data.sized_stack_arg_space() as u32
        } else {
            0
        };
        CallInfo {
            dest: (),
            uses: uses.clone(),
            defs: defs.clone(),
            clobbers,
            callee_pop_size,
            caller_conv: self.lower_ctx.abi().call_conv(self.lower_ctx.sigs()),
            callee_conv: self.lower_ctx.sigs()[abi].call_conv(),
        }
    }
}
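/// Lane order to be used for a given calling convention: the `tail` calling convention uses
/// little-endian lane order, all other (platform) conventions use big-endian lane order.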
#[inline]
fn lane_order_for_call_conv(call_conv: CallConv) -> LaneOrder {
    match call_conv {
        CallConv::Tail => LaneOrder::LittleEndian,
        _ => LaneOrder::BigEndian,
    }
}

#[inline]
fn zero_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        value & ((1u64 << from_bits) - 1)
    }
}

#[inline]
fn sign_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        (((value << (64 - from_bits)) as i64) >> (64 - from_bits)) as u64
    }
}

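/// Whether the given integer condition code is a signed comparison; equality and unsigned
/// comparisons return `false`.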
#[inline]
fn condcode_is_signed(cc: IntCC) -> bool {
    match cc {
        IntCC::Equal => false,
        IntCC::NotEqual => false,
        IntCC::SignedGreaterThanOrEqual => true,
        IntCC::SignedGreaterThan => true,
        IntCC::SignedLessThanOrEqual => true,
        IntCC::SignedLessThan => true,
        IntCC::UnsignedGreaterThanOrEqual => false,
        IntCC::UnsignedGreaterThan => false,
        IntCC::UnsignedLessThanOrEqual => false,
        IntCC::UnsignedLessThan => false,
    }
}
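
// Minimal sanity checks for the standalone helper functions above. This is an illustrative
// sketch (assuming the crate's usual `cargo test` harness) rather than an exhaustive suite.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn extend_helpers() {
        // Zero-extension keeps only the low `from_bits` bits.
        assert_eq!(zero_extend_to_u64(0xffff_ffff_ffff_ffff, 8), 0xff);
        assert_eq!(zero_extend_to_u64(0x1234, 64), 0x1234);
        // Sign-extension replicates the top bit of the `from_bits`-wide value.
        assert_eq!(sign_extend_to_u64(0x8, 4), 0xffff_ffff_ffff_fff8);
        assert_eq!(sign_extend_to_u64(0x7, 4), 0x7);
    }

    #[test]
    fn condcode_signedness() {
        assert!(condcode_is_signed(IntCC::SignedLessThan));
        assert!(!condcode_is_signed(IntCC::UnsignedLessThan));
        assert!(!condcode_is_signed(IntCC::Equal));
    }
}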