pub mod generated_code;

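// Types that the generated ISLE code uses via `use super::*`.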
use crate::ir::ExternalName;
use crate::isa::s390x::S390xBackend;
use crate::isa::s390x::abi::REG_SAVE_AREA_SIZE;
use crate::isa::s390x::inst::{
    CallInstDest, Cond, Inst as MInst, LaneOrder, MemArg, RegPair, ReturnCallInfo, SymbolReloc,
    UImm12, UImm16Shifted, UImm32Shifted, WritableRegPair, gpr, stack_reg, writable_gpr, zero_reg,
};
use crate::machinst::isle::*;
use crate::machinst::{CallInfo, MachLabel, Reg, TryCallInfo, non_writable_value_regs};
use crate::{
    ir::{
        AtomicRmwOp, BlockCall, Endianness, Inst, InstructionData, KnownSymbol, MemFlags, Opcode,
        TrapCode, Value, ValueList, condcodes::*, immediates::*, types::*,
    },
    isa::CallConv,
    machinst::{
        ArgPair, CallArgList, CallRetList, InstOutput, MachInst, VCodeConstant, VCodeConstantData,
    },
};
use regalloc2::PReg;
use std::boxed::Box;
use std::cell::Cell;
use std::vec::Vec;

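// Type aliases referenced by the generated ISLE code.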
type BoxCallInfo = Box<CallInfo<CallInstDest>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<CallInstDest>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type BoxSymbolReloc = Box<SymbolReloc>;
type VecMInst = Vec<MInst>;
type VecMInstBuilder = Cell<Vec<MInst>>;
type VecArgPair = Vec<ArgPair>;

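/// The main entry point for lowering with ISLE.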
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    inst: Inst,
) -> Option<InstOutput> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

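/// The main entry point for lowering branches with ISLE.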
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

impl generated_code::Context for IsleContext<'_, '_, MInst, S390xBackend> {
    isle_lower_prelude_methods!();

    #[inline]
    fn call_inst_dest_direct(&mut self, name: ExternalName) -> CallInstDest {
        CallInstDest::Direct { name }
    }

    #[inline]
    fn call_inst_dest_indirect(&mut self, reg: Reg) -> CallInstDest {
        CallInstDest::Indirect { reg }
    }

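    // For calls using the tail-call ABI, allocate the outgoing argument area
    // immediately before the call.  If frame pointers are preserved, copy the
    // backchain word to the new bottom of the stack so the chain stays intact
    // across the allocation.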
    fn abi_emit_call_adjust_stack(&mut self, abi: Sig) -> Unit {
        let sig_data = &self.lower_ctx.sigs()[abi];
        if sig_data.call_conv() == CallConv::Tail {
            let arg_space = sig_data.sized_stack_arg_space();
            if arg_space > 0 {
                if self.backend.flags.preserve_frame_pointers() {
                    let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
                    let src_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
                    let dst_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
                    self.emit(&MInst::Load64 {
                        rd: tmp,
                        mem: src_mem,
                    });
                    self.emit(&MInst::AllocateArgs { size: arg_space });
                    self.emit(&MInst::Store64 {
                        rd: tmp.to_reg(),
                        mem: dst_mem,
                    });
                } else {
                    self.emit(&MInst::AllocateArgs { size: arg_space });
                }
            }
        }
    }

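    // Before a return (tail) call: if frame pointers are preserved, copy the
    // backchain word from the initial SP down by the callee's stack argument
    // size, so it ends up at the stack pointer the callee will see.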
    fn abi_emit_return_call_adjust_stack(&mut self, abi: Sig) -> Unit {
        let sig_data = &self.lower_ctx.sigs()[abi];
        let arg_space = sig_data.sized_stack_arg_space();
        if arg_space > 0 && self.backend.flags.preserve_frame_pointers() {
            let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
            let src_mem = MemArg::InitialSPOffset { off: 0 };
            let dst_mem = MemArg::InitialSPOffset {
                off: -(arg_space as i64),
            };
            self.emit(&MInst::Load64 {
                rd: tmp,
                mem: src_mem,
            });
            self.emit(&MInst::Store64 {
                rd: tmp.to_reg(),
                mem: dst_mem,
            });
        }
    }

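    // Collect the argument values for a call.  If the caller's and callee's
    // vector lane orders differ, element-reverse every vector argument before
    // passing it.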
    fn abi_prepare_args(&mut self, abi: Sig, (list, off): ValueSlice) -> ValueRegsVec {
        let lane_order = LaneOrder::from(self.lower_ctx.sigs()[abi].call_conv());
        let lane_swap_needed = self.lane_order() != lane_order;

        (off..list.len(&self.lower_ctx.dfg().value_lists))
            .map(|ix| {
                let val = list.get(ix, &self.lower_ctx.dfg().value_lists).unwrap();
                let ty = self.lower_ctx.dfg().value_type(val);
                let regs = self.put_in_regs(val);

                if lane_swap_needed && ty.is_vector() && ty.lane_count() >= 2 {
                    let tmp_regs = self.lower_ctx.alloc_tmp(ty);
                    self.emit(&MInst::VecEltRev {
                        lane_count: ty.lane_count(),
                        rd: tmp_regs.only_reg().unwrap(),
                        rn: regs.only_reg().unwrap(),
                    });
                    non_writable_value_regs(tmp_regs)
                } else {
                    regs
                }
            })
            .collect()
    }

    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        let total_space = if self.lower_ctx.sigs()[sig].call_conv() != CallConv::Tail {
            REG_SAVE_AREA_SIZE + stack_arg_space + stack_ret_space
        } else {
            REG_SAVE_AREA_SIZE + stack_ret_space
        };
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(total_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let callee_pop_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(callee_pop_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            callee_pop_size,
        })
    }

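    // A call to the `__tls_get_offset` libcall still needs a register save
    // area, so account for it in the outgoing argument size.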
    fn abi_for_elf_tls_get_offset(&mut self) {
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(REG_SAVE_AREA_SIZE);
    }

    #[inline]
    fn box_symbol_reloc(&mut self, symbol_reloc: &SymbolReloc) -> BoxSymbolReloc {
        Box::new(symbol_reloc.clone())
    }

    #[inline]
    fn mie2_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_mie2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie2_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_mie2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn writable_gpr(&mut self, regno: u8) -> WritableReg {
        writable_gpr(regno)
    }

    #[inline]
    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    #[inline]
    fn gpr32_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn gpr64_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I64 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn vr128_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 | F128 => Some(ty),
            _ if ty.is_vector() && ty.bits() == 128 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn uimm32shifted(&mut self, n: u32, shift: u8) -> UImm32Shifted {
        UImm32Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn uimm16shifted(&mut self, n: u16, shift: u8) -> UImm16Shifted {
        UImm16Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn i64_nonequal(&mut self, val: i64, cmp: i64) -> Option<i64> {
        if val != cmp { Some(val) } else { None }
    }

    #[inline]
    fn u64_pair_split(&mut self, n: u128) -> (u64, u64) {
        ((n >> 64) as u64, n as u64)
    }

    #[inline]
    fn u64_pair_concat(&mut self, hi: u64, lo: u64) -> u128 {
        (hi as u128) << 64 | (lo as u128)
    }

    #[inline]
    fn u32_pair_split(&mut self, n: u64) -> (u32, u32) {
        ((n >> 32) as u32, n as u32)
    }

    #[inline]
    fn u32_pair_concat(&mut self, hi: u32, lo: u32) -> u64 {
        (hi as u64) << 32 | (lo as u64)
    }

    #[inline]
    fn u16_pair_split(&mut self, n: u32) -> (u16, u16) {
        ((n >> 16) as u16, n as u16)
    }

    #[inline]
    fn u16_pair_concat(&mut self, hi: u16, lo: u16) -> u32 {
        (hi as u32) << 16 | (lo as u32)
    }

    #[inline]
    fn u8_pair_split(&mut self, n: u16) -> (u8, u8) {
        ((n >> 8) as u8, n as u8)
    }

    #[inline]
    fn u8_pair_concat(&mut self, hi: u8, lo: u8) -> u16 {
        (hi as u16) << 8 | (lo as u16)
    }

    #[inline]
    fn u64_nonzero_hipart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0xffff_ffff_0000_0000;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn u64_nonzero_lopart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0x0000_0000_ffff_ffff;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn uimm32shifted_from_u64(&mut self, n: u64) -> Option<UImm32Shifted> {
        UImm32Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn uimm16shifted_from_u64(&mut self, n: u64) -> Option<UImm16Shifted> {
        UImm16Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn lane_order(&mut self) -> LaneOrder {
        LaneOrder::from(self.lower_ctx.abi().call_conv())
    }

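    // Convert a CLIF lane index into the lane index used by the hardware
    // (big-endian element numbering), taking the current ABI lane order into
    // account.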
    #[inline]
    fn be_lane_idx(&mut self, ty: Type, idx: u8) -> u8 {
        match self.lane_order() {
            LaneOrder::LittleEndian => ty.lane_count() as u8 - 1 - idx,
            LaneOrder::BigEndian => idx,
        }
    }

    #[inline]
    fn be_vec_const(&mut self, ty: Type, n: u128) -> u128 {
        match self.lane_order() {
            LaneOrder::LittleEndian => n,
            LaneOrder::BigEndian if ty.lane_count() == 1 => n,
            LaneOrder::BigEndian => {
                let lane_count = ty.lane_count();
                let lane_bits = ty.lane_bits();
                let lane_mask = (1u128 << lane_bits) - 1;
                let mut n_le = n;
                let mut n_be = 0u128;
                for _ in 0..lane_count {
                    n_be = (n_be << lane_bits) | (n_le & lane_mask);
                    n_le = n_le >> lane_bits;
                }
                n_be
            }
        }
    }

    #[inline]
    fn lane_byte_mask(&mut self, ty: Type, idx: u8) -> u16 {
        let lane_bytes = (ty.lane_bits() / 8) as u8;
        let lane_mask = (1u16 << lane_bytes) - 1;
        lane_mask << (16 - ((idx + 1) * lane_bytes))
    }

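    // Build the mask pair used by the shuffle lowering: shuffle byte indices
    // are translated into byte selectors for a vector permute (in the lane
    // order of the target), and indices that refer to neither input vector
    // are cleared afterwards via the 16-bit AND mask (one bit per byte).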
    #[inline]
    fn shuffle_mask_from_u128(&mut self, idx: u128) -> (u128, u16) {
        let bytes = match self.lane_order() {
            LaneOrder::LittleEndian => idx.to_be_bytes().map(|x| {
                if x < 16 {
                    15 - x
                } else if x < 32 {
                    47 - x
                } else {
                    128
                }
            }),
            LaneOrder::BigEndian => idx.to_le_bytes().map(|x| if x < 32 { x } else { 128 }),
        };
        let and_mask = bytes.iter().fold(0, |acc, &x| (acc << 1) | (x < 32) as u16);
        let permute_mask = u128::from_be_bytes(bytes);
        (permute_mask, and_mask)
    }

    #[inline]
    fn u64_from_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u64_from_inverted_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(!constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u32_from_value(&mut self, val: Value) -> Option<u32> {
        let constant = self.u64_from_value(val)?;
        let imm = u32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u8_from_value(&mut self, val: Value) -> Option<u8> {
        let constant = self.u64_from_value(val)?;
        let imm = u8::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u64_from_signed_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(sign_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn i64_from_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        Some(constant)
    }

    #[inline]
    fn i32_from_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_swapped_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm.swap_bytes())
    }

    #[inline]
    fn i64_from_negated_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = constant.wrapping_neg();
        Some(imm)
    }

    #[inline]
    fn i32_from_negated_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_negated_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn uimm16shifted_from_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm16Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm32shifted_from_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm32Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm16shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm16Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn uimm32shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm32Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn len_minus_one(&mut self, len: u64) -> Option<u8> {
        if len > 0 && len <= 256 {
            Some((len - 1) as u8)
        } else {
            None
        }
    }

    #[inline]
    fn mask_amt_imm(&mut self, ty: Type, amt: i64) -> u8 {
        let mask = ty.lane_bits() - 1;
        (amt as u8) & (mask as u8)
    }

    #[inline]
    fn mask_as_cond(&mut self, mask: u8) -> Cond {
        Cond::from_mask(mask)
    }

    #[inline]
    fn intcc_as_cond(&mut self, cc: &IntCC) -> Cond {
        Cond::from_intcc(*cc)
    }

    #[inline]
    fn floatcc_as_cond(&mut self, cc: &FloatCC) -> Cond {
        Cond::from_floatcc(*cc)
    }

    #[inline]
    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        Cond::invert(*cond)
    }

    #[inline]
    fn signed(&mut self, cc: &IntCC) -> Option<()> {
        if condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn unsigned(&mut self, cc: &IntCC) -> Option<()> {
        if !condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn zero_offset(&mut self) -> Offset32 {
        Offset32::new(0)
    }

    #[inline]
    fn i64_from_offset(&mut self, off: Offset32) -> i64 {
        i64::from(off)
    }

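    // Floating-point bounds used by the `fcvt_to_{u,s}int` lowerings: each
    // helper returns the bit pattern of the bound the input is checked
    // against to detect out-of-range values.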
    #[inline]
    fn fcvt_to_uint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi(size.into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_lb32(&mut self) -> u64 {
        (-1.0_f32).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi(size.into()).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_lb64(&mut self) -> u64 {
        (-1.0_f64).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size).bits()
    }

    #[inline]
    fn fcvt_to_uint_lb128(&mut self) -> u128 {
        (-Ieee128::pow2(0)).bits()
    }

    #[inline]
    fn fcvt_to_sint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi((size - 1).into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_sint_lb32(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f32).powi((size - 1).into());
        std::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits()) as u64
    }

    #[inline]
    fn fcvt_to_sint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi((size - 1).into()).to_bits()
    }

    #[inline]
    fn fcvt_to_sint_lb64(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f64).powi((size - 1).into());
        std::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits())
    }

    #[inline]
    fn fcvt_to_sint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size - 1).bits()
    }

    #[inline]
    fn fcvt_to_sint_lb128(&mut self, size: u8) -> u128 {
        Ieee128::fcvt_to_sint_negative_overflow(size).bits()
    }

    #[inline]
    fn littleendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Little {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn bigendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Big {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn memflags_trusted(&mut self) -> MemFlags {
        MemFlags::trusted()
    }

    #[inline]
    fn memarg_reg_plus_reg(&mut self, x: Reg, y: Reg, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::BXD12 {
            base: x,
            index: y,
            disp: UImm12::maybe_from_u64(bias as u64).unwrap(),
            flags,
        }
    }

    #[inline]
    fn memarg_reg_plus_off(&mut self, reg: Reg, off: i64, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::reg_plus_off(reg, off + (bias as i64), flags)
    }

    #[inline]
    fn memarg_symbol(&mut self, name: ExternalName, offset: i32, flags: MemFlags) -> MemArg {
        MemArg::Symbol {
            name: Box::new(name),
            offset,
            flags,
        }
    }

    #[inline]
    fn memarg_got(&mut self) -> MemArg {
        MemArg::Symbol {
            name: Box::new(ExternalName::KnownSymbol(KnownSymbol::ElfGlobalOffsetTable)),
            offset: 0,
            flags: MemFlags::trusted(),
        }
    }

    #[inline]
    fn memarg_const(&mut self, constant: VCodeConstant) -> MemArg {
        MemArg::Constant { constant }
    }

    #[inline]
    fn memarg_symbol_offset_sum(&mut self, off1: i64, off2: i64) -> Option<i32> {
        let off = i32::try_from(off1 + off2).ok()?;
        if off & 1 == 0 { Some(off) } else { None }
    }

    #[inline]
    fn memarg_frame_pointer_offset(&mut self) -> MemArg {
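        // The frame pointer (backchain word) is stored at offset 0 from the
        // current stack pointer.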
        MemArg::reg(stack_reg(), MemFlags::trusted())
    }

    #[inline]
    fn memarg_return_address_offset(&mut self) -> MemArg {
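        // The return address (%r14) is saved at offset 14 * 8 in the register
        // save area, addressed relative to the stack pointer on entry.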
        MemArg::InitialSPOffset { off: 14 * 8 }
    }

    #[inline]
    fn inst_builder_new(&mut self) -> VecMInstBuilder {
        Cell::new(Vec::<MInst>::new())
    }

    #[inline]
    fn inst_builder_push(&mut self, builder: &VecMInstBuilder, inst: &MInst) -> Unit {
        let mut vec = builder.take();
        vec.push(inst.clone());
        builder.set(vec);
    }

    #[inline]
    fn inst_builder_finish(&mut self, builder: &VecMInstBuilder) -> Vec<MInst> {
        builder.take()
    }

    #[inline]
    fn real_reg(&mut self, reg: WritableReg) -> Option<WritableReg> {
        if reg.to_reg().is_real() {
            Some(reg)
        } else {
            None
        }
    }

    #[inline]
    fn same_reg(&mut self, dst: WritableReg, src: Reg) -> Option<Reg> {
        if dst.to_reg() == src { Some(src) } else { None }
    }

    #[inline]
    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    #[inline]
    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    #[inline]
    fn preg_stack(&mut self) -> PReg {
        stack_reg().to_real_reg().unwrap().into()
    }

    #[inline]
    fn preg_gpr_0(&mut self) -> PReg {
        gpr(0).to_real_reg().unwrap().into()
    }

    #[inline]
    fn writable_regpair(&mut self, hi: WritableReg, lo: WritableReg) -> WritableRegPair {
        WritableRegPair { hi, lo }
    }

    #[inline]
    fn writable_regpair_hi(&mut self, w: WritableRegPair) -> WritableReg {
        w.hi
    }

    #[inline]
    fn writable_regpair_lo(&mut self, w: WritableRegPair) -> WritableReg {
        w.lo
    }

    #[inline]
    fn regpair(&mut self, hi: Reg, lo: Reg) -> RegPair {
        RegPair { hi, lo }
    }

    #[inline]
    fn regpair_hi(&mut self, w: RegPair) -> Reg {
        w.hi
    }

    #[inline]
    fn regpair_lo(&mut self, w: RegPair) -> Reg {
        w.lo
    }
}

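/// Zero-extend the low `from_bits` bits of `value` into a full `u64`.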
#[inline]
fn zero_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        value & ((1u64 << from_bits) - 1)
    }
}

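/// Sign-extend the low `from_bits` bits of `value` into a full `u64`.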
#[inline]
fn sign_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        (((value << (64 - from_bits)) as i64) >> (64 - from_bits)) as u64
    }
}

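/// Determines whether this condition code interprets its inputs as signed or
/// unsigned.  Equality comparisons are treated as unsigned.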
#[inline]
fn condcode_is_signed(cc: IntCC) -> bool {
    match cc {
        IntCC::Equal => false,
        IntCC::NotEqual => false,
        IntCC::SignedGreaterThanOrEqual => true,
        IntCC::SignedGreaterThan => true,
        IntCC::SignedLessThanOrEqual => true,
        IntCC::SignedLessThan => true,
        IntCC::UnsignedGreaterThanOrEqual => false,
        IntCC::UnsignedGreaterThan => false,
        IntCC::UnsignedLessThanOrEqual => false,
        IntCC::UnsignedLessThan => false,
    }
}