pub mod generated_code;

use crate::ir::ExternalName;
use crate::isa::s390x::S390xBackend;
use crate::isa::s390x::abi::REG_SAVE_AREA_SIZE;
use crate::isa::s390x::inst::{
    CallInstDest, Cond, Inst as MInst, LaneOrder, MemArg, RegPair, ReturnCallInfo, SymbolReloc,
    UImm12, UImm16Shifted, UImm32Shifted, WritableRegPair, gpr, stack_reg, writable_gpr, zero_reg,
};
use crate::machinst::isle::*;
use crate::machinst::{CallInfo, MachLabel, Reg, TryCallInfo, non_writable_value_regs};
use crate::{
    ir::{
        AtomicRmwOp, BlockCall, Endianness, Inst, InstructionData, KnownSymbol, MemFlags, Opcode,
        TrapCode, Value, ValueList, condcodes::*, immediates::*, types::*,
    },
    isa::CallConv,
    machinst::{
        ArgPair, CallArgList, CallRetList, InstOutput, MachInst, VCodeConstant, VCodeConstantData,
    },
};
use regalloc2::PReg;
use std::boxed::Box;
use std::cell::Cell;
use std::vec::Vec;

type BoxCallInfo = Box<CallInfo<CallInstDest>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<CallInstDest>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type BoxSymbolReloc = Box<SymbolReloc>;
type VecMInst = Vec<MInst>;
type VecMInstBuilder = Cell<Vec<MInst>>;
type VecArgPair = Vec<ArgPair>;

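/// Lower a single CLIF instruction through the ISLE-generated `lower`
/// constructor; returns `None` if no lowering rule matched.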
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    inst: Inst,
) -> Option<InstOutput> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &S390xBackend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

impl generated_code::Context for IsleContext<'_, '_, MInst, S390xBackend> {
    isle_lower_prelude_methods!();

    #[inline]
    fn call_inst_dest_direct(&mut self, name: ExternalName) -> CallInstDest {
        CallInstDest::Direct { name }
    }

    #[inline]
    fn call_inst_dest_indirect(&mut self, reg: Reg) -> CallInstDest {
        CallInstDest::Indirect { reg }
    }

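    // Stack-pointer adjustment before a call using the tail-call convention:
    // the caller allocates the outgoing stack-argument buffer immediately
    // below its current SP.  When frame pointers are preserved, the word at
    // SP+0 (the backchain slot) is loaded first and re-stored at the new SP+0
    // after the allocation, so the backchain stays valid.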
78 fn abi_emit_call_adjust_stack(&mut self, abi: Sig) -> Unit {
83 let sig_data = &self.lower_ctx.sigs()[abi];
84 if sig_data.call_conv() == CallConv::Tail {
85 let arg_space = sig_data.sized_stack_arg_space();
86 if arg_space > 0 {
87 if self.backend.flags.preserve_frame_pointers() {
88 let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
89 let src_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
90 let dst_mem = MemArg::reg(stack_reg(), MemFlags::trusted());
91 self.emit(&MInst::Load64 {
92 rd: tmp,
93 mem: src_mem,
94 });
95 self.emit(&MInst::AllocateArgs { size: arg_space });
96 self.emit(&MInst::Store64 {
97 rd: tmp.to_reg(),
98 mem: dst_mem,
99 });
100 } else {
101 self.emit(&MInst::AllocateArgs { size: arg_space });
102 }
103 }
104 }
105 }
106
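    // Stack adjustment before a return (tail) call: if a backchain word is
    // maintained (preserve_frame_pointers), the word at the incoming SP is
    // copied `arg_space` bytes lower so it remains in place relative to the
    // enlarged argument area the tail callee will see.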
    fn abi_emit_return_call_adjust_stack(&mut self, abi: Sig) -> Unit {
        let sig_data = &self.lower_ctx.sigs()[abi];
        let arg_space = sig_data.sized_stack_arg_space();
        if arg_space > 0 && self.backend.flags.preserve_frame_pointers() {
            let tmp = self.lower_ctx.alloc_tmp(I64).only_reg().unwrap();
            let src_mem = MemArg::InitialSPOffset { off: 0 };
            let dst_mem = MemArg::InitialSPOffset {
                off: -(arg_space as i64),
            };
            self.emit(&MInst::Load64 {
                rd: tmp,
                mem: src_mem,
            });
            self.emit(&MInst::Store64 {
                rd: tmp.to_reg(),
                mem: dst_mem,
            });
        }
    }

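    // Put each call argument into registers.  The in-register vector lane
    // order depends on the calling convention; when the callee's lane order
    // differs from the current function's, each vector argument is
    // element-reversed into a temporary via `VecEltRev` first.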
    fn abi_prepare_args(&mut self, abi: Sig, (list, off): ValueSlice) -> ValueRegsVec {
        let lane_order = LaneOrder::from(self.lower_ctx.sigs()[abi].call_conv());
        let lane_swap_needed = self.lane_order() != lane_order;

        (off..list.len(&self.lower_ctx.dfg().value_lists))
            .map(|ix| {
                let val = list.get(ix, &self.lower_ctx.dfg().value_lists).unwrap();
                let ty = self.lower_ctx.dfg().value_type(val);
                let regs = self.put_in_regs(val);

                if lane_swap_needed && ty.is_vector() && ty.lane_count() >= 2 {
                    let tmp_regs = self.lower_ctx.alloc_tmp(ty);
                    self.emit(&MInst::VecEltRev {
                        lane_count: ty.lane_count(),
                        rd: tmp_regs.only_reg().unwrap(),
                        rn: regs.only_reg().unwrap(),
                    });
                    non_writable_value_regs(tmp_regs)
                } else {
                    regs
                }
            })
            .collect()
    }

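    // Build the boxed `CallInfo` for a call and record the outgoing stack
    // space it needs: the register save area plus any stack return buffer,
    // plus the stack argument area except for tail-convention callees, whose
    // argument area is allocated separately by `abi_emit_call_adjust_stack`.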
    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        let total_space = if self.lower_ctx.sigs()[sig].call_conv() != CallConv::Tail {
            REG_SAVE_AREA_SIZE + stack_arg_space + stack_ret_space
        } else {
            REG_SAVE_AREA_SIZE + stack_ret_space
        };
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(total_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, patchable),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: CallInstDest,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let callee_pop_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(callee_pop_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            callee_pop_size,
        })
    }

    fn abi_for_elf_tls_get_offset(&mut self) {
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(REG_SAVE_AREA_SIZE);
    }

    #[inline]
    fn box_symbol_reloc(&mut self, symbol_reloc: &SymbolReloc) -> BoxSymbolReloc {
        Box::new(symbol_reloc.clone())
    }

    #[inline]
    fn mie3_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_mie3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn mie3_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_mie3() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_enabled(&mut self, _: Type) -> Option<()> {
        if self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn vxrs_ext2_disabled(&mut self, _: Type) -> Option<()> {
        if !self.backend.isa_flags.has_vxrs_ext2() {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn writable_gpr(&mut self, regno: u8) -> WritableReg {
        writable_gpr(regno)
    }

    #[inline]
    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    #[inline]
    fn gpr32_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn gpr64_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I64 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn vr128_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 | F128 => Some(ty),
            _ if ty.is_vector() && ty.bits() == 128 => Some(ty),
            _ => None,
        }
    }

    #[inline]
    fn uimm32shifted(&mut self, n: u32, shift: u8) -> UImm32Shifted {
        UImm32Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn uimm16shifted(&mut self, n: u16, shift: u8) -> UImm16Shifted {
        UImm16Shifted::maybe_with_shift(n, shift).unwrap()
    }

    #[inline]
    fn i64_nonequal(&mut self, val: i64, cmp: i64) -> Option<i64> {
        if val != cmp { Some(val) } else { None }
    }

    #[inline]
    fn u64_pair_split(&mut self, n: u128) -> (u64, u64) {
        ((n >> 64) as u64, n as u64)
    }

    #[inline]
    fn u64_pair_concat(&mut self, hi: u64, lo: u64) -> u128 {
        (hi as u128) << 64 | (lo as u128)
    }

    #[inline]
    fn u32_pair_split(&mut self, n: u64) -> (u32, u32) {
        ((n >> 32) as u32, n as u32)
    }

    #[inline]
    fn u32_pair_concat(&mut self, hi: u32, lo: u32) -> u64 {
        (hi as u64) << 32 | (lo as u64)
    }

    #[inline]
    fn u16_pair_split(&mut self, n: u32) -> (u16, u16) {
        ((n >> 16) as u16, n as u16)
    }

    #[inline]
    fn u16_pair_concat(&mut self, hi: u16, lo: u16) -> u32 {
        (hi as u32) << 16 | (lo as u32)
    }

    #[inline]
    fn u8_pair_split(&mut self, n: u16) -> (u8, u8) {
        ((n >> 8) as u8, n as u8)
    }

    #[inline]
    fn u8_pair_concat(&mut self, hi: u8, lo: u8) -> u16 {
        (hi as u16) << 8 | (lo as u16)
    }

    #[inline]
    fn u64_nonzero_hipart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0xffff_ffff_0000_0000;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn u64_nonzero_lopart(&mut self, n: u64) -> Option<u64> {
        let part = n & 0x0000_0000_ffff_ffff;
        if part != 0 { Some(part) } else { None }
    }

    #[inline]
    fn uimm32shifted_from_u64(&mut self, n: u64) -> Option<UImm32Shifted> {
        UImm32Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn uimm16shifted_from_u64(&mut self, n: u64) -> Option<UImm16Shifted> {
        UImm16Shifted::maybe_from_u64(n)
    }

    #[inline]
    fn lane_order(&mut self) -> LaneOrder {
        LaneOrder::from(self.lower_ctx.abi().call_conv())
    }

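    // Map a CLIF lane index to the big-endian element number used by the
    // hardware: under little-endian lane order CLIF lane 0 is the last
    // element, so the index is mirrored (e.g. lane 0 of an I32X4 becomes
    // element 3); under big-endian lane order the two agree.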
    #[inline]
    fn be_lane_idx(&mut self, ty: Type, idx: u8) -> u8 {
        match self.lane_order() {
            LaneOrder::LittleEndian => ty.lane_count() as u8 - 1 - idx,
            LaneOrder::BigEndian => idx,
        }
    }

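    // Reorder the lanes of a 128-bit vector constant to match the current
    // lane order.  Under big-endian lane order the lanes are reversed, e.g.
    // the I32X4 constant 0xDDDDDDDD_CCCCCCCC_BBBBBBBB_AAAAAAAA becomes
    // 0xAAAAAAAA_BBBBBBBB_CCCCCCCC_DDDDDDDD.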
    #[inline]
    fn be_vec_const(&mut self, ty: Type, n: u128) -> u128 {
        match self.lane_order() {
            LaneOrder::LittleEndian => n,
            LaneOrder::BigEndian if ty.lane_count() == 1 => n,
            LaneOrder::BigEndian => {
                let lane_count = ty.lane_count();
                let lane_bits = ty.lane_bits();
                let lane_mask = (1u128 << lane_bits) - 1;
                let mut n_le = n;
                let mut n_be = 0u128;
                for _ in 0..lane_count {
                    n_be = (n_be << lane_bits) | (n_le & lane_mask);
                    n_le = n_le >> lane_bits;
                }
                n_be
            }
        }
    }

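    // Byte mask with one bit per vector byte (MSB = byte 0) covering the
    // bytes of lane `idx`; e.g. lane 1 of an I32X4 yields 0x0f00.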
    #[inline]
    fn lane_byte_mask(&mut self, ty: Type, idx: u8) -> u16 {
        let lane_bytes = (ty.lane_bits() / 8) as u8;
        let lane_mask = (1u16 << lane_bytes) - 1;
        lane_mask << (16 - ((idx + 1) * lane_bytes))
    }

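    // Turn a CLIF `shuffle` immediate into a (permute_mask, and_mask) pair:
    // permute_mask holds one source-byte selector per result byte in
    // big-endian byte order, with out-of-range selectors replaced by the
    // placeholder value 128, and and_mask has one bit per result byte (MSB
    // first) set only for in-range selectors, so that out-of-range bytes can
    // be zeroed after the permute.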
    #[inline]
    fn shuffle_mask_from_u128(&mut self, idx: u128) -> (u128, u16) {
        let bytes = match self.lane_order() {
            LaneOrder::LittleEndian => idx.to_be_bytes().map(|x| {
                if x < 16 {
                    15 - x
                } else if x < 32 {
                    47 - x
                } else {
                    128
                }
            }),
            LaneOrder::BigEndian => idx.to_le_bytes().map(|x| if x < 32 { x } else { 128 }),
        };
        let and_mask = bytes.iter().fold(0, |acc, &x| (acc << 1) | (x < 32) as u16);
        let permute_mask = u128::from_be_bytes(bytes);
        (permute_mask, and_mask)
    }

    #[inline]
    fn u64_from_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u64_from_inverted_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(zero_extend_to_u64(!constant, self.ty_bits(ty)))
    }

    #[inline]
    fn u32_from_value(&mut self, val: Value) -> Option<u32> {
        let constant = self.u64_from_value(val)?;
        let imm = u32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u8_from_value(&mut self, val: Value) -> Option<u8> {
        let constant = self.u64_from_value(val)?;
        let imm = u8::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn u64_from_signed_value(&mut self, val: Value) -> Option<u64> {
        let inst = self.lower_ctx.dfg().value_def(val).inst()?;
        let constant = self.lower_ctx.get_constant(inst)?;
        let ty = self.lower_ctx.output_ty(inst, 0);
        Some(sign_extend_to_u64(constant, self.ty_bits(ty)))
    }

    #[inline]
    fn i64_from_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        Some(constant)
    }

    #[inline]
    fn i32_from_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_swapped_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant).ok()?;
        Some(imm.swap_bytes())
    }

    #[inline]
    fn i64_from_negated_value(&mut self, val: Value) -> Option<i64> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = constant.wrapping_neg();
        Some(imm)
    }

    #[inline]
    fn i32_from_negated_value(&mut self, val: Value) -> Option<i32> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i32::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn i16_from_negated_value(&mut self, val: Value) -> Option<i16> {
        let constant = self.u64_from_signed_value(val)? as i64;
        let imm = i16::try_from(constant.wrapping_neg()).ok()?;
        Some(imm)
    }

    #[inline]
    fn uimm16shifted_from_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm16Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm32shifted_from_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_value(val)?;
        UImm32Shifted::maybe_from_u64(constant)
    }

    #[inline]
    fn uimm16shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm16Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm16Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn uimm32shifted_from_inverted_value(&mut self, val: Value) -> Option<UImm32Shifted> {
        let constant = self.u64_from_inverted_value(val)?;
        let imm = UImm32Shifted::maybe_from_u64(constant)?;
        Some(imm.negate_bits())
    }

    #[inline]
    fn len_minus_one(&mut self, len: u64) -> Option<u8> {
        if len > 0 && len <= 256 {
            Some((len - 1) as u8)
        } else {
            None
        }
    }

    #[inline]
    fn mask_amt_imm(&mut self, ty: Type, amt: i64) -> u8 {
        let mask = ty.lane_bits() - 1;
        (amt as u8) & (mask as u8)
    }

    #[inline]
    fn mask_as_cond(&mut self, mask: u8) -> Cond {
        Cond::from_mask(mask)
    }

    #[inline]
    fn intcc_as_cond(&mut self, cc: &IntCC) -> Cond {
        Cond::from_intcc(*cc)
    }

    #[inline]
    fn floatcc_as_cond(&mut self, cc: &FloatCC) -> Cond {
        Cond::from_floatcc(*cc)
    }

    #[inline]
    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        Cond::invert(*cond)
    }

    #[inline]
    fn signed(&mut self, cc: &IntCC) -> Option<()> {
        if condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn unsigned(&mut self, cc: &IntCC) -> Option<()> {
        if !condcode_is_signed(*cc) {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn zero_offset(&mut self) -> Offset32 {
        Offset32::new(0)
    }

    #[inline]
    fn i64_from_offset(&mut self, off: Offset32) -> i64 {
        i64::from(off)
    }

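    // Bit patterns of the bounds used by the trapping float-to-int lowerings
    // below.  A conversion to an N-bit unsigned integer is in range only if
    // the input is strictly greater than -1.0 and strictly less than 2^N;
    // the `ub`/`lb` helpers return those bounds encoded in the source float
    // format.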
    #[inline]
    fn fcvt_to_uint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi(size.into()).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_lb32(&mut self) -> u64 {
        (-1.0_f32).to_bits() as u64
    }

    #[inline]
    fn fcvt_to_uint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi(size.into()).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_lb64(&mut self) -> u64 {
        (-1.0_f64).to_bits()
    }

    #[inline]
    fn fcvt_to_uint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size).bits()
    }

    #[inline]
    fn fcvt_to_uint_lb128(&mut self) -> u128 {
        (-Ieee128::pow2(0)).bits()
    }

    #[inline]
    fn fcvt_to_sint_ub32(&mut self, size: u8) -> u64 {
        (2.0_f32).powi((size - 1).into()).to_bits() as u64
    }

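    // Signed lower bound: an N-bit signed conversion is in range only if the
    // input is strictly greater than -2^(N-1) - 1.  When that value rounds to
    // -2^(N-1) in the source float format, the next float below -2^(N-1) is
    // used instead; taking the max of the two bit patterns picks the more
    // negative candidate, since for negative floats a larger bit pattern
    // encodes a more negative value.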
    #[inline]
    fn fcvt_to_sint_lb32(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f32).powi((size - 1).into());
        std::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits()) as u64
    }

    #[inline]
    fn fcvt_to_sint_ub64(&mut self, size: u8) -> u64 {
        (2.0_f64).powi((size - 1).into()).to_bits()
    }

    #[inline]
    fn fcvt_to_sint_lb64(&mut self, size: u8) -> u64 {
        let lb = (-2.0_f64).powi((size - 1).into());
        std::cmp::max(lb.to_bits() + 1, (lb - 1.0).to_bits())
    }

    #[inline]
    fn fcvt_to_sint_ub128(&mut self, size: u8) -> u128 {
        Ieee128::pow2(size - 1).bits()
    }

    #[inline]
    fn fcvt_to_sint_lb128(&mut self, size: u8) -> u128 {
        Ieee128::fcvt_to_sint_negative_overflow(size).bits()
    }

    #[inline]
    fn littleendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Little {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn bigendian(&mut self, flags: MemFlags) -> Option<()> {
        let endianness = flags.endianness(Endianness::Big);
        if endianness == Endianness::Big {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn memflags_trusted(&mut self) -> MemFlags {
        MemFlags::trusted()
    }

    #[inline]
    fn memarg_reg_plus_reg(&mut self, x: Reg, y: Reg, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::BXD12 {
            base: x,
            index: y,
            disp: UImm12::maybe_from_u64(bias as u64).unwrap(),
            flags,
        }
    }

    #[inline]
    fn memarg_reg_plus_off(&mut self, reg: Reg, off: i64, bias: u8, flags: MemFlags) -> MemArg {
        MemArg::reg_plus_off(reg, off + (bias as i64), flags)
    }

    #[inline]
    fn memarg_symbol(&mut self, name: ExternalName, offset: i32, flags: MemFlags) -> MemArg {
        MemArg::Symbol {
            name: Box::new(name),
            offset,
            flags,
        }
    }

    #[inline]
    fn memarg_got(&mut self) -> MemArg {
        MemArg::Symbol {
            name: Box::new(ExternalName::KnownSymbol(KnownSymbol::ElfGlobalOffsetTable)),
            offset: 0,
            flags: MemFlags::trusted(),
        }
    }

    #[inline]
    fn memarg_const(&mut self, constant: VCodeConstant) -> MemArg {
        MemArg::Constant { constant }
    }

    #[inline]
    fn memarg_symbol_offset_sum(&mut self, off1: i64, off2: i64) -> Option<i32> {
        let off = i32::try_from(off1 + off2).ok()?;
        if off & 1 == 0 { Some(off) } else { None }
    }

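    // Address of the backchain word at offset 0 from the current stack
    // pointer; s390x uses the backchain in place of a dedicated frame-pointer
    // register.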
    #[inline]
    fn memarg_frame_pointer_offset(&mut self) -> MemArg {
        MemArg::reg(stack_reg(), MemFlags::trusted())
    }

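    // Address of the return address: the %r14 slot of the register save
    // area, 14 * 8 bytes above the stack pointer value at function entry,
    // where %r14 (the link register) is saved.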
    #[inline]
    fn memarg_return_address_offset(&mut self) -> MemArg {
        MemArg::InitialSPOffset { off: 14 * 8 }
    }

    #[inline]
    fn inst_builder_new(&mut self) -> VecMInstBuilder {
        Cell::new(Vec::<MInst>::new())
    }

    #[inline]
    fn inst_builder_push(&mut self, builder: &VecMInstBuilder, inst: &MInst) -> Unit {
        let mut vec = builder.take();
        vec.push(inst.clone());
        builder.set(vec);
    }

    #[inline]
    fn inst_builder_finish(&mut self, builder: &VecMInstBuilder) -> Vec<MInst> {
        builder.take()
    }

    #[inline]
    fn real_reg(&mut self, reg: WritableReg) -> Option<WritableReg> {
        if reg.to_reg().is_real() {
            Some(reg)
        } else {
            None
        }
    }

    #[inline]
    fn same_reg(&mut self, dst: WritableReg, src: Reg) -> Option<Reg> {
        if dst.to_reg() == src { Some(src) } else { None }
    }

    #[inline]
    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    #[inline]
    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    #[inline]
    fn preg_stack(&mut self) -> PReg {
        stack_reg().to_real_reg().unwrap().into()
    }

    #[inline]
    fn preg_gpr_0(&mut self) -> PReg {
        gpr(0).to_real_reg().unwrap().into()
    }

    #[inline]
    fn writable_regpair(&mut self, hi: WritableReg, lo: WritableReg) -> WritableRegPair {
        WritableRegPair { hi, lo }
    }

    #[inline]
    fn writable_regpair_hi(&mut self, w: WritableRegPair) -> WritableReg {
        w.hi
    }

    #[inline]
    fn writable_regpair_lo(&mut self, w: WritableRegPair) -> WritableReg {
        w.lo
    }

    #[inline]
    fn regpair(&mut self, hi: Reg, lo: Reg) -> RegPair {
        RegPair { hi, lo }
    }

    #[inline]
    fn regpair_hi(&mut self, w: RegPair) -> Reg {
        w.hi
    }

    #[inline]
    fn regpair_lo(&mut self, w: RegPair) -> Reg {
        w.lo
    }
}

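// Extend a constant of `from_bits` significant bits to a full 64-bit value.
// For example, sign_extend_to_u64(0xff, 8) == u64::MAX, while
// zero_extend_to_u64(0xff, 8) == 0xff.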
#[inline]
fn zero_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        value & ((1u64 << from_bits) - 1)
    }
}

#[inline]
fn sign_extend_to_u64(value: u64, from_bits: u8) -> u64 {
    assert!(from_bits <= 64);
    if from_bits >= 64 {
        value
    } else {
        (((value << (64 - from_bits)) as i64) >> (64 - from_bits)) as u64
    }
}

#[inline]
fn condcode_is_signed(cc: IntCC) -> bool {
    match cc {
        IntCC::Equal => false,
        IntCC::NotEqual => false,
        IntCC::SignedGreaterThanOrEqual => true,
        IntCC::SignedGreaterThan => true,
        IntCC::SignedLessThanOrEqual => true,
        IntCC::SignedLessThan => true,
        IntCC::UnsignedGreaterThanOrEqual => false,
        IntCC::UnsignedGreaterThan => false,
        IntCC::UnsignedLessThanOrEqual => false,
        IntCC::UnsignedLessThan => false,
    }
}