pub mod generated_code;
use generated_code::{Context, ImmExtend};

use super::{
    ASIMDFPModImm, ASIMDMovModImm, BranchTarget, CallInfo, Cond, CondBrKind, ExtendOp, FPUOpRI,
    FPUOpRIMod, FloatCC, Imm12, ImmLogic, ImmShift, Inst as MInst, IntCC, MachLabel, MemLabel,
    MoveWideConst, MoveWideOp, NZCV, Opcode, OperandSize, Reg, SImm9, ScalarSize, ShiftOpAndAmt,
    UImm5, UImm12Scaled, VecMisc2, VectorSize, fp_reg, lower_condcode, lower_fp_condcode,
    stack_reg, writable_link_reg, writable_zero_reg, zero_reg,
};
use crate::ir::{ArgumentExtension, condcodes};
use crate::isa;
use crate::isa::aarch64::AArch64Backend;
use crate::isa::aarch64::inst::{FPULeftShiftImm, FPURightShiftImm, ReturnCallInfo};
use crate::machinst::isle::*;
use crate::{
    binemit::CodeOffset,
    ir::{
        AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData, MemFlags, TrapCode, Value,
        ValueList, immediates::*, types::*,
    },
    isa::aarch64::abi::AArch64MachineDeps,
    isa::aarch64::inst::SImm7Scaled,
    isa::aarch64::inst::args::{ShiftOp, ShiftOpShiftImm},
    machinst::{
        CallArgList, CallRetList, InstOutput, MachInst, VCodeConstant, VCodeConstantData,
        abi::ArgPair, ty_bits,
    },
};
use alloc::boxed::Box;
use alloc::vec::Vec;
use regalloc2::PReg;

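// Type aliases matching the `extern` type names used by the generated ISLE
// code.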
type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type VecArgPair = Vec<ArgPair>;

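/// The main entry point for lowering with ISLE.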
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

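/// The main entry point for lowering a branch with ISLE.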
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

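/// A `Value` plus the extending operation to fold into its use, as
/// recognized by `extended_value_from_value` below.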
pub struct ExtendedValue {
    val: Value,
    extend: ExtendOp,
}

impl Context for IsleContext<'_, '_, MInst, AArch64Backend> {
    isle_lower_prelude_methods!();

    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, patchable),
        )
    }

    fn gen_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, false),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        let key =
            AArch64MachineDeps::select_api_key(&self.backend.isa_flags, isa::CallConv::Tail, true);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            key,
            new_stack_arg_size,
        })
    }

    fn gen_return_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
    ) -> BoxReturnCallIndInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        let key =
            AArch64MachineDeps::select_api_key(&self.backend.isa_flags, isa::CallConv::Tail, true);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            key,
            new_stack_arg_size,
        })
    }

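    // Matches only when return-address signing is disabled; ISLE rules use
    // this as a guard, so the "success" case is the inverted flag.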
    fn sign_return_address_disabled(&mut self) -> Option<()> {
        if self.backend.isa_flags.sign_return_address() {
            None
        } else {
            Some(())
        }
    }

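    // Matches only when the Large System Extensions (LSE) atomic
    // instructions are available.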
    fn use_lse(&mut self, _: Inst) -> Option<()> {
        if self.backend.isa_flags.has_lse() {
            Some(())
        } else {
            None
        }
    }

    fn use_fp16(&mut self) -> bool {
        self.backend.isa_flags.has_fp16()
    }

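    // Mask narrow constants to the width of `ty` so that stray upper bits
    // cannot defeat the MOVZ/MOVN encoding check.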
    fn move_wide_const_from_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        let bits = ty.bits();
        let n = if bits < 64 {
            n & !(u64::MAX << bits)
        } else {
            n
        };
        MoveWideConst::maybe_from_u64(n)
    }

    fn move_wide_const_from_inverted_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        self.move_wide_const_from_u64(ty, !n)
    }

    fn imm_logic_from_u64(&mut self, ty: Type, n: u64) -> Option<ImmLogic> {
        ImmLogic::maybe_from_u64(n, ty)
    }

    fn imm_size_from_type(&mut self, ty: Type) -> Option<u16> {
        match ty {
            I32 => Some(32),
            I64 => Some(64),
            _ => None,
        }
    }

    fn imm_logic_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ImmLogic> {
        let ty = if ty.bits() < 32 { I32 } else { ty };
        self.imm_logic_from_u64(ty, n.bits() as u64)
    }

    fn imm12_from_u64(&mut self, n: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(n)
    }

    fn imm_shift_from_u8(&mut self, n: u8) -> ImmShift {
        ImmShift::maybe_from_u64(n.into()).unwrap()
    }

    fn lshr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        if let Ok(bits) = u8::try_from(ty_bits(ty)) {
            let shiftimm = shiftimm.mask(bits);
            Some(ShiftOpAndAmt::new(ShiftOp::LSR, shiftimm))
        } else {
            None
        }
    }

    fn lshl_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ShiftOpAndAmt> {
        self.lshl_from_u64(ty, n.bits() as u64)
    }

    fn lshl_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::LSL, shiftimm))
        } else {
            None
        }
    }

    fn ashr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::ASR, shiftimm))
        } else {
            None
        }
    }

    fn integral_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }

    fn is_zero_simm9(&mut self, imm: &SImm9) -> Option<()> {
        if imm.value() == 0 { Some(()) } else { None }
    }

    fn is_zero_uimm12(&mut self, imm: &UImm12Scaled) -> Option<()> {
        if imm.value() == 0 { Some(()) } else { None }
    }

    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }

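    // Materialize `value`, sign- or zero-extended from `ty` to the requested
    // operand size, using the cheaper of a MOVZ- or MOVN-based sequence with
    // MOVK fix-ups for the remaining half-words.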
    fn load_constant_full(
        &mut self,
        ty: Type,
        extend: &ImmExtend,
        extend_to: &OperandSize,
        value: u64,
    ) -> Reg {
        let bits = ty.bits();

        let value = match (extend_to, *extend) {
            (OperandSize::Size32, ImmExtend::Sign) if bits < 32 => {
                let shift = 32 - bits;
                let value = value as i32;

                ((value << shift) >> shift) as u32 as u64
            }
            (OperandSize::Size32, ImmExtend::Zero) if bits < 32 => {
                value & !((u32::MAX as u64) << bits)
            }
            (OperandSize::Size64, ImmExtend::Sign) if bits < 64 => {
                let shift = 64 - bits;
                let value = value as i64;

                ((value << shift) >> shift) as u64
            }
            (OperandSize::Size64, ImmExtend::Zero) if bits < 64 => value & !(u64::MAX << bits),
            _ => value,
        };

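        // Read (`get`) or overwrite (`replace`) the 16-bit half-word at
        // index `shift` of a 64-bit value.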
        fn get(value: u64, shift: u8) -> u16 {
            (value >> (shift * 16)) as u16
        }
        fn replace(mut old: u64, new: u16, shift: u8) -> u64 {
            let offset = shift * 16;
            old &= !(0xffff << offset);
            old |= u64::from(new) << offset;
            old
        }

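        // The 32-bit move variants zero the upper half of the register, so
        // they suffice whenever the high 32 bits of the constant are zero.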
        let size = if value >> 32 == 0 {
            OperandSize::Size32
        } else {
            OperandSize::Size64
        };

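        // Try starting from all-zeros (MOVZ) and from all-ones (MOVN), and
        // keep whichever base leaves the fewest differing half-words to
        // patch afterwards.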
        let (mut running_value, op, first) =
            [(MoveWideOp::MovZ, 0), (MoveWideOp::MovN, size.max_value())]
                .into_iter()
                .map(|(op, base)| {
                    let first = (0..(size.bits() / 16))
                        .find(|&i| get(base ^ value, i) != 0)
                        .unwrap_or(0);
                    (replace(base, get(value, first), first), op, first)
                })
                .min_by_key(|(base, ..)| (0..4).filter(|&i| get(base ^ value, i) != 0).count())
                .unwrap();

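        // Emit the initial MOVZ/MOVN for the first half-word that differs
        // from the chosen base; MOVN encodes the inverted pattern, hence the
        // `!` on its immediate bits.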
        let mut rd = self.temp_writable_reg(I64);
        self.lower_ctx.emit(MInst::MovWide {
            op,
            rd,
            imm: MoveWideConst {
                bits: match op {
                    MoveWideOp::MovZ => get(value, first),
                    MoveWideOp::MovN => !get(value, first),
                },
                shift: first,
            },
            size,
        });
        if self.backend.flags.enable_pcc() {
            self.lower_ctx
                .add_range_fact(rd.to_reg(), 64, running_value, running_value);
        }

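        // Patch each remaining differing half-word with MOVK. MOVK both
        // reads and writes its register, so each step defines a fresh
        // temporary to stay in SSA form; with PCC enabled, every
        // intermediate value gets an exact range fact.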
        for shift in (first + 1)..(size.bits() / 16) {
            let bits = get(value, shift);
            if bits != get(running_value, shift) {
                let rn = rd.to_reg();
                rd = self.temp_writable_reg(I64);
                self.lower_ctx.emit(MInst::MovK {
                    rd,
                    rn,
                    imm: MoveWideConst { bits, shift },
                    size,
                });
                running_value = replace(running_value, bits, shift);
                if self.backend.flags.enable_pcc() {
                    self.lower_ctx
                        .add_range_fact(rd.to_reg(), 64, running_value, running_value);
                }
            }
        }

        debug_assert_eq!(value, running_value);
        rd.to_reg()
    }

    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    fn stack_reg(&mut self) -> Reg {
        stack_reg()
    }

    fn fp_reg(&mut self) -> Reg {
        fp_reg()
    }

    fn writable_link_reg(&mut self) -> WritableReg {
        writable_link_reg()
    }

    fn extended_value_from_value(&mut self, val: Value) -> Option<ExtendedValue> {
        let (val, extend) = super::get_as_extended_value(self.lower_ctx, val)?;
        Some(ExtendedValue { val, extend })
    }

    fn put_extended_in_reg(&mut self, reg: &ExtendedValue) -> Reg {
        self.put_in_reg(reg.val)
    }

    fn get_extended_op(&mut self, reg: &ExtendedValue) -> ExtendOp {
        reg.extend
    }

    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    fn cond_br_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::Zero(reg, *size)
    }

    fn cond_br_not_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::NotZero(reg, *size)
    }

    fn cond_br_cond(&mut self, cond: &Cond) -> CondBrKind {
        CondBrKind::Cond(*cond)
    }

    fn nzcv(&mut self, n: bool, z: bool, c: bool, v: bool) -> NZCV {
        NZCV::new(n, z, c, v)
    }

    fn u8_into_uimm5(&mut self, x: u8) -> UImm5 {
        UImm5::maybe_from_u8(x).unwrap()
    }

    fn u8_into_imm12(&mut self, x: u8) -> Imm12 {
        Imm12::maybe_from_u64(x.into()).unwrap()
    }

    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }

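    // Shift amounts wrap modulo the lane width, so the mask is always
    // `lane_bits - 1`.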
    fn shift_mask(&mut self, ty: Type) -> ImmLogic {
        debug_assert!(ty.lane_bits().is_power_of_two());

        let mask = (ty.lane_bits() - 1) as u64;
        ImmLogic::maybe_from_u64(mask, I32).unwrap()
    }

    fn imm_shift_from_imm64(&mut self, ty: Type, val: Imm64) -> Option<ImmShift> {
        let imm_value = (val.bits() as u64) & ((ty.bits() - 1) as u64);
        ImmShift::maybe_from_u64(imm_value)
    }

    fn u64_into_imm_logic(&mut self, ty: Type, val: u64) -> ImmLogic {
        ImmLogic::maybe_from_u64(val, ty).unwrap()
    }

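    // Negate a shift amount modulo the type width: the result is
    // `(ty.bits() - imm) & (ty.bits() - 1)`.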
    fn negate_imm_shift(&mut self, ty: Type, mut imm: ImmShift) -> ImmShift {
        let size = u8::try_from(ty.bits()).unwrap();
        imm.imm = size.wrapping_sub(imm.value());
        imm.imm &= size - 1;
        imm
    }

    fn rotr_mask(&mut self, ty: Type) -> ImmLogic {
        ImmLogic::maybe_from_u64((ty.bits() - 1) as u64, I32).unwrap()
    }

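    // Convert a rotate amount into the equivalent opposite-direction
    // amount, `ty.bits() - amount`.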
    fn rotr_opposite_amount(&mut self, ty: Type, val: ImmShift) -> ImmShift {
        let amount = val.value() & u8::try_from(ty.bits() - 1).unwrap();
        ImmShift::maybe_from_u64(u64::from(ty.bits()) - u64::from(amount)).unwrap()
    }

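    // The `*_zero_cond` predicates accept only condition codes that have a
    // direct compare-against-zero lowering; `NotEqual` is handled by the
    // separate `*_not_eq` helpers since its lowering differs.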
    fn icmp_zero_cond(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::Equal
            | &IntCC::SignedGreaterThanOrEqual
            | &IntCC::SignedGreaterThan
            | &IntCC::SignedLessThanOrEqual
            | &IntCC::SignedLessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::Equal
            | &FloatCC::GreaterThanOrEqual
            | &FloatCC::GreaterThan
            | &FloatCC::LessThanOrEqual
            | &FloatCC::LessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond_not_eq(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::NotEqual => Some(FloatCC::NotEqual),
            _ => None,
        }
    }

    fn icmp_zero_cond_not_eq(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::NotEqual => Some(IntCC::NotEqual),
            _ => None,
        }
    }

    fn float_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::GreaterThan => VecMisc2::Fcmgt0,
            &FloatCC::LessThan => VecMisc2::Fcmlt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmgt0,
            &IntCC::SignedLessThan => VecMisc2::Cmlt0,
            _ => panic!(),
        }
    }

    fn float_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::GreaterThan => VecMisc2::Fcmlt0,
            &FloatCC::LessThan => VecMisc2::Fcmgt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmlt0,
            &IntCC::SignedLessThan => VecMisc2::Cmgt0,
            _ => panic!(),
        }
    }

    fn fp_cond_code(&mut self, cc: &condcodes::FloatCC) -> Cond {
        lower_fp_condcode(*cc)
    }

    fn cond_code(&mut self, cc: &condcodes::IntCC) -> Cond {
        lower_condcode(*cc)
    }

    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        (*cond).invert()
    }

    fn preg_sp(&mut self) -> PReg {
        super::regs::stack_reg().to_real_reg().unwrap().into()
    }

    fn preg_fp(&mut self) -> PReg {
        super::regs::fp_reg().to_real_reg().unwrap().into()
    }

    fn preg_link(&mut self) -> PReg {
        super::regs::link_reg().to_real_reg().unwrap().into()
    }

    fn preg_pinned(&mut self) -> PReg {
        super::regs::pinned_reg().to_real_reg().unwrap().into()
    }

    fn branch_target(&mut self, label: MachLabel) -> BranchTarget {
        BranchTarget::Label(label)
    }

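    // Worst-case size of the jump-table sequence: eight 4-byte instructions
    // of setup plus one 4-byte entry per target label.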
    fn targets_jt_space(&mut self, elements: &BoxVecMachLabel) -> CodeOffset {
        (4 * (8 + elements.len())).try_into().unwrap()
    }

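    // Materialize the lower bound used when range-checking a float-to-int
    // conversion of the given signedness and output width.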
    fn min_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f32 - 1.,
                (true, 16) => i16::MIN as f32 - 1.,
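                // `i32::MIN - 1` and `i64::MIN - 1` are not exactly
                // representable as f32, but `i32::MIN` and `i64::MIN`
                // (powers of two) are, so use them directly.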
                (true, 32) => i32::MIN as f32,
                (true, 64) => i64::MIN as f32,

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, min.to_bits())
        } else if in_bits == 64 {
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f64 - 1.,
                (true, 16) => i16::MIN as f64 - 1.,
                (true, 32) => i32::MIN as f64 - 1.,
                (true, 64) => i64::MIN as f64,

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, min.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for min_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }

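    // Materialize the upper bound for the same range check: one past the
    // largest convertible value, computed in wider integer arithmetic where
    // `MAX + 1` would otherwise overflow.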
    fn max_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f32 + 1.,
                (true, 16) => i16::MAX as f32 + 1.,
                (true, 32) => (i32::MAX as u64 + 1) as f32,
                (true, 64) => (i64::MAX as u64 + 1) as f32,

                (false, 8) => u8::MAX as f32 + 1.,
                (false, 16) => u16::MAX as f32 + 1.,
                (false, 32) => (u32::MAX as u64 + 1) as f32,
                (false, 64) => (u64::MAX as u128 + 1) as f32,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, max.to_bits())
        } else if in_bits == 64 {
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f64 + 1.,
                (true, 16) => i16::MAX as f64 + 1.,
                (true, 32) => i32::MAX as f64 + 1.,
                (true, 64) => (i64::MAX as u64 + 1) as f64,

                (false, 8) => u8::MAX as f64 + 1.,
                (false, 16) => u16::MAX as f64 + 1.,
                (false, 32) => u32::MAX as f64 + 1.,
                (false, 64) => (u64::MAX as u128 + 1) as f64,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, max.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for max_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }

    fn fpu_op_ri_ushr(&mut self, ty_bits: u8, shift: u8) -> FPUOpRI {
        if ty_bits == 32 {
            FPUOpRI::UShr32(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRI::UShr64(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_ushr: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

    fn fpu_op_ri_sli(&mut self, ty_bits: u8, shift: u8) -> FPUOpRIMod {
        if ty_bits == 32 {
            FPUOpRIMod::Sli32(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRIMod::Sli64(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_sli: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

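    // A shuffle mask of consecutive bytes starting below 16 selects a
    // contiguous 16-byte window of the concatenated inputs, which matches
    // the semantics of the EXT instruction; return the window's start.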
    fn vec_extract_imm4_from_immediate(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();

        if bytes.windows(2).all(|a| a[0] + 1 == a[1]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }

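    // The `shuffle_dup*` family recognizes masks that broadcast a single
    // lane of the input, which can instead be lowered as a DUP of that lane.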
    fn shuffle_dup8_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();
        if bytes.iter().all(|b| *b == bytes[0]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }
    fn shuffle_dup16_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d, e, f, g, h) = self.shuffle16_from_imm(imm)?;
        if a == b && b == c && c == d && d == e && e == f && f == g && g == h && a < 8 {
            Some(a)
        } else {
            None
        }
    }
    fn shuffle_dup32_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d) = self.shuffle32_from_imm(imm)?;
        if a == b && b == c && c == d && a < 4 {
            Some(a)
        } else {
            None
        }
    }
    fn shuffle_dup64_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b) = self.shuffle64_from_imm(imm)?;
        if a == b && a < 2 { Some(a) } else { None }
    }

    fn asimd_mov_mod_imm_zero(&mut self, size: &ScalarSize) -> ASIMDMovModImm {
        ASIMDMovModImm::zero(*size)
    }

    fn asimd_mov_mod_imm_from_u64(
        &mut self,
        val: u64,
        size: &ScalarSize,
    ) -> Option<ASIMDMovModImm> {
        ASIMDMovModImm::maybe_from_u64(val, *size)
    }

    fn asimd_fp_mod_imm_from_u64(&mut self, val: u64, size: &ScalarSize) -> Option<ASIMDFPModImm> {
        ASIMDFPModImm::maybe_from_u64(val, *size)
    }

    fn u64_low32_bits_unset(&mut self, val: u64) -> Option<u64> {
        if val & 0xffffffff == 0 {
            Some(val)
        } else {
            None
        }
    }

    fn shift_masked_imm(&mut self, ty: Type, imm: u64) -> u8 {
        (imm as u8) & ((ty.lane_bits() - 1) as u8)
    }

    fn simm7_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<SImm7Scaled> {
        SImm7Scaled::maybe_from_i64(val, ty)
    }

    fn simm9_from_i64(&mut self, val: i64) -> Option<SImm9> {
        SImm9::maybe_from_i64(val)
    }

    fn uimm12_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<UImm12Scaled> {
        UImm12Scaled::maybe_from_i64(val, ty)
    }

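    // Accept a constant with exactly one bit set within the type's width
    // and return the index of that bit, for single-bit test instructions.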
    fn test_and_compare_bit_const(&mut self, ty: Type, n: u64) -> Option<u8> {
        if n.count_ones() != 1 {
            return None;
        }
        let bit = n.trailing_zeros();
        if bit >= ty.bits() {
            return None;
        }
        Some(bit as u8)
    }

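    // The `ShiftOp` returned here appears to be repurposed for the EXTR
    // encoding: the assert checks that its two-bit encoding (LSL = 0b00 for
    // I32, LSR = 0b01 for I64) lines up with the operand-size field it
    // stands in for.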
    fn a64_extr_imm(&mut self, ty: Type, shift: ImmShift) -> ShiftOpAndAmt {
        let (op, expected) = match ty {
            types::I32 => (ShiftOp::LSL, 0b00),
            types::I64 => (ShiftOp::LSR, 0b01),
            _ => unreachable!(),
        };
        assert_eq!(op.bits(), expected);
        ShiftOpAndAmt::new(
            op,
            ShiftOpShiftImm::maybe_from_shift(shift.value().into()).unwrap(),
        )
    }

    fn is_pic(&mut self) -> bool {
        self.backend.flags.is_pic()
    }
}