pub mod generated_code;
use generated_code::{Context, ImmExtend};

use super::{
    ASIMDFPModImm, ASIMDMovModImm, BranchTarget, CallInfo, Cond, CondBrKind, ExtendOp, FPUOpRI,
    FPUOpRIMod, FloatCC, Imm12, ImmLogic, ImmShift, Inst as MInst, IntCC, MachLabel, MemLabel,
    MoveWideConst, MoveWideOp, NZCV, Opcode, OperandSize, Reg, SImm9, ScalarSize, ShiftOpAndAmt,
    UImm5, UImm12Scaled, VecMisc2, VectorSize, fp_reg, lower_condcode, lower_fp_condcode,
    stack_reg, writable_link_reg, writable_zero_reg, zero_reg,
};
use crate::ir::{ArgumentExtension, condcodes};
use crate::isa;
use crate::isa::aarch64::AArch64Backend;
use crate::isa::aarch64::inst::{FPULeftShiftImm, FPURightShiftImm, ReturnCallInfo};
use crate::machinst::isle::*;
use crate::{
    binemit::CodeOffset,
    ir::{
        AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData, MemFlags, TrapCode, Value,
        ValueList, immediates::*, types::*,
    },
    isa::aarch64::abi::AArch64MachineDeps,
    isa::aarch64::inst::SImm7Scaled,
    isa::aarch64::inst::args::{ShiftOp, ShiftOpShiftImm},
    machinst::{
        CallArgList, CallRetList, InstOutput, MachInst, VCodeConstant, VCodeConstantData,
        abi::ArgPair, ty_bits,
    },
};
use core::u32;
use regalloc2::PReg;
use std::boxed::Box;
use std::vec::Vec;

type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type VecArgPair = Vec<ArgPair>;

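/// The main entry point for lowering with ISLE.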
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

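/// The entry point for lowering branch instructions with ISLE, given the
/// branch's target labels.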
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

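/// A `Value` together with the register-extend operation to apply when it is
/// used as an operand.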
pub struct ExtendedValue {
    val: Value,
    extend: ExtendOp,
}

impl Context for IsleContext<'_, '_, MInst, AArch64Backend> {
    isle_lower_prelude_methods!();

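    // Constructors for the `CallInfo` boxes attached to call instructions;
    // each first accumulates the stack space the call needs for outgoing
    // arguments and return values.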
    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }

    fn gen_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }

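    // Return (tail) calls additionally record their argument-area size against
    // the caller's tail-args size and select a pointer-authentication key
    // based on the ISA flags.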
    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        let key =
            AArch64MachineDeps::select_api_key(&self.backend.isa_flags, isa::CallConv::Tail, true);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            key,
            new_stack_arg_size,
        })
    }

    fn gen_return_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
    ) -> BoxReturnCallIndInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        let key =
            AArch64MachineDeps::select_api_key(&self.backend.isa_flags, isa::CallConv::Tail, true);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            key,
            new_stack_arg_size,
        })
    }

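    // ISLE predicates keyed off the target's ISA flags: return-address signing
    // disabled, LSE atomics available, and FP16 support.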
    fn sign_return_address_disabled(&mut self) -> Option<()> {
        if self.backend.isa_flags.sign_return_address() {
            None
        } else {
            Some(())
        }
    }

    fn use_lse(&mut self, _: Inst) -> Option<()> {
        if self.backend.isa_flags.has_lse() {
            Some(())
        } else {
            None
        }
    }

    fn use_fp16(&mut self) -> bool {
        self.backend.isa_flags.has_fp16()
    }

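    // Mask the constant down to the type's width before checking whether it
    // fits in a single wide-immediate move.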
    fn move_wide_const_from_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        let bits = ty.bits();
        let n = if bits < 64 {
            n & !(u64::MAX << bits)
        } else {
            n
        };
        MoveWideConst::maybe_from_u64(n)
    }

    fn move_wide_const_from_inverted_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        self.move_wide_const_from_u64(ty, !n)
    }

    fn imm_logic_from_u64(&mut self, ty: Type, n: u64) -> Option<ImmLogic> {
        ImmLogic::maybe_from_u64(n, ty)
    }

    fn imm_size_from_type(&mut self, ty: Type) -> Option<u16> {
        match ty {
            I32 => Some(32),
            I64 => Some(64),
            _ => None,
        }
    }

    fn imm_logic_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ImmLogic> {
        let ty = if ty.bits() < 32 { I32 } else { ty };
        self.imm_logic_from_u64(ty, n.bits() as u64)
    }

    fn imm12_from_u64(&mut self, n: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(n)
    }

    fn imm_shift_from_u8(&mut self, n: u8) -> ImmShift {
        ImmShift::maybe_from_u64(n.into()).unwrap()
    }

    fn lshr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        if let Ok(bits) = u8::try_from(ty_bits(ty)) {
            let shiftimm = shiftimm.mask(bits);
            Some(ShiftOpAndAmt::new(ShiftOp::LSR, shiftimm))
        } else {
            None
        }
    }

    fn lshl_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ShiftOpAndAmt> {
        self.lshl_from_u64(ty, n.bits() as u64)
    }

    fn lshl_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= std::u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::LSL, shiftimm))
        } else {
            None
        }
    }

    fn ashr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= std::u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::ASR, shiftimm))
        } else {
            None
        }
    }

    fn integral_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }

    fn is_zero_simm9(&mut self, imm: &SImm9) -> Option<()> {
        if imm.value() == 0 { Some(()) } else { None }
    }

    fn is_zero_uimm12(&mut self, imm: &UImm12Scaled) -> Option<()> {
        if imm.value() == 0 { Some(()) } else { None }
    }

    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }

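    // Materialize an arbitrary 64-bit constant: first normalize it according
    // to the requested sign/zero extension, then emit a `movz`/`movn` followed
    // by `movk`s for the remaining 16-bit chunks.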
    fn load_constant_full(
        &mut self,
        ty: Type,
        extend: &ImmExtend,
        extend_to: &OperandSize,
        value: u64,
    ) -> Reg {
        let bits = ty.bits();

        let value = match (extend_to, *extend) {
            (OperandSize::Size32, ImmExtend::Sign) if bits < 32 => {
                let shift = 32 - bits;
                let value = value as i32;

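                // Cast back through `u32` so that only the low 32 bits of the
                // sign-extended value survive in the returned `u64`.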
                ((value << shift) >> shift) as u32 as u64
            }
            (OperandSize::Size32, ImmExtend::Zero) if bits < 32 => {
                value & !((u32::MAX as u64) << bits)
            }
            (OperandSize::Size64, ImmExtend::Sign) if bits < 64 => {
                let shift = 64 - bits;
                let value = value as i64;

                ((value << shift) >> shift) as u64
            }
            (OperandSize::Size64, ImmExtend::Zero) if bits < 64 => value & !(u64::MAX << bits),
            _ => value,
        };

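        // Helpers to read and overwrite a single 16-bit chunk of the constant,
        // the granularity at which `movz`/`movn`/`movk` operate.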
        fn get(value: u64, shift: u8) -> u16 {
            (value >> (shift * 16)) as u16
        }
        fn replace(mut old: u64, new: u16, shift: u8) -> u64 {
            let offset = shift * 16;
            old &= !(0xffff << offset);
            old |= u64::from(new) << offset;
            old
        }

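        // If the upper 32 bits are zero, the 32-bit instruction forms (which
        // clear the upper half of the register) are enough for the whole
        // sequence.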
        let size = if value >> 32 == 0 {
            OperandSize::Size32
        } else {
            OperandSize::Size64
        };

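        // Start from either `movz` (all bits cleared) or `movn` (all bits set),
        // overwriting the first 16-bit chunk that differs from the target, and
        // pick whichever choice leaves the fewest chunks to patch with `movk`.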
        let (mut running_value, op, first) =
            [(MoveWideOp::MovZ, 0), (MoveWideOp::MovN, size.max_value())]
                .into_iter()
                .map(|(op, base)| {
                    let first = (0..(size.bits() / 16))
                        .find(|&i| get(base ^ value, i) != 0)
                        .unwrap_or(0);
                    (replace(base, get(value, first), first), op, first)
                })
                .min_by_key(|(base, ..)| (0..4).filter(|&i| get(base ^ value, i) != 0).count())
                .unwrap();

        // Emit the initial wide move; `movn` stores the bitwise inverse of the
        // desired 16-bit chunk, so invert it here.
        let mut rd = self.temp_writable_reg(I64);
        self.lower_ctx.emit(MInst::MovWide {
            op,
            rd,
            imm: MoveWideConst {
                bits: match op {
                    MoveWideOp::MovZ => get(value, first),
                    MoveWideOp::MovN => !get(value, first),
                },
                shift: first,
            },
            size,
        });
        if self.backend.flags.enable_pcc() {
            self.lower_ctx
                .add_range_fact(rd.to_reg(), 64, running_value, running_value);
        }

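        // Patch every remaining 16-bit chunk that still differs from the
        // target value with a `movk`, tracking PCC range facts when enabled.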
        for shift in (first + 1)..(size.bits() / 16) {
            let bits = get(value, shift);
            if bits != get(running_value, shift) {
                let rn = rd.to_reg();
                rd = self.temp_writable_reg(I64);
                self.lower_ctx.emit(MInst::MovK {
                    rd,
                    rn,
                    imm: MoveWideConst { bits, shift },
                    size,
                });
                running_value = replace(running_value, bits, shift);
                if self.backend.flags.enable_pcc() {
                    self.lower_ctx
                        .add_range_fact(rd.to_reg(), 64, running_value, running_value);
                }
            }
        }

        debug_assert_eq!(value, running_value);
        return rd.to_reg();
    }

    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    fn stack_reg(&mut self) -> Reg {
        stack_reg()
    }

    fn fp_reg(&mut self) -> Reg {
        fp_reg()
    }

    fn writable_link_reg(&mut self) -> WritableReg {
        writable_link_reg()
    }

    fn extended_value_from_value(&mut self, val: Value) -> Option<ExtendedValue> {
        let (val, extend) = super::get_as_extended_value(self.lower_ctx, val)?;
        Some(ExtendedValue { val, extend })
    }

    fn put_extended_in_reg(&mut self, reg: &ExtendedValue) -> Reg {
        self.put_in_reg(reg.val)
    }

    fn get_extended_op(&mut self, reg: &ExtendedValue) -> ExtendOp {
        reg.extend
    }

    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    fn cond_br_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::Zero(reg, *size)
    }

    fn cond_br_not_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::NotZero(reg, *size)
    }

    fn cond_br_cond(&mut self, cond: &Cond) -> CondBrKind {
        CondBrKind::Cond(*cond)
    }

    fn nzcv(&mut self, n: bool, z: bool, c: bool, v: bool) -> NZCV {
        NZCV::new(n, z, c, v)
    }

    fn u8_into_uimm5(&mut self, x: u8) -> UImm5 {
        UImm5::maybe_from_u8(x).unwrap()
    }

    fn u8_into_imm12(&mut self, x: u8) -> Imm12 {
        Imm12::maybe_from_u64(x.into()).unwrap()
    }

    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }

    fn shift_mask(&mut self, ty: Type) -> ImmLogic {
        debug_assert!(ty.lane_bits().is_power_of_two());

        let mask = (ty.lane_bits() - 1) as u64;
        ImmLogic::maybe_from_u64(mask, I32).unwrap()
    }

    fn imm_shift_from_imm64(&mut self, ty: Type, val: Imm64) -> Option<ImmShift> {
        let imm_value = (val.bits() as u64) & ((ty.bits() - 1) as u64);
        ImmShift::maybe_from_u64(imm_value)
    }

    fn u64_into_imm_logic(&mut self, ty: Type, val: u64) -> ImmLogic {
        ImmLogic::maybe_from_u64(val, ty).unwrap()
    }

    fn negate_imm_shift(&mut self, ty: Type, mut imm: ImmShift) -> ImmShift {
        let size = u8::try_from(ty.bits()).unwrap();
        imm.imm = size.wrapping_sub(imm.value());
        imm.imm &= size - 1;
        imm
    }

    fn rotr_mask(&mut self, ty: Type) -> ImmLogic {
        ImmLogic::maybe_from_u64((ty.bits() - 1) as u64, I32).unwrap()
    }

    fn rotr_opposite_amount(&mut self, ty: Type, val: ImmShift) -> ImmShift {
        let amount = val.value() & u8::try_from(ty.bits() - 1).unwrap();
        ImmShift::maybe_from_u64(u64::from(ty.bits()) - u64::from(amount)).unwrap()
    }

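    // The `*_zero_cond` helpers accept only the condition codes that the
    // compare-against-zero lowerings below know how to handle.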
    fn icmp_zero_cond(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::Equal
            | &IntCC::SignedGreaterThanOrEqual
            | &IntCC::SignedGreaterThan
            | &IntCC::SignedLessThanOrEqual
            | &IntCC::SignedLessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::Equal
            | &FloatCC::GreaterThanOrEqual
            | &FloatCC::GreaterThan
            | &FloatCC::LessThanOrEqual
            | &FloatCC::LessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond_not_eq(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::NotEqual => Some(FloatCC::NotEqual),
            _ => None,
        }
    }

    fn icmp_zero_cond_not_eq(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::NotEqual => Some(IntCC::NotEqual),
            _ => None,
        }
    }

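    // Map a comparison-with-zero condition onto the corresponding SIMD
    // compare-against-zero operation; the `_swap` variants are used when the
    // comparison's operands have been swapped, so the ordering is mirrored.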
    fn float_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::GreaterThan => VecMisc2::Fcmgt0,
            &FloatCC::LessThan => VecMisc2::Fcmlt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmgt0,
            &IntCC::SignedLessThan => VecMisc2::Cmlt0,
            _ => panic!(),
        }
    }

    fn float_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::GreaterThan => VecMisc2::Fcmlt0,
            &FloatCC::LessThan => VecMisc2::Fcmgt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmlt0,
            &IntCC::SignedLessThan => VecMisc2::Cmgt0,
            _ => panic!(),
        }
    }

    fn fp_cond_code(&mut self, cc: &condcodes::FloatCC) -> Cond {
        lower_fp_condcode(*cc)
    }

    fn cond_code(&mut self, cc: &condcodes::IntCC) -> Cond {
        lower_condcode(*cc)
    }

    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        (*cond).invert()
    }

    fn preg_sp(&mut self) -> PReg {
        super::regs::stack_reg().to_real_reg().unwrap().into()
    }

    fn preg_fp(&mut self) -> PReg {
        super::regs::fp_reg().to_real_reg().unwrap().into()
    }

    fn preg_link(&mut self) -> PReg {
        super::regs::link_reg().to_real_reg().unwrap().into()
    }

    fn preg_pinned(&mut self) -> PReg {
        super::regs::pinned_reg().to_real_reg().unwrap().into()
    }

    fn branch_target(&mut self, label: MachLabel) -> BranchTarget {
        BranchTarget::Label(label)
    }

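    // Space reserved for the jump-table sequence: 4 bytes for each of the
    // eight fixed slots plus a 4-byte entry per target.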
    fn targets_jt_space(&mut self, elements: &BoxVecMachLabel) -> CodeOffset {
        (4 * (8 + elements.len())).try_into().unwrap()
    }

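    // Build an f32/f64 constant marking the lower bound of the target integer
    // type's range (roughly `MIN - 1`, or `-1.0` for unsigned targets).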
    fn min_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f32 - 1.,
                (true, 16) => i16::MIN as f32 - 1.,
                // i32::MIN - 1 and i64::MIN - 1 are not exactly representable
                // as an f32, so the type's minimum itself is used instead.
                (true, 32) => i32::MIN as f32,
                (true, 64) => i64::MIN as f32,

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, min.to_bits())
        } else if in_bits == 64 {
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f64 - 1.,
                (true, 16) => i16::MIN as f64 - 1.,
                (true, 32) => i32::MIN as f64 - 1.,
                (true, 64) => i64::MIN as f64,

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, min.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for min_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }

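    // Build an f32/f64 constant marking the upper bound of the target integer
    // type's range (`MAX + 1`).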
    fn max_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f32 + 1.,
                (true, 16) => i16::MAX as f32 + 1.,
                (true, 32) => (i32::MAX as u64 + 1) as f32,
                (true, 64) => (i64::MAX as u64 + 1) as f32,

                (false, 8) => u8::MAX as f32 + 1.,
                (false, 16) => u16::MAX as f32 + 1.,
                (false, 32) => (u32::MAX as u64 + 1) as f32,
                (false, 64) => (u64::MAX as u128 + 1) as f32,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, max.to_bits())
        } else if in_bits == 64 {
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f64 + 1.,
                (true, 16) => i16::MAX as f64 + 1.,
                (true, 32) => i32::MAX as f64 + 1.,
                (true, 64) => (i64::MAX as u64 + 1) as f64,

                (false, 8) => u8::MAX as f64 + 1.,
                (false, 16) => u16::MAX as f64 + 1.,
                (false, 32) => u32::MAX as f64 + 1.,
                (false, 64) => (u64::MAX as u128 + 1) as f64,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, max.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for max_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }

    fn fpu_op_ri_ushr(&mut self, ty_bits: u8, shift: u8) -> FPUOpRI {
        if ty_bits == 32 {
            FPUOpRI::UShr32(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRI::UShr64(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_ushr: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

    fn fpu_op_ri_sli(&mut self, ty_bits: u8, shift: u8) -> FPUOpRIMod {
        if ty_bits == 32 {
            FPUOpRIMod::Sli32(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRIMod::Sli64(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_sli: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

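    // Recognize a shuffle immediate that selects consecutive bytes starting at
    // an index below 16, which corresponds to a single `ext`-style vector
    // extract; returns that starting index.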
    fn vec_extract_imm4_from_immediate(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();

        if bytes.windows(2).all(|a| a[0] + 1 == a[1]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }

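    // The `shuffle_dup*` helpers detect masks that broadcast a single lane of
    // the input, so the shuffle can be lowered to a lane duplicate.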
    fn shuffle_dup8_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();
        if bytes.iter().all(|b| *b == bytes[0]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }
    fn shuffle_dup16_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d, e, f, g, h) = self.shuffle16_from_imm(imm)?;
        if a == b && b == c && c == d && d == e && e == f && f == g && g == h && a < 8 {
            Some(a)
        } else {
            None
        }
    }
    fn shuffle_dup32_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d) = self.shuffle32_from_imm(imm)?;
        if a == b && b == c && c == d && a < 4 {
            Some(a)
        } else {
            None
        }
    }
    fn shuffle_dup64_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b) = self.shuffle64_from_imm(imm)?;
        if a == b && a < 2 { Some(a) } else { None }
    }

    fn asimd_mov_mod_imm_zero(&mut self, size: &ScalarSize) -> ASIMDMovModImm {
        ASIMDMovModImm::zero(*size)
    }

    fn asimd_mov_mod_imm_from_u64(
        &mut self,
        val: u64,
        size: &ScalarSize,
    ) -> Option<ASIMDMovModImm> {
        ASIMDMovModImm::maybe_from_u64(val, *size)
    }

    fn asimd_fp_mod_imm_from_u64(&mut self, val: u64, size: &ScalarSize) -> Option<ASIMDFPModImm> {
        ASIMDFPModImm::maybe_from_u64(val, *size)
    }

    fn u64_low32_bits_unset(&mut self, val: u64) -> Option<u64> {
        if val & 0xffffffff == 0 {
            Some(val)
        } else {
            None
        }
    }

    fn shift_masked_imm(&mut self, ty: Type, imm: u64) -> u8 {
        (imm as u8) & ((ty.lane_bits() - 1) as u8)
    }

    fn simm7_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<SImm7Scaled> {
        SImm7Scaled::maybe_from_i64(val, ty)
    }

    fn simm9_from_i64(&mut self, val: i64) -> Option<SImm9> {
        SImm9::maybe_from_i64(val)
    }

    fn uimm12_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<UImm12Scaled> {
        UImm12Scaled::maybe_from_i64(val, ty)
    }

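    // Succeeds only when `n` has exactly one bit set and that bit lies within
    // `ty`'s width, returning the bit index.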
    fn test_and_compare_bit_const(&mut self, ty: Type, n: u64) -> Option<u8> {
        if n.count_ones() != 1 {
            return None;
        }
        let bit = n.trailing_zeros();
        if bit >= ty.bits() {
            return None;
        }
        Some(bit as u8)
    }

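    // Build the shifted-operand immediate used when lowering to `extr`: the
    // `ShiftOp` is chosen purely for its 2-bit encoding (0b00 for I32, 0b01
    // for I64), which the `assert_eq!` double-checks; its usual shift meaning
    // is not relevant here.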
    fn a64_extr_imm(&mut self, ty: Type, shift: ImmShift) -> ShiftOpAndAmt {
        let (op, expected) = match ty {
            types::I32 => (ShiftOp::LSL, 0b00),
            types::I64 => (ShiftOp::LSR, 0b01),
            _ => unreachable!(),
        };
        assert_eq!(op.bits(), expected);
        ShiftOpAndAmt::new(
            op,
            ShiftOpShiftImm::maybe_from_shift(shift.value().into()).unwrap(),
        )
    }
}