cranelift_codegen/isa/aarch64/lower/isle.rs

//! ISLE integration glue code for aarch64 lowering.

// Pull in the ISLE generated code.
pub mod generated_code;
use generated_code::{Context, ImmExtend};

// Types that the generated ISLE code uses via `use super::*`.
use super::{
    fp_reg, lower_condcode, lower_fp_condcode, stack_reg, writable_link_reg, writable_zero_reg,
    zero_reg, ASIMDFPModImm, ASIMDMovModImm, BranchTarget, CallInfo, Cond, CondBrKind, ExtendOp,
    FPUOpRI, FPUOpRIMod, FloatCC, Imm12, ImmLogic, ImmShift, Inst as MInst, IntCC, MachLabel,
    MemLabel, MoveWideConst, MoveWideOp, Opcode, OperandSize, Reg, SImm9, ScalarSize,
    ShiftOpAndAmt, UImm12Scaled, UImm5, VecMisc2, VectorSize, NZCV,
};
use crate::ir::{condcodes, ArgumentExtension};
use crate::isa;
use crate::isa::aarch64::inst::{FPULeftShiftImm, FPURightShiftImm, ReturnCallInfo};
use crate::isa::aarch64::AArch64Backend;
use crate::machinst::isle::*;
use crate::{
    binemit::CodeOffset,
    ir::{
        immediates::*, types::*, AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData,
        MemFlags, TrapCode, Value, ValueList,
    },
    isa::aarch64::abi::AArch64CallSite,
    isa::aarch64::inst::args::{ShiftOp, ShiftOpShiftImm},
    isa::aarch64::inst::SImm7Scaled,
    machinst::{
        abi::ArgPair, ty_bits, InstOutput, IsTailCall, MachInst, VCodeConstant, VCodeConstantData,
    },
};
use core::u32;
use regalloc2::PReg;
use std::boxed::Box;
use std::vec::Vec;

type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type VecMachLabel = Vec<MachLabel>;
type BoxExternalName = Box<ExternalName>;
type VecArgPair = Vec<ArgPair>;

/// The main entry point for lowering with ISLE.
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &AArch64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = IsleContext { lower_ctx, backend };
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}

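/// A [`Value`] paired with the [`ExtendOp`] implied by the way it was
/// produced, as recognized by `extended_value_from_value` below.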
pub struct ExtendedValue {
    val: Value,
    extend: ExtendOp,
}

impl Context for IsleContext<'_, '_, MInst, AArch64Backend> {
    isle_lower_prelude_methods!();
    isle_prelude_caller_methods!(AArch64CallSite);

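    // Partial constructor: succeeds (returns `Some(())`) only when
    // return-address signing is disabled in the ISA flags.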
    fn sign_return_address_disabled(&mut self) -> Option<()> {
        if self.backend.isa_flags.sign_return_address() {
            None
        } else {
            Some(())
        }
    }

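    // Partial constructor: succeeds only when the target reports the Large
    // System Extensions (LSE) atomics in its ISA flags.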
    fn use_lse(&mut self, _: Inst) -> Option<()> {
        if self.backend.isa_flags.has_lse() {
            Some(())
        } else {
            None
        }
    }

    fn use_fp16(&mut self) -> bool {
        self.backend.isa_flags.has_fp16()
    }

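    /// Mask `n` to the width of `ty` and check whether the result can be
    /// materialized by a single 16-bit move-wide immediate.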
    fn move_wide_const_from_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        let bits = ty.bits();
        let n = if bits < 64 {
            n & !(u64::MAX << bits)
        } else {
            n
        };
        MoveWideConst::maybe_from_u64(n)
    }

    fn move_wide_const_from_inverted_u64(&mut self, ty: Type, n: u64) -> Option<MoveWideConst> {
        self.move_wide_const_from_u64(ty, !n)
    }

    fn imm_logic_from_u64(&mut self, ty: Type, n: u64) -> Option<ImmLogic> {
        ImmLogic::maybe_from_u64(n, ty)
    }

    fn imm_size_from_type(&mut self, ty: Type) -> Option<u16> {
        match ty {
            I32 => Some(32),
            I64 => Some(64),
            _ => None,
        }
    }

    fn imm_logic_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ImmLogic> {
        let ty = if ty.bits() < 32 { I32 } else { ty };
        self.imm_logic_from_u64(ty, n.bits() as u64)
    }

    fn imm12_from_u64(&mut self, n: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(n)
    }

    fn imm_shift_from_u8(&mut self, n: u8) -> ImmShift {
        ImmShift::maybe_from_u64(n.into()).unwrap()
    }

    fn lshr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        if let Ok(bits) = u8::try_from(ty_bits(ty)) {
            let shiftimm = shiftimm.mask(bits);
            Some(ShiftOpAndAmt::new(ShiftOp::LSR, shiftimm))
        } else {
            None
        }
    }

    fn lshl_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ShiftOpAndAmt> {
        self.lshl_from_u64(ty, n.bits() as u64)
    }

    fn lshl_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= std::u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::LSL, shiftimm))
        } else {
            None
        }
    }

    fn ashr_from_u64(&mut self, ty: Type, n: u64) -> Option<ShiftOpAndAmt> {
        let shiftimm = ShiftOpShiftImm::maybe_from_shift(n)?;
        let shiftee_bits = ty_bits(ty);
        if shiftee_bits <= std::u8::MAX as usize {
            let shiftimm = shiftimm.mask(shiftee_bits as u8);
            Some(ShiftOpAndAmt::new(ShiftOp::ASR, shiftimm))
        } else {
            None
        }
    }

    fn integral_ty(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }

    fn is_zero_simm9(&mut self, imm: &SImm9) -> Option<()> {
        if imm.value() == 0 {
            Some(())
        } else {
            None
        }
    }

    fn is_zero_uimm12(&mut self, imm: &UImm12Scaled) -> Option<()> {
        if imm.value() == 0 {
            Some(())
        } else {
            None
        }
    }

    /// This is target-word-size dependent and excludes booleans and reference
    /// types.
    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I8 | I16 | I32 | I64 => Some(ty),
            _ => None,
        }
    }

    /// This is the fallback case for loading a 64-bit integral constant into a
    /// register.
    ///
    /// The logic here is nontrivial enough that it's not really worth porting
    /// this over to ISLE.
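    ///
    /// For example, `0x0000_1234_0000_5678` is materialized here as
    /// `movz rd, #0x5678` followed by `movk rd, #0x1234, lsl #32`, while a
    /// mostly-ones constant such as `0xffff_ffff_ffff_1234` is handled with a
    /// single `movn`.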
    fn load_constant_full(
        &mut self,
        ty: Type,
        extend: &ImmExtend,
        extend_to: &OperandSize,
        value: u64,
    ) -> Reg {
        let bits = ty.bits();

        let value = match (extend_to, *extend) {
            (OperandSize::Size32, ImmExtend::Sign) if bits < 32 => {
                let shift = 32 - bits;
                let value = value as i32;

                // Cast to u32 first and then to u64 so that we represent an
                // i32 inside a u64 rather than an i64; otherwise the value
                // would not fit in 32 bits.
                ((value << shift) >> shift) as u32 as u64
            }
            (OperandSize::Size32, ImmExtend::Zero) if bits < 32 => {
                value & !((u32::MAX as u64) << bits)
            }
            (OperandSize::Size64, ImmExtend::Sign) if bits < 64 => {
                let shift = 64 - bits;
                let value = value as i64;

                ((value << shift) >> shift) as u64
            }
            (OperandSize::Size64, ImmExtend::Zero) if bits < 64 => value & !(u64::MAX << bits),
            _ => value,
        };

        // Divide the value into 16-bit slices that we can manipulate using
        // `movz`, `movn`, and `movk`.
        fn get(value: u64, shift: u8) -> u16 {
            (value >> (shift * 16)) as u16
        }
        fn replace(mut old: u64, new: u16, shift: u8) -> u64 {
            let offset = shift * 16;
            old &= !(0xffff << offset);
            old |= u64::from(new) << offset;
            old
        }

        // The 32-bit versions of `movz`/`movn`/`movk` will clear the upper 32
        // bits, so if that's the outcome we want we might as well use them. For
        // simplicity and ease of reading the disassembly, we use the same size
        // for all instructions in the sequence.
        let size = if value >> 32 == 0 {
            OperandSize::Size32
        } else {
            OperandSize::Size64
        };

        // The `movz` instruction initially sets all bits to zero, while `movn`
        // initially sets all bits to one. A good choice of initial value can
        // reduce the number of `movk` instructions we need afterward, so we
        // check both variants to determine which is closest to the constant
        // we actually wanted. In case they're equally good, we prefer `movz`
        // because the assembly listings are generally harder to read when the
        // operands are negated.
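        // For example, `0xffff_ffff_ffff_1234` needs only a single `movn`,
        // whereas starting from `movz` would require three follow-up `movk`s.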
        let (mut running_value, op, first) =
            [(MoveWideOp::MovZ, 0), (MoveWideOp::MovN, size.max_value())]
                .into_iter()
                .map(|(op, base)| {
                    // Both `movz` and `movn` can overwrite one slice after setting
                    // the initial value; we get to pick which one. 32-bit variants
                    // can only modify the lower two slices.
                    let first = (0..(size.bits() / 16))
                        // Pick one slice that's different from the initial value
                        .find(|&i| get(base ^ value, i) != 0)
                        // If none are different, we still have to pick one
                        .unwrap_or(0);
                    // Compute the value we'll get from this `movz`/`movn`
                    (replace(base, get(value, first), first), op, first)
                })
                // Count how many `movk` instructions we'll need.
                .min_by_key(|(base, ..)| (0..4).filter(|&i| get(base ^ value, i) != 0).count())
                // The candidate array isn't empty so `min_by_key` always
                // returns something.
                .unwrap();

        // Build the initial instruction we chose above, putting the result
        // into a new temporary virtual register. Note that the encoding for the
        // immediate operand is bitwise-inverted for `movn`.
        let mut rd = self.temp_writable_reg(I64);
        self.lower_ctx.emit(MInst::MovWide {
            op,
            rd,
            imm: MoveWideConst {
                bits: match op {
                    MoveWideOp::MovZ => get(value, first),
                    MoveWideOp::MovN => !get(value, first),
                },
                shift: first,
            },
            size,
        });
        if self.backend.flags.enable_pcc() {
            self.lower_ctx
                .add_range_fact(rd.to_reg(), 64, running_value, running_value);
        }

        // Emit a `movk` instruction for each remaining slice of the desired
        // constant that does not match the initial value constructed above.
        for shift in (first + 1)..(size.bits() / 16) {
            let bits = get(value, shift);
            if bits != get(running_value, shift) {
                let rn = rd.to_reg();
                rd = self.temp_writable_reg(I64);
                self.lower_ctx.emit(MInst::MovK {
                    rd,
                    rn,
                    imm: MoveWideConst { bits, shift },
                    size,
                });
                running_value = replace(running_value, bits, shift);
                if self.backend.flags.enable_pcc() {
                    self.lower_ctx
                        .add_range_fact(rd.to_reg(), 64, running_value, running_value);
                }
            }
        }

        debug_assert_eq!(value, running_value);
        return rd.to_reg();
    }

    fn zero_reg(&mut self) -> Reg {
        zero_reg()
    }

    fn stack_reg(&mut self) -> Reg {
        stack_reg()
    }

    fn fp_reg(&mut self) -> Reg {
        fp_reg()
    }

    fn writable_link_reg(&mut self) -> WritableReg {
        writable_link_reg()
    }

    fn extended_value_from_value(&mut self, val: Value) -> Option<ExtendedValue> {
        let (val, extend) = super::get_as_extended_value(self.lower_ctx, val)?;
        Some(ExtendedValue { val, extend })
    }

    fn put_extended_in_reg(&mut self, reg: &ExtendedValue) -> Reg {
        self.put_in_reg(reg.val)
    }

    fn get_extended_op(&mut self, reg: &ExtendedValue) -> ExtendOp {
        reg.extend
    }

    fn emit(&mut self, inst: &MInst) -> Unit {
        self.lower_ctx.emit(inst.clone());
    }

    fn cond_br_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::Zero(reg, *size)
    }

    fn cond_br_not_zero(&mut self, reg: Reg, size: &OperandSize) -> CondBrKind {
        CondBrKind::NotZero(reg, *size)
    }

    fn cond_br_cond(&mut self, cond: &Cond) -> CondBrKind {
        CondBrKind::Cond(*cond)
    }

    fn nzcv(&mut self, n: bool, z: bool, c: bool, v: bool) -> NZCV {
        NZCV::new(n, z, c, v)
    }

    fn u8_into_uimm5(&mut self, x: u8) -> UImm5 {
        UImm5::maybe_from_u8(x).unwrap()
    }

    fn u8_into_imm12(&mut self, x: u8) -> Imm12 {
        Imm12::maybe_from_u64(x.into()).unwrap()
    }

    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }

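    /// Logical-immediate mask of `lane_bits - 1` (e.g. `0x3f` for 64-bit lanes,
    /// `0x1f` for 32-bit lanes), used to wrap shift amounts to the lane width.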
    fn shift_mask(&mut self, ty: Type) -> ImmLogic {
        debug_assert!(ty.lane_bits().is_power_of_two());

        let mask = (ty.lane_bits() - 1) as u64;
        ImmLogic::maybe_from_u64(mask, I32).unwrap()
    }

    fn imm_shift_from_imm64(&mut self, ty: Type, val: Imm64) -> Option<ImmShift> {
        let imm_value = (val.bits() as u64) & ((ty.bits() - 1) as u64);
        ImmShift::maybe_from_u64(imm_value)
    }

    fn u64_into_imm_logic(&mut self, ty: Type, val: u64) -> ImmLogic {
        ImmLogic::maybe_from_u64(val, ty).unwrap()
    }

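    /// Negate a shift amount modulo the type width: e.g. for `I32`, an amount
    /// of 1 becomes 31 (and 0 stays 0).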
    fn negate_imm_shift(&mut self, ty: Type, mut imm: ImmShift) -> ImmShift {
        let size = u8::try_from(ty.bits()).unwrap();
        imm.imm = size.wrapping_sub(imm.value());
        imm.imm &= size - 1;
        imm
    }

    fn rotr_mask(&mut self, ty: Type) -> ImmLogic {
        ImmLogic::maybe_from_u64((ty.bits() - 1) as u64, I32).unwrap()
    }

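    /// The complementary rotate amount, `ty.bits() - amount`: e.g. for `I32`,
    /// an amount of 8 maps to 24.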
    fn rotr_opposite_amount(&mut self, ty: Type, val: ImmShift) -> ImmShift {
        let amount = val.value() & u8::try_from(ty.bits() - 1).unwrap();
        ImmShift::maybe_from_u64(u64::from(ty.bits()) - u64::from(amount)).unwrap()
    }

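    // The partial constructors below accept only the condition codes for which
    // a dedicated compare-against-zero form exists (cf. the
    // `*_cmp_zero_to_vec_misc_op` mappings further down).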
    fn icmp_zero_cond(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::Equal
            | &IntCC::SignedGreaterThanOrEqual
            | &IntCC::SignedGreaterThan
            | &IntCC::SignedLessThanOrEqual
            | &IntCC::SignedLessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::Equal
            | &FloatCC::GreaterThanOrEqual
            | &FloatCC::GreaterThan
            | &FloatCC::LessThanOrEqual
            | &FloatCC::LessThan => Some(*cond),
            _ => None,
        }
    }

    fn fcmp_zero_cond_not_eq(&mut self, cond: &FloatCC) -> Option<FloatCC> {
        match cond {
            &FloatCC::NotEqual => Some(FloatCC::NotEqual),
            _ => None,
        }
    }

    fn icmp_zero_cond_not_eq(&mut self, cond: &IntCC) -> Option<IntCC> {
        match cond {
            &IntCC::NotEqual => Some(IntCC::NotEqual),
            _ => None,
        }
    }

    fn float_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::GreaterThan => VecMisc2::Fcmgt0,
            &FloatCC::LessThan => VecMisc2::Fcmlt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmgt0,
            &IntCC::SignedLessThan => VecMisc2::Cmlt0,
            _ => panic!(),
        }
    }

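    // The `_swap` variants are used when the comparison operands have been
    // swapped (the zero ends up on the left), so the ordered conditions are
    // reversed: GE becomes LE, GT becomes LT, and vice versa.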
    fn float_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &FloatCC) -> VecMisc2 {
        match cond {
            &FloatCC::Equal => VecMisc2::Fcmeq0,
            &FloatCC::GreaterThanOrEqual => VecMisc2::Fcmle0,
            &FloatCC::LessThanOrEqual => VecMisc2::Fcmge0,
            &FloatCC::GreaterThan => VecMisc2::Fcmlt0,
            &FloatCC::LessThan => VecMisc2::Fcmgt0,
            _ => panic!(),
        }
    }

    fn int_cc_cmp_zero_to_vec_misc_op_swap(&mut self, cond: &IntCC) -> VecMisc2 {
        match cond {
            &IntCC::Equal => VecMisc2::Cmeq0,
            &IntCC::SignedGreaterThanOrEqual => VecMisc2::Cmle0,
            &IntCC::SignedLessThanOrEqual => VecMisc2::Cmge0,
            &IntCC::SignedGreaterThan => VecMisc2::Cmlt0,
            &IntCC::SignedLessThan => VecMisc2::Cmgt0,
            _ => panic!(),
        }
    }

    fn fp_cond_code(&mut self, cc: &condcodes::FloatCC) -> Cond {
        lower_fp_condcode(*cc)
    }

    fn cond_code(&mut self, cc: &condcodes::IntCC) -> Cond {
        lower_condcode(*cc)
    }

    fn invert_cond(&mut self, cond: &Cond) -> Cond {
        (*cond).invert()
    }

    fn preg_sp(&mut self) -> PReg {
        super::regs::stack_reg().to_real_reg().unwrap().into()
    }

    fn preg_fp(&mut self) -> PReg {
        super::regs::fp_reg().to_real_reg().unwrap().into()
    }

    fn preg_link(&mut self) -> PReg {
        super::regs::link_reg().to_real_reg().unwrap().into()
    }

    fn preg_pinned(&mut self) -> PReg {
        super::regs::pinned_reg().to_real_reg().unwrap().into()
    }

    fn branch_target(&mut self, label: MachLabel) -> BranchTarget {
        BranchTarget::Label(label)
    }

    fn targets_jt_space(&mut self, elements: &BoxVecMachLabel) -> CodeOffset {
        // Calculate the number of bytes needed for the jump-table sequence:
        // 4 bytes per instruction, with an 8-instruction base sequence plus
        // 4 bytes for each jump-table entry.
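        // For example, a 4-entry table reserves (8 + 4) * 4 = 48 bytes.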
        (4 * (8 + elements.len())).try_into().unwrap()
    }

    fn min_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            // From float32.
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f32 - 1.,
                (true, 16) => i16::MIN as f32 - 1.,
                (true, 32) => i32::MIN as f32, // i32::MIN - 1 isn't precisely representable as an f32.
                (true, 64) => i64::MIN as f32, // i64::MIN - 1 isn't precisely representable as an f32.

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, min.to_bits())
        } else if in_bits == 64 {
            // From float64.
            let min = match (signed, out_bits) {
                (true, 8) => i8::MIN as f64 - 1.,
                (true, 16) => i16::MIN as f64 - 1.,
                (true, 32) => i32::MIN as f64 - 1.,
                (true, 64) => i64::MIN as f64,

                (false, _) => -1.,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, min.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for min_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }

    fn max_fp_value(&mut self, signed: bool, in_bits: u8, out_bits: u8) -> Reg {
        if in_bits == 32 {
            // From float32.
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f32 + 1.,
                (true, 16) => i16::MAX as f32 + 1.,
                (true, 32) => (i32::MAX as u64 + 1) as f32,
                (true, 64) => (i64::MAX as u64 + 1) as f32,

                (false, 8) => u8::MAX as f32 + 1.,
                (false, 16) => u16::MAX as f32 + 1.,
                (false, 32) => (u32::MAX as u64 + 1) as f32,
                (false, 64) => (u64::MAX as u128 + 1) as f32,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 32-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f32(self, max.to_bits())
        } else if in_bits == 64 {
            // From float64.
            let max = match (signed, out_bits) {
                (true, 8) => i8::MAX as f64 + 1.,
                (true, 16) => i16::MAX as f64 + 1.,
                (true, 32) => i32::MAX as f64 + 1.,
                (true, 64) => (i64::MAX as u64 + 1) as f64,

                (false, 8) => u8::MAX as f64 + 1.,
                (false, 16) => u16::MAX as f64 + 1.,
                (false, 32) => u32::MAX as f64 + 1.,
                (false, 64) => (u64::MAX as u128 + 1) as f64,
                _ => unimplemented!(
                    "unexpected {} output size of {} bits for 64-bit input",
                    if signed { "signed" } else { "unsigned" },
                    out_bits
                ),
            };

            generated_code::constructor_constant_f64(self, max.to_bits())
        } else {
            unimplemented!(
                "unexpected input size for max_fp_value: {} (signed: {}, output size: {})",
                in_bits,
                signed,
                out_bits
            );
        }
    }

    fn fpu_op_ri_ushr(&mut self, ty_bits: u8, shift: u8) -> FPUOpRI {
        if ty_bits == 32 {
            FPUOpRI::UShr32(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRI::UShr64(FPURightShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_ushr: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

    fn fpu_op_ri_sli(&mut self, ty_bits: u8, shift: u8) -> FPUOpRIMod {
        if ty_bits == 32 {
            FPUOpRIMod::Sli32(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else if ty_bits == 64 {
            FPUOpRIMod::Sli64(FPULeftShiftImm::maybe_from_u8(shift, ty_bits).unwrap())
        } else {
            unimplemented!(
                "unexpected input size for fpu_op_ri_sli: {} (shift: {})",
                ty_bits,
                shift
            );
        }
    }

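    // Recognize an immediate whose bytes are consecutive indices starting below
    // 16, i.e. a shuffle mask that can be handled with a single vector-extract
    // (`ext`) using that starting index.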
    fn vec_extract_imm4_from_immediate(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();

        if bytes.windows(2).all(|a| a[0] + 1 == a[1]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }

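    // The `shuffle_dup*` helpers recognize masks that broadcast a single lane,
    // returning the lane index so the shuffle can be lowered to a `dup`.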
    fn shuffle_dup8_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let bytes = self.lower_ctx.get_immediate_data(imm).as_slice();
        if bytes.iter().all(|b| *b == bytes[0]) && bytes[0] < 16 {
            Some(bytes[0])
        } else {
            None
        }
    }
    fn shuffle_dup16_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d, e, f, g, h) = self.shuffle16_from_imm(imm)?;
        if a == b && b == c && c == d && d == e && e == f && f == g && g == h && a < 8 {
            Some(a)
        } else {
            None
        }
    }
    fn shuffle_dup32_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b, c, d) = self.shuffle32_from_imm(imm)?;
        if a == b && b == c && c == d && a < 4 {
            Some(a)
        } else {
            None
        }
    }
    fn shuffle_dup64_from_imm(&mut self, imm: Immediate) -> Option<u8> {
        let (a, b) = self.shuffle64_from_imm(imm)?;
        if a == b && a < 2 {
            Some(a)
        } else {
            None
        }
    }

    fn asimd_mov_mod_imm_zero(&mut self, size: &ScalarSize) -> ASIMDMovModImm {
        ASIMDMovModImm::zero(*size)
    }

    fn asimd_mov_mod_imm_from_u64(
        &mut self,
        val: u64,
        size: &ScalarSize,
    ) -> Option<ASIMDMovModImm> {
        ASIMDMovModImm::maybe_from_u64(val, *size)
    }

    fn asimd_fp_mod_imm_from_u64(&mut self, val: u64, size: &ScalarSize) -> Option<ASIMDFPModImm> {
        ASIMDFPModImm::maybe_from_u64(val, *size)
    }

    fn u64_low32_bits_unset(&mut self, val: u64) -> Option<u64> {
        if val & 0xffffffff == 0 {
            Some(val)
        } else {
            None
        }
    }

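    // Mask a shift immediate to the lane width, e.g. shifting an `I32` lane by
    // 40 is the same as shifting it by 8.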
    fn shift_masked_imm(&mut self, ty: Type, imm: u64) -> u8 {
        (imm as u8) & ((ty.lane_bits() - 1) as u8)
    }

    fn simm7_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<SImm7Scaled> {
        SImm7Scaled::maybe_from_i64(val, ty)
    }

    fn simm9_from_i64(&mut self, val: i64) -> Option<SImm9> {
        SImm9::maybe_from_i64(val)
    }

    fn uimm12_scaled_from_i64(&mut self, val: i64, ty: Type) -> Option<UImm12Scaled> {
        UImm12Scaled::maybe_from_i64(val, ty)
    }

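    // If `n` is a single set bit within the width of `ty`, return that bit's
    // index (e.g. `0x10` gives 4), for use in single-bit test instructions.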
    fn test_and_compare_bit_const(&mut self, ty: Type, n: u64) -> Option<u8> {
        if n.count_ones() != 1 {
            return None;
        }
        let bit = n.trailing_zeros();
        if bit >= ty.bits() {
            return None;
        }
        Some(bit as u8)
    }

    /// Helper used when generating `AluRRRShift` for `extr` instructions.
    fn a64_extr_imm(&mut self, ty: Type, shift: ImmShift) -> ShiftOpAndAmt {
        // The `extr` instruction is encoded with the `AluRRRShift` shape, which
        // requires a `ShiftOpAndAmt`, so `ty` and `shift` are translated into
        // one here. The `ShiftOp` value is only used for its encoding, not its
        // logical meaning.
        let (op, expected) = match ty {
            types::I32 => (ShiftOp::LSL, 0b00),
            types::I64 => (ShiftOp::LSR, 0b01),
            _ => unreachable!(),
        };
        assert_eq!(op.bits(), expected);
        ShiftOpAndAmt::new(
            op,
            ShiftOpShiftImm::maybe_from_shift(shift.value().into()).unwrap(),
        )
    }
}