#[allow(unused)]
pub mod generated_code;
use generated_code::MInst;

use self::generated_code::{FpuOPWidth, VecAluOpRR, VecLmul};
use crate::isa;
use crate::isa::riscv64::abi::Riscv64ABICallSite;
use crate::isa::riscv64::lower::args::{
    FReg, VReg, WritableFReg, WritableVReg, WritableXReg, XReg,
};
use crate::isa::riscv64::Riscv64Backend;
use crate::machinst::Reg;
use crate::machinst::{isle::*, CallInfo, MachInst};
use crate::machinst::{VCodeConstant, VCodeConstantData};
use crate::{
    ir::{
        immediates::*, types::*, AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData,
        MemFlags, Opcode, TrapCode, Value, ValueList,
    },
    isa::riscv64::inst::*,
    machinst::{ArgPair, InstOutput, IsTailCall},
};
use regalloc2::PReg;
use std::boxed::Box;
use std::vec::Vec;

type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type BoxExternalName = Box<ExternalName>;
type VecMachLabel = Vec<MachLabel>;
type VecArgPair = Vec<ArgPair>;

pub(crate) struct RV64IsleContext<'a, 'b, I, B>
where
    I: VCodeInst,
    B: LowerBackend,
{
    pub lower_ctx: &'a mut Lower<'b, I>,
    pub backend: &'a B,
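    /// Precomputed minimum vector register size, in bits, taken from the ISA
    /// flags; zero when the vector extension is not available.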
    min_vec_reg_size: u64,
}

impl<'a, 'b> RV64IsleContext<'a, 'b, MInst, Riscv64Backend> {
    fn new(lower_ctx: &'a mut Lower<'b, MInst>, backend: &'a Riscv64Backend) -> Self {
        Self {
            lower_ctx,
            backend,
            min_vec_reg_size: backend.isa_flags.min_vec_reg_size(),
        }
    }
}

impl generated_code::Context for RV64IsleContext<'_, '_, MInst, Riscv64Backend> {
    isle_lower_prelude_methods!();
    isle_prelude_caller_methods!(Riscv64ABICallSite);

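    /// Selects the FPU operation width encoding for a scalar float type.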
    fn fpu_op_width_from_ty(&mut self, ty: Type) -> FpuOPWidth {
        match ty {
            F16 => FpuOPWidth::H,
            F32 => FpuOPWidth::S,
            F64 => FpuOPWidth::D,
            F128 => FpuOPWidth::Q,
            _ => unimplemented!("Unimplemented FPU Op Width: {ty}"),
        }
    }

    fn vreg_new(&mut self, r: Reg) -> VReg {
        VReg::new(r).unwrap()
    }
    fn writable_vreg_new(&mut self, r: WritableReg) -> WritableVReg {
        r.map(|wr| VReg::new(wr).unwrap())
    }
    fn writable_vreg_to_vreg(&mut self, arg0: WritableVReg) -> VReg {
        arg0.to_reg()
    }
    fn writable_vreg_to_writable_reg(&mut self, arg0: WritableVReg) -> WritableReg {
        arg0.map(|vr| vr.to_reg())
    }
    fn vreg_to_reg(&mut self, arg0: VReg) -> Reg {
        *arg0
    }
    fn xreg_new(&mut self, r: Reg) -> XReg {
        XReg::new(r).unwrap()
    }
    fn writable_xreg_new(&mut self, r: WritableReg) -> WritableXReg {
        r.map(|wr| XReg::new(wr).unwrap())
    }
    fn writable_xreg_to_xreg(&mut self, arg0: WritableXReg) -> XReg {
        arg0.to_reg()
    }
    fn writable_xreg_to_writable_reg(&mut self, arg0: WritableXReg) -> WritableReg {
        arg0.map(|xr| xr.to_reg())
    }
    fn xreg_to_reg(&mut self, arg0: XReg) -> Reg {
        *arg0
    }
    fn freg_new(&mut self, r: Reg) -> FReg {
        FReg::new(r).unwrap()
    }
    fn writable_freg_new(&mut self, r: WritableReg) -> WritableFReg {
        r.map(|wr| FReg::new(wr).unwrap())
    }
    fn writable_freg_to_freg(&mut self, arg0: WritableFReg) -> FReg {
        arg0.to_reg()
    }
    fn writable_freg_to_writable_reg(&mut self, arg0: WritableFReg) -> WritableReg {
        arg0.map(|fr| fr.to_reg())
    }
    fn freg_to_reg(&mut self, arg0: FReg) -> Reg {
        *arg0
    }

    fn min_vec_reg_size(&mut self) -> u64 {
        self.min_vec_reg_size
    }

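    /// Returns `ty` if it is a vector type whose total bit width fits in a
    /// single vector register.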
    #[inline]
    fn ty_vec_fits_in_register(&mut self, ty: Type) -> Option<Type> {
        if ty.is_vector() && (ty.bits() as u64) <= self.min_vec_reg_size() {
            Some(ty)
        } else {
            None
        }
    }

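    /// Returns `ty` if this backend can lower values of that type, taking the
    /// enabled ISA extensions into account.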
    fn ty_supported(&mut self, ty: Type) -> Option<Type> {
        let lane_type = ty.lane_type();
        let supported = match ty {
            // Scalar integers are always supported.
            ty if ty.is_int() => true,
            // Scalar floats require the matching float extension.
            F16 => self.backend.isa_flags.has_zfh(),
            F32 => self.backend.isa_flags.has_f(),
            F64 => self.backend.isa_flags.has_d(),

            // Integer vectors are supported for lanes of up to 64 bits,
            // provided the whole vector fits in a register.
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_int()
                && lane_type.bits() <= 64 =>
            {
                true
            }

            // Float vectors are supported for 32- and 64-bit lanes whose
            // scalar type is itself supported.
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_float()
                && self.ty_supported(lane_type).is_some()
                && (lane_type.bits() == 32 || lane_type.bits() == 64) =>
            {
                true
            }

            _ => false,
        };

        if supported {
            Some(ty)
        } else {
            None
        }
    }

    fn ty_supported_float(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty).filter(|ty| ty.is_float())
    }

    fn ty_supported_vec(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty).filter(|ty| ty.is_vector())
    }

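    /// Produces the return address: reloaded from the saved slot at `fp + 8`
    /// when frame pointers are preserved, otherwise taken directly from the
    /// link register.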
    fn load_ra(&mut self) -> Reg {
        if self.backend.flags.preserve_frame_pointers() {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::Load {
                rd: tmp,
                op: LoadOP::Ld,
                flags: MemFlags::trusted(),
                from: AMode::FPOffset(8),
            });
            tmp.to_reg()
        } else {
            link_reg()
        }
    }

    fn label_to_br_target(&mut self, label: MachLabel) -> CondBrTarget {
        CondBrTarget::Label(label)
    }

    fn imm12_and(&mut self, imm: Imm12, x: u64) -> Imm12 {
        Imm12::from_i16(imm.as_i16() & (x as i16))
    }

    fn fli_constant_from_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        FliConstant::maybe_from_u64(ty, imm)
    }

    fn fli_constant_from_negated_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        // Flip the sign bit and check whether the negated constant is
        // representable as an `fli` immediate.
        let negated_imm = match ty {
            F64 => imm ^ 0x8000000000000000,
            F32 => imm ^ 0x80000000,
            _ => unimplemented!(),
        };

        FliConstant::maybe_from_u64(ty, negated_imm)
    }

    fn i64_generate_imm(&mut self, imm: i64) -> Option<(Imm20, Imm12)> {
        MInst::generate_imm(imm as u64)
    }

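    /// Splits `imm` into a base for `lui` plus a left-shift amount: returns
    /// `(imm >> trailing, trailing - 12)` so that `(base << 12) << shift`
    /// reproduces the original value. Requires at least 12 trailing zeros.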
    fn i64_shift_for_lui(&mut self, imm: i64) -> Option<(u64, Imm12)> {
        let trailing = imm.trailing_zeros();
        if trailing < 12 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16 - 12);
        let base = (imm as u64) >> trailing;
        Some((base, shift))
    }

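    /// Decomposes `imm` into `(imm >> n, n)` where `n` is the number of
    /// trailing zero bits; returns `None` when there is nothing to shift.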
    fn i64_shift(&mut self, imm: i64) -> Option<(i64, Imm12)> {
        let trailing = imm.trailing_zeros();
        if trailing == 0 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16);
        let base = imm >> trailing;
        Some((base, shift))
    }

    #[inline]
    fn emit(&mut self, arg0: &MInst) -> Unit {
        self.lower_ctx.emit(arg0.clone());
    }
    #[inline]
    fn imm12_from_u64(&mut self, arg0: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm12_from_i64(&mut self, arg0: i64) -> Option<Imm12> {
        Imm12::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm12_is_zero(&mut self, imm: Imm12) -> Option<()> {
        if imm.as_i16() == 0 {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn imm20_from_u64(&mut self, arg0: u64) -> Option<Imm20> {
        Imm20::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm20_from_i64(&mut self, arg0: i64) -> Option<Imm20> {
        Imm20::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm20_is_zero(&mut self, imm: Imm20) -> Option<()> {
        if imm.as_i32() == 0 {
            Some(())
        } else {
            None
        }
    }

    #[inline]
    fn imm5_from_u64(&mut self, arg0: u64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0 as i64).ok()?)
    }
    #[inline]
    fn imm5_from_i64(&mut self, arg0: i64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0).ok()?)
    }
    #[inline]
    fn i8_to_imm5(&mut self, arg0: i8) -> Option<Imm5> {
        Imm5::maybe_from_i8(arg0)
    }
    #[inline]
    fn uimm5_bitcast_to_imm5(&mut self, arg0: UImm5) -> Imm5 {
        Imm5::from_bits(arg0.bits() as u8)
    }
    #[inline]
    fn uimm5_from_u8(&mut self, arg0: u8) -> Option<UImm5> {
        UImm5::maybe_from_u8(arg0)
    }
    #[inline]
    fn uimm5_from_u64(&mut self, arg0: u64) -> Option<UImm5> {
        arg0.try_into().ok().and_then(UImm5::maybe_from_u8)
    }
    #[inline]
    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }
    #[inline]
    fn zero_reg(&mut self) -> XReg {
        XReg::new(zero_reg()).unwrap()
    }
    fn is_non_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg != self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    fn is_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg == self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    #[inline]
    fn imm_from_bits(&mut self, val: u64) -> Imm12 {
        Imm12::maybe_from_u64(val).unwrap()
    }
    #[inline]
    fn imm_from_neg_bits(&mut self, val: i64) -> Imm12 {
        Imm12::maybe_from_i64(val).unwrap()
    }

    fn frm_bits(&mut self, frm: &FRM) -> UImm5 {
        UImm5::maybe_from_u8(frm.bits()).unwrap()
    }

    fn u8_as_i32(&mut self, x: u8) -> i32 {
        x as i32
    }

    fn imm12_const(&mut self, val: i32) -> Imm12 {
        if let Some(res) = Imm12::maybe_from_i64(val as i64) {
            res
        } else {
            panic!("Unable to make an Imm12 value from {val}")
        }
    }
    fn imm12_const_add(&mut self, val: i32, add: i32) -> Imm12 {
        Imm12::maybe_from_i64((val + add) as i64).unwrap()
    }
    fn imm12_add(&mut self, val: Imm12, add: i32) -> Option<Imm12> {
        Imm12::maybe_from_i64((i32::from(val.as_i16()) + add).into())
    }

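    /// Masks `shamt` to the bit width of `ty` (capped at 64) and also
    /// computes `ty_bits - shamt`, returning the pair as `ValueRegs`.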
    fn gen_shamt(&mut self, ty: Type, shamt: XReg) -> ValueRegs {
        let ty_bits = if ty.bits() > 64 { 64 } else { ty.bits() };
        let ty_bits = i16::try_from(ty_bits).unwrap();
        let shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRImm12 {
                alu_op: AluOPRRI::Andi,
                rd: tmp,
                rs: shamt.to_reg(),
                imm12: Imm12::from_i16(ty_bits - 1),
            });
            tmp.to_reg()
        };
        let len_sub_shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::load_imm12(tmp, Imm12::from_i16(ty_bits)));
            let len_sub_shamt = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRR {
                alu_op: AluOPRRR::Sub,
                rd: len_sub_shamt,
                rs1: tmp.to_reg(),
                rs2: shamt,
            });
            len_sub_shamt.to_reg()
        };
        ValueRegs::two(shamt, len_sub_shamt)
    }

    fn has_v(&mut self) -> bool {
        self.backend.isa_flags.has_v()
    }

    fn has_m(&mut self) -> bool {
        self.backend.isa_flags.has_m()
    }

    fn has_zfa(&mut self) -> bool {
        self.backend.isa_flags.has_zfa()
    }

    fn has_zfh(&mut self) -> bool {
        self.backend.isa_flags.has_zfh()
    }

    fn has_zbkb(&mut self) -> bool {
        self.backend.isa_flags.has_zbkb()
    }

    fn has_zba(&mut self) -> bool {
        self.backend.isa_flags.has_zba()
    }

    fn has_zbb(&mut self) -> bool {
        self.backend.isa_flags.has_zbb()
    }

    fn has_zbc(&mut self) -> bool {
        self.backend.isa_flags.has_zbc()
    }

    fn has_zbs(&mut self) -> bool {
        self.backend.isa_flags.has_zbs()
    }

    fn has_zicond(&mut self) -> bool {
        self.backend.isa_flags.has_zicond()
    }

    fn gen_reg_offset_amode(&mut self, base: Reg, offset: i64) -> AMode {
        AMode::RegOffset(base, offset)
    }

    fn gen_sp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::SPOffset(offset)
    }

    fn gen_fp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::FPOffset(offset)
    }

    fn gen_stack_slot_amode(&mut self, ss: StackSlot, offset: i64) -> AMode {
        // Offset from the beginning of the stackslot area.
        let stack_off = self.lower_ctx.abi().sized_stackslot_offsets()[ss] as i64;
        let sp_off: i64 = stack_off + offset;
        AMode::SlotOffset(sp_off)
    }

    fn gen_const_amode(&mut self, c: VCodeConstant) -> AMode {
        AMode::Const(c)
    }

    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        if ty.is_int() && ty.bits() <= 64 {
            Some(ty)
        } else {
            None
        }
    }
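    /// Matches the min/max atomic RMW ops, returning the op together with
    /// whether its comparison is signed.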
    fn is_atomic_rmw_max_etc(&mut self, op: &AtomicRmwOp) -> Option<(AtomicRmwOp, bool)> {
        let op = *op;
        match op {
            crate::ir::AtomicRmwOp::Umin => Some((op, false)),
            crate::ir::AtomicRmwOp::Umax => Some((op, false)),
            crate::ir::AtomicRmwOp::Smin => Some((op, true)),
            crate::ir::AtomicRmwOp::Smax => Some((op, true)),
            _ => None,
        }
    }

    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    fn load_op(&mut self, ty: Type) -> LoadOP {
        LoadOP::from_type(ty)
    }
    fn store_op(&mut self, ty: Type) -> StoreOP {
        StoreOP::from_type(ty)
    }
    fn load_ext_name(&mut self, name: ExternalName, offset: i64) -> Reg {
        let tmp = self.temp_writable_reg(I64);
        self.emit(&MInst::LoadExtName {
            rd: tmp,
            name: Box::new(name),
            offset,
        });
        tmp.to_reg()
    }

    fn gen_stack_addr(&mut self, slot: StackSlot, offset: Offset32) -> Reg {
        let result = self.temp_writable_reg(I64);
        let i = self
            .lower_ctx
            .abi()
            .sized_stackslot_addr(slot, i64::from(offset) as u32, result);
        self.emit(&i);
        result.to_reg()
    }
    fn atomic_amo(&mut self) -> AMO {
        AMO::SeqCst
    }

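    /// Emits a `BrTable` pseudo-instruction; the two temporaries are used by
    /// its expansion to compute the jump target.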
    fn lower_br_table(&mut self, index: Reg, targets: &[MachLabel]) -> Unit {
        let tmp1 = self.temp_writable_reg(I64);
        let tmp2 = self.temp_writable_reg(I64);
        self.emit(&MInst::BrTable {
            index,
            tmp1,
            tmp2,
            targets: targets.to_vec(),
        });
    }

    fn fp_reg(&mut self) -> PReg {
        px_reg(8)
    }

    fn sp_reg(&mut self) -> PReg {
        px_reg(2)
    }

    #[inline]
    fn int_compare(&mut self, kind: &IntCC, rs1: XReg, rs2: XReg) -> IntegerCompare {
        IntegerCompare {
            kind: *kind,
            rs1: rs1.to_reg(),
            rs2: rs2.to_reg(),
        }
    }

    #[inline]
    fn int_compare_decompose(&mut self, cmp: IntegerCompare) -> (IntCC, XReg, XReg) {
        (cmp.kind, self.xreg_new(cmp.rs1), self.xreg_new(cmp.rs2))
    }

    #[inline]
    fn vstate_from_type(&mut self, ty: Type) -> VState {
        VState::from_type(ty)
    }

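    /// Returns a copy of `vs` with LMUL set to the fractional value 1/2
    /// (`LmulF2`), leaving the rest of the vtype unchanged.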
    #[inline]
    fn vstate_mf2(&mut self, vs: VState) -> VState {
        VState {
            vtype: VType {
                lmul: VecLmul::LmulF2,
                ..vs.vtype
            },
            ..vs
        }
    }

    fn vec_alu_rr_dst_type(&mut self, op: &VecAluOpRR) -> Type {
        MInst::canonical_type_for_rc(op.dst_regclass())
    }

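    /// Returns the bit index for `bclri` when AND-ing with `i` clears exactly
    /// one bit within `ty`'s width.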
    fn bclr_imm(&mut self, ty: Type, i: u64) -> Option<Imm12> {
        // Complement `i` within the width of `ty`; `bclri` only applies when
        // the mask clears a single bit.
        let neg = !i & (u64::MAX >> (64 - ty.bits()));
        if neg.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(neg.trailing_zeros().into())
    }

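    /// Returns the bit index for `binvi`/`bseti` when `i` is a power of two.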
    fn binvi_imm(&mut self, i: u64) -> Option<Imm12> {
        if i.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(i.trailing_zeros().into())
    }
    fn bseti_imm(&mut self, i: u64) -> Option<Imm12> {
        self.binvi_imm(i)
    }

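    /// Bit pattern of the lower bound used when lowering `fcvt` from `float`
    /// to a signed `int`: the exact type minimum for the saturating narrow
    /// cases, or the trapping conversion's lower bound otherwise.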
    fn fcvt_smin_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        match (int, float) {
            (I8, F32) if saturating => f32::from(i8::MIN).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MIN).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MIN).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MIN).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).0.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).0.to_bits(),
            _ => unimplemented!(),
        }
    }

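    /// Like `fcvt_smin_bound`, but for the upper bound of the signed range.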
    fn fcvt_smax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        match (int, float) {
            (I8, F32) if saturating => f32::from(i8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

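    /// Upper bound for `fcvt` to an unsigned integer type, analogous to
    /// `fcvt_smax_bound`.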
    fn fcvt_umax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        match (int, float) {
            (I8, F32) if saturating => f32::from(u8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(u8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(u16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(u16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(false, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(false, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

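    /// Lower bound (`-1.0`) used by the trapping `fcvt` to unsigned
    /// conversions; only the non-saturating form needs it, hence the assert.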
    fn fcvt_umin_bound(&mut self, float: Type, saturating: bool) -> u64 {
        assert!(!saturating);
        match float {
            F32 => (-1.0f32).to_bits().into(),
            F64 => (-1.0f64).to_bits(),
            _ => unimplemented!(),
        }
    }
}

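/// Entry point for lowering an instruction via the ISLE-generated code.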
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

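/// Entry point for lowering a branch instruction, with its target labels, via
/// the ISLE-generated code.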
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}