cranelift_codegen/machinst/lower.rs
1//! This module implements lowering (instruction selection) from Cranelift IR
2//! to machine instructions with virtual registers. This is *almost* the final
3//! machine code, except for register allocation.
4
5// TODO: separate the IR-query core of `Lower` from the lowering logic built on
6// top of it, e.g. the side-effect/coloring analysis and the scan support.
7
8use crate::entity::SecondaryMap;
9use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
10use crate::ir::pcc::{Fact, FactContext, PccError, PccResult};
11use crate::ir::{
12 ArgumentPurpose, Block, BlockArg, Constant, ConstantData, DataFlowGraph, ExternalName,
13 Function, GlobalValue, GlobalValueData, Immediate, Inst, InstructionData, MemFlags,
14 RelSourceLoc, SigRef, Signature, Type, Value, ValueDef, ValueLabelAssignments, ValueLabelStart,
15};
16use crate::machinst::valueregs::InvalidSentinel;
17use crate::machinst::{
18 ABIMachineSpec, BackwardsInsnIndex, BlockIndex, BlockLoweringOrder, CallArgList, CallInfo,
19 CallRetList, Callee, InsnIndex, LoweredBlock, MachLabel, Reg, Sig, SigSet, TryCallInfo, VCode,
20 VCodeBuilder, VCodeConstant, VCodeConstantData, VCodeConstants, VCodeInst, ValueRegs, Writable,
21 writable_value_regs,
22};
23use crate::settings::Flags;
24use crate::{CodegenError, CodegenResult, trace};
25use alloc::vec::Vec;
26use cranelift_control::ControlPlane;
27use rustc_hash::{FxHashMap, FxHashSet};
28use smallvec::{SmallVec, smallvec};
29use std::fmt::Debug;
30
31use super::{VCodeBuildDirection, VRegAllocator};
32
33/// A vector of ValueRegs, used to represent the outputs of an instruction.
34pub type InstOutput = SmallVec<[ValueRegs<Reg>; 2]>;
35
36/// An "instruction color" partitions CLIF instructions by side-effecting ops.
37/// All instructions with the same "color" are guaranteed not to be separated by
38/// any side-effecting op (for this purpose, loads are also considered
39/// side-effecting, to avoid subtle questions w.r.t. the memory model), and
40/// furthermore, it is guaranteed that for any two instructions A and B such
41/// that color(A) == color(B), either A dominates B and B postdominates A, or
42/// vice-versa. (For now, in practice, only ops in the same basic block can ever
43/// have the same color, trivially providing the second condition.) Intuitively,
44/// this means that the ops of the same color must always execute "together", as
45/// part of one atomic contiguous section of the dynamic execution trace, and
46/// they can be freely permuted (modulo true dataflow dependencies) without
47/// affecting program behavior.
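///
/// For example, in a hypothetical block (colors assigned as the scan in
/// `Lower::new()` would assign them):
///
/// ```text
/// v1 = iconst.i64 16    ; color 1
/// v2 = load.i64 v0      ; side-effecting: entry color 1, exit color 2
/// v3 = iadd v1, v2      ; color 2
/// store v3, v0          ; side-effecting: entry color 2, exit color 3
/// ```
///
/// `v3` and the entry of the `store` share color 2, so no side-effecting op
/// can separate them; the load's exit color is also 2, so it may sink down
/// to that point if it is the unique producer of its result.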
48#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
49struct InstColor(u32);
50impl InstColor {
51 fn new(n: u32) -> InstColor {
52 InstColor(n)
53 }
54
55 /// Get an arbitrary index representing this color. The index is unique
56 /// *within a single function compilation*, but indices may be reused across
57 /// functions.
58 pub fn get(self) -> u32 {
59 self.0
60 }
61}
62
63/// A representation of all of the ways in which a value is available, other
64/// than as a direct register.
65///
66/// - An instruction, if it would be allowed to occur at the current location
67/// instead (see [Lower::get_input_as_source_or_const()] for more details).
68///
69/// - A constant, if the value is known to be a constant.
70#[derive(Clone, Copy, Debug)]
71pub struct NonRegInput {
72 /// An instruction produces this value (as the given output), and its
73 /// computation (and side-effect if applicable) could occur at the
74 /// current instruction's location instead.
75 ///
76 /// If this instruction's operation is merged into the current instruction,
77 /// the backend must call [Lower::sink_inst()].
78 ///
79 /// This enum indicates whether this use of the source instruction
80 /// is unique or not.
81 pub inst: InputSourceInst,
82 /// The value is a known constant.
83 pub constant: Option<u64>,
84}
85
86/// When examining an input to an instruction, this enum provides one
87/// of several options: there is or isn't a single instruction (that
88/// we can see and merge with) that produces that input's value, and
89/// we are or aren't the single user of that instruction.
90#[derive(Clone, Copy, Debug)]
91pub enum InputSourceInst {
92 /// The input in question is the single, unique use of the given
93 /// instruction and output index, and it can be sunk to the
94 /// location of this input.
95 UniqueUse(Inst, usize),
96 /// The input in question is one of multiple uses of the given
97 /// instruction. It can still be sunk to the location of this
98 /// input.
99 Use(Inst, usize),
100 /// We cannot determine which instruction produced the input, or
101 /// it is one of several instructions (e.g., due to a control-flow
102 /// merge and blockparam), or the source instruction cannot be
103 /// allowed to sink to the current location due to side-effects.
104 None,
105}
106
107impl InputSourceInst {
108 /// Get the instruction and output index for this source, whether
109 /// we are its single or one of many users.
110 pub fn as_inst(&self) -> Option<(Inst, usize)> {
111 match self {
112 &InputSourceInst::UniqueUse(inst, output_idx)
113 | &InputSourceInst::Use(inst, output_idx) => Some((inst, output_idx)),
114 &InputSourceInst::None => None,
115 }
116 }
117}
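
// A minimal usage sketch (not actual backend code): while lowering a
// hypothetical `iadd` instruction `add`, a backend might inspect its second
// operand like this, where `ctx` is a `&mut Lower<I>`:
//
//     let rhs = ctx.get_input_as_source_or_const(add, 1);
//     if let Some(imm) = rhs.constant {
//         // Emit a reg+imm form; no register is needed for the constant.
//     } else if let InputSourceInst::UniqueUse(src, 0) = rhs.inst {
//         // `src` may be merged into this instruction (e.g. a load folded
//         // into a reg+mem form); if `src` has a side effect, the backend
//         // must then call `ctx.sink_inst(src)`.
//     } else {
//         // Fall back to `ctx.put_input_in_regs(add, 1)`.
//     }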
118
119/// A machine backend.
120pub trait LowerBackend {
121 /// The machine instruction type.
122 type MInst: VCodeInst;
123
124 /// Lower a single instruction.
125 ///
126 /// For a branch, this function should not generate the actual branch
127 /// instruction. However, it must force any values it needs for the branch
128 /// edge (block-param actuals) into registers, because the actual branch
129 /// generation (`lower_branch()`) happens *after* any possible merged
130 /// out-edge.
131 ///
132 /// Returns `None` if no lowering for the instruction was found.
133 fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> Option<InstOutput>;
134
135 /// Lower a block-terminating group of branches (which together can be seen
136 /// as one N-way branch), given a vcode MachLabel for each target.
137 ///
138 /// Returns `None` if no lowering for the branch was found.
139 fn lower_branch(
140 &self,
141 ctx: &mut Lower<Self::MInst>,
142 inst: Inst,
143 targets: &[MachLabel],
144 ) -> Option<()>;
145
146 /// A bit of a hack: give a fixed register that always holds the result of a
147 /// `get_pinned_reg` instruction, if known. This allows elision of moves
148 /// into the associated vreg, instead using the real reg directly.
149 fn maybe_pinned_reg(&self) -> Option<Reg> {
150 None
151 }
152
153 /// The type of state carried between `check_fact` invocations.
154 type FactFlowState: Default + Clone + Debug;
155
156 /// Check any facts about an instruction, given VCode with facts
157 /// on VRegs. Takes mutable `VCode` so that it can propagate some
158 /// kinds of facts automatically.
159 fn check_fact(
160 &self,
161 _ctx: &FactContext<'_>,
162 _vcode: &mut VCode<Self::MInst>,
163 _inst: InsnIndex,
164 _state: &mut Self::FactFlowState,
165 ) -> PccResult<()> {
166 Err(PccError::UnimplementedBackend)
167 }
168}
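
// A hedged sketch of a trait implementation; the names here are illustrative
// only, and real backends generate the bodies of `lower` and `lower_branch`
// from ISLE rules:
//
//     struct MyBackend;
//
//     impl LowerBackend for MyBackend {
//         type MInst = MyInst; // the backend's machine-instruction type
//         type FactFlowState = ();
//
//         fn lower(&self, ctx: &mut Lower<MyInst>, inst: Inst) -> Option<InstOutput> {
//             // Match on `ctx.data(inst)`, emit machine insts with `ctx.emit(..)`,
//             // and return the vregs holding each result; `None` means no
//             // lowering rule matched.
//             ...
//         }
//
//         fn lower_branch(
//             &self,
//             ctx: &mut Lower<MyInst>,
//             inst: Inst,
//             targets: &[MachLabel],
//         ) -> Option<()> {
//             // Emit the final (possibly N-way) branch to `targets`.
//             ...
//         }
//     }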
169
170/// Machine-independent lowering driver / machine-instruction container. Maintains a correspondence
171/// from original Inst to MachInsts.
172pub struct Lower<'func, I: VCodeInst> {
173 /// The function to lower.
174 pub(crate) f: &'func Function,
175
176 /// Lowered machine instructions.
177 vcode: VCodeBuilder<I>,
178
179 /// VReg allocation context, given to the vcode field at build time to finalize the vcode.
180 vregs: VRegAllocator<I>,
181
182 /// Mapping from `Value` (SSA value in IR) to virtual register.
183 value_regs: SecondaryMap<Value, ValueRegs<Reg>>,
184
185 /// sret registers, if needed.
186 sret_reg: Option<ValueRegs<Reg>>,
187
188 /// Instruction colors at block exits. From this map, we can recover all
189 /// instruction colors by scanning backward from the block end and
190 /// decrementing on any color-changing (side-effecting) instruction.
191 block_end_colors: SecondaryMap<Block, InstColor>,
192
193 /// Instruction colors at side-effecting ops. This is the *entry* color,
194 /// i.e., the version of global state that exists before an instruction
195 /// executes. For each side-effecting instruction, the *exit* color is its
196 /// entry color plus one.
197 side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,
198
199 /// Current color as we scan during lowering. While we are lowering an
200 /// instruction, this is equal to the color *at entry to* the instruction.
201 cur_scan_entry_color: Option<InstColor>,
202
203 /// Current instruction as we scan during lowering.
204 cur_inst: Option<Inst>,
205
206 /// Instruction constant values, if known.
207 inst_constants: FxHashMap<Inst, u64>,
208
209 /// Use-counts per SSA value, as counted in the input IR. These
210 /// are "coarsened", in the abstract-interpretation sense: we only
211 /// care about "0, 1, many" states, as this is all we need and
212 /// this lets us do an efficient fixpoint analysis.
213 ///
214 /// See doc comment on `ValueUseState` for more details.
215 value_ir_uses: SecondaryMap<Value, ValueUseState>,
216
217 /// Actual uses of each SSA value so far, incremented while lowering.
218 value_lowered_uses: SecondaryMap<Value, u32>,
219
220 /// Effectful instructions that have been sunk; they are not codegen'd at
221 /// their original locations.
222 inst_sunk: FxHashSet<Inst>,
223
224 /// Instructions collected for the CLIF inst in progress, in forward order.
225 ir_insts: Vec<I>,
226
227 /// Try-call block arg normal-return values, indexed by instruction.
228 try_call_rets: FxHashMap<Inst, SmallVec<[ValueRegs<Writable<Reg>>; 2]>>,
229
230 /// Try-call block arg exceptional-return payloads, indexed by
231 /// instruction. Per the ABI, payloads are carried in registers,
232 /// one register per payload.
233 try_call_payloads: FxHashMap<Inst, SmallVec<[Writable<Reg>; 2]>>,
234
235 /// The register to use for GetPinnedReg, if any, on this architecture.
236 pinned_reg: Option<Reg>,
237
238 /// Compilation flags.
239 flags: Flags,
240}
241
242/// How is a value used in the IR?
243///
244/// This can be seen as a coarsening of an integer count. We only need
245/// distinct states for zero, one, or many.
246///
247/// This analysis deserves further explanation. The basic idea is that
248/// we want to allow instruction lowering to know whether a value that
249/// an instruction references is *only* referenced by that one use, or
250/// by others as well. This is necessary to know when we might want to
251/// move a side-effect: we cannot, for example, duplicate a load, so
252/// we cannot let instruction lowering match a load as part of a
253/// subpattern and potentially incorporate it.
254///
255/// Note that a lot of subtlety comes into play once we have
256/// *indirect* uses. The classical example of this in our development
257/// history was the x86 compare instruction, which is incorporated
258/// into flags users (e.g. `selectif`, `trueif`, branches) and can
259/// subsequently incorporate loads, or at least we would like it
260/// to. However, danger awaits: the compare might be the only user of
261/// a load, so we might think we can just move the load (and nothing
262/// is duplicated -- success!), except that the compare itself is
263/// codegen'd in multiple places, where it is incorporated as a
264/// subpattern itself.
265///
266/// So we really want a notion of "unique all the way along the
267/// matching path". Rust's `&T` and `&mut T` offer a partial analogy
268/// to the semantics that we want here: we want to know when we've
269/// matched a unique use of an instruction, and that instruction's
270/// unique use of another instruction, etc, just as `&mut T` can only
271/// be obtained by going through a chain of `&mut T`. If one has a
272/// `&T` to a struct containing `&mut T` (one of several uses of an
273/// instruction that itself has a unique use of an instruction), one
274/// can only get a `&T` (one can only get a "I am one of several users
275/// of this instruction" result).
276///
277/// We could track these paths, either dynamically as one "looks up the operand
278/// tree" or precomputed. But the former requires state and means that the
279/// `Lower` API carries that state implicitly, which we'd like to avoid if we
280/// can. And the latter implies O(n^2) storage: it is an all-pairs property (is
281/// inst `i` unique from the point of view of `j`).
282///
283/// To make matters even a little more complex still, a value that is
284/// not uniquely used when initially viewing the IR can *become*
285/// uniquely used, at least as a root allowing further unique uses of
286/// e.g. loads to merge, if no other instruction actually merges
287/// it. To be more concrete, if we have `v1 := load; v2 := op v1; v3
288/// := op v2; v4 := op v2` then `v2` is non-uniquely used, so from the
289/// point of view of lowering `v4` or `v3`, we cannot merge the load
290/// at `v1`. But if we decide just to use the assigned register for
291/// `v2` at both `v3` and `v4`, then we only actually codegen `v2`
292/// once, so it *is* a unique root at that point and we *can* merge
293/// the load.
294///
295/// Note also that the color scheme is not sufficient to give us this
296/// information, for various reasons: reasoning about side-effects
297/// does not tell us about potential duplication of uses through pure
298/// ops.
299///
300/// To keep things simple and avoid error-prone lowering APIs that
301/// would extract more information about whether instruction merging
302/// happens or not (we don't have that info now, and it would be
303/// difficult to refactor to get it and make that refactor 100%
304/// correct), we give up on the above "can become unique if not
305/// actually merged" point. Instead, we compute a
306/// transitive-uniqueness. That is what this enum represents.
307///
308/// There is one final caveat as well to the result of this analysis. Notably,
309/// we define some instructions to be "root" instructions, which means that we
310/// assume they will always be codegen'd at the root of a matching tree, and not
311/// matched. (This comes with the caveat that we actually enforce this property
312/// by making them "opaque" to subtree matching in
313/// `get_value_as_source_or_const`). Because they will always be codegen'd once,
314/// they in some sense "reset" multiplicity: these root instructions can be used
315/// many times, but because their result(s) are only computed once, they only
316/// use their inputs once.
317///
318/// We currently define all multi-result instructions to be "root" instructions,
319/// because it is too complex to reason about matching through them, and they
320/// cause too-coarse-grained approximation of multiplicity otherwise: the
321/// analysis would have to assume (as it used to!) that they are always
322/// multiply-used, simply because they have multiple outputs even if those
323/// outputs are used only once.
324///
325/// In the future we could define other instructions to be "root" instructions
326/// as well, provided we make the corresponding change in
327/// `get_value_as_source_or_const`.
328///
329/// To define `ValueUseState` more plainly: a value is `Unused` if no references
330/// exist to it; `Once` if only one other op refers to it, *and* that other op
331/// is `Unused` or `Once`; and `Multiple` otherwise. In other words, `Multiple`
332/// is contagious (except through root instructions): even if an op's result
333/// value is directly used only once in the CLIF, that value is `Multiple` if
334/// the op that uses it is itself used multiple times (hence could be codegen'd
335/// multiple times). In brief, this analysis tells us whether, if every op
336/// merged all of its operand tree, a given op could be codegen'd in more than
337/// one place.
338///
339/// To compute this, we first consider direct uses. At this point
340/// `Unused` answers are correct, `Multiple` answers are correct, but
341/// some `Once`s may change to `Multiple`s. Then we propagate
342/// `Multiple` transitively using a workqueue/fixpoint algorithm.
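///
/// As a small worked example (hypothetical CLIF), annotated with the final
/// states this analysis computes:
///
/// ```text
/// v1 = load.i64 p      ; one direct use, but Multiple (contagion from v2)
/// v2 = iadd v1, v0     ; two direct uses (v3 and v4) -> Multiple
/// v3 = imul v2, v0     ; Once (used only by the first store)
/// v4 = isub v2, v0     ; Once (used only by the second store)
/// store v3, q
/// store v4, r
/// ```
///
/// Because `v1` is `Multiple`, no lowering of `v3` or `v4` is allowed to
/// merge the load; it is codegen'd once at its original location instead.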
343#[derive(Clone, Copy, Debug, PartialEq, Eq)]
344enum ValueUseState {
345 /// Not used at all.
346 Unused,
347 /// Used exactly once.
348 Once,
349 /// Used multiple times.
350 Multiple,
351}
352
353impl ValueUseState {
354 /// Add one use.
355 fn inc(&mut self) {
356 let new = match self {
357 Self::Unused => Self::Once,
358 Self::Once | Self::Multiple => Self::Multiple,
359 };
360 *self = new;
361 }
362}
363
364/// Notion of "relocation distance". This gives an estimate of how far away a symbol will be from a
365/// reference.
366#[derive(Clone, Copy, Debug, PartialEq, Eq)]
367pub enum RelocDistance {
368 /// Target of relocation is "nearby". The threshold for this is fuzzy but should be interpreted
369 /// as approximately "within the compiled output of one module"; e.g., within AArch64's +/-
370 /// 128MB offset. If unsure, use `Far` instead.
371 Near,
372 /// Target of relocation could be anywhere in the address space.
373 Far,
374}
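
// As an illustration (not a statement about any particular backend's exact
// code sequences): a `Near` symbol can typically be reached with a direct
// call or a PC-relative address computation, whereas a `Far` symbol
// generally requires materializing a full absolute address (e.g. from a
// constant pool or a move-immediate sequence) before it can be used.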
375
376impl<'func, I: VCodeInst> Lower<'func, I> {
377 /// Prepare a new lowering context for the given IR function.
378 pub fn new(
379 f: &'func Function,
380 abi: Callee<I::ABIMachineSpec>,
381 emit_info: I::Info,
382 block_order: BlockLoweringOrder,
383 sigs: SigSet,
384 flags: Flags,
385 ) -> CodegenResult<Self> {
386 let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
387 let vcode = VCodeBuilder::new(
388 sigs,
389 abi,
390 emit_info,
391 block_order,
392 constants,
393 VCodeBuildDirection::Backward,
394 flags.log2_min_function_alignment(),
395 );
396
397 // We usually need two VRegs per instruction result, plus extras for
398 // various temporaries, but two per Value is a good starting point.
399 let mut vregs = VRegAllocator::with_capacity(f.dfg.num_values() * 2);
400
401 let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
402 let mut try_call_rets = FxHashMap::default();
403 let mut try_call_payloads = FxHashMap::default();
404
405 // Assign a vreg to each block param, each inst result, and
406 // each edge-defined block-call arg.
407 for bb in f.layout.blocks() {
408 for &param in f.dfg.block_params(bb) {
409 let ty = f.dfg.value_type(param);
410 if value_regs[param].is_invalid() {
411 let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[param].clone())?;
412 value_regs[param] = regs;
413 trace!("bb {} param {}: regs {:?}", bb, param, regs);
414 }
415 }
416 for inst in f.layout.block_insts(bb) {
417 for &result in f.dfg.inst_results(inst) {
418 let ty = f.dfg.value_type(result);
419 if value_regs[result].is_invalid() && !ty.is_invalid() {
420 let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[result].clone())?;
421 value_regs[result] = regs;
422 trace!(
423 "bb {} inst {} ({:?}): result {} regs {:?}",
424 bb, inst, f.dfg.insts[inst], result, regs,
425 );
426 }
427 }
428
429 if let Some(et) = f.dfg.insts[inst].exception_table() {
430 let exdata = &f.dfg.exception_tables[et];
431 let sig = &f.dfg.signatures[exdata.signature()];
432
433 let mut rets = smallvec![];
434 for ty in sig.returns.iter().map(|ret| ret.value_type) {
435 rets.push(vregs.alloc(ty)?.map(|r| Writable::from_reg(r)));
436 }
437 try_call_rets.insert(inst, rets);
438
439 let mut payloads = smallvec![];
440 // Note that this is intentionally using the calling
441 // convention of the callee to determine what payload types
442 // are available. The callee defines that, not the calling
443 // convention of the caller.
444 for &ty in sig
445 .call_conv
446 .exception_payload_types(I::ABIMachineSpec::word_type())
447 {
448 payloads.push(Writable::from_reg(vregs.alloc(ty)?.only_reg().unwrap()));
449 }
450 try_call_payloads.insert(inst, payloads);
451 }
452 }
453 }
454
455 // Find the sret register, if it's used.
456 let mut sret_param = None;
457 for ret in vcode.abi().signature().returns.iter() {
458 if ret.purpose == ArgumentPurpose::StructReturn {
459 let entry_bb = f.stencil.layout.entry_block().unwrap();
460 for (&param, sig_param) in f
461 .dfg
462 .block_params(entry_bb)
463 .iter()
464 .zip(vcode.abi().signature().params.iter())
465 {
466 if sig_param.purpose == ArgumentPurpose::StructReturn {
467 assert!(sret_param.is_none());
468 sret_param = Some(param);
469 }
470 }
471
472 assert!(sret_param.is_some());
473 }
474 }
475
476 let sret_reg = sret_param.map(|param| {
477 let regs = value_regs[param];
478 assert!(regs.len() == 1);
479 regs
480 });
481
482 // Compute instruction colors, find constant instructions, and find instructions with
483 // side-effects, in one combined pass.
484 let mut cur_color = 0;
485 let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
486 let mut side_effect_inst_entry_colors = FxHashMap::default();
487 let mut inst_constants = FxHashMap::default();
488 for bb in f.layout.blocks() {
489 cur_color += 1;
490 for inst in f.layout.block_insts(bb) {
491 let side_effect = has_lowering_side_effect(f, inst);
492
493 trace!("bb {} inst {} has color {}", bb, inst, cur_color);
494 if side_effect {
495 side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
496 trace!(" -> side-effecting; incrementing color for next inst");
497 cur_color += 1;
498 }
499
500 // Determine if this is a constant; if so, add to the table.
501 if let Some(c) = is_constant_64bit(f, inst) {
502 trace!(" -> constant: {}", c);
503 inst_constants.insert(inst, c);
504 }
505 }
506
507 block_end_colors[bb] = InstColor::new(cur_color);
508 }
509
510 let value_ir_uses = compute_use_states(f, sret_param);
511
512 Ok(Lower {
513 f,
514 vcode,
515 vregs,
516 value_regs,
517 sret_reg,
518 block_end_colors,
519 side_effect_inst_entry_colors,
520 inst_constants,
521 value_ir_uses,
522 value_lowered_uses: SecondaryMap::default(),
523 inst_sunk: FxHashSet::default(),
524 cur_scan_entry_color: None,
525 cur_inst: None,
526 ir_insts: vec![],
527 try_call_rets,
528 try_call_payloads,
529 pinned_reg: None,
530 flags,
531 })
532 }
533
534 pub fn sigs(&self) -> &SigSet {
535 self.vcode.sigs()
536 }
537
538 pub fn sigs_mut(&mut self) -> &mut SigSet {
539 self.vcode.sigs_mut()
540 }
541
542 pub fn vregs_mut(&mut self) -> &mut VRegAllocator<I> {
543 &mut self.vregs
544 }
545
546 fn gen_arg_setup(&mut self) {
547 if let Some(entry_bb) = self.f.layout.entry_block() {
548 trace!(
549 "gen_arg_setup: entry BB {} args are:\n{:?}",
550 entry_bb,
551 self.f.dfg.block_params(entry_bb)
552 );
553
554 for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
555 if self.value_ir_uses[*param] == ValueUseState::Unused {
556 continue;
557 }
558 let regs = writable_value_regs(self.value_regs[*param]);
559 for insn in self
560 .vcode
561 .vcode
562 .abi
563 .gen_copy_arg_to_regs(&self.vcode.vcode.sigs, i, regs, &mut self.vregs)
564 .into_iter()
565 {
566 self.emit(insn);
567 }
568 }
569 if let Some(insn) = self
570 .vcode
571 .vcode
572 .abi
573 .gen_retval_area_setup(&self.vcode.vcode.sigs, &mut self.vregs)
574 {
575 self.emit(insn);
576 }
577
578 // The `args` instruction below must come first. Finish
579 // the current "IR inst" (with a default source location,
580 // as for other special instructions inserted during
581 // lowering) and continue the scan backward.
582 self.finish_ir_inst(Default::default());
583
584 if let Some(insn) = self.vcode.vcode.abi.take_args() {
585 self.emit(insn);
586 }
587 }
588 }
589
590 /// Generate the return instruction.
591 pub fn gen_return(&mut self, rets: &[ValueRegs<Reg>]) {
592 let mut out_rets = vec![];
593
594 let mut rets = rets.into_iter();
595 for (i, ret) in self
596 .abi()
597 .signature()
598 .returns
599 .clone()
600 .into_iter()
601 .enumerate()
602 {
603 let regs = if ret.purpose == ArgumentPurpose::StructReturn {
604 self.sret_reg.unwrap()
605 } else {
606 *rets.next().unwrap()
607 };
608
609 let (regs, insns) = self.vcode.abi().gen_copy_regs_to_retval(
610 self.vcode.sigs(),
611 i,
612 regs,
613 &mut self.vregs,
614 );
615 out_rets.extend(regs);
616 for insn in insns {
617 self.emit(insn);
618 }
619 }
620
621 // Hack: generate a virtual instruction that uses vmctx in
622 // order to keep it alive for the duration of the function,
623 // for the benefit of debuginfo.
624 if self.f.dfg.values_labels.is_some() {
625 if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
626 if self.value_ir_uses[vmctx_val] != ValueUseState::Unused {
627 let vmctx_reg = self.value_regs[vmctx_val].only_reg().unwrap();
628 self.emit(I::gen_dummy_use(vmctx_reg));
629 }
630 }
631 }
632
633 let inst = self.abi().gen_rets(out_rets);
634 self.emit(inst);
635 }
636
637 /// Generate list of registers to hold the output of a call with
638 /// signature `sig`.
639 pub fn gen_call_output(&mut self, sig: &Signature) -> InstOutput {
640 let mut rets = smallvec![];
641 for ty in sig.returns.iter().map(|ret| ret.value_type) {
642 rets.push(self.vregs.alloc_with_deferred_error(ty));
643 }
644 rets
645 }
646
647 /// Likewise, but for a `SigRef` instead.
648 pub fn gen_call_output_from_sig_ref(&mut self, sig_ref: SigRef) -> InstOutput {
649 self.gen_call_output(&self.f.dfg.signatures[sig_ref])
650 }
651
652 /// Set up arguments values `args` for a call with signature `sig`.
653 pub fn gen_call_args(&mut self, sig: Sig, args: &[ValueRegs<Reg>]) -> CallArgList {
654 let (uses, insts) = self.vcode.abi().gen_call_args(
655 self.vcode.sigs(),
656 sig,
657 args,
658 /* is_tail_call */ false,
659 &self.flags,
660 &mut self.vregs,
661 );
662 for insn in insts {
663 self.emit(insn);
664 }
665 uses
666 }
667
668 /// Likewise, but for a `return_call`.
669 pub fn gen_return_call_args(&mut self, sig: Sig, args: &[ValueRegs<Reg>]) -> CallArgList {
670 let (uses, insts) = self.vcode.abi().gen_call_args(
671 self.vcode.sigs(),
672 sig,
673 args,
674 /* is_tail_call */ true,
675 &self.flags,
676 &mut self.vregs,
677 );
678 for insn in insts {
679 self.emit(insn);
680 }
681 uses
682 }
683
684 /// Set up return values `outputs` for a call with signature `sig`.
685 pub fn gen_call_rets(&mut self, sig: Sig, outputs: &[ValueRegs<Reg>]) -> CallRetList {
686 self.vcode
687 .abi()
688 .gen_call_rets(self.vcode.sigs(), sig, outputs, None, &mut self.vregs)
689 }
690
691 /// Likewise, but for a `try_call`.
692 pub fn gen_try_call_rets(&mut self, sig: Sig) -> CallRetList {
693 let ir_inst = self.cur_inst.unwrap();
694 let mut outputs: SmallVec<[ValueRegs<Reg>; 2]> = smallvec![];
695 for return_def in self.try_call_rets.get(&ir_inst).unwrap() {
696 outputs.push(return_def.map(|r| r.to_reg()));
697 }
698 let payloads = Some(&self.try_call_payloads.get(&ir_inst).unwrap()[..]);
699
700 self.vcode
701 .abi()
702 .gen_call_rets(self.vcode.sigs(), sig, &outputs, payloads, &mut self.vregs)
703 }
704
705 /// Populate a `CallInfo` for a call with signature `sig`.
706 pub fn gen_call_info<T>(
707 &mut self,
708 sig: Sig,
709 dest: T,
710 uses: CallArgList,
711 defs: CallRetList,
712 try_call_info: Option<TryCallInfo>,
713 patchable: bool,
714 ) -> CallInfo<T> {
715 self.vcode.abi().gen_call_info(
716 self.vcode.sigs(),
717 sig,
718 dest,
719 uses,
720 defs,
721 try_call_info,
722 patchable,
723 )
724 }
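
// Taken together, these helpers are used roughly as follows when lowering
// a `call` (a hedged sketch: `sig`, `sig_ref`, `arg_regs` and `callee_dest`
// stand in for values the backend already has, and emitting the actual call
// instruction is backend-specific):
//
//     let outputs = ctx.gen_call_output_from_sig_ref(sig_ref);
//     let uses = ctx.gen_call_args(sig, &arg_regs);
//     let defs = ctx.gen_call_rets(sig, &outputs);
//     let info = ctx.gen_call_info(sig, callee_dest, uses, defs, None, false);
//     // ...emit the backend's call instruction carrying `info`; the
//     // `outputs` vregs then hold the call's results.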
725
726 /// Has this instruction been sunk to a use-site (i.e., away from its
727 /// original location)?
728 fn is_inst_sunk(&self, inst: Inst) -> bool {
729 self.inst_sunk.contains(&inst)
730 }
731
732 // Is any result of this instruction needed?
733 fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
734 self.f
735 .dfg
736 .inst_results(inst)
737 .iter()
738 .any(|&result| self.value_lowered_uses[result] > 0)
739 }
740
741 fn lower_clif_block<B: LowerBackend<MInst = I>>(
742 &mut self,
743 backend: &B,
744 block: Block,
745 ctrl_plane: &mut ControlPlane,
746 ) -> CodegenResult<()> {
747 self.cur_scan_entry_color = Some(self.block_end_colors[block]);
748 // Lowering loop:
749 // - For each non-branch instruction, in reverse order:
750 // - If side-effecting (load, store, branch/call/return,
751 // possible trap), or if used outside of this block, or if
752 // demanded by another inst, then lower.
753 //
754 // That's it! Lowering of side-effecting ops will force all *needed*
755 // (live) non-side-effecting ops to be lowered at the right places, via
756 // calls to `put_value_in_regs()` on the `Lower` (that's us). That's
757 // because `put_value_in_regs()` bumps `value_lowered_uses` for any insts
758 // whose result registers are used.
759 //
760 // We set the VCodeBuilder to "backward" mode, so we emit
761 // blocks in reverse order wrt the BlockIndex sequence, and
762 // emit instructions in reverse order within blocks. Because
763 // the machine backend calls `ctx.emit()` in forward order, we
764 // collect per-IR-inst lowered instructions in `ir_insts`,
765 // then reverse these and append to the VCode at the end of
766 // each IR instruction.
767 for inst in self.f.layout.block_insts(block).rev() {
768 let data = &self.f.dfg.insts[inst];
769 let has_side_effect = has_lowering_side_effect(self.f, inst);
770 // If inst has been sunk to another location, skip it.
771 if self.is_inst_sunk(inst) {
772 continue;
773 }
774 // Are any outputs used at least once?
775 let value_needed = self.is_any_inst_result_needed(inst);
776 trace!(
777 "lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
778 block,
779 inst,
780 data,
781 data.opcode().is_branch(),
782 has_side_effect,
783 value_needed,
784 );
785
786 // Update scan state to color prior to this inst (as we are scanning
787 // backward).
788 self.cur_inst = Some(inst);
789 if has_side_effect {
790 let entry_color = *self
791 .side_effect_inst_entry_colors
792 .get(&inst)
793 .expect("every side-effecting inst should have a color-map entry");
794 self.cur_scan_entry_color = Some(entry_color);
795 }
796
797 // Skip lowering branches; these are handled separately
798 // (see `lower_clif_branches()` below).
799 if self.f.dfg.insts[inst].opcode().is_branch() {
800 continue;
801 }
802
803 // Value defined by "inst" becomes live after it in normal
804 // order, and therefore **before** in reversed order.
805 self.emit_value_label_live_range_start_for_inst(inst);
806
807 // Normal instruction: codegen if the instruction is side-effecting
808 // or any of its outputs is used.
809 if has_side_effect || value_needed {
810 trace!("lowering: inst {}: {}", inst, self.f.dfg.display_inst(inst));
811 let temp_regs = match backend.lower(self, inst) {
812 Some(regs) => regs,
813 None => {
814 let ty = if self.num_outputs(inst) > 0 {
815 Some(self.output_ty(inst, 0))
816 } else {
817 None
818 };
819 return Err(CodegenError::Unsupported(format!(
820 "should be implemented in ISLE: inst = `{}`, type = `{:?}`",
821 self.f.dfg.display_inst(inst),
822 ty
823 )));
824 }
825 };
826
827 // The ISLE generated code emits its own registers to define the
828 // instruction's lowered values in. However, other instructions
829 // that use this SSA value will be lowered assuming that the value
830 // is generated into a pre-assigned, different, register.
831 //
832 // To connect the two, we set up "aliases" in the VCodeBuilder
833 // that apply when it is building the Operand table for the
834 // regalloc to use. These aliases effectively rewrite any use of
835 // the pre-assigned register to the register that was returned by
836 // the ISLE lowering logic.
837 let results = self.f.dfg.inst_results(inst);
838 debug_assert_eq!(temp_regs.len(), results.len());
839 for (regs, &result) in temp_regs.iter().zip(results) {
840 let dsts = self.value_regs[result];
841 let mut regs = regs.regs().iter();
842 for &dst in dsts.regs().iter() {
843 let temp = regs.next().copied().unwrap_or(Reg::invalid_sentinel());
844 trace!("set vreg alias: {result:?} = {dst:?}, lowering = {temp:?}");
845 self.vregs.set_vreg_alias(dst, temp);
846 }
847 }
848 }
849
850 let start = self.vcode.vcode.num_insts();
851 let loc = self.srcloc(inst);
852 self.finish_ir_inst(loc);
853
854 // If the instruction had a user stack map, forward it from the CLIF
855 // to the vcode.
856 if let Some(entries) = self.f.dfg.user_stack_map_entries(inst) {
857 let end = self.vcode.vcode.num_insts();
858 debug_assert!(end > start);
859 debug_assert_eq!(
860 (start..end)
861 .filter(|i| self.vcode.vcode[InsnIndex::new(*i)].is_safepoint())
862 .count(),
863 1
864 );
865 for i in start..end {
866 let iix = InsnIndex::new(i);
867 if self.vcode.vcode[iix].is_safepoint() {
868 trace!(
869 "Adding user stack map from clif\n\n\
870 {inst:?} `{}`\n\n\
871 to vcode\n\n\
872 {iix:?} `{}`",
873 self.f.dfg.display_inst(inst),
874 &self.vcode.vcode[iix].pretty_print_inst(&mut Default::default()),
875 );
876 self.vcode
877 .add_user_stack_map(BackwardsInsnIndex::new(iix.index()), entries);
878 break;
879 }
880 }
881 }
882
883 // If the CLIF instruction had debug tags, copy them to
884 // the VCode. Place on all VCode instructions lowered from
885 // this CLIF instruction.
886 let debug_tags = self.f.debug_tags.get(inst);
887 if !debug_tags.is_empty() && self.vcode.vcode.num_insts() > 0 {
888 let end = self.vcode.vcode.num_insts();
889 for i in start..end {
890 let backwards_index = BackwardsInsnIndex::new(i);
891 log::trace!(
892 "debug tags on {inst}; associating {debug_tags:?} with {backwards_index:?}"
893 );
894 self.vcode.add_debug_tags(backwards_index, debug_tags);
895 }
896 }
897
898 // Maybe insert a random instruction, as directed by the fuzzing control plane.
899 if ctrl_plane.get_decision() {
900 if ctrl_plane.get_decision() {
901 let imm: u64 = ctrl_plane.get_arbitrary();
902 let reg = self.alloc_tmp(crate::ir::types::I64).regs()[0];
903 I::gen_imm_u64(imm, reg).map(|inst| self.emit(inst));
904 } else {
905 let imm: f64 = ctrl_plane.get_arbitrary();
906 let tmp = self.alloc_tmp(crate::ir::types::I64).regs()[0];
907 let reg = self.alloc_tmp(crate::ir::types::F64).regs()[0];
908 for inst in I::gen_imm_f64(imm, tmp, reg) {
909 self.emit(inst);
910 }
911 }
912 }
913 }
914
915 // Add the block params to this block.
916 self.add_block_params(block)?;
917
918 self.cur_scan_entry_color = None;
919 Ok(())
920 }
921
922 fn add_block_params(&mut self, block: Block) -> CodegenResult<()> {
923 for &param in self.f.dfg.block_params(block) {
924 for &reg in self.value_regs[param].regs() {
925 let vreg = reg.to_virtual_reg().unwrap();
926 self.vcode.add_block_param(vreg);
927 }
928 }
929 Ok(())
930 }
931
932 fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
933 if let Some(ref values_labels) = self.f.dfg.values_labels {
934 debug_assert!(self.f.dfg.value_is_real(val));
935 trace!(
936 "get_value_labels: val {} -> {:?}",
937 val,
938 values_labels.get(&val)
939 );
940 match values_labels.get(&val) {
941 Some(&ValueLabelAssignments::Starts(ref list)) => Some(&list[..]),
942 Some(&ValueLabelAssignments::Alias { value, .. }) if depth < 10 => {
943 self.get_value_labels(value, depth + 1)
944 }
945 _ => None,
946 }
947 } else {
948 None
949 }
950 }
951
952 fn emit_value_label_marks_for_value(&mut self, val: Value) {
953 let regs = self.value_regs[val];
954 if regs.len() > 1 {
955 return;
956 }
957 let reg = regs.only_reg().unwrap();
958
959 if let Some(label_starts) = self.get_value_labels(val, 0) {
960 let labels = label_starts
961 .iter()
962 .map(|&ValueLabelStart { label, .. }| label)
963 .collect::<FxHashSet<_>>();
964 for label in labels {
965 trace!(
966 "value labeling: defines val {:?} -> reg {:?} -> label {:?}",
967 val, reg, label,
968 );
969 self.vcode.add_value_label(reg, label);
970 }
971 }
972 }
973
974 fn emit_value_label_live_range_start_for_inst(&mut self, inst: Inst) {
975 if self.f.dfg.values_labels.is_none() {
976 return;
977 }
978
979 trace!(
980 "value labeling: srcloc {}: inst {}",
981 self.srcloc(inst),
982 inst
983 );
984 for &val in self.f.dfg.inst_results(inst) {
985 self.emit_value_label_marks_for_value(val);
986 }
987 }
988
989 fn emit_value_label_live_range_start_for_block_args(&mut self, block: Block) {
990 if self.f.dfg.values_labels.is_none() {
991 return;
992 }
993
994 trace!("value labeling: block {}", block);
995 for &arg in self.f.dfg.block_params(block) {
996 self.emit_value_label_marks_for_value(arg);
997 }
998 self.finish_ir_inst(Default::default());
999 }
1000
1001 fn finish_ir_inst(&mut self, loc: RelSourceLoc) {
1002 // The VCodeBuilder builds in reverse order (and reverses at
1003 // the end), but `ir_insts` is in forward order, so reverse
1004 // it.
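// For example, if the backend emitted [a, b, c] for this CLIF inst, we
// push c, then b, then a here; the final whole-VCode reversal restores
// the order a, b, c.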
1005 for inst in self.ir_insts.drain(..).rev() {
1006 self.vcode.push(inst, loc);
1007 }
1008 }
1009
1010 fn finish_bb(&mut self) {
1011 self.vcode.end_bb();
1012 }
1013
1014 fn lower_clif_branch<B: LowerBackend<MInst = I>>(
1015 &mut self,
1016 backend: &B,
1017 // Lowered block index:
1018 bindex: BlockIndex,
1019 // Original CLIF block:
1020 block: Block,
1021 branch: Inst,
1022 targets: &[MachLabel],
1023 ) -> CodegenResult<()> {
1024 trace!(
1025 "lower_clif_branch: block {} branch {:?} targets {:?}",
1026 block, branch, targets,
1027 );
1028 // When considering code-motion opportunities, consider the current
1029 // program point to be this branch.
1030 self.cur_inst = Some(branch);
1031
1032 // Lower the branch in ISLE.
1033 backend
1034 .lower_branch(self, branch, targets)
1035 .unwrap_or_else(|| {
1036 panic!(
1037 "should be implemented in ISLE: branch = `{}`",
1038 self.f.dfg.display_inst(branch),
1039 )
1040 });
1041 let loc = self.srcloc(branch);
1042 self.finish_ir_inst(loc);
1043 // Add block param outputs for current block.
1044 self.lower_branch_blockparam_args(bindex);
1045 Ok(())
1046 }
1047
1048 fn lower_branch_blockparam_args(&mut self, block: BlockIndex) {
1049 let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];
1050
1051 // TODO: why not make `block_order` public?
1052 for succ_idx in 0..self.vcode.block_order().succ_indices(block).1.len() {
1053 branch_arg_vregs.clear();
1054 let (succ, args) = self.collect_block_call(block, succ_idx, &mut branch_arg_vregs);
1055 self.vcode.add_succ(succ, args);
1056 }
1057 }
1058
1059 fn collect_branch_and_targets(
1060 &self,
1061 bindex: BlockIndex,
1062 _bb: Block,
1063 targets: &mut SmallVec<[MachLabel; 2]>,
1064 ) -> Option<Inst> {
1065 targets.clear();
1066 let (opt_inst, succs) = self.vcode.block_order().succ_indices(bindex);
1067 targets.extend(succs.iter().map(|succ| MachLabel::from_block(*succ)));
1068 opt_inst
1069 }
1070
1071 /// Collect the outgoing block-call arguments for a given edge out
1072 /// of a lowered block.
1073 fn collect_block_call<'a>(
1074 &mut self,
1075 block: BlockIndex,
1076 succ_idx: usize,
1077 buffer: &'a mut SmallVec<[Reg; 16]>,
1078 ) -> (BlockIndex, &'a [Reg]) {
1079 let block_order = self.vcode.block_order();
1080 let (_, succs) = block_order.succ_indices(block);
1081 let succ = succs[succ_idx];
1082 let this_lb = block_order.lowered_order()[block.index()];
1083 let succ_lb = block_order.lowered_order()[succ.index()];
1084
1085 let (branch_inst, succ_idx) = match (this_lb, succ_lb) {
1086 (_, LoweredBlock::CriticalEdge { .. }) => {
1087 // The successor is a split-critical-edge block. In this
1088 // case, this block-call has no arguments, and the
1089 // arguments go on the critical edge block's unconditional
1090 // branch instead.
1091 return (succ, &[]);
1092 }
1093 (LoweredBlock::CriticalEdge { pred, succ_idx, .. }, _) => {
1094 // This is a split-critical-edge block. In this case, our
1095 // block-call has the arguments that in the CLIF appear in
1096 // the predecessor's branch to this edge.
1097 let branch_inst = self.f.layout.last_inst(pred).unwrap();
1098 (branch_inst, succ_idx as usize)
1099 }
1100
1101 (this, _) => {
1102 let block = this.orig_block().unwrap();
1103 // Ordinary block, with an ordinary block as
1104 // successor. Take the arguments from the branch.
1105 let branch_inst = self.f.layout.last_inst(block).unwrap();
1106 (branch_inst, succ_idx)
1107 }
1108 };
1109
1110 let block_call = self.f.dfg.insts[branch_inst]
1111 .branch_destination(&self.f.dfg.jump_tables, &self.f.dfg.exception_tables)[succ_idx];
1112 for arg in block_call.args(&self.f.dfg.value_lists) {
1113 match arg {
1114 BlockArg::Value(arg) => {
1115 debug_assert!(self.f.dfg.value_is_real(arg));
1116 let regs = self.put_value_in_regs(arg);
1117 buffer.extend_from_slice(regs.regs());
1118 }
1119 BlockArg::TryCallRet(i) => {
1120 let regs = self.try_call_rets.get(&branch_inst).unwrap()[i as usize]
1121 .map(|r| r.to_reg());
1122 buffer.extend_from_slice(regs.regs());
1123 }
1124 BlockArg::TryCallExn(i) => {
1125 let reg =
1126 self.try_call_payloads.get(&branch_inst).unwrap()[i as usize].to_reg();
1127 buffer.push(reg);
1128 }
1129 }
1130 }
1131 (succ, &buffer[..])
1132 }
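
// An illustrative (hypothetical) example of the critical-edge cases above: if
// `block0` ends with `brif v9, block1(v1), block2(v2)` and the edge
// `block0 -> block2` was split during block lowering, then `block0`'s
// block-call to the inserted edge block carries no arguments, while the edge
// block's unconditional jump to `block2` carries `v2`, looked up via the
// `pred` and `succ_idx` recorded on the `CriticalEdge` block.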
1133
1134 /// Lower the function.
1135 pub fn lower<B: LowerBackend<MInst = I>>(
1136 mut self,
1137 backend: &B,
1138 ctrl_plane: &mut ControlPlane,
1139 ) -> CodegenResult<VCode<I>> {
1140 trace!("about to lower function: {:?}", self.f);
1141
1142 self.vcode.init_retval_area(&mut self.vregs)?;
1143
1144 // Get the pinned reg here (we only parameterize this function on `B`,
1145 // not the whole `Lower` impl).
1146 self.pinned_reg = backend.maybe_pinned_reg();
1147
1148 self.vcode.set_entry(BlockIndex::new(0));
1149
1150 // Reused vectors for branch lowering.
1151 let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();
1152
1153 // get a copy of the lowered order; we hold this separately because we
1154 // need a mut ref to the vcode to mutate it below.
1155 let lowered_order: SmallVec<[LoweredBlock; 64]> = self
1156 .vcode
1157 .block_order()
1158 .lowered_order()
1159 .iter()
1160 .cloned()
1161 .collect();
1162
1163 // Main lowering loop over lowered blocks.
1164 for (bindex, lb) in lowered_order.iter().enumerate().rev() {
1165 let bindex = BlockIndex::new(bindex);
1166
1167 // Lower the block body in reverse order (see comment in
1168 // `lower_clif_block()` for rationale).
1169
1170 // End branch.
1171 if let Some(bb) = lb.orig_block() {
1172 if let Some(branch) = self.collect_branch_and_targets(bindex, bb, &mut targets) {
1173 self.lower_clif_branch(backend, bindex, bb, branch, &targets)?;
1174 self.finish_ir_inst(self.srcloc(branch));
1175 }
1176 } else {
1177 // If no orig block, this must be a pure edge block;
1178 // get the successor and emit a jump. This block has
1179 // no block params; and this jump's block-call args
1180 // will be filled in by
1181 // `lower_branch_blockparam_args`.
1182 let succ = self.vcode.block_order().succ_indices(bindex).1[0];
1183 self.emit(I::gen_jump(MachLabel::from_block(succ)));
1184 self.finish_ir_inst(Default::default());
1185 self.lower_branch_blockparam_args(bindex);
1186 }
1187
1188 // Original block body.
1189 if let Some(bb) = lb.orig_block() {
1190 self.lower_clif_block(backend, bb, ctrl_plane)?;
1191 self.emit_value_label_live_range_start_for_block_args(bb);
1192 }
1193
1194 if bindex.index() == 0 {
1195 // Set up the function with arg vreg inits.
1196 self.gen_arg_setup();
1197 self.finish_ir_inst(Default::default());
1198 }
1199
1200 self.finish_bb();
1201
1202 // Check for any deferred vreg-temp allocation errors, and
1203 // bubble one up at this time if it exists.
1204 if let Some(e) = self.vregs.take_deferred_error() {
1205 return Err(e);
1206 }
1207 }
1208
1209 // Now that we've emitted all instructions into the
1210 // VCodeBuilder, let's build the VCode.
1211 trace!(
1212 "built vcode:\n{:?}Backwards {:?}",
1213 &self.vregs, &self.vcode.vcode
1214 );
1215 let vcode = self.vcode.build(self.vregs);
1216
1217 Ok(vcode)
1218 }
1219
1220 pub fn value_is_unused(&self, val: Value) -> bool {
1221 match self.value_ir_uses[val] {
1222 ValueUseState::Unused => true,
1223 _ => false,
1224 }
1225 }
1226
1227 pub fn block_successor_label(&self, block: Block, succ: usize) -> MachLabel {
1228 trace!("block_successor_label: block {block} succ {succ}");
1229 let lowered = self
1230 .vcode
1231 .block_order()
1232 .lowered_index_for_block(block)
1233 .expect("Unreachable block");
1234 trace!(" -> lowered block {lowered:?}");
1235 let (_, succs) = self.vcode.block_order().succ_indices(lowered);
1236 trace!(" -> succs {succs:?}");
1237 let succ_block = *succs.get(succ).expect("Successor index out of range");
1238 MachLabel::from_block(succ_block)
1239 }
1240}
1241
1242/// Pre-analysis: compute `value_ir_uses`. See comment on
1243/// `ValueUseState` for a description of what this analysis
1244/// computes.
1245fn compute_use_states(
1246 f: &Function,
1247 sret_param: Option<Value>,
1248) -> SecondaryMap<Value, ValueUseState> {
1249 // We perform the analysis without recursion, so we don't
1250 // overflow the stack on long chains of ops in the input.
1251 //
1252 // This is sort of a hybrid of a "shallow use-count" pass and
1253 // a DFS. We iterate over all instructions and mark their args
1254 // as used. However when we increment a use-count to
1255 // "Multiple" we push its args onto the stack and do a DFS,
1256 // immediately marking the whole dependency tree as
1257 // Multiple. Doing both (shallow use-counting over all insts,
1258 // and deep Multiple propagation) lets us trim both
1259 // traversals, stopping recursion when a node is already at
1260 // the appropriate state.
1261 //
1262 // In particular, note that the *coarsening* into {Unused,
1263 // Once, Multiple} is part of what makes this pass more
1264 // efficient than a full indirect-use-counting pass.
1265
1266 let mut value_ir_uses = SecondaryMap::with_default(ValueUseState::Unused);
1267
1268 if let Some(sret_param) = sret_param {
1269 // There's an implicit use of the struct-return parameter in each
1270 // copy of the function epilogue, which we count here.
1271 value_ir_uses[sret_param] = ValueUseState::Multiple;
1272 }
1273
1274 // Stack of iterators over Values as we do DFS to mark
1275 // Multiple-state subtrees. The iterator type is whatever is
1276 // returned by `uses` below.
1277 let mut stack: SmallVec<[_; 16]> = smallvec![];
1278
1279 // Find the args for the inst corresponding to the given value.
1280 //
1281 // Note that "root" instructions are skipped here. This means that multiple
1282 // uses of any result of a multi-result instruction are not considered
1283 // multiple uses of the operands of a multi-result instruction. This
1284 // requires tight coupling with `get_value_as_source_or_const` above which
1285 // is the consumer of the map that this function is producing.
1286 let uses = |value| {
1287 trace!(" -> pushing args for {} onto stack", value);
1288 if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
1289 if is_value_use_root(f, src_inst) {
1290 None
1291 } else {
1292 Some(f.dfg.inst_values(src_inst))
1293 }
1294 } else {
1295 None
1296 }
1297 };
1298
1299 // Do a DFS through `value_ir_uses` to mark a subtree as
1300 // Multiple.
1301 for inst in f
1302 .layout
1303 .blocks()
1304 .flat_map(|block| f.layout.block_insts(block))
1305 {
1306 // Iterate over all values used by all instructions, noting an
1307 // additional use on each operand.
1308 for arg in f.dfg.inst_values(inst) {
1309 debug_assert!(f.dfg.value_is_real(arg));
1310 let old = value_ir_uses[arg];
1311 value_ir_uses[arg].inc();
1312 let new = value_ir_uses[arg];
1313 trace!("arg {} used, old state {:?}, new {:?}", arg, old, new);
1314
1315 // On transition to Multiple, do DFS.
1316 if old == ValueUseState::Multiple || new != ValueUseState::Multiple {
1317 continue;
1318 }
1319 if let Some(iter) = uses(arg) {
1320 stack.push(iter);
1321 }
1322 while let Some(iter) = stack.last_mut() {
1323 if let Some(value) = iter.next() {
1324 debug_assert!(f.dfg.value_is_real(value));
1325 trace!(" -> DFS reaches {}", value);
1326 if value_ir_uses[value] == ValueUseState::Multiple {
1327 // Truncate DFS here: no need to go further,
1328 // as whole subtree must already be Multiple.
1329 // With debug asserts, check one level of
1330 // that invariant at least.
1331 debug_assert!(uses(value).into_iter().flatten().all(|arg| {
1332 debug_assert!(f.dfg.value_is_real(arg));
1333 value_ir_uses[arg] == ValueUseState::Multiple
1334 }));
1335 continue;
1336 }
1337 value_ir_uses[value] = ValueUseState::Multiple;
1338 trace!(" -> became Multiple");
1339 if let Some(iter) = uses(value) {
1340 stack.push(iter);
1341 }
1342 } else {
1343 // Empty iterator, discard.
1344 stack.pop();
1345 }
1346 }
1347 }
1348 }
1349
1350 value_ir_uses
1351}
1352
1353/// Definition of a "root" instruction for the calculation of `ValueUseState`.
1354///
1355/// This function calculates whether `inst` is considered a "root" for value-use
1356/// information. This concept is used to forcibly prevent looking-through the
1357/// instruction during `get_value_as_source_or_const` as it additionally
1358/// prevents propagating `Multiple`-used results of the `inst` here to the
1359/// operands of the instruction.
1360///
1361/// Currently this is defined as multi-result instructions. That means that
1362/// lowerings are never allowed to look through a multi-result instruction to
1363/// generate patterns. Note that this isn't possible in ISLE today anyway so
1364/// this isn't currently much of a loss.
1365///
1366/// The main purpose of this function is to prevent the operands of a
1367/// multi-result instruction from being forcibly considered `Multiple`-used
1368/// regardless of circumstances.
1369fn is_value_use_root(f: &Function, inst: Inst) -> bool {
1370 f.dfg.inst_results(inst).len() > 1
1371}
1372
1373/// Function-level queries.
1374impl<'func, I: VCodeInst> Lower<'func, I> {
1375 pub fn dfg(&self) -> &DataFlowGraph {
1376 &self.f.dfg
1377 }
1378
1379 /// Get the `Callee`.
1380 pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
1381 self.vcode.abi()
1382 }
1383
1384 /// Get the `Callee`.
1385 pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
1386 self.vcode.abi_mut()
1387 }
1388}
1389
1390/// Instruction input/output queries.
1391impl<'func, I: VCodeInst> Lower<'func, I> {
1392 /// Get the instdata for a given IR instruction.
1393 pub fn data(&self, ir_inst: Inst) -> &InstructionData {
1394 &self.f.dfg.insts[ir_inst]
1395 }
1396
1397 /// Likewise, but starting with a GlobalValue identifier.
1398 pub fn symbol_value_data<'b>(
1399 &'b self,
1400 global_value: GlobalValue,
1401 ) -> Option<(&'b ExternalName, RelocDistance, i64)> {
1402 let gvdata = &self.f.global_values[global_value];
1403 match gvdata {
1404 &GlobalValueData::Symbol {
1405 ref name,
1406 ref offset,
1407 colocated,
1408 ..
1409 } => {
1410 let offset = offset.bits();
1411 let dist = if colocated {
1412 RelocDistance::Near
1413 } else {
1414 RelocDistance::Far
1415 };
1416 Some((name, dist, offset))
1417 }
1418 _ => None,
1419 }
1420 }
1421
1422 /// Returns the memory flags of a given memory access.
1423 pub fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
1424 match &self.f.dfg.insts[ir_inst] {
1425 &InstructionData::AtomicCas { flags, .. } => Some(flags),
1426 &InstructionData::AtomicRmw { flags, .. } => Some(flags),
1427 &InstructionData::Load { flags, .. }
1428 | &InstructionData::LoadNoOffset { flags, .. }
1429 | &InstructionData::Store { flags, .. } => Some(flags),
1430 &InstructionData::StoreNoOffset { flags, .. } => Some(flags),
1431 _ => None,
1432 }
1433 }
1434
1435 /// Get the source location for a given instruction.
1436 pub fn srcloc(&self, ir_inst: Inst) -> RelSourceLoc {
1437 self.f.rel_srclocs()[ir_inst]
1438 }
1439
1440 /// Get the number of inputs to the given IR instruction. This is a count only of the Value
1441 /// arguments to the instruction: block arguments will not be included in this count.
1442 pub fn num_inputs(&self, ir_inst: Inst) -> usize {
1443 self.f.dfg.inst_args(ir_inst).len()
1444 }
1445
1446 /// Get the number of outputs to the given IR instruction.
1447 pub fn num_outputs(&self, ir_inst: Inst) -> usize {
1448 self.f.dfg.inst_results(ir_inst).len()
1449 }
1450
1451 /// Get the type for an instruction's input.
1452 pub fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
1453 self.value_ty(self.input_as_value(ir_inst, idx))
1454 }
1455
1456 /// Get the type for a value.
1457 pub fn value_ty(&self, val: Value) -> Type {
1458 self.f.dfg.value_type(val)
1459 }
1460
1461 /// Get the type for an instruction's output.
1462 pub fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
1463 self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
1464 }
1465
1466 /// Get the value of a constant instruction (`iconst`, etc.) as a 64-bit
1467 /// value, if possible.
1468 pub fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
1469 self.inst_constants.get(&ir_inst).map(|&c| {
1470 // The upper bits must be zero, enforced during legalization and by
1471 // the CLIF verifier.
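// For example, an `iconst.i32` of -1 must be stored here as
// 0xffff_ffff, not sign-extended to 0xffff_ffff_ffff_ffff.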
1472 debug_assert_eq!(c, {
1473 let input_size = self.output_ty(ir_inst, 0).bits() as u64;
1474 let shift = 64 - input_size;
1475 (c << shift) >> shift
1476 });
1477 c
1478 })
1479 }
1480
1481 /// Get the input as one of two options other than a direct register:
1482 ///
1483 /// - An instruction, given that it is effect-free or able to sink its
1484 /// effect to the current instruction being lowered, and given it has only
1485 /// one output, and if effect-ful, given that this is the only use;
1486 /// - A constant, if the value is a constant.
1487 ///
1488 /// The instruction input may be available in either of these forms. It may
1489 /// be available in neither form, if the conditions are not met; if so, use
1490 /// `put_input_in_regs()` instead to get it in a register.
1491 ///
1492 /// If the backend merges the effect of a side-effecting instruction, it
1493 /// must call `sink_inst()`. When this is called, it indicates that the
1494 /// effect has been sunk to the current scan location. The sunk
1495 /// instruction's result(s) must have *no* uses remaining, because it will
1496 /// not be codegen'd (it has been integrated into the current instruction).
1497 pub fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
1498 let val = self.f.dfg.inst_args(ir_inst)[idx];
1499 debug_assert!(self.f.dfg.value_is_real(val));
1500 val
1501 }
1502
1503 /// Resolves a particular input of an instruction to the `Value` that it is
1504 /// represented with.
1505 ///
1506 /// For more information see [`Lower::get_value_as_source_or_const`].
1507 pub fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
1508 let val = self.input_as_value(ir_inst, idx);
1509 self.get_value_as_source_or_const(val)
1510 }
1511
1512 /// Resolves a `Value` definition to the source instruction it came from
1513 /// plus whether it's a unique-use of that instruction.
1514 ///
1515 /// This function is the workhorse of pattern-matching in ISLE which enables
1516 /// combining multiple instructions together. This is used implicitly in
1517 /// patterns such as `(iadd x (iconst y))` where this function is used to
1518 /// extract the `(iconst y)` operand.
1519 ///
1520 /// At its core this function is a wrapper around
1521 /// [`DataFlowGraph::value_def`]. This function applies a filter on top of
1522 /// that, however, to determine when it is actually safe to "look through"
1523 /// the `val` definition here and view the underlying instruction. This
1524 /// protects against duplicating side effects, such as loads, for example.
1525 ///
1526 /// Internally this uses the data computed from `compute_use_states` along
1527 /// with other instruction properties to know what to return.
1528 pub fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
1529 trace!(
1530 "get_input_for_val: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
1531 val, self.cur_inst, self.cur_scan_entry_color,
1532 );
1533 let inst = match self.f.dfg.value_def(val) {
1534 // OK to merge source instruction if we have a source
1535 // instruction, and one of these two conditions hold:
1536 //
1537 // - It has no side-effects and this instruction is not a "value-use
1538 // root" instruction. Instructions which are considered "roots"
1539 // for value-use calculations do not have accurate information
1540 // known about the `ValueUseState` of their operands. This is
1541 // currently done for multi-result instructions to prevent a use
1542 // of each result from forcing all operands of the multi-result
1543 // instruction to also be `Multiple`. This in turn means that the
1544 // `ValueUseState` for operands of a "root" instruction to be a
1545 // lie if pattern matching were to look through the multi-result
1546 // instruction. As a result the "look through this instruction"
1547 // logic only succeeds if it's not a root instruction.
1548 //
1549 // - It has a side-effect, has one output value, that one
1550 // output has only one use, directly or indirectly (so
1551 // cannot be duplicated -- see comment on
1552 // `ValueUseState`), and the instruction's color is *one
1553 // less than* the current scan color.
1554 //
1555 // This latter set of conditions is testing whether a
1556 // side-effecting instruction can sink to the current scan
1557 // location; this is possible if the in-color of this inst is
1558 // equal to the out-color of the producing inst, so no other
1559 // side-effecting ops occur between them (which will only be true
1560 // if they are in the same BB, because color increments at each BB
1561 // start).
1562 //
1563 // If it is actually sunk, then in `merge_inst()`, we update the
1564 // scan color so that as we scan over the range past which the
1565 // instruction was sunk, we allow other instructions (that came
1566 // prior to the sunk instruction) to sink.
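            //
            // A small worked example (the colors are illustrative):
            //
            //   v1 = load.i64 p    ;; side-effecting: entry color 5, exit color 6
            //   v2 = iadd v0, v1   ;; current scan entry color 6
            //
            // The load's entry color plus one (its exit color) equals the
            // current scan entry color, so no other side-effecting op lies
            // between the two instructions and the load may be sunk into the
            // iadd's lowering.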
1567 ValueDef::Result(src_inst, result_idx) => {
1568 let src_side_effect = has_lowering_side_effect(self.f, src_inst);
1569 trace!(" -> src inst {}", self.f.dfg.display_inst(src_inst));
1570 trace!(" -> has lowering side effect: {}", src_side_effect);
1571 if is_value_use_root(self.f, src_inst) {
1572                // If this instruction is a "root instruction" then we must
1573                // not look through it to see its definition. The
1574                // `ValueUseState` computed for its operands assumes that
1575                // this instruction is generated exactly once, and that
1576                // assumption might be violated were we to allow looking
1577                // through it.
1578 trace!(" -> is a root instruction");
1579 InputSourceInst::None
1580 } else if !src_side_effect {
1581                // Otherwise, if this instruction has no side effects and
1582                // the value is used only once, we can look through it with
1583                // a "unique" tag. For other values a non-unique `Use` is
1584                // returned, so consumers can still see how the value is
1585                // computed but know that its producer cannot be elided.
1586 if self.value_ir_uses[val] == ValueUseState::Once {
1587 InputSourceInst::UniqueUse(src_inst, result_idx)
1588 } else {
1589 InputSourceInst::Use(src_inst, result_idx)
1590 }
1591 } else {
1592 // Side-effect: test whether this is the only use of the
1593 // only result of the instruction, and whether colors allow
1594 // the code-motion.
1595 trace!(
1596 " -> side-effecting op {} for val {}: use state {:?}",
1597 src_inst, val, self.value_ir_uses[val]
1598 );
1599 if self.cur_scan_entry_color.is_some()
1600 && self.value_ir_uses[val] == ValueUseState::Once
1601 && self.num_outputs(src_inst) == 1
1602 && self
1603 .side_effect_inst_entry_colors
1604 .get(&src_inst)
1605 .unwrap()
1606 .get()
1607 + 1
1608 == self.cur_scan_entry_color.unwrap().get()
1609 {
1610 InputSourceInst::UniqueUse(src_inst, 0)
1611 } else {
1612 InputSourceInst::None
1613 }
1614 }
1615 }
1616 _ => InputSourceInst::None,
1617 };
1618 let constant = inst.as_inst().and_then(|(inst, _)| self.get_constant(inst));
1619
1620 NonRegInput { inst, constant }
1621 }
1622
1623 /// Increment the reference count for the Value, ensuring that it gets lowered.
1624 pub fn increment_lowered_uses(&mut self, val: Value) {
1625 self.value_lowered_uses[val] += 1
1626 }
1627
1628     /// Put the `idx`th input into register(s) and return the assigned register(s).
1629 pub fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
1630 let val = self.f.dfg.inst_args(ir_inst)[idx];
1631 self.put_value_in_regs(val)
1632 }
1633
1634     /// Put the given value into register(s) and return the assigned register(s).
1635 pub fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
1636 debug_assert!(self.f.dfg.value_is_real(val));
1637 trace!("put_value_in_regs: val {}", val);
1638
1639 if let Some(inst) = self.f.dfg.value_def(val).inst() {
1640 assert!(!self.inst_sunk.contains(&inst));
1641 }
1642
1643 let regs = self.value_regs[val];
1644 trace!(" -> regs {:?}", regs);
1645 assert!(regs.is_valid());
1646
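        // Record that lowered code uses this value: pure instructions whose
        // results have no lowered uses are skipped by the backward scan.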
1647 self.value_lowered_uses[val] += 1;
1648
1649 regs
1650 }
1651
1652 /// Get the ValueRegs for the edge-defined values for special
1653 /// try-call-return block arguments.
1654 pub fn try_call_return_defs(&mut self, ir_inst: Inst) -> &[ValueRegs<Writable<Reg>>] {
1655 &self.try_call_rets.get(&ir_inst).unwrap()[..]
1656 }
1657
1658 /// Get the Regs for the edge-defined values for special
1659 /// try-call-return exception payload arguments.
1660 pub fn try_call_exception_defs(&mut self, ir_inst: Inst) -> &[Writable<Reg>] {
1661 &self.try_call_payloads.get(&ir_inst).unwrap()[..]
1662 }
1663}
1664
1665/// Codegen primitives: allocate temps, emit instructions, set result registers,
1666/// ask for an input to be gen'd into a register.
1667impl<'func, I: VCodeInst> Lower<'func, I> {
1668 /// Get a new temp.
1669 pub fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
1670 writable_value_regs(self.vregs.alloc_with_deferred_error(ty))
1671 }
1672
1673 /// Get the current root instruction that we are lowering.
1674 pub fn cur_inst(&self) -> Inst {
1675 self.cur_inst.unwrap()
1676 }
1677
1678 /// Emit a machine instruction.
1679 pub fn emit(&mut self, mach_inst: I) {
1680 trace!("emit: {:?}", mach_inst);
1681 self.ir_insts.push(mach_inst);
1682 }
1683
1684 /// Indicate that the side-effect of an instruction has been sunk to the
1685     /// current scan location. This should only be done when the instruction's
1686 /// original results are not used (i.e., `put_input_in_regs` is not invoked
1687 /// for the input produced by the sunk instruction), otherwise the
1688 /// side-effect will occur twice.
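    ///
    /// A hedged sketch of typical use when merging a unique-use load into an
    /// arithmetic op; `ctx`, `add`, `dst`, `lhs`, and `emit_add_from_mem` are
    /// hypothetical here (real backends drive this from ISLE):
    ///
    /// ```ignore
    /// if let InputSourceInst::UniqueUse(load, 0) =
    ///     ctx.get_input_as_source_or_const(add, 1).inst
    /// {
    ///     // (A real backend also checks that `load` is a sinkable load.)
    ///     // Use the load's *address* input, never its result.
    ///     let addr = ctx.put_input_in_regs(load, 0);
    ///     ctx.sink_inst(load); // the load's effect now happens here
    ///     ctx.emit(emit_add_from_mem(dst, lhs, addr));
    /// }
    /// ```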
1689 pub fn sink_inst(&mut self, ir_inst: Inst) {
1690 assert!(has_lowering_side_effect(self.f, ir_inst));
1691 assert!(self.cur_scan_entry_color.is_some());
1692
1693 for result in self.dfg().inst_results(ir_inst) {
1694 assert!(self.value_lowered_uses[*result] == 0);
1695 }
1696
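        // The sunk instruction's exit color must equal the current scan entry
        // color: nothing side-effecting lies between its original location and
        // here. After sinking, continue the scan with the sunk instruction's
        // *entry* color, so side-effecting ops that preceded it may themselves
        // sink past this point later.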
1697 let sunk_inst_entry_color = self
1698 .side_effect_inst_entry_colors
1699 .get(&ir_inst)
1700 .cloned()
1701 .unwrap();
1702 let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
1703 assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
1704 self.cur_scan_entry_color = Some(sunk_inst_entry_color);
1705 self.inst_sunk.insert(ir_inst);
1706 }
1707
1708 /// Retrieve immediate data given a handle.
1709 pub fn get_immediate_data(&self, imm: Immediate) -> &ConstantData {
1710 self.f.dfg.immediates.get(imm).unwrap()
1711 }
1712
1713 /// Retrieve constant data given a handle.
1714 pub fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
1715 self.f.dfg.constants.get(constant_handle)
1716 }
1717
1718 /// Indicate that a constant should be emitted.
1719 pub fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
1720 self.vcode.constants().insert(constant)
1721 }
1722
1723 /// Cause the value in `reg` to be in a virtual reg, by copying it into a
1724 /// new virtual reg if `reg` is a real reg. `ty` describes the type of the
1725 /// value in `reg`.
1726 pub fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg {
1727 if reg.to_virtual_reg().is_some() {
1728 reg
1729 } else {
1730 let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
1731 self.emit(I::gen_move(new_reg, reg, ty));
1732 new_reg.to_reg()
1733 }
1734 }
1735
1736 /// Add a range fact to a register, if no other fact is present.
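    ///
    /// For example (a hedged sketch; `ctx` and `dst` are hypothetical), after
    /// lowering a `uextend` from `i8` to `i64` into `dst`, a backend could
    /// record that the result fits in 8 bits:
    ///
    /// ```ignore
    /// ctx.add_range_fact(dst.to_reg(), 64, 0, 0xff);
    /// ```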
1737 pub fn add_range_fact(&mut self, reg: Reg, bit_width: u16, min: u64, max: u64) {
1738 if self.flags.enable_pcc() {
1739 self.vregs.set_fact_if_missing(
1740 reg.to_virtual_reg().unwrap(),
1741 Fact::Range {
1742 bit_width,
1743 min,
1744 max,
1745 },
1746 );
1747 }
1748 }
1749}
1750
1751#[cfg(test)]
1752mod tests {
1753 use super::ValueUseState;
1754 use crate::cursor::{Cursor, FuncCursor};
1755 use crate::ir::types;
1756 use crate::ir::{Function, InstBuilder};
1757
1758 #[test]
1759 fn multi_result_use_once() {
1760 let mut func = Function::new();
1761 let block0 = func.dfg.make_block();
1762 let mut pos = FuncCursor::new(&mut func);
1763 pos.insert_block(block0);
1764 let v1 = pos.ins().iconst(types::I64, 0);
1765 let v2 = pos.ins().iconst(types::I64, 1);
1766 let v3 = pos.ins().iconcat(v1, v2);
1767 let (v4, v5) = pos.ins().isplit(v3);
1768 pos.ins().return_(&[v4, v5]);
1769 let func = pos.func;
1770
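        // Even though `isplit` has multiple results, each value here has
        // exactly one use, so everything should be classified as `Once`.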
1771 let uses = super::compute_use_states(&func, None);
1772 assert_eq!(uses[v1], ValueUseState::Once);
1773 assert_eq!(uses[v2], ValueUseState::Once);
1774 assert_eq!(uses[v3], ValueUseState::Once);
1775 assert_eq!(uses[v4], ValueUseState::Once);
1776 assert_eq!(uses[v5], ValueUseState::Once);
1777 }
1778
1779 #[test]
1780 fn results_used_twice_but_not_operands() {
1781 let mut func = Function::new();
1782 let block0 = func.dfg.make_block();
1783 let mut pos = FuncCursor::new(&mut func);
1784 pos.insert_block(block0);
1785 let v1 = pos.ins().iconst(types::I64, 0);
1786 let v2 = pos.ins().iconst(types::I64, 1);
1787 let v3 = pos.ins().iconcat(v1, v2);
1788 let (v4, v5) = pos.ins().isplit(v3);
1789 pos.ins().return_(&[v4, v4]);
1790 let func = pos.func;
1791
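        // `v4` is returned twice (`Multiple`) and `v5` is never used
        // (`Unused`), but because `isplit` is a value-use "root", those states
        // do not propagate back through it: `v3`, `v1`, and `v2` remain
        // `Once`.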
1792 let uses = super::compute_use_states(&func, None);
1793 assert_eq!(uses[v1], ValueUseState::Once);
1794 assert_eq!(uses[v2], ValueUseState::Once);
1795 assert_eq!(uses[v3], ValueUseState::Once);
1796 assert_eq!(uses[v4], ValueUseState::Multiple);
1797 assert_eq!(uses[v5], ValueUseState::Unused);
1798 }
1799}