cranelift_codegen/machinst/lower.rs

//! This module implements lowering (instruction selection) from Cranelift IR
//! to machine instructions with virtual registers. This is *almost* the final
//! machine code, except for register allocation.

// TODO: separate the IR-query core of `Lower` from the lowering logic built on
// top of it, e.g. the side-effect/coloring analysis and the scan support.

use crate::entity::SecondaryMap;
use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
use crate::ir::pcc::{Fact, FactContext, PccError, PccResult};
use crate::ir::{
    ArgumentPurpose, Block, BlockArg, Constant, ConstantData, DataFlowGraph, ExternalName,
    Function, GlobalValue, GlobalValueData, Immediate, Inst, InstructionData, MemFlags,
    RelSourceLoc, SigRef, Signature, Type, Value, ValueDef, ValueLabelAssignments, ValueLabelStart,
};
use crate::machinst::valueregs::InvalidSentinel;
use crate::machinst::{
    ABIMachineSpec, BackwardsInsnIndex, BlockIndex, BlockLoweringOrder, CallArgList, CallInfo,
    CallRetList, Callee, InsnIndex, LoweredBlock, MachLabel, Reg, Sig, SigSet, TryCallInfo, VCode,
    VCodeBuilder, VCodeConstant, VCodeConstantData, VCodeConstants, VCodeInst, ValueRegs, Writable,
    writable_value_regs,
};
use crate::settings::Flags;
use crate::{CodegenError, CodegenResult, trace};
use alloc::vec::Vec;
use cranelift_control::ControlPlane;
use rustc_hash::{FxHashMap, FxHashSet};
use smallvec::{SmallVec, smallvec};
use std::fmt::Debug;

use super::{VCodeBuildDirection, VRegAllocator};

/// A vector of ValueRegs, used to represent the outputs of an instruction.
pub type InstOutput = SmallVec<[ValueRegs<Reg>; 2]>;

/// An "instruction color" partitions CLIF instructions by side-effecting ops.
/// All instructions with the same "color" are guaranteed not to be separated by
/// any side-effecting op (for this purpose, loads are also considered
/// side-effecting, to avoid subtle questions w.r.t. the memory model), and
/// furthermore, it is guaranteed that for any two instructions A and B such
/// that color(A) == color(B), either A dominates B and B postdominates A, or
/// vice-versa. (For now, in practice, only ops in the same basic block can ever
/// have the same color, trivially providing the second condition.) Intuitively,
/// this means that the ops of the same color must always execute "together", as
/// part of one atomic contiguous section of the dynamic execution trace, and
/// they can be freely permuted (modulo true dataflow dependencies) without
/// affecting program behavior.
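///
/// As an illustrative sketch (hypothetical CLIF, not output of the actual
/// pass):
///
/// ```text
/// v1 = load.i64 p   ; side-effecting: bumps the color
/// store v0, q       ; side-effecting: bumps the color again
/// v2 = iadd v1, v1  ; a later color than the load's
/// ```
///
/// The store separates the load from `v2`, so the load may not be sunk into
/// `v2`'s lowering; without the store, the two would share a color region and
/// merging would be permissible (subject to the separate uniqueness condition
/// tracked by `ValueUseState` below).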
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct InstColor(u32);
impl InstColor {
    fn new(n: u32) -> InstColor {
        InstColor(n)
    }

    /// Get an arbitrary index representing this color. The index is unique
    /// *within a single function compilation*, but indices may be reused across
    /// functions.
    pub fn get(self) -> u32 {
        self.0
    }
}

/// A representation of all of the ways in which a value is available, other
/// than as a direct register.
///
/// - An instruction, if it would be allowed to occur at the current location
///   instead (see [Lower::get_input_as_source_or_const()] for more details).
///
/// - A constant, if the value is known to be a constant.
#[derive(Clone, Copy, Debug)]
pub struct NonRegInput {
    /// An instruction produces this value (as the given output), and its
    /// computation (and side-effect if applicable) could occur at the
    /// current instruction's location instead.
    ///
    /// If this instruction's operation is merged into the current instruction,
    /// the backend must call [Lower::sink_inst()].
    ///
    /// This enum indicates whether this use of the source instruction
    /// is unique or not.
    pub inst: InputSourceInst,
    /// The value is a known constant.
    pub constant: Option<u64>,
}

/// When examining an input to an instruction, this enum provides one
/// of several options: there is or isn't a single instruction (that
/// we can see and merge with) that produces that input's value, and
/// we are or aren't the single user of that instruction.
#[derive(Clone, Copy, Debug)]
pub enum InputSourceInst {
    /// The input in question is the single, unique use of the given
    /// instruction and output index, and it can be sunk to the
    /// location of this input.
    UniqueUse(Inst, usize),
    /// The input in question is one of multiple uses of the given
    /// instruction. It can still be sunk to the location of this
    /// input.
    Use(Inst, usize),
    /// We cannot determine which instruction produced the input, or
    /// it is one of several instructions (e.g., due to a control-flow
    /// merge and blockparam), or the source instruction cannot be
    /// allowed to sink to the current location due to side-effects.
    None,
}

impl InputSourceInst {
    /// Get the instruction and output index for this source, whether
    /// we are its single or one of many users.
    pub fn as_inst(&self) -> Option<(Inst, usize)> {
        match self {
            &InputSourceInst::UniqueUse(inst, output_idx)
            | &InputSourceInst::Use(inst, output_idx) => Some((inst, output_idx)),
            &InputSourceInst::None => None,
        }
    }
}
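
// A hedged sketch of how a backend's lowering rule might consume the above
// (`get_input_as_source_or_const`, `sink_inst`, and `put_input_in_regs` are
// the `Lower` methods referenced in these docs; the surrounding logic is
// invented for illustration):
//
//     let input = ctx.get_input_as_source_or_const(inst, 0);
//     if let InputSourceInst::UniqueUse(src, 0) = input.inst {
//         // Merge `src` (e.g., a load) into the current instruction's
//         // lowering, then record that it moved from its original location.
//         ctx.sink_inst(src);
//     } else {
//         // Fall back to using the value in its assigned register(s).
//         let reg = ctx.put_input_in_regs(inst, 0).only_reg().unwrap();
//         // ... use `reg` directly.
//     }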

/// A machine backend.
pub trait LowerBackend {
    /// The machine instruction type.
    type MInst: VCodeInst;

    /// Lower a single instruction.
    ///
    /// For a branch, this function should not generate the actual branch
    /// instruction. However, it must force any values it needs for the branch
    /// edge (block-param actuals) into registers, because the actual branch
    /// generation (`lower_branch()`) happens *after* any possible merged
    /// out-edge.
    ///
    /// Returns `None` if no lowering for the instruction was found.
    fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> Option<InstOutput>;

    /// Lower a block-terminating group of branches (which together can be seen
    /// as one N-way branch), given a vcode MachLabel for each target.
    ///
    /// Returns `None` if no lowering for the branch was found.
    fn lower_branch(
        &self,
        ctx: &mut Lower<Self::MInst>,
        inst: Inst,
        targets: &[MachLabel],
    ) -> Option<()>;

    /// A bit of a hack: give a fixed register that always holds the result of a
    /// `get_pinned_reg` instruction, if known. This allows elision of moves
    /// into the associated vreg, instead using the real reg directly.
    fn maybe_pinned_reg(&self) -> Option<Reg> {
        None
    }

    /// The type of state carried between `check_fact` invocations.
    type FactFlowState: Default + Clone + Debug;

    /// Check any facts about an instruction, given VCode with facts
    /// on VRegs. Takes mutable `VCode` so that it can propagate some
    /// kinds of facts automatically.
    fn check_fact(
        &self,
        _ctx: &FactContext<'_>,
        _vcode: &mut VCode<Self::MInst>,
        _inst: InsnIndex,
        _state: &mut Self::FactFlowState,
    ) -> PccResult<()> {
        Err(PccError::UnimplementedBackend)
    }
}
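
// A minimal sketch of implementing `LowerBackend` (the backend type and
// `MyInst` are hypothetical; real backends generate the body of `lower` from
// ISLE rules rather than writing match arms by hand):
//
//     struct MyBackend;
//     impl LowerBackend for MyBackend {
//         type MInst = MyInst;
//         type FactFlowState = ();
//
//         fn lower(&self, ctx: &mut Lower<MyInst>, inst: Inst) -> Option<InstOutput> {
//             // Match on `ctx.data(inst).opcode()`, emit machine insts via
//             // `ctx.emit(..)`, and return the output registers.
//             None
//         }
//
//         fn lower_branch(
//             &self,
//             ctx: &mut Lower<MyInst>,
//             inst: Inst,
//             targets: &[MachLabel],
//         ) -> Option<()> {
//             None
//         }
//     }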

/// Machine-independent lowering driver / machine-instruction container. Maintains a correspondence
/// from original Inst to MachInsts.
pub struct Lower<'func, I: VCodeInst> {
    /// The function to lower.
    pub(crate) f: &'func Function,

    /// Lowered machine instructions.
    vcode: VCodeBuilder<I>,

    /// VReg allocation context, given to the vcode field at build time to finalize the vcode.
    vregs: VRegAllocator<I>,

    /// Mapping from `Value` (SSA value in IR) to virtual register.
    value_regs: SecondaryMap<Value, ValueRegs<Reg>>,

    /// sret registers, if needed.
    sret_reg: Option<ValueRegs<Reg>>,

    /// Instruction colors at block exits. From this map, we can recover all
    /// instruction colors by scanning backward from the block end and
    /// decrementing on any color-changing (side-effecting) instruction.
    block_end_colors: SecondaryMap<Block, InstColor>,

    /// Instruction colors at side-effecting ops. This is the *entry* color,
    /// i.e., the version of global state that exists before an instruction
    /// executes. For each side-effecting instruction, the *exit* color is its
    /// entry color plus one.
    side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,

    /// Current color as we scan during lowering. While we are lowering an
    /// instruction, this is equal to the color *at entry to* the instruction.
    cur_scan_entry_color: Option<InstColor>,

    /// Current instruction as we scan during lowering.
    cur_inst: Option<Inst>,

    /// Instruction constant values, if known.
    inst_constants: FxHashMap<Inst, u64>,

    /// Use-counts per SSA value, as counted in the input IR. These
    /// are "coarsened", in the abstract-interpretation sense: we only
    /// care about "0, 1, many" states, as this is all we need and
    /// this lets us do an efficient fixpoint analysis.
    ///
    /// See doc comment on `ValueUseState` for more details.
    value_ir_uses: SecondaryMap<Value, ValueUseState>,

    /// Actual uses of each SSA value so far, incremented while lowering.
    value_lowered_uses: SecondaryMap<Value, u32>,

    /// Effectful instructions that have been sunk; they are not codegen'd at
    /// their original locations.
    inst_sunk: FxHashSet<Inst>,

    /// Instructions collected for the CLIF inst in progress, in forward order.
    ir_insts: Vec<I>,

    /// Try-call block arg normal-return values, indexed by instruction.
    try_call_rets: FxHashMap<Inst, SmallVec<[ValueRegs<Writable<Reg>>; 2]>>,

    /// Try-call block arg exceptional-return payloads, indexed by
    /// instruction. Payloads are carried in registers per the ABI and
    /// can only be one register each.
    try_call_payloads: FxHashMap<Inst, SmallVec<[Writable<Reg>; 2]>>,

    /// The register to use for GetPinnedReg, if any, on this architecture.
    pinned_reg: Option<Reg>,

    /// Compilation flags.
    flags: Flags,
}

/// How is a value used in the IR?
///
/// This can be seen as a coarsening of an integer count. We only need
/// distinct states for zero, one, or many.
///
/// This analysis deserves further explanation. The basic idea is that
/// we want to allow instruction lowering to know whether a value that
/// an instruction references is *only* referenced by that one use, or
/// by others as well. This is necessary to know when we might want to
/// move a side-effect: we cannot, for example, duplicate a load, so
/// we cannot let instruction lowering match a load as part of a
/// subpattern and potentially incorporate it.
///
/// Note that a lot of subtlety comes into play once we have
/// *indirect* uses. The classical example of this in our development
/// history was the x86 compare instruction, which is incorporated
/// into flags users (e.g. `selectif`, `trueif`, branches) and can
/// subsequently incorporate loads, or at least we would like it
/// to. However, danger awaits: the compare might be the only user of
/// a load, so we might think we can just move the load (and nothing
/// is duplicated -- success!), except that the compare itself is
/// codegen'd in multiple places, where it is incorporated as a
/// subpattern itself.
///
/// So we really want a notion of "unique all the way along the
/// matching path". Rust's `&T` and `&mut T` offer a partial analogy
/// to the semantics that we want here: we want to know when we've
/// matched a unique use of an instruction, and that instruction's
/// unique use of another instruction, etc, just as `&mut T` can only
/// be obtained by going through a chain of `&mut T`. If one has a
/// `&T` to a struct containing `&mut T` (one of several uses of an
/// instruction that itself has a unique use of an instruction), one
/// can only get a `&T` (one can only get a "I am one of several users
/// of this instruction" result).
///
/// We could track these paths, either dynamically as one "looks up the operand
/// tree" or precomputed. But the former requires state and means that the
/// `Lower` API carries that state implicitly, which we'd like to avoid if we
/// can. And the latter implies O(n^2) storage: it is an all-pairs property (is
/// inst `i` unique from the point of view of inst `j`?).
///
/// To make matters even a little more complex still, a value that is
/// not uniquely used when initially viewing the IR can *become*
/// uniquely used, at least as a root allowing further unique uses of
/// e.g. loads to merge, if no other instruction actually merges
/// it. To be more concrete, if we have `v1 := load; v2 := op v1; v3
/// := op v2; v4 := op v2` then `v2` is non-uniquely used, so from the
/// point of view of lowering `v4` or `v3`, we cannot merge the load
/// at `v1`. But if we decide just to use the assigned register for
/// `v2` at both `v3` and `v4`, then we only actually codegen `v2`
/// once, so it *is* a unique root at that point and we *can* merge
/// the load.
///
/// Note also that the color scheme is not sufficient to give us this
/// information, for various reasons: reasoning about side-effects
/// does not tell us about potential duplication of uses through pure
/// ops.
///
/// To keep things simple and avoid error-prone lowering APIs that
/// would extract more information about whether instruction merging
/// happens or not (we don't have that info now, and it would be
/// difficult to refactor to get it and make that refactor 100%
/// correct), we give up on the above "can become unique if not
/// actually merged" point. Instead, we compute a
/// transitive-uniqueness. That is what this enum represents.
///
/// There is one final caveat as well to the result of this analysis. Notably,
/// we define some instructions to be "root" instructions, which means that we
/// assume they will always be codegen'd at the root of a matching tree, and not
/// matched. (This comes with the caveat that we actually enforce this property
/// by making them "opaque" to subtree matching in
/// `get_value_as_source_or_const`). Because they will always be codegen'd once,
/// they in some sense "reset" multiplicity: these root instructions can be used
/// many times, but because their result(s) are only computed once, they only
/// use their inputs once.
///
/// We currently define all multi-result instructions to be "root" instructions,
/// because it is too complex to reason about matching through them, and they
/// cause too-coarse-grained approximation of multiplicity otherwise: the
/// analysis would have to assume (as it used to!) that they are always
/// multiply-used, simply because they have multiple outputs even if those
/// outputs are used only once.
///
/// In the future we could define other instructions to be "root" instructions
/// as well, given a corresponding change to `get_value_as_source_or_const`.
///
/// To define `ValueUseState` more plainly: a value is `Unused` if no references
/// exist to it; `Once` if only one other op refers to it, *and* that other op
/// is `Unused` or `Once`; and `Multiple` otherwise. In other words, `Multiple`
/// is contagious (except through root instructions): even if an op's result
/// value is directly used only once in the CLIF, that value is `Multiple` if
/// the op that uses it is itself used multiple times (hence could be codegen'd
/// multiple times). In brief, this analysis tells us whether, if every op
/// merged all of its operand tree, a given op could be codegen'd in more than
/// one place.
///
/// To compute this, we first consider direct uses. At this point
/// `Unused` answers are correct, `Multiple` answers are correct, but
/// some `Once`s may change to `Multiple`s. Then we propagate
/// `Multiple` transitively using a workqueue/fixpoint algorithm.
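///
/// A concrete sketch (hypothetical CLIF, not pass output):
///
/// ```text
/// v1 := load ...
/// v2 := iadd v1, v0
/// v3 := ineg v2
/// v4 := ineg v2
/// ```
///
/// `v2` has two direct uses (`v3` and `v4`), so it is `Multiple`; propagation
/// then marks `v1` and `v0` as `Multiple` too, even though each has a single
/// direct use, so neither `v3`'s nor `v4`'s lowering may merge (and sink) the
/// load.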
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ValueUseState {
    /// Not used at all.
    Unused,
    /// Used exactly once.
    Once,
    /// Used multiple times.
    Multiple,
}

impl ValueUseState {
    /// Add one use.
    fn inc(&mut self) {
        let new = match self {
            Self::Unused => Self::Once,
            Self::Once | Self::Multiple => Self::Multiple,
        };
        *self = new;
    }
}

/// Notion of "relocation distance". This gives an estimate of how far away a symbol will be from a
/// reference.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RelocDistance {
    /// Target of relocation is "nearby". The threshold for this is fuzzy but should be interpreted
    /// as approximately "within the compiled output of one module"; e.g., within AArch64's +/-
    /// 128MB offset. If unsure, use `Far` instead.
    Near,
    /// Target of relocation could be anywhere in the address space.
    Far,
}

impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Prepare a new lowering context for the given IR function.
    pub fn new(
        f: &'func Function,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        sigs: SigSet,
        flags: Flags,
    ) -> CodegenResult<Self> {
        let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
        let vcode = VCodeBuilder::new(
            sigs,
            abi,
            emit_info,
            block_order,
            constants,
            VCodeBuildDirection::Backward,
            flags.log2_min_function_alignment(),
        );

        // We usually need two VRegs per instruction result, plus extras for
        // various temporaries, but two per Value is a good starting point.
        let mut vregs = VRegAllocator::with_capacity(f.dfg.num_values() * 2);

        let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
        let mut try_call_rets = FxHashMap::default();
        let mut try_call_payloads = FxHashMap::default();

        // Assign a vreg to each block param, each inst result, and
        // each edge-defined block-call arg.
        for bb in f.layout.blocks() {
            for &param in f.dfg.block_params(bb) {
                let ty = f.dfg.value_type(param);
                if value_regs[param].is_invalid() {
                    let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[param].clone())?;
                    value_regs[param] = regs;
                    trace!("bb {} param {}: regs {:?}", bb, param, regs);
                }
            }
            for inst in f.layout.block_insts(bb) {
                for &result in f.dfg.inst_results(inst) {
                    let ty = f.dfg.value_type(result);
                    if value_regs[result].is_invalid() && !ty.is_invalid() {
                        let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[result].clone())?;
                        value_regs[result] = regs;
                        trace!(
                            "bb {} inst {} ({:?}): result {} regs {:?}",
                            bb, inst, f.dfg.insts[inst], result, regs,
                        );
                    }
                }

                if let Some(et) = f.dfg.insts[inst].exception_table() {
                    let exdata = &f.dfg.exception_tables[et];
                    let sig = &f.dfg.signatures[exdata.signature()];

                    let mut rets = smallvec![];
                    for ty in sig.returns.iter().map(|ret| ret.value_type) {
                        rets.push(vregs.alloc(ty)?.map(|r| Writable::from_reg(r)));
                    }
                    try_call_rets.insert(inst, rets);

                    let mut payloads = smallvec![];
                    for &ty in sig
                        .call_conv
                        .exception_payload_types(I::ABIMachineSpec::word_type())
                    {
                        payloads.push(Writable::from_reg(vregs.alloc(ty)?.only_reg().unwrap()));
                    }
                    try_call_payloads.insert(inst, payloads);
                }
            }
        }

        // Find the sret register, if it's used.
        let mut sret_param = None;
        for ret in vcode.abi().signature().returns.iter() {
            if ret.purpose == ArgumentPurpose::StructReturn {
                let entry_bb = f.stencil.layout.entry_block().unwrap();
                for (&param, sig_param) in f
                    .dfg
                    .block_params(entry_bb)
                    .iter()
                    .zip(vcode.abi().signature().params.iter())
                {
                    if sig_param.purpose == ArgumentPurpose::StructReturn {
                        assert!(sret_param.is_none());
                        sret_param = Some(param);
                    }
                }

                assert!(sret_param.is_some());
            }
        }

        let sret_reg = sret_param.map(|param| {
            let regs = value_regs[param];
            assert!(regs.len() == 1);
            regs
        });

        // Compute instruction colors, find constant instructions, and find instructions with
        // side-effects, in one combined pass.
        let mut cur_color = 0;
        let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
        let mut side_effect_inst_entry_colors = FxHashMap::default();
        let mut inst_constants = FxHashMap::default();
        for bb in f.layout.blocks() {
            cur_color += 1;
            for inst in f.layout.block_insts(bb) {
                let side_effect = has_lowering_side_effect(f, inst);

                trace!("bb {} inst {} has color {}", bb, inst, cur_color);
                if side_effect {
                    side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
                    trace!(" -> side-effecting; incrementing color for next inst");
                    cur_color += 1;
                }

                // Determine if this is a constant; if so, add to the table.
                if let Some(c) = is_constant_64bit(f, inst) {
                    trace!(" -> constant: {}", c);
                    inst_constants.insert(inst, c);
                }
            }

            block_end_colors[bb] = InstColor::new(cur_color);
        }

        let value_ir_uses = compute_use_states(f, sret_param);

        Ok(Lower {
            f,
            vcode,
            vregs,
            value_regs,
            sret_reg,
            block_end_colors,
            side_effect_inst_entry_colors,
            inst_constants,
            value_ir_uses,
            value_lowered_uses: SecondaryMap::default(),
            inst_sunk: FxHashSet::default(),
            cur_scan_entry_color: None,
            cur_inst: None,
            ir_insts: vec![],
            try_call_rets,
            try_call_payloads,
            pinned_reg: None,
            flags,
        })
    }

    /// Get the `SigSet` of ABI signature information.
    pub fn sigs(&self) -> &SigSet {
        self.vcode.sigs()
    }

    /// Get the `SigSet` of ABI signature information, mutably.
    pub fn sigs_mut(&mut self) -> &mut SigSet {
        self.vcode.sigs_mut()
    }

    /// Get the vreg allocator, mutably.
    pub fn vregs_mut(&mut self) -> &mut VRegAllocator<I> {
        &mut self.vregs
    }

    fn gen_arg_setup(&mut self) {
        if let Some(entry_bb) = self.f.layout.entry_block() {
            trace!(
                "gen_arg_setup: entry BB {} args are:\n{:?}",
                entry_bb,
                self.f.dfg.block_params(entry_bb)
            );

            for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
                if self.value_ir_uses[*param] == ValueUseState::Unused {
                    continue;
                }
                let regs = writable_value_regs(self.value_regs[*param]);
                for insn in self
                    .vcode
                    .vcode
                    .abi
                    .gen_copy_arg_to_regs(&self.vcode.vcode.sigs, i, regs, &mut self.vregs)
                    .into_iter()
                {
                    self.emit(insn);
                }
            }
            if let Some(insn) = self
                .vcode
                .vcode
                .abi
                .gen_retval_area_setup(&self.vcode.vcode.sigs, &mut self.vregs)
            {
                self.emit(insn);
            }

            // The `args` instruction below must come first. Finish
            // the current "IR inst" (with a default source location,
            // as for other special instructions inserted during
            // lowering) and continue the scan backward.
            self.finish_ir_inst(Default::default());

            if let Some(insn) = self.vcode.vcode.abi.take_args() {
                self.emit(insn);
            }
        }
    }

    /// Generate the return instruction.
    pub fn gen_return(&mut self, rets: &[ValueRegs<Reg>]) {
        let mut out_rets = vec![];

        let mut rets = rets.into_iter();
        for (i, ret) in self
            .abi()
            .signature()
            .returns
            .clone()
            .into_iter()
            .enumerate()
        {
            let regs = if ret.purpose == ArgumentPurpose::StructReturn {
                self.sret_reg.unwrap()
            } else {
                *rets.next().unwrap()
            };

            let (regs, insns) = self.vcode.abi().gen_copy_regs_to_retval(
                self.vcode.sigs(),
                i,
                regs,
                &mut self.vregs,
            );
            out_rets.extend(regs);
            for insn in insns {
                self.emit(insn);
            }
        }

        // Hack: generate a virtual instruction that uses vmctx in
        // order to keep it alive for the duration of the function,
        // for the benefit of debuginfo.
        if self.f.dfg.values_labels.is_some() {
            if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
                if self.value_ir_uses[vmctx_val] != ValueUseState::Unused {
                    let vmctx_reg = self.value_regs[vmctx_val].only_reg().unwrap();
                    self.emit(I::gen_dummy_use(vmctx_reg));
                }
            }
        }

        let inst = self.abi().gen_rets(out_rets);
        self.emit(inst);
    }

    /// Generate list of registers to hold the output of a call with
    /// signature `sig`.
    pub fn gen_call_output(&mut self, sig: &Signature) -> InstOutput {
        let mut rets = smallvec![];
        for ty in sig.returns.iter().map(|ret| ret.value_type) {
            rets.push(self.vregs.alloc_with_deferred_error(ty));
        }
        rets
    }

    /// Likewise, but for a `SigRef` instead.
    pub fn gen_call_output_from_sig_ref(&mut self, sig_ref: SigRef) -> InstOutput {
        self.gen_call_output(&self.f.dfg.signatures[sig_ref])
    }

    /// Set up argument values `args` for a call with signature `sig`.
    pub fn gen_call_args(&mut self, sig: Sig, args: &[ValueRegs<Reg>]) -> CallArgList {
        let (uses, insts) = self.vcode.abi().gen_call_args(
            self.vcode.sigs(),
            sig,
            args,
            /* is_tail_call */ false,
            &self.flags,
            &mut self.vregs,
        );
        for insn in insts {
            self.emit(insn);
        }
        uses
    }

    /// Likewise, but for a `return_call`.
    pub fn gen_return_call_args(&mut self, sig: Sig, args: &[ValueRegs<Reg>]) -> CallArgList {
        let (uses, insts) = self.vcode.abi().gen_call_args(
            self.vcode.sigs(),
            sig,
            args,
            /* is_tail_call */ true,
            &self.flags,
            &mut self.vregs,
        );
        for insn in insts {
            self.emit(insn);
        }
        uses
    }

    /// Set up return values `outputs` for a call with signature `sig`.
    pub fn gen_call_rets(&mut self, sig: Sig, outputs: &[ValueRegs<Reg>]) -> CallRetList {
        self.vcode
            .abi()
            .gen_call_rets(self.vcode.sigs(), sig, outputs, None, &mut self.vregs)
    }

    /// Likewise, but for a `try_call`.
    pub fn gen_try_call_rets(&mut self, sig: Sig) -> CallRetList {
        let ir_inst = self.cur_inst.unwrap();
        let mut outputs: SmallVec<[ValueRegs<Reg>; 2]> = smallvec![];
        for return_def in self.try_call_rets.get(&ir_inst).unwrap() {
            outputs.push(return_def.map(|r| r.to_reg()));
        }
        let payloads = Some(&self.try_call_payloads.get(&ir_inst).unwrap()[..]);

        self.vcode
            .abi()
            .gen_call_rets(self.vcode.sigs(), sig, &outputs, payloads, &mut self.vregs)
    }

    /// Populate a `CallInfo` for a call with signature `sig`.
    pub fn gen_call_info<T>(
        &mut self,
        sig: Sig,
        dest: T,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> CallInfo<T> {
        self.vcode
            .abi()
            .gen_call_info(self.vcode.sigs(), sig, dest, uses, defs, try_call_info)
    }

    /// Has this instruction been sunk to a use-site (i.e., away from its
    /// original location)?
    fn is_inst_sunk(&self, inst: Inst) -> bool {
        self.inst_sunk.contains(&inst)
    }

    // Is any result of this instruction needed?
    fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
        self.f
            .dfg
            .inst_results(inst)
            .iter()
            .any(|&result| self.value_lowered_uses[result] > 0)
    }

    fn lower_clif_block<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        block: Block,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<()> {
        self.cur_scan_entry_color = Some(self.block_end_colors[block]);
        // Lowering loop:
        // - For each non-branch instruction, in reverse order:
        //   - If side-effecting (load, store, branch/call/return,
        //     possible trap), or if used outside of this block, or if
        //     demanded by another inst, then lower.
        //
        // That's it! Lowering of side-effecting ops will force all *needed*
        // (live) non-side-effecting ops to be lowered at the right places, via
        // the `use_input_reg()` callback on the `Lower` (that's us). That's
        // because `use_input_reg()` sets the eager/demand bit for any insts
        // whose result registers are used.
        //
        // We set the VCodeBuilder to "backward" mode, so we emit
        // blocks in reverse order wrt the BlockIndex sequence, and
        // emit instructions in reverse order within blocks. Because
        // the machine backend calls `ctx.emit()` in forward order, we
        // collect per-IR-inst lowered instructions in `ir_insts`,
        // then reverse these and append to the VCode at the end of
        // each IR instruction.
        for inst in self.f.layout.block_insts(block).rev() {
            let data = &self.f.dfg.insts[inst];
            let has_side_effect = has_lowering_side_effect(self.f, inst);
            // If the inst has been sunk to another location, skip it.
            if self.is_inst_sunk(inst) {
                continue;
            }
            // Are any outputs used at least once?
            let value_needed = self.is_any_inst_result_needed(inst);
            trace!(
                "lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
                block,
                inst,
                data,
                data.opcode().is_branch(),
                has_side_effect,
                value_needed,
            );

            // Update scan state to color prior to this inst (as we are scanning
            // backward).
            self.cur_inst = Some(inst);
            if has_side_effect {
                let entry_color = *self
                    .side_effect_inst_entry_colors
                    .get(&inst)
                    .expect("every side-effecting inst should have a color-map entry");
                self.cur_scan_entry_color = Some(entry_color);
            }

            // Skip lowering branches; these are handled separately
            // (see `lower_clif_branches()` below).
            if self.f.dfg.insts[inst].opcode().is_branch() {
                continue;
            }

            // Value defined by "inst" becomes live after it in normal
            // order, and therefore **before** in reversed order.
            self.emit_value_label_live_range_start_for_inst(inst);

            // Normal instruction: codegen if the instruction is side-effecting
            // or any of its outputs is used.
            if has_side_effect || value_needed {
                trace!("lowering: inst {}: {}", inst, self.f.dfg.display_inst(inst));
                let temp_regs = match backend.lower(self, inst) {
                    Some(regs) => regs,
                    None => {
                        let ty = if self.num_outputs(inst) > 0 {
                            Some(self.output_ty(inst, 0))
                        } else {
                            None
                        };
                        return Err(CodegenError::Unsupported(format!(
                            "should be implemented in ISLE: inst = `{}`, type = `{:?}`",
                            self.f.dfg.display_inst(inst),
                            ty
                        )));
                    }
                };

                // The ISLE generated code emits its own registers to define the
                // instruction's lowered values in. However, other instructions
                // that use this SSA value will be lowered assuming that the value
                // is generated into a pre-assigned, different, register.
                //
                // To connect the two, we set up "aliases" in the VCodeBuilder
                // that apply when it is building the Operand table for the
                // regalloc to use. These aliases effectively rewrite any use of
                // the pre-assigned register to the register that was returned by
                // the ISLE lowering logic.
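                //
                // For example (hypothetical vreg numbers): if `v7`'s result
                // was pre-assigned vreg v128 but the ISLE rule produced it in
                // v200, we record the alias v128 -> v200, and later uses of
                // v128 resolve to v200 when the operand table is built.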
                let results = self.f.dfg.inst_results(inst);
                debug_assert_eq!(temp_regs.len(), results.len());
                for (regs, &result) in temp_regs.iter().zip(results) {
                    let dsts = self.value_regs[result];
                    let mut regs = regs.regs().iter();
                    for &dst in dsts.regs().iter() {
                        let temp = regs.next().copied().unwrap_or(Reg::invalid_sentinel());
                        trace!("set vreg alias: {result:?} = {dst:?}, lowering = {temp:?}");
                        self.vregs.set_vreg_alias(dst, temp);
                    }
                }
            }

            let start = self.vcode.vcode.num_insts();
            let loc = self.srcloc(inst);
            self.finish_ir_inst(loc);

            // If the instruction had a user stack map, forward it from the CLIF
            // to the vcode.
            if let Some(entries) = self.f.dfg.user_stack_map_entries(inst) {
                let end = self.vcode.vcode.num_insts();
                debug_assert!(end > start);
                debug_assert_eq!(
                    (start..end)
                        .filter(|i| self.vcode.vcode[InsnIndex::new(*i)].is_safepoint())
                        .count(),
                    1
                );
                for i in start..end {
                    let iix = InsnIndex::new(i);
                    if self.vcode.vcode[iix].is_safepoint() {
                        trace!(
                            "Adding user stack map from clif\n\n\
                                 {inst:?} `{}`\n\n\
                             to vcode\n\n\
                                 {iix:?} `{}`",
                            self.f.dfg.display_inst(inst),
                            &self.vcode.vcode[iix].pretty_print_inst(&mut Default::default()),
                        );
                        self.vcode
                            .add_user_stack_map(BackwardsInsnIndex::new(iix.index()), entries);
                        break;
                    }
                }
            }

            // Maybe insert a random instruction, as directed by the
            // chaos-mode control plane.
            if ctrl_plane.get_decision() {
                if ctrl_plane.get_decision() {
                    let imm: u64 = ctrl_plane.get_arbitrary();
                    let reg = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    I::gen_imm_u64(imm, reg).map(|inst| self.emit(inst));
                } else {
                    let imm: f64 = ctrl_plane.get_arbitrary();
                    let tmp = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    let reg = self.alloc_tmp(crate::ir::types::F64).regs()[0];
                    for inst in I::gen_imm_f64(imm, tmp, reg) {
                        self.emit(inst);
                    }
                }
            }
        }

        // Add the block params to this block.
        self.add_block_params(block)?;

        self.cur_scan_entry_color = None;
        Ok(())
    }

    fn add_block_params(&mut self, block: Block) -> CodegenResult<()> {
        for &param in self.f.dfg.block_params(block) {
            for &reg in self.value_regs[param].regs() {
                let vreg = reg.to_virtual_reg().unwrap();
                self.vcode.add_block_param(vreg);
            }
        }
        Ok(())
    }

    fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
        if let Some(ref values_labels) = self.f.dfg.values_labels {
            debug_assert!(self.f.dfg.value_is_real(val));
            trace!(
                "get_value_labels: val {} -> {:?}",
                val,
                values_labels.get(&val)
            );
            match values_labels.get(&val) {
                Some(&ValueLabelAssignments::Starts(ref list)) => Some(&list[..]),
                Some(&ValueLabelAssignments::Alias { value, .. }) if depth < 10 => {
                    self.get_value_labels(value, depth + 1)
                }
                _ => None,
            }
        } else {
            None
        }
    }

    fn emit_value_label_marks_for_value(&mut self, val: Value) {
        let regs = self.value_regs[val];
        if regs.len() > 1 {
            return;
        }
        let reg = regs.only_reg().unwrap();

        if let Some(label_starts) = self.get_value_labels(val, 0) {
            let labels = label_starts
                .iter()
                .map(|&ValueLabelStart { label, .. }| label)
                .collect::<FxHashSet<_>>();
            for label in labels {
                trace!(
                    "value labeling: defines val {:?} -> reg {:?} -> label {:?}",
                    val, reg, label,
                );
                self.vcode.add_value_label(reg, label);
            }
        }
    }

    fn emit_value_label_live_range_start_for_inst(&mut self, inst: Inst) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!(
            "value labeling: srcloc {}: inst {}",
            self.srcloc(inst),
            inst
        );
        for &val in self.f.dfg.inst_results(inst) {
            self.emit_value_label_marks_for_value(val);
        }
    }

    fn emit_value_label_live_range_start_for_block_args(&mut self, block: Block) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!("value labeling: block {}", block);
        for &arg in self.f.dfg.block_params(block) {
            self.emit_value_label_marks_for_value(arg);
        }
        self.finish_ir_inst(Default::default());
    }

    fn finish_ir_inst(&mut self, loc: RelSourceLoc) {
        // The VCodeBuilder builds in reverse order (and reverses at
        // the end), but `ir_insts` is in forward order, so reverse
        // it.
        for inst in self.ir_insts.drain(..).rev() {
            self.vcode.push(inst, loc);
        }
    }

    fn finish_bb(&mut self) {
        self.vcode.end_bb();
    }

    fn lower_clif_branch<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        // Lowered block index:
        bindex: BlockIndex,
        // Original CLIF block:
        block: Block,
        branch: Inst,
        targets: &[MachLabel],
    ) -> CodegenResult<()> {
        trace!(
            "lower_clif_branch: block {} branch {:?} targets {:?}",
            block, branch, targets,
        );
        // When considering code-motion opportunities, consider the current
        // program point to be this branch.
        self.cur_inst = Some(branch);

        // Lower the branch in ISLE.
        backend
            .lower_branch(self, branch, targets)
            .unwrap_or_else(|| {
                panic!(
                    "should be implemented in ISLE: branch = `{}`",
                    self.f.dfg.display_inst(branch),
                )
            });
        let loc = self.srcloc(branch);
        self.finish_ir_inst(loc);
        // Add block param outputs for current block.
        self.lower_branch_blockparam_args(bindex);
        Ok(())
    }

    fn lower_branch_blockparam_args(&mut self, block: BlockIndex) {
        let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];

        // TODO: why not make `block_order` public?
        for succ_idx in 0..self.vcode.block_order().succ_indices(block).1.len() {
            branch_arg_vregs.clear();
            let (succ, args) = self.collect_block_call(block, succ_idx, &mut branch_arg_vregs);
            self.vcode.add_succ(succ, args);
        }
    }

    fn collect_branch_and_targets(
        &self,
        bindex: BlockIndex,
        _bb: Block,
        targets: &mut SmallVec<[MachLabel; 2]>,
    ) -> Option<Inst> {
        targets.clear();
        let (opt_inst, succs) = self.vcode.block_order().succ_indices(bindex);
        targets.extend(succs.iter().map(|succ| MachLabel::from_block(*succ)));
        opt_inst
    }

    /// Collect the outgoing block-call arguments for a given edge out
    /// of a lowered block.
    fn collect_block_call<'a>(
        &mut self,
        block: BlockIndex,
        succ_idx: usize,
        buffer: &'a mut SmallVec<[Reg; 16]>,
    ) -> (BlockIndex, &'a [Reg]) {
        let block_order = self.vcode.block_order();
        let (_, succs) = block_order.succ_indices(block);
        let succ = succs[succ_idx];
        let this_lb = block_order.lowered_order()[block.index()];
        let succ_lb = block_order.lowered_order()[succ.index()];

        let (branch_inst, succ_idx) = match (this_lb, succ_lb) {
            (_, LoweredBlock::CriticalEdge { .. }) => {
                // The successor is a split-critical-edge block. In this
                // case, this block-call has no arguments, and the
                // arguments go on the critical edge block's unconditional
                // branch instead.
                return (succ, &[]);
            }
            (LoweredBlock::CriticalEdge { pred, succ_idx, .. }, _) => {
                // This is a split-critical-edge block. In this case, our
                // block-call has the arguments that in the CLIF appear in
                // the predecessor's branch to this edge.
                let branch_inst = self.f.layout.last_inst(pred).unwrap();
                (branch_inst, succ_idx as usize)
            }

            (this, _) => {
                let block = this.orig_block().unwrap();
                // Ordinary block, with an ordinary block as
                // successor. Take the arguments from the branch.
                let branch_inst = self.f.layout.last_inst(block).unwrap();
                (branch_inst, succ_idx)
            }
        };

        let block_call = self.f.dfg.insts[branch_inst]
            .branch_destination(&self.f.dfg.jump_tables, &self.f.dfg.exception_tables)[succ_idx];
        for arg in block_call.args(&self.f.dfg.value_lists) {
            match arg {
                BlockArg::Value(arg) => {
                    debug_assert!(self.f.dfg.value_is_real(arg));
                    let regs = self.put_value_in_regs(arg);
                    buffer.extend_from_slice(regs.regs());
                }
                BlockArg::TryCallRet(i) => {
                    let regs = self.try_call_rets.get(&branch_inst).unwrap()[i as usize]
                        .map(|r| r.to_reg());
                    buffer.extend_from_slice(regs.regs());
                }
                BlockArg::TryCallExn(i) => {
                    let reg =
                        self.try_call_payloads.get(&branch_inst).unwrap()[i as usize].to_reg();
                    buffer.push(reg);
                }
            }
        }
        (succ, &buffer[..])
    }

    /// Lower the function.
    pub fn lower<B: LowerBackend<MInst = I>>(
        mut self,
        backend: &B,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<VCode<I>> {
        trace!("about to lower function: {:?}", self.f);

        self.vcode.init_retval_area(&mut self.vregs)?;

        // Get the pinned reg here (we only parameterize this function on `B`,
        // not the whole `Lower` impl).
        self.pinned_reg = backend.maybe_pinned_reg();

        self.vcode.set_entry(BlockIndex::new(0));

        // Reused vectors for branch lowering.
        let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();

        // Get a copy of the lowered order; we hold this separately because we
        // need a mut ref to the vcode to mutate it below.
        let lowered_order: SmallVec<[LoweredBlock; 64]> = self
            .vcode
            .block_order()
            .lowered_order()
            .iter()
            .cloned()
            .collect();

        // Main lowering loop over lowered blocks.
        for (bindex, lb) in lowered_order.iter().enumerate().rev() {
            let bindex = BlockIndex::new(bindex);

            // Lower the block body in reverse order (see comment in
            // `lower_clif_block()` for rationale).

            // End branch.
            if let Some(bb) = lb.orig_block() {
                if let Some(branch) = self.collect_branch_and_targets(bindex, bb, &mut targets) {
                    self.lower_clif_branch(backend, bindex, bb, branch, &targets)?;
                    self.finish_ir_inst(self.srcloc(branch));
                }
            } else {
                // If no orig block, this must be a pure edge block;
                // get the successor and emit a jump. This block has
                // no block params; and this jump's block-call args
                // will be filled in by
                // `lower_branch_blockparam_args`.
                let succ = self.vcode.block_order().succ_indices(bindex).1[0];
                self.emit(I::gen_jump(MachLabel::from_block(succ)));
                self.finish_ir_inst(Default::default());
                self.lower_branch_blockparam_args(bindex);
            }

            // Original block body.
            if let Some(bb) = lb.orig_block() {
                self.lower_clif_block(backend, bb, ctrl_plane)?;
                self.emit_value_label_live_range_start_for_block_args(bb);
            }

            if bindex.index() == 0 {
                // Set up the function with arg vreg inits.
                self.gen_arg_setup();
                self.finish_ir_inst(Default::default());
            }

            self.finish_bb();

            // Check for any deferred vreg-temp allocation errors, and
            // bubble one up at this time if it exists.
            if let Some(e) = self.vregs.take_deferred_error() {
                return Err(e);
            }
        }

        // Now that we've emitted all instructions into the
        // VCodeBuilder, let's build the VCode.
        trace!(
            "built vcode:\n{:?}Backwards {:?}",
            &self.vregs, &self.vcode.vcode
        );
        let vcode = self.vcode.build(self.vregs);

        Ok(vcode)
    }

    /// Whether the given SSA value is entirely unused in the input IR.
    pub fn value_is_unused(&self, val: Value) -> bool {
        match self.value_ir_uses[val] {
            ValueUseState::Unused => true,
            _ => false,
        }
    }
}

/// Pre-analysis: compute `value_ir_uses`. See comment on
/// `ValueUseState` for a description of what this analysis
/// computes.
fn compute_use_states(
    f: &Function,
    sret_param: Option<Value>,
) -> SecondaryMap<Value, ValueUseState> {
    // We perform the analysis without recursion, so we don't
    // overflow the stack on long chains of ops in the input.
    //
    // This is sort of a hybrid of a "shallow use-count" pass and
    // a DFS. We iterate over all instructions and mark their args
    // as used. However, when we increment a use-count to
    // "Multiple" we push its args onto the stack and do a DFS,
    // immediately marking the whole dependency tree as
    // Multiple. Doing both (shallow use-counting over all insts,
    // and deep Multiple propagation) lets us trim both
    // traversals, stopping recursion when a node is already at
    // the appropriate state.
    //
    // In particular, note that the *coarsening* into {Unused,
    // Once, Multiple} is part of what makes this pass more
    // efficient than a full indirect-use-counting pass.
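    //
    // A hedged illustration of the trimming (hypothetical values): if
    // `v2 := iadd v0, v1` is used by both `v3` and `v4`, the second shallow
    // use transitions `v2` from Once to Multiple; only at that transition do
    // we DFS into `v2`'s operand tree, and the DFS stops at any value already
    // marked Multiple, so each edge is traversed at most once overall.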

    let mut value_ir_uses = SecondaryMap::with_default(ValueUseState::Unused);

    if let Some(sret_param) = sret_param {
        // There's an implicit use of the struct-return parameter in each
        // copy of the function epilogue, which we count here.
        value_ir_uses[sret_param] = ValueUseState::Multiple;
    }

    // Stack of iterators over Values as we do DFS to mark
    // Multiple-state subtrees. The iterator type is whatever is
    // returned by `uses` below.
    let mut stack: SmallVec<[_; 16]> = smallvec![];

    // Find the args for the inst corresponding to the given value.
    //
    // Note that "root" instructions are skipped here. This means that multiple
    // uses of any result of a multi-result instruction are not considered
    // multiple uses of the operands of a multi-result instruction. This
    // requires tight coupling with `get_value_as_source_or_const`, which is
    // the consumer of the map that this function produces.
    let uses = |value| {
        trace!(" -> pushing args for {} onto stack", value);
        if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
            if is_value_use_root(f, src_inst) {
                None
            } else {
                Some(f.dfg.inst_values(src_inst))
            }
        } else {
            None
        }
    };

    // Do a DFS through `value_ir_uses` to mark a subtree as
    // Multiple.
    for inst in f
        .layout
        .blocks()
        .flat_map(|block| f.layout.block_insts(block))
    {
        // Iterate over all values used by all instructions, noting an
        // additional use on each operand.
        for arg in f.dfg.inst_values(inst) {
            debug_assert!(f.dfg.value_is_real(arg));
            let old = value_ir_uses[arg];
            value_ir_uses[arg].inc();
            let new = value_ir_uses[arg];
            trace!("arg {} used, old state {:?}, new {:?}", arg, old, new);

            // On transition to Multiple, do DFS.
            if old == ValueUseState::Multiple || new != ValueUseState::Multiple {
                continue;
            }
            if let Some(iter) = uses(arg) {
                stack.push(iter);
            }
            while let Some(iter) = stack.last_mut() {
                if let Some(value) = iter.next() {
                    debug_assert!(f.dfg.value_is_real(value));
                    trace!(" -> DFS reaches {}", value);
                    if value_ir_uses[value] == ValueUseState::Multiple {
                        // Truncate DFS here: no need to go further,
                        // as whole subtree must already be Multiple.
                        // With debug asserts, check one level of
                        // that invariant at least.
                        debug_assert!(uses(value).into_iter().flatten().all(|arg| {
                            debug_assert!(f.dfg.value_is_real(arg));
                            value_ir_uses[arg] == ValueUseState::Multiple
                        }));
                        continue;
                    }
                    value_ir_uses[value] = ValueUseState::Multiple;
                    trace!(" -> became Multiple");
                    if let Some(iter) = uses(value) {
                        stack.push(iter);
                    }
                } else {
                    // Empty iterator, discard.
                    stack.pop();
                }
            }
        }
    }

    value_ir_uses
}
1312
/// Definition of a "root" instruction for the calculation of `ValueUseState`.
///
/// This function calculates whether `inst` is considered a "root" for
/// value-use information. Roots are never looked through during
/// `get_value_as_source_or_const`, and marking an instruction as a root
/// additionally prevents `Multiple`-used results of `inst` from propagating to
/// the instruction's operands.
///
/// Currently this is defined as multi-result instructions. That means that
/// lowerings are never allowed to look through a multi-result instruction to
/// generate patterns. Note that this isn't possible in ISLE today anyway, so
/// it isn't currently much of a loss.
///
/// The main purpose of this function is to prevent the operands of a
/// multi-result instruction from being forcibly considered `Multiple`-used
/// regardless of circumstances.
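///
/// A hedged illustration of the distinction (the instruction handles here are
/// hypothetical, not taken from this file):
///
/// ```ignore
/// // `isplit` produces two results, so it is a value-use root:
/// assert!(is_value_use_root(&func, isplit_inst));
/// // `iadd` produces a single result, so it is not:
/// assert!(!is_value_use_root(&func, iadd_inst));
/// ```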
fn is_value_use_root(f: &Function, inst: Inst) -> bool {
    f.dfg.inst_results(inst).len() > 1
}

/// Function-level queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get the function's `DataFlowGraph`.
    pub fn dfg(&self) -> &DataFlowGraph {
        &self.f.dfg
    }

    /// Get the `Callee`.
    pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
        self.vcode.abi()
    }

    /// Get the `Callee`, mutably.
    pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
        self.vcode.abi_mut()
    }
}

/// Instruction input/output queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get the `InstructionData` for a given IR instruction.
    pub fn data(&self, ir_inst: Inst) -> &InstructionData {
        &self.f.dfg.insts[ir_inst]
    }

    /// Resolve a `GlobalValue` to its symbol data (external name, relocation
    /// distance, and offset), if it is defined as a symbol.
    pub fn symbol_value_data<'b>(
        &'b self,
        global_value: GlobalValue,
    ) -> Option<(&'b ExternalName, RelocDistance, i64)> {
        let gvdata = &self.f.global_values[global_value];
        match gvdata {
            &GlobalValueData::Symbol {
                ref name,
                ref offset,
                colocated,
                ..
            } => {
                let offset = offset.bits();
                let dist = if colocated {
                    RelocDistance::Near
                } else {
                    RelocDistance::Far
                };
                Some((name, dist, offset))
            }
            _ => None,
        }
    }

    /// Returns the memory flags of a given memory access.
    pub fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
        match &self.f.dfg.insts[ir_inst] {
            &InstructionData::AtomicCas { flags, .. }
            | &InstructionData::AtomicRmw { flags, .. }
            | &InstructionData::Load { flags, .. }
            | &InstructionData::LoadNoOffset { flags, .. }
            | &InstructionData::Store { flags, .. }
            | &InstructionData::StoreNoOffset { flags, .. } => Some(flags),
            _ => None,
        }
    }

    /// Get the source location for a given instruction.
    pub fn srcloc(&self, ir_inst: Inst) -> RelSourceLoc {
        self.f.rel_srclocs()[ir_inst]
    }

    /// Get the number of inputs to the given IR instruction. This counts only
    /// the `Value` arguments to the instruction; block arguments are not
    /// included.
    pub fn num_inputs(&self, ir_inst: Inst) -> usize {
        self.f.dfg.inst_args(ir_inst).len()
    }

    /// Get the number of outputs of the given IR instruction.
    pub fn num_outputs(&self, ir_inst: Inst) -> usize {
        self.f.dfg.inst_results(ir_inst).len()
    }

    /// Get the type for an instruction's input.
    pub fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
        self.value_ty(self.input_as_value(ir_inst, idx))
    }

    /// Get the type for a value.
    pub fn value_ty(&self, val: Value) -> Type {
        self.f.dfg.value_type(val)
    }

    /// Get the type for an instruction's output.
    pub fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
        self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
    }

    /// Get the value of a constant instruction (`iconst`, etc.) as a 64-bit
    /// value, if possible.
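    ///
    /// As a worked example of the invariant asserted below (values
    /// illustrative): a 32-bit constant is stored zero-extended to 64 bits,
    /// so shifting it up and back down by `64 - 32` bits is the identity:
    ///
    /// ```ignore
    /// let c: u64 = 0xFFFF_FFFF; // iconst.i32 constant, zero-extended
    /// let shift = 64 - 32;
    /// assert_eq!(c, (c << shift) >> shift); // upper 32 bits are zero
    /// ```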
    pub fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
        self.inst_constants.get(&ir_inst).map(|&c| {
            // The upper bits must be zero, enforced during legalization and by
            // the CLIF verifier.
            debug_assert_eq!(c, {
                let size = self.output_ty(ir_inst, 0).bits() as u64;
                let shift = 64 - size;
                (c << shift) >> shift
            });
            c
        })
    }

    /// Resolves a particular input of an instruction to the `Value` that
    /// represents it.
    pub fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        debug_assert!(self.f.dfg.value_is_real(val));
        val
    }

    /// Get the input as one of two options other than a direct register:
    ///
    /// - An instruction, given that it is effect-free or able to sink its
    ///   effect to the current instruction being lowered, given that it has
    ///   only one output, and, if effect-ful, given that this is the only use;
    /// - A constant, if the value is a constant.
    ///
    /// The instruction input may be available in either of these forms. It may
    /// be available in neither form, if the conditions are not met; if so, use
    /// `put_input_in_regs()` instead to get it in a register.
    ///
    /// If the backend merges the effect of a side-effecting instruction, it
    /// must call `sink_inst()`. When this is called, it indicates that the
    /// effect has been sunk to the current scan location. The sunk
    /// instruction's result(s) must have *no* uses remaining, because it will
    /// not be codegen'd (it has been integrated into the current instruction).
    ///
    /// For more information see [`Lower::get_value_as_source_or_const`].
    pub fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
        let val = self.input_as_value(ir_inst, idx);
        self.get_value_as_source_or_const(val)
    }

    /// Resolves a `Value` definition to the source instruction it came from,
    /// plus whether it is a unique use of that instruction.
    ///
    /// This function is the workhorse of pattern-matching in ISLE, enabling
    /// multiple instructions to be combined into one lowering. It is used
    /// implicitly in patterns such as `(iadd x (iconst y))`, where this
    /// function extracts the `(iconst y)` operand.
    ///
    /// At its core this function is a wrapper around
    /// [`DataFlowGraph::value_def`]. It applies a filter on top of that,
    /// however, to determine when it is actually safe to "look through" the
    /// definition of `val` and view the underlying instruction. This protects
    /// against duplicating side effects such as loads.
    ///
    /// Internally this uses the data computed from `compute_use_states` along
    /// with other instruction properties to know what to return.
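    ///
    /// A hedged sketch of typical use by a lowering backend (names are
    /// illustrative only, not a prescribed API):
    ///
    /// ```ignore
    /// let input = ctx.get_value_as_source_or_const(val);
    /// if let Some(c) = input.constant {
    ///     // `val` is a known constant: a reg-immediate form may be used.
    /// } else if let InputSourceInst::UniqueUse(src_inst, _) = input.inst {
    ///     // Safe to merge `src_inst` into the current lowering.
    /// } else {
    ///     // Fall back to `put_value_in_regs(val)`.
    /// }
    /// ```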
    pub fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
        trace!(
            "get_value_as_source_or_const: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
            val, self.cur_inst, self.cur_scan_entry_color,
        );
        let inst = match self.f.dfg.value_def(val) {
            // OK to merge the source instruction if we have a source
            // instruction at all, and one of these two conditions holds:
            //
            // - It has no side effects and is not a "value-use root"
            //   instruction. Instructions that are considered "roots" for
            //   value-use calculations do not have accurate `ValueUseState`
            //   information for their operands. Roots are currently the
            //   multi-result instructions, to prevent a use of each result
            //   from forcing all operands of the multi-result instruction to
            //   also be `Multiple`. This in turn means that the
            //   `ValueUseState` of a root instruction's operands would be a
            //   lie if pattern matching were to look through the multi-result
            //   instruction. As a result, the "look through this instruction"
            //   logic only succeeds if it's not a root instruction.
            //
            // - It has a side-effect, has one output value, that one
            //   output has only one use, directly or indirectly (so
            //   cannot be duplicated -- see comment on
            //   `ValueUseState`), and the instruction's color is *one
            //   less than* the current scan color.
            //
            //   This latter set of conditions is testing whether a
            //   side-effecting instruction can sink to the current scan
            //   location; this is possible if the in-color of this inst is
            //   equal to the out-color of the producing inst, so no other
            //   side-effecting ops occur between them (which will only be true
            //   if they are in the same BB, because the color increments at
            //   each BB start).
            //
            //   If it is actually sunk, then in `sink_inst()`, we update the
            //   scan color so that as we scan over the range past which the
            //   instruction was sunk, we allow other instructions (that came
            //   prior to the sunk instruction) to sink.
            ValueDef::Result(src_inst, result_idx) => {
                let src_side_effect = has_lowering_side_effect(self.f, src_inst);
                trace!(" -> src inst {}", src_inst);
                trace!(" -> has lowering side effect: {}", src_side_effect);
                if is_value_use_root(self.f, src_inst) {
                    // If this instruction is a "root instruction" then we must
                    // not look through it to see the definition. The
                    // `ValueUseState` for this instruction's operands assumes
                    // that the instruction is generated exactly once, which
                    // might be violated were we to allow looking through it.
                    trace!(" -> is a root instruction");
                    InputSourceInst::None
                } else if !src_side_effect {
                    // Otherwise, if this instruction has no side effects and
                    // the value is used only once, then we can look through it
                    // with a "unique" tag. A non-unique `Use` is returned for
                    // other values so that consumers know how the value is
                    // computed but also know that the producing instruction
                    // cannot be elided.
                    if self.value_ir_uses[val] == ValueUseState::Once {
                        InputSourceInst::UniqueUse(src_inst, result_idx)
                    } else {
                        InputSourceInst::Use(src_inst, result_idx)
                    }
                } else {
                    // Side-effect: test whether this is the only use of the
                    // only result of the instruction, and whether colors allow
                    // the code-motion.
                    trace!(
                        " -> side-effecting op {} for val {}: use state {:?}",
                        src_inst, val, self.value_ir_uses[val]
                    );
                    if self.cur_scan_entry_color.is_some()
                        && self.value_ir_uses[val] == ValueUseState::Once
                        && self.num_outputs(src_inst) == 1
                        && self
                            .side_effect_inst_entry_colors
                            .get(&src_inst)
                            .unwrap()
                            .get()
                            + 1
                            == self.cur_scan_entry_color.unwrap().get()
                    {
                        InputSourceInst::UniqueUse(src_inst, 0)
                    } else {
                        InputSourceInst::None
                    }
                }
            }
            _ => InputSourceInst::None,
        };
        let constant = inst.as_inst().and_then(|(inst, _)| self.get_constant(inst));

        NonRegInput { inst, constant }
    }

    /// Increment the reference count for the Value, ensuring that it gets lowered.
    pub fn increment_lowered_uses(&mut self, val: Value) {
        self.value_lowered_uses[val] += 1
    }

    /// Put the `idx`th input into register(s) and return the assigned register.
    pub fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        self.put_value_in_regs(val)
    }

    /// Put the given value into register(s) and return the assigned register.
    pub fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
        debug_assert!(self.f.dfg.value_is_real(val));
        trace!("put_value_in_regs: val {}", val);

        if let Some(inst) = self.f.dfg.value_def(val).inst() {
            assert!(!self.inst_sunk.contains(&inst));
        }

        let regs = self.value_regs[val];
        trace!(" -> regs {:?}", regs);
        assert!(regs.is_valid());

        self.value_lowered_uses[val] += 1;

        regs
    }

    /// Get the ValueRegs for the edge-defined values for special
    /// try-call-return block arguments.
    pub fn try_call_return_defs(&mut self, ir_inst: Inst) -> &[ValueRegs<Writable<Reg>>] {
        &self.try_call_rets.get(&ir_inst).unwrap()[..]
    }

    /// Get the Regs for the edge-defined values for special
    /// try-call-return exception payload arguments.
    pub fn try_call_exception_defs(&mut self, ir_inst: Inst) -> &[Writable<Reg>] {
        &self.try_call_payloads.get(&ir_inst).unwrap()[..]
    }
}

/// Codegen primitives: allocate temps, emit instructions, set result registers,
/// ask for an input to be gen'd into a register.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get a new temp.
    pub fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
        writable_value_regs(self.vregs.alloc_with_deferred_error(ty))
    }

    /// Get the current root instruction that we are lowering.
    pub fn cur_inst(&self) -> Inst {
        self.cur_inst.unwrap()
    }

    /// Emit a machine instruction.
    pub fn emit(&mut self, mach_inst: I) {
        trace!("emit: {:?}", mach_inst);
        self.ir_insts.push(mach_inst);
    }
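
    // A hedged sketch of how the primitives above compose (illustrative only;
    // `src_reg` is a hypothetical input register): allocate a temp, then emit
    // a move into it with the same `I::gen_move` constructor used by
    // `ensure_in_vreg` below.
    //
    //     let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap();
    //     ctx.emit(I::gen_move(tmp, src_reg, types::I64));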

    /// Indicate that the side-effect of an instruction has been sunk to the
    /// current scan location. This should only be done when the instruction's
    /// original results are not used (i.e., `put_input_in_regs` is not invoked
    /// for the input produced by the sunk instruction); otherwise, the
    /// side-effect will occur twice.
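    ///
    /// A hedged sketch of the intended protocol (names illustrative):
    ///
    /// ```ignore
    /// let input = ctx.get_input_as_source_or_const(cur_inst, 0);
    /// if let InputSourceInst::UniqueUse(load_inst, 0) = input.inst {
    ///     ctx.sink_inst(load_inst); // the load's effect now happens here
    ///     // ... emit a fused load-op machine instruction instead ...
    /// }
    /// ```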
    pub fn sink_inst(&mut self, ir_inst: Inst) {
        assert!(has_lowering_side_effect(self.f, ir_inst));
        assert!(self.cur_scan_entry_color.is_some());

        for result in self.dfg().inst_results(ir_inst) {
            assert!(self.value_lowered_uses[*result] == 0);
        }

        let sunk_inst_entry_color = self
            .side_effect_inst_entry_colors
            .get(&ir_inst)
            .cloned()
            .unwrap();
        let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
        assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
        self.cur_scan_entry_color = Some(sunk_inst_entry_color);
        self.inst_sunk.insert(ir_inst);
    }

    /// Retrieve immediate data given a handle.
    pub fn get_immediate_data(&self, imm: Immediate) -> &ConstantData {
        self.f.dfg.immediates.get(imm).unwrap()
    }

    /// Retrieve constant data given a handle.
    pub fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
        self.f.dfg.constants.get(constant_handle)
    }

    /// Indicate that a constant should be emitted.
    pub fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
        self.vcode.constants().insert(constant)
    }

    /// Cause the value in `reg` to be in a virtual reg, by copying it into a
    /// new virtual reg if `reg` is a real reg. `ty` describes the type of the
    /// value in `reg`.
    pub fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg {
        if reg.to_virtual_reg().is_some() {
            reg
        } else {
            let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
            self.emit(I::gen_move(new_reg, reg, ty));
            new_reg.to_reg()
        }
    }

    /// Add a range fact to a register, if no other fact is present.
    pub fn add_range_fact(&mut self, reg: Reg, bit_width: u16, min: u64, max: u64) {
        if self.flags.enable_pcc() {
            self.vregs.set_fact_if_missing(
                reg.to_virtual_reg().unwrap(),
                Fact::Range {
                    bit_width,
                    min,
                    max,
                },
            );
        }
    }
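
    // A hedged usage sketch for `add_range_fact` (illustrative; `dst` is a
    // hypothetical destination register): a backend that just zero-extended
    // an 8-bit load might record that the 64-bit result lies in `0..=0xff`:
    //
    //     ctx.add_range_fact(dst.to_reg(), 64, 0, 0xff);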
}

#[cfg(test)]
mod tests {
    use super::ValueUseState;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::types;
    use crate::ir::{Function, InstBuilder};

    #[test]
    fn multi_result_use_once() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v5]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Once);
        assert_eq!(uses[v5], ValueUseState::Once);
    }

    #[test]
    fn results_used_twice_but_not_operands() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v4]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Multiple);
        assert_eq!(uses[v5], ValueUseState::Unused);
    }
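
    // A hedged companion to the tests above, added for illustration: when a
    // single-result value is used more than once, the DFS in
    // `compute_use_states` marks its whole operand subtree `Multiple`.
    #[test]
    fn multiple_uses_propagate_to_operands() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        // `iadd` has a single result, so it is not a value-use root: uses of
        // `v3` propagate through it to `v1` and `v2`.
        let v3 = pos.ins().iadd(v1, v2);
        pos.ins().return_(&[v3, v3]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Multiple);
        assert_eq!(uses[v2], ValueUseState::Multiple);
        assert_eq!(uses[v3], ValueUseState::Multiple);
    }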
}