Skip to main content

cranelift_codegen/machinst/
lower.rs

1//! This module implements lowering (instruction selection) from Cranelift IR
2//! to machine instructions with virtual registers. This is *almost* the final
3//! machine code, except for register allocation.
4
5// TODO: separate the IR-query core of `Lower` from the lowering logic built on
6// top of it, e.g. the side-effect/coloring analysis and the scan support.
7
8use crate::entity::SecondaryMap;
9use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
10use crate::ir::{
11    ArgumentPurpose, Block, BlockArg, Constant, ConstantData, DataFlowGraph, ExternalName,
12    Function, GlobalValue, GlobalValueData, Immediate, Inst, InstructionData, MemFlags,
13    RelSourceLoc, SigRef, Signature, Type, Value, ValueDef, ValueLabelAssignments, ValueLabelStart,
14};
15use crate::machinst::valueregs::InvalidSentinel;
16use crate::machinst::{
17    ABIMachineSpec, BackwardsInsnIndex, BlockIndex, BlockLoweringOrder, CallArgList, CallInfo,
18    CallRetList, Callee, InsnIndex, LoweredBlock, MachLabel, Reg, Sig, SigSet, TryCallInfo, VCode,
19    VCodeBuilder, VCodeConstant, VCodeConstantData, VCodeConstants, VCodeInst, ValueRegs, Writable,
20    writable_value_regs,
21};
22use crate::settings::Flags;
23use crate::{CodegenError, CodegenResult, trace};
24use crate::{FxHashMap, FxHashSet};
25use alloc::vec::Vec;
26use core::fmt::Debug;
27use cranelift_control::ControlPlane;
28use smallvec::{SmallVec, smallvec};
29
30use super::{VCodeBuildDirection, VRegAllocator};
31
/// A vector of ValueRegs, used to represent the outputs of an instruction.
/// (The inline capacity of two covers the common one- or two-result case
/// without a heap allocation.)
pub type InstOutput = SmallVec<[ValueRegs<Reg>; 2]>;
34
35/// An "instruction color" partitions CLIF instructions by side-effecting ops.
36/// All instructions with the same "color" are guaranteed not to be separated by
37/// any side-effecting op (for this purpose, loads are also considered
38/// side-effecting, to avoid subtle questions w.r.t. the memory model), and
39/// furthermore, it is guaranteed that for any two instructions A and B such
40/// that color(A) == color(B), either A dominates B and B postdominates A, or
41/// vice-versa. (For now, in practice, only ops in the same basic block can ever
42/// have the same color, trivially providing the second condition.) Intuitively,
43/// this means that the ops of the same color must always execute "together", as
44/// part of one atomic contiguous section of the dynamic execution trace, and
45/// they can be freely permuted (modulo true dataflow dependencies) without
46/// affecting program behavior.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct InstColor(u32);
impl InstColor {
    /// Wrap a raw color number.
    fn new(n: u32) -> InstColor {
        Self(n)
    }

    /// Get an arbitrary index representing this color. The index is unique
    /// *within a single function compilation*, but indices may be reused across
    /// functions.
    pub fn get(self) -> u32 {
        let InstColor(index) = self;
        index
    }
}
61
62/// A representation of all of the ways in which a value is available, aside
63/// from as a direct register.
64///
65/// - An instruction, if it would be allowed to occur at the current location
66///   instead (see [Lower::get_input_as_source_or_const()] for more details).
67///
68/// - A constant, if the value is known to be a constant.
#[derive(Clone, Copy, Debug)]
pub struct NonRegInput {
    /// An instruction produces this value (as the given output), and its
    /// computation (and side-effect if applicable) could occur at the
    /// current instruction's location instead.
    ///
    /// If this instruction's operation is merged into the current instruction,
    /// the backend must call [Lower::sink_inst()].
    ///
    /// This enum indicates whether this use of the source instruction
    /// is unique or not (see [InputSourceInst]); `InputSourceInst::None`
    /// means no mergeable producing instruction is known.
    pub inst: InputSourceInst,
    /// The value is a known constant (64-bit payload), if `Some`.
    pub constant: Option<u64>,
}
84
85/// When examining an input to an instruction, this enum provides one
86/// of several options: there is or isn't a single instruction (that
87/// we can see and merge with) that produces that input's value, and
88/// we are or aren't the single user of that instruction.
#[derive(Clone, Copy, Debug)]
pub enum InputSourceInst {
    /// The input in question is the single, unique use of the given
    /// instruction and output index, and it can be sunk to the
    /// location of this input.
    UniqueUse(Inst, usize),
    /// The input in question is one of multiple uses of the given
    /// instruction. It can still be sunk to the location of this
    /// input (but, unlike `UniqueUse`, the producer may also be
    /// codegen'd elsewhere).
    Use(Inst, usize),
    /// We cannot determine which instruction produced the input, or
    /// it is one of several instructions (e.g., due to a control-flow
    /// merge and blockparam), or the source instruction cannot be
    /// allowed to sink to the current location due to side-effects.
    None,
}
105
106impl InputSourceInst {
107    /// Get the instruction and output index for this source, whether
108    /// we are its single or one of many users.
109    pub fn as_inst(&self) -> Option<(Inst, usize)> {
110        match self {
111            &InputSourceInst::UniqueUse(inst, output_idx)
112            | &InputSourceInst::Use(inst, output_idx) => Some((inst, output_idx)),
113            &InputSourceInst::None => None,
114        }
115    }
116}
117
/// A machine backend: the target-specific half of lowering. Implementations
/// translate individual CLIF instructions into `MInst`s via the shared
/// [`Lower`] context.
pub trait LowerBackend {
    /// The machine instruction type.
    type MInst: VCodeInst;

    /// Lower a single instruction.
    ///
    /// For a branch, this function should not generate the actual branch
    /// instruction. However, it must force any values it needs for the branch
    /// edge (block-param actuals) into registers, because the actual branch
    /// generation (`lower_branch()`) happens *after* any possible merged
    /// out-edge.
    ///
    /// Returns `None` if no lowering for the instruction was found.
    fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> Option<InstOutput>;

    /// Lower a block-terminating group of branches (which together can be seen
    /// as one N-way branch), given a vcode MachLabel for each target.
    ///
    /// Returns `None` if no lowering for the branch was found.
    fn lower_branch(
        &self,
        ctx: &mut Lower<Self::MInst>,
        inst: Inst,
        targets: &[MachLabel],
    ) -> Option<()>;

    /// A bit of a hack: give a fixed register that always holds the result of a
    /// `get_pinned_reg` instruction, if known.  This allows elision of moves
    /// into the associated vreg, instead using the real reg directly.
    ///
    /// Default: no pinned register.
    fn maybe_pinned_reg(&self) -> Option<Reg> {
        None
    }
}
152
/// Machine-independent lowering driver / machine-instruction container. Maintains a correspondence
/// from original Inst to MachInsts.
pub struct Lower<'func, I: VCodeInst> {
    /// The function to lower.
    pub(crate) f: &'func Function,

    /// Lowered machine instructions, accumulated via the builder.
    vcode: VCodeBuilder<I>,

    /// VReg allocation context, given to the vcode field at build time to finalize the vcode.
    vregs: VRegAllocator<I>,

    /// Mapping from `Value` (SSA value in IR) to virtual register.
    value_regs: SecondaryMap<Value, ValueRegs<Reg>>,

    /// sret (struct-return pointer) registers, if the signature uses them.
    sret_reg: Option<ValueRegs<Reg>>,

    /// Instruction colors at block exits. From this map, we can recover all
    /// instruction colors by scanning backward from the block end and
    /// decrementing on any color-changing (side-effecting) instruction.
    block_end_colors: SecondaryMap<Block, InstColor>,

    /// Instruction colors at side-effecting ops. This is the *entry* color,
    /// i.e., the version of global state that exists before an instruction
    /// executes.  For each side-effecting instruction, the *exit* color is its
    /// entry color plus one.
    side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,

    /// Current color as we scan during lowering. While we are lowering an
    /// instruction, this is equal to the color *at entry to* the instruction.
    cur_scan_entry_color: Option<InstColor>,

    /// Current instruction as we scan during lowering.
    cur_inst: Option<Inst>,

    /// Instruction constant values, if known (see `is_constant_64bit`).
    inst_constants: FxHashMap<Inst, u64>,

    /// Use-counts per SSA value, as counted in the input IR. These
    /// are "coarsened", in the abstract-interpretation sense: we only
    /// care about "0, 1, many" states, as this is all we need and
    /// this lets us do an efficient fixpoint analysis.
    ///
    /// See doc comment on `ValueUseState` for more details.
    value_ir_uses: SecondaryMap<Value, ValueUseState>,

    /// Actual uses of each SSA value so far, incremented while lowering.
    value_lowered_uses: SecondaryMap<Value, u32>,

    /// Effectful instructions that have been sunk; they are not codegen'd at
    /// their original locations.
    inst_sunk: FxHashSet<Inst>,

    /// Instructions collected for the CLIF inst in progress, in forward order.
    /// (The builder runs backward, so these are reversed on flush.)
    ir_insts: Vec<I>,

    /// Try-call block arg normal-return values, indexed by instruction.
    try_call_rets: FxHashMap<Inst, SmallVec<[ValueRegs<Writable<Reg>>; 2]>>,

    /// Try-call block arg exceptional-return payloads, indexed by
    /// instruction. Payloads are carried in registers per the ABI and
    /// can only be one register each.
    try_call_payloads: FxHashMap<Inst, SmallVec<[Writable<Reg>; 2]>>,

    /// The register to use for GetPinnedReg, if any, on this architecture.
    pinned_reg: Option<Reg>,

    /// Compilation flags.
    flags: Flags,
}
224
225/// How is a value used in the IR?
226///
227/// This can be seen as a coarsening of an integer count. We only need
228/// distinct states for zero, one, or many.
229///
230/// This analysis deserves further explanation. The basic idea is that
231/// we want to allow instruction lowering to know whether a value that
232/// an instruction references is *only* referenced by that one use, or
233/// by others as well. This is necessary to know when we might want to
234/// move a side-effect: we cannot, for example, duplicate a load, so
235/// we cannot let instruction lowering match a load as part of a
236/// subpattern and potentially incorporate it.
237///
238/// Note that a lot of subtlety comes into play once we have
239/// *indirect* uses. The classical example of this in our development
240/// history was the x86 compare instruction, which is incorporated
241/// into flags users (e.g. `selectif`, `trueif`, branches) and can
242/// subsequently incorporate loads, or at least we would like it
243/// to. However, danger awaits: the compare might be the only user of
244/// a load, so we might think we can just move the load (and nothing
245/// is duplicated -- success!), except that the compare itself is
246/// codegen'd in multiple places, where it is incorporated as a
247/// subpattern itself.
248///
249/// So we really want a notion of "unique all the way along the
250/// matching path". Rust's `&T` and `&mut T` offer a partial analogy
251/// to the semantics that we want here: we want to know when we've
252/// matched a unique use of an instruction, and that instruction's
253/// unique use of another instruction, etc, just as `&mut T` can only
254/// be obtained by going through a chain of `&mut T`. If one has a
255/// `&T` to a struct containing `&mut T` (one of several uses of an
256/// instruction that itself has a unique use of an instruction), one
257/// can only get a `&T` (one can only get a "I am one of several users
258/// of this instruction" result).
259///
260/// We could track these paths, either dynamically as one "looks up the operand
261/// tree" or precomputed. But the former requires state and means that the
262/// `Lower` API carries that state implicitly, which we'd like to avoid if we
263/// can. And the latter implies O(n^2) storage: it is an all-pairs property (is
264/// inst `i` unique from the point of view of `j`).
265///
266/// To make matters even a little more complex still, a value that is
267/// not uniquely used when initially viewing the IR can *become*
268/// uniquely used, at least as a root allowing further unique uses of
269/// e.g. loads to merge, if no other instruction actually merges
270/// it. To be more concrete, if we have `v1 := load; v2 := op v1; v3
271/// := op v2; v4 := op v2` then `v2` is non-uniquely used, so from the
272/// point of view of lowering `v4` or `v3`, we cannot merge the load
273/// at `v1`. But if we decide just to use the assigned register for
274/// `v2` at both `v3` and `v4`, then we only actually codegen `v2`
275/// once, so it *is* a unique root at that point and we *can* merge
276/// the load.
277///
278/// Note also that the color scheme is not sufficient to give us this
279/// information, for various reasons: reasoning about side-effects
280/// does not tell us about potential duplication of uses through pure
281/// ops.
282///
283/// To keep things simple and avoid error-prone lowering APIs that
284/// would extract more information about whether instruction merging
285/// happens or not (we don't have that info now, and it would be
286/// difficult to refactor to get it and make that refactor 100%
287/// correct), we give up on the above "can become unique if not
288/// actually merged" point. Instead, we compute a
289/// transitive-uniqueness. That is what this enum represents.
290///
291/// There is one final caveat as well to the result of this analysis.  Notably,
292/// we define some instructions to be "root" instructions, which means that we
293/// assume they will always be codegen'd at the root of a matching tree, and not
294/// matched. (This comes with the caveat that we actually enforce this property
295/// by making them "opaque" to subtree matching in
296/// `get_value_as_source_or_const`). Because they will always be codegen'd once,
297/// they in some sense "reset" multiplicity: these root instructions can be used
298/// many times, but because their result(s) are only computed once, they only
299/// use their inputs once.
300///
301/// We currently define all multi-result instructions to be "root" instructions,
302/// because it is too complex to reason about matching through them, and they
303/// cause too-coarse-grained approximation of multiplicity otherwise: the
304/// analysis would have to assume (as it used to!) that they are always
305/// multiply-used, simply because they have multiple outputs even if those
306/// outputs are used only once.
307///
308/// In the future we could define other instructions to be "root" instructions
309/// as well, if we make the corresponding change to get_value_as_source_or_const
310/// as well.
311///
312/// To define `ValueUseState` more plainly: a value is `Unused` if no references
313/// exist to it; `Once` if only one other op refers to it, *and* that other op
314/// is `Unused` or `Once`; and `Multiple` otherwise. In other words, `Multiple`
315/// is contagious (except through root instructions): even if an op's result
316/// value is directly used only once in the CLIF, that value is `Multiple` if
317/// the op that uses it is itself used multiple times (hence could be codegen'd
318/// multiple times). In brief, this analysis tells us whether, if every op
319/// merged all of its operand tree, a given op could be codegen'd in more than
320/// one place.
321///
322/// To compute this, we first consider direct uses. At this point
323/// `Unused` answers are correct, `Multiple` answers are correct, but
324/// some `Once`s may change to `Multiple`s. Then we propagate
325/// `Multiple` transitively using a workqueue/fixpoint algorithm.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ValueUseState {
    /// Not used at all.
    Unused,
    /// Used exactly once.
    Once,
    /// Used multiple times.
    Multiple,
}

impl ValueUseState {
    /// Record one additional use. The count saturates at `Multiple`:
    /// `Unused -> Once -> Multiple -> Multiple`.
    fn inc(&mut self) {
        *self = if matches!(self, Self::Unused) {
            Self::Once
        } else {
            Self::Multiple
        };
    }
}
346
/// Notion of "relocation distance". This gives an estimate of how far away a symbol will be from a
/// reference.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RelocDistance {
    /// Target of relocation is "nearby". The threshold for this is fuzzy but should be interpreted
    /// as approximately "within the compiled output of one module"; e.g., within AArch64's +/-
    /// 128MB offset. If unsure, use `Far` instead: `Far` is always safe, merely
    /// potentially less efficient.
    Near,
    /// Target of relocation could be anywhere in the address space.
    Far,
}
358
359impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Prepare a new lowering context for the given IR function.
    ///
    /// Performs all pre-lowering analysis in a few passes over the layout:
    /// vreg assignment for every SSA value, sret-parameter discovery,
    /// instruction coloring, constant discovery, and value-use-state
    /// computation.
    ///
    /// # Errors
    ///
    /// Returns a `CodegenError` if vreg allocation fails (propagated from
    /// `VRegAllocator::alloc`).
    pub fn new(
        f: &'func Function,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        sigs: SigSet,
        flags: Flags,
    ) -> CodegenResult<Self> {
        let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
        // Note: the VCode is built backward (see `VCodeBuildDirection`);
        // lowering scans instructions in reverse.
        let vcode = VCodeBuilder::new(
            sigs,
            abi,
            emit_info,
            block_order,
            constants,
            VCodeBuildDirection::Backward,
            flags.log2_min_function_alignment(),
        );

        // We usually need two VRegs per instruction result, plus extras for
        // various temporaries, but two per Value is a good starting point.
        let mut vregs = VRegAllocator::with_capacity(f.dfg.num_values() * 2);

        let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
        let mut try_call_rets = FxHashMap::default();
        let mut try_call_payloads = FxHashMap::default();

        // Assign a vreg to each block param, each inst result, and
        // each edge-defined block-call arg.
        for bb in f.layout.blocks() {
            for &param in f.dfg.block_params(bb) {
                let ty = f.dfg.value_type(param);
                if value_regs[param].is_invalid() {
                    let regs = vregs.alloc(ty)?;
                    value_regs[param] = regs;
                    trace!("bb {} param {}: regs {:?}", bb, param, regs);
                }
            }
            for inst in f.layout.block_insts(bb) {
                for &result in f.dfg.inst_results(inst) {
                    let ty = f.dfg.value_type(result);
                    if value_regs[result].is_invalid() && !ty.is_invalid() {
                        let regs = vregs.alloc(ty)?;
                        value_regs[result] = regs;
                        trace!(
                            "bb {} inst {} ({:?}): result {} regs {:?}",
                            bb, inst, f.dfg.insts[inst], result, regs,
                        );
                    }
                }

                // Preallocate return-value and exception-payload vregs for
                // any instruction carrying an exception table (`try_call`).
                if let Some(et) = f.dfg.insts[inst].exception_table() {
                    let exdata = &f.dfg.exception_tables[et];
                    let sig = &f.dfg.signatures[exdata.signature()];

                    let mut rets = smallvec![];
                    for ty in sig.returns.iter().map(|ret| ret.value_type) {
                        rets.push(vregs.alloc(ty)?.map(|r| Writable::from_reg(r)));
                    }
                    try_call_rets.insert(inst, rets);

                    let mut payloads = smallvec![];
                    // Note that this is intentionally using the calling
                    // convention of the callee to determine what payload types
                    // are available. The callee defines that, not the calling
                    // convention of the caller.
                    for &ty in sig
                        .call_conv
                        .exception_payload_types(I::ABIMachineSpec::word_type())
                    {
                        payloads.push(Writable::from_reg(vregs.alloc(ty)?.only_reg().unwrap()));
                    }
                    try_call_payloads.insert(inst, payloads);
                }
            }
        }

        // Find the sret register, if it's used.
        let mut sret_param = None;
        for ret in vcode.abi().signature().returns.iter() {
            if ret.purpose == ArgumentPurpose::StructReturn {
                let entry_bb = f.stencil.layout.entry_block().unwrap();
                for (&param, sig_param) in f
                    .dfg
                    .block_params(entry_bb)
                    .iter()
                    .zip(vcode.abi().signature().params.iter())
                {
                    if sig_param.purpose == ArgumentPurpose::StructReturn {
                        // A signature may carry at most one sret parameter.
                        assert!(sret_param.is_none());
                        sret_param = Some(param);
                    }
                }

                assert!(sret_param.is_some());
            }
        }

        let sret_reg = sret_param.map(|param| {
            let regs = value_regs[param];
            // An sret pointer is a single machine word, hence one register.
            assert!(regs.len() == 1);
            regs
        });

        // Compute instruction colors, find constant instructions, and find instructions with
        // side-effects, in one combined pass.
        let mut cur_color = 0;
        let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
        let mut side_effect_inst_entry_colors = FxHashMap::default();
        let mut inst_constants = FxHashMap::default();
        for bb in f.layout.blocks() {
            // Bump the color at each block start so colors never span blocks.
            cur_color += 1;
            for inst in f.layout.block_insts(bb) {
                let side_effect = has_lowering_side_effect(f, inst);

                trace!("bb {} inst {} has color {}", bb, inst, cur_color);
                if side_effect {
                    side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
                    trace!(" -> side-effecting; incrementing color for next inst");
                    cur_color += 1;
                }

                // Determine if this is a constant; if so, add to the table.
                if let Some(c) = is_constant_64bit(f, inst) {
                    trace!(" -> constant: {}", c);
                    inst_constants.insert(inst, c);
                }
            }

            block_end_colors[bb] = InstColor::new(cur_color);
        }

        let value_ir_uses = compute_use_states(f, sret_param);

        Ok(Lower {
            f,
            vcode,
            vregs,
            value_regs,
            sret_reg,
            block_end_colors,
            side_effect_inst_entry_colors,
            inst_constants,
            value_ir_uses,
            value_lowered_uses: SecondaryMap::default(),
            inst_sunk: FxHashSet::default(),
            cur_scan_entry_color: None,
            cur_inst: None,
            ir_insts: vec![],
            try_call_rets,
            try_call_payloads,
            pinned_reg: None,
            flags,
        })
    }
516
    /// Get a shared reference to the lowered-signature set for this
    /// compilation.
    pub fn sigs(&self) -> &SigSet {
        self.vcode.sigs()
    }
520
    /// Get a mutable reference to the lowered-signature set for this
    /// compilation.
    pub fn sigs_mut(&mut self) -> &mut SigSet {
        self.vcode.sigs_mut()
    }
524
    /// Emit the entry-block argument setup: copy each (used) ABI argument into
    /// the vregs assigned to the entry block's params, set up the return-value
    /// area if needed, and emit the `args` pseudo-instruction.
    ///
    /// Because the VCode is built backward, the `args` instruction emitted
    /// last here ends up *first* in the final instruction order.
    fn gen_arg_setup(&mut self) {
        if let Some(entry_bb) = self.f.layout.entry_block() {
            trace!(
                "gen_arg_setup: entry BB {} args are:\n{:?}",
                entry_bb,
                self.f.dfg.block_params(entry_bb)
            );

            for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
                // Skip params with no IR uses: no copy needed.
                if self.value_ir_uses[*param] == ValueUseState::Unused {
                    continue;
                }
                let regs = writable_value_regs(self.value_regs[*param]);
                for insn in self
                    .vcode
                    .vcode
                    .abi
                    .gen_copy_arg_to_regs(&self.vcode.vcode.sigs, i, regs, &mut self.vregs)
                    .into_iter()
                {
                    self.emit(insn);
                }
            }
            if let Some(insn) = self
                .vcode
                .vcode
                .abi
                .gen_retval_area_setup(&self.vcode.vcode.sigs, &mut self.vregs)
            {
                self.emit(insn);
            }

            // The `args` instruction below must come first. Finish
            // the current "IR inst" (with a default source location,
            // as for other special instructions inserted during
            // lowering) and continue the scan backward.
            self.finish_ir_inst(Default::default());

            if let Some(insn) = self.vcode.vcode.abi.take_args() {
                self.emit(insn);
            }
        }
    }
568
    /// Generate the return instruction: copy `rets` (one `ValueRegs` per
    /// non-sret return in signature order) into the ABI's return locations
    /// and emit the final `rets` instruction.
    pub fn gen_return(&mut self, rets: &[ValueRegs<Reg>]) {
        let mut out_rets = vec![];

        // Walk the ABI signature's returns; sret slots are satisfied from the
        // saved sret register rather than consuming an entry of `rets`.
        let mut rets = rets.into_iter();
        for (i, ret) in self
            .abi()
            .signature()
            .returns
            .clone()
            .into_iter()
            .enumerate()
        {
            let regs = if ret.purpose == ArgumentPurpose::StructReturn {
                self.sret_reg.unwrap()
            } else {
                *rets.next().unwrap()
            };

            let (regs, insns) = self.vcode.abi().gen_copy_regs_to_retval(
                self.vcode.sigs(),
                i,
                regs,
                &mut self.vregs,
            );
            out_rets.extend(regs);
            for insn in insns {
                self.emit(insn);
            }
        }

        // Hack: generate a virtual instruction that uses vmctx in
        // order to keep it alive for the duration of the function,
        // for the benefit of debuginfo.
        if self.f.dfg.values_labels.is_some() {
            if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
                if self.value_ir_uses[vmctx_val] != ValueUseState::Unused {
                    let vmctx_reg = self.value_regs[vmctx_val].only_reg().unwrap();
                    self.emit(I::gen_dummy_use(vmctx_reg));
                }
            }
        }

        let inst = self.abi().gen_rets(out_rets);
        self.emit(inst);
    }
615
616    /// Generate list of registers to hold the output of a call with
617    /// signature `sig`.
618    pub fn gen_call_output(&mut self, sig: &Signature) -> InstOutput {
619        let mut rets = smallvec![];
620        for ty in sig.returns.iter().map(|ret| ret.value_type) {
621            rets.push(self.vregs.alloc_with_deferred_error(ty));
622        }
623        rets
624    }
625
    /// Likewise, but for a `SigRef` instead: looks up the signature in the
    /// function's DFG and delegates to [`Self::gen_call_output`].
    pub fn gen_call_output_from_sig_ref(&mut self, sig_ref: SigRef) -> InstOutput {
        self.gen_call_output(&self.f.dfg.signatures[sig_ref])
    }
630
631    /// Set up arguments values `args` for a call with signature `sig`.
632    pub fn gen_call_args(&mut self, sig: Sig, args: &[ValueRegs<Reg>]) -> CallArgList {
633        let (uses, insts) = self.vcode.abi().gen_call_args(
634            self.vcode.sigs(),
635            sig,
636            args,
637            /* is_tail_call */ false,
638            &self.flags,
639            &mut self.vregs,
640        );
641        for insn in insts {
642            self.emit(insn);
643        }
644        uses
645    }
646
647    /// Likewise, but for a `return_call`.
648    pub fn gen_return_call_args(&mut self, sig: Sig, args: &[ValueRegs<Reg>]) -> CallArgList {
649        let (uses, insts) = self.vcode.abi().gen_call_args(
650            self.vcode.sigs(),
651            sig,
652            args,
653            /* is_tail_call */ true,
654            &self.flags,
655            &mut self.vregs,
656        );
657        for insn in insts {
658            self.emit(insn);
659        }
660        uses
661    }
662
    /// Set up return values `outputs` for a call with signature `sig`.
    /// Passes `None` for exception payloads; see [`Self::gen_try_call_rets`]
    /// for the `try_call` variant.
    pub fn gen_call_rets(&mut self, sig: Sig, outputs: &[ValueRegs<Reg>]) -> CallRetList {
        self.vcode
            .abi()
            .gen_call_rets(self.vcode.sigs(), sig, outputs, None, &mut self.vregs)
    }
669
670    /// Likewise, but for a `try_call`.
671    pub fn gen_try_call_rets(&mut self, sig: Sig) -> CallRetList {
672        let ir_inst = self.cur_inst.unwrap();
673        let mut outputs: SmallVec<[ValueRegs<Reg>; 2]> = smallvec![];
674        for return_def in self.try_call_rets.get(&ir_inst).unwrap() {
675            outputs.push(return_def.map(|r| r.to_reg()));
676        }
677        let payloads = Some(&self.try_call_payloads.get(&ir_inst).unwrap()[..]);
678
679        self.vcode
680            .abi()
681            .gen_call_rets(self.vcode.sigs(), sig, &outputs, payloads, &mut self.vregs)
682    }
683
    /// Populate a `CallInfo` for a call with signature `sig`. Pure
    /// delegation to the ABI layer; `dest` is the generic call target
    /// (e.g. a name or register, depending on the call kind).
    pub fn gen_call_info<T>(
        &mut self,
        sig: Sig,
        dest: T,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> CallInfo<T> {
        self.vcode.abi().gen_call_info(
            self.vcode.sigs(),
            sig,
            dest,
            uses,
            defs,
            try_call_info,
            patchable,
        )
    }
704
    /// Has this instruction been sunk to a use-site (i.e., away from its
    /// original location)? Sunk instructions are skipped by the main
    /// lowering scan.
    fn is_inst_sunk(&self, inst: Inst) -> bool {
        self.inst_sunk.contains(&inst)
    }
710
    /// Is any result of this instruction needed, i.e., has at least one of
    /// its results already been used by lowered code? (Counts come from
    /// `value_lowered_uses`, maintained during the backward scan.)
    fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
        self.f
            .dfg
            .inst_results(inst)
            .iter()
            .any(|&result| self.value_lowered_uses[result] > 0)
    }
719
    /// Lower the non-branch instructions of one CLIF `block` into
    /// VCode, scanning the block in reverse, then register the block's
    /// parameters with the VCode. Branches are lowered separately by
    /// `lower_clif_branch()`.
    ///
    /// An instruction is lowered iff it has a lowering side effect or
    /// at least one of its results is demanded; anything else is dead
    /// and skipped. Returns `CodegenError::Unsupported` if the backend
    /// has no lowering rule for a required instruction.
    fn lower_clif_block<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        block: Block,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<()> {
        self.cur_scan_entry_color = Some(self.block_end_colors[block]);
        // Lowering loop:
        // - For each non-branch instruction, in reverse order:
        //   - If side-effecting (load, store, branch/call/return,
        //     possible trap), or if used outside of this block, or if
        //     demanded by another inst, then lower.
        //
        // That's it! Lowering of side-effecting ops will force all *needed*
        // (live) non-side-effecting ops to be lowered at the right places, via
        // the `use_input_reg()` callback on the `Lower` (that's us). That's
        // because `use_input_reg()` sets the eager/demand bit for any insts
        // whose result registers are used.
        //
        // We set the VCodeBuilder to "backward" mode, so we emit
        // blocks in reverse order wrt the BlockIndex sequence, and
        // emit instructions in reverse order within blocks.  Because
        // the machine backend calls `ctx.emit()` in forward order, we
        // collect per-IR-inst lowered instructions in `ir_insts`,
        // then reverse these and append to the VCode at the end of
        // each IR instruction.
        for inst in self.f.layout.block_insts(block).rev() {
            let data = &self.f.dfg.insts[inst];
            let has_side_effect = has_lowering_side_effect(self.f, inst);
            // If this inst has been sunk to another location (merged into
            // a use-site), skip it: it will not be codegen'd here.
            if self.is_inst_sunk(inst) {
                continue;
            }
            // Are any outputs used at least once?
            let value_needed = self.is_any_inst_result_needed(inst);
            trace!(
                "lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
                block,
                inst,
                data,
                data.opcode().is_branch(),
                has_side_effect,
                value_needed,
            );

            // Update scan state to color prior to this inst (as we are scanning
            // backward).
            self.cur_inst = Some(inst);
            if has_side_effect {
                let entry_color = *self
                    .side_effect_inst_entry_colors
                    .get(&inst)
                    .expect("every side-effecting inst should have a color-map entry");
                self.cur_scan_entry_color = Some(entry_color);
            }

            // Skip lowering branches; these are handled separately
            // (see `lower_clif_branches()` below).
            if self.f.dfg.insts[inst].opcode().is_branch() {
                continue;
            }

            // Value defined by "inst" becomes live after it in normal
            // order, and therefore **before** in reversed order.
            // Only emit value label aliases if the instruction will be lowered
            // (otherwise we want to keep using the earlier label instead).
            self.emit_value_label_live_range_start_for_inst(inst, has_side_effect || value_needed);

            // Normal instruction: codegen if the instruction is side-effecting
            // or any of its outputs is used.
            if has_side_effect || value_needed {
                trace!("lowering: inst {}: {}", inst, self.f.dfg.display_inst(inst));
                let temp_regs = match backend.lower(self, inst) {
                    Some(regs) => regs,
                    None => {
                        let ty = if self.num_outputs(inst) > 0 {
                            Some(self.output_ty(inst, 0))
                        } else {
                            None
                        };
                        return Err(CodegenError::Unsupported(format!(
                            "should be implemented in ISLE: inst = `{}`, type = `{:?}`",
                            self.f.dfg.display_inst(inst),
                            ty
                        )));
                    }
                };

                // The ISLE generated code emits its own registers to define the
                // instruction's lowered values in. However, other instructions
                // that use this SSA value will be lowered assuming that the value
                // is generated into a pre-assigned, different, register.
                //
                // To connect the two, we set up "aliases" in the VCodeBuilder
                // that apply when it is building the Operand table for the
                // regalloc to use. These aliases effectively rewrite any use of
                // the pre-assigned register to the register that was returned by
                // the ISLE lowering logic.
                let results = self.f.dfg.inst_results(inst);
                debug_assert_eq!(temp_regs.len(), results.len());
                for (regs, &result) in temp_regs.iter().zip(results) {
                    let dsts = self.value_regs[result];
                    let mut regs = regs.regs().iter();
                    for &dst in dsts.regs().iter() {
                        let temp = regs.next().copied().unwrap_or(Reg::invalid_sentinel());
                        trace!("set vreg alias: {result:?} = {dst:?}, lowering = {temp:?}");
                        self.vregs.set_vreg_alias(dst, temp);
                    }
                }
            }

            // `start..end` below brackets the VCode instructions emitted
            // for this one IR inst (builder is in backwards mode, so
            // these are indices into the backwards instruction buffer).
            let start = self.vcode.vcode.num_insts();
            let loc = self.srcloc(inst);
            self.finish_ir_inst(loc);

            // If the instruction had a user stack map, forward it from the CLIF
            // to the vcode.
            if let Some(entries) = self.f.dfg.user_stack_map_entries(inst) {
                let end = self.vcode.vcode.num_insts();
                debug_assert!(end > start);
                // Exactly one lowered instruction must be the safepoint
                // that carries the stack map.
                debug_assert_eq!(
                    (start..end)
                        .filter(|i| self.vcode.vcode[InsnIndex::new(*i)].is_safepoint())
                        .count(),
                    1
                );
                for i in start..end {
                    let iix = InsnIndex::new(i);
                    if self.vcode.vcode[iix].is_safepoint() {
                        trace!(
                            "Adding user stack map from clif\n\n\
                                 {inst:?} `{}`\n\n\
                             to vcode\n\n\
                                 {iix:?} `{}`",
                            self.f.dfg.display_inst(inst),
                            &self.vcode.vcode[iix].pretty_print_inst(&mut Default::default()),
                        );
                        self.vcode
                            .add_user_stack_map(BackwardsInsnIndex::new(iix.index()), entries);
                        break;
                    }
                }
            }

            // If the CLIF instruction had debug tags, copy them to
            // the VCode. Place on all VCode instructions lowered from
            // this CLIF instruction.
            let debug_tags = self.f.debug_tags.get(inst);
            if !debug_tags.is_empty() && self.vcode.vcode.num_insts() > 0 {
                let end = self.vcode.vcode.num_insts();
                for i in start..end {
                    let backwards_index = BackwardsInsnIndex::new(i);
                    log::trace!(
                        "debug tags on {inst}; associating {debug_tags:?} with {backwards_index:?}"
                    );
                    self.vcode.add_debug_tags(backwards_index, debug_tags);
                }
            }

            // Chaos mode: the control plane may ask us to insert a random
            // immediate-load into a fresh temp register here (its result
            // is never used), to perturb generated instruction sequences
            // during testing.
            if ctrl_plane.get_decision() {
                if ctrl_plane.get_decision() {
                    let imm: u64 = ctrl_plane.get_arbitrary();
                    let reg = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    I::gen_imm_u64(imm, reg).map(|inst| self.emit(inst));
                } else {
                    let imm: f64 = ctrl_plane.get_arbitrary();
                    let tmp = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    let reg = self.alloc_tmp(crate::ir::types::F64).regs()[0];
                    for inst in I::gen_imm_f64(imm, tmp, reg) {
                        self.emit(inst);
                    }
                }
            }
        }

        // Add the block params to this block.
        self.add_block_params(block)?;

        self.cur_scan_entry_color = None;
        Ok(())
    }
902
903    fn add_block_params(&mut self, block: Block) -> CodegenResult<()> {
904        for &param in self.f.dfg.block_params(block) {
905            for &reg in self.value_regs[param].regs() {
906                let vreg = reg.to_virtual_reg().unwrap();
907                self.vcode.add_block_param(vreg);
908            }
909        }
910        Ok(())
911    }
912
913    fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
914        if let Some(ref values_labels) = self.f.dfg.values_labels {
915            debug_assert!(self.f.dfg.value_is_real(val));
916            trace!(
917                "get_value_labels: val {} -> {:?}",
918                val,
919                values_labels.get(&val)
920            );
921            match values_labels.get(&val) {
922                Some(&ValueLabelAssignments::Starts(ref list)) => Some(&list[..]),
923                Some(&ValueLabelAssignments::Alias { value, .. }) if depth < 10 => {
924                    self.get_value_labels(value, depth + 1)
925                }
926                _ => None,
927            }
928        } else {
929            None
930        }
931    }
932
933    fn emit_value_label_marks_for_value(&mut self, val: Value, allow_alias: bool) {
934        let regs = self.value_regs[val];
935        if regs.len() > 1 {
936            return;
937        }
938        let reg = regs.only_reg().unwrap();
939
940        if let Some(label_starts) = self.get_value_labels(val, if allow_alias { 0 } else { !0 }) {
941            let labels = label_starts
942                .iter()
943                .map(|&ValueLabelStart { label, .. }| label)
944                .collect::<FxHashSet<_>>();
945            for label in labels {
946                trace!(
947                    "value labeling: defines val {:?} -> reg {:?} -> label {:?}",
948                    val, reg, label,
949                );
950                self.vcode.add_value_label(reg, label);
951            }
952        }
953    }
954
955    fn emit_value_label_live_range_start_for_inst(&mut self, inst: Inst, allow_alias: bool) {
956        if self.f.dfg.values_labels.is_none() {
957            return;
958        }
959
960        trace!(
961            "value labeling: srcloc {}: inst {}",
962            self.srcloc(inst),
963            inst
964        );
965        for &val in self.f.dfg.inst_results(inst) {
966            self.emit_value_label_marks_for_value(val, allow_alias);
967        }
968    }
969
970    fn emit_value_label_live_range_start_for_block_args(&mut self, block: Block) {
971        if self.f.dfg.values_labels.is_none() {
972            return;
973        }
974
975        trace!("value labeling: block {}", block);
976        for &arg in self.f.dfg.block_params(block) {
977            self.emit_value_label_marks_for_value(arg, true);
978        }
979        self.finish_ir_inst(Default::default());
980    }
981
982    fn finish_ir_inst(&mut self, loc: RelSourceLoc) {
983        // The VCodeBuilder builds in reverse order (and reverses at
984        // the end), but `ir_insts` is in forward order, so reverse
985        // it.
986        for inst in self.ir_insts.drain(..).rev() {
987            self.vcode.push(inst, loc);
988        }
989    }
990
991    fn finish_bb(&mut self) {
992        self.vcode.end_bb();
993    }
994
995    fn lower_clif_branch<B: LowerBackend<MInst = I>>(
996        &mut self,
997        backend: &B,
998        // Lowered block index:
999        bindex: BlockIndex,
1000        // Original CLIF block:
1001        block: Block,
1002        branch: Inst,
1003        targets: &[MachLabel],
1004    ) -> CodegenResult<()> {
1005        trace!(
1006            "lower_clif_branch: block {} branch {:?} targets {:?}",
1007            block, branch, targets,
1008        );
1009        // When considering code-motion opportunities, consider the current
1010        // program point to be this branch.
1011        self.cur_inst = Some(branch);
1012
1013        // Lower the branch in ISLE.
1014        backend
1015            .lower_branch(self, branch, targets)
1016            .unwrap_or_else(|| {
1017                panic!(
1018                    "should be implemented in ISLE: branch = `{}`",
1019                    self.f.dfg.display_inst(branch),
1020                )
1021            });
1022        let loc = self.srcloc(branch);
1023        self.finish_ir_inst(loc);
1024        // Add block param outputs for current block.
1025        self.lower_branch_blockparam_args(bindex);
1026        Ok(())
1027    }
1028
1029    fn lower_branch_blockparam_args(&mut self, block: BlockIndex) {
1030        let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];
1031
1032        // TODO: why not make `block_order` public?
1033        for succ_idx in 0..self.vcode.block_order().succ_indices(block).1.len() {
1034            branch_arg_vregs.clear();
1035            let (succ, args) = self.collect_block_call(block, succ_idx, &mut branch_arg_vregs);
1036            self.vcode.add_succ(succ, args);
1037        }
1038    }
1039
1040    fn collect_branch_and_targets(
1041        &self,
1042        bindex: BlockIndex,
1043        _bb: Block,
1044        targets: &mut SmallVec<[MachLabel; 2]>,
1045    ) -> Option<Inst> {
1046        targets.clear();
1047        let (opt_inst, succs) = self.vcode.block_order().succ_indices(bindex);
1048        targets.extend(succs.iter().map(|succ| MachLabel::from_block(*succ)));
1049        opt_inst
1050    }
1051
    /// Collect the outgoing block-call arguments for a given edge out
    /// of a lowered block.
    ///
    /// `block` is the *lowered* block index and `succ_idx` selects one
    /// of its successor edges. Argument registers are appended to
    /// `buffer` (the caller clears it beforehand) and a slice of them
    /// is returned together with the successor's lowered block index.
    fn collect_block_call<'a>(
        &mut self,
        block: BlockIndex,
        succ_idx: usize,
        buffer: &'a mut SmallVec<[Reg; 16]>,
    ) -> (BlockIndex, &'a [Reg]) {
        let block_order = self.vcode.block_order();
        let (_, succs) = block_order.succ_indices(block);
        let succ = succs[succ_idx];
        let this_lb = block_order.lowered_order()[block.index()];
        let succ_lb = block_order.lowered_order()[succ.index()];

        // Find the CLIF branch instruction that carries the arguments
        // for this edge, accounting for split critical edges on either
        // side. Note that `succ_idx` is rebound: it becomes the index
        // into that branch's destinations.
        let (branch_inst, succ_idx) = match (this_lb, succ_lb) {
            (_, LoweredBlock::CriticalEdge { .. }) => {
                // The successor is a split-critical-edge block. In this
                // case, this block-call has no arguments, and the
                // arguments go on the critical edge block's unconditional
                // branch instead.
                return (succ, &[]);
            }
            (LoweredBlock::CriticalEdge { pred, succ_idx, .. }, _) => {
                // This is a split-critical-edge block. In this case, our
                // block-call has the arguments that in the CLIF appear in
                // the predecessor's branch to this edge.
                let branch_inst = self.f.layout.last_inst(pred).unwrap();
                (branch_inst, succ_idx as usize)
            }

            (this, _) => {
                let block = this.orig_block().unwrap();
                // Ordinary block, with an ordinary block as
                // successor. Take the arguments from the branch.
                let branch_inst = self.f.layout.last_inst(block).unwrap();
                (branch_inst, succ_idx)
            }
        };

        // Translate each CLIF block-call argument into the machine
        // register(s) that carry it.
        let block_call = self.f.dfg.insts[branch_inst]
            .branch_destination(&self.f.dfg.jump_tables, &self.f.dfg.exception_tables)[succ_idx];
        for arg in block_call.args(&self.f.dfg.value_lists) {
            match arg {
                BlockArg::Value(arg) => {
                    debug_assert!(self.f.dfg.value_is_real(arg));
                    let regs = self.put_value_in_regs(arg);
                    buffer.extend_from_slice(regs.regs());
                }
                BlockArg::TryCallRet(i) => {
                    // `i`th return value of a `try_call`: the registers
                    // were recorded in `try_call_rets` during lowering.
                    let regs = self.try_call_rets.get(&branch_inst).unwrap()[i as usize]
                        .map(|r| r.to_reg());
                    buffer.extend_from_slice(regs.regs());
                }
                BlockArg::TryCallExn(i) => {
                    // `i`th exception payload of a `try_call`, recorded
                    // in `try_call_payloads`.
                    let reg =
                        self.try_call_payloads.get(&branch_inst).unwrap()[i as usize].to_reg();
                    buffer.push(reg);
                }
            }
        }
        (succ, &buffer[..])
    }
1114
    /// Lower the function.
    ///
    /// Drives the whole lowering pipeline: walks the lowered block
    /// order in reverse (matching the backwards `VCodeBuilder` mode),
    /// lowering each block's terminating branch and then its body via
    /// `backend`, inserts the argument-setup sequence into the entry
    /// block, and finally builds and returns the `VCode`.
    pub fn lower<B: LowerBackend<MInst = I>>(
        mut self,
        backend: &B,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<VCode<I>> {
        trace!("about to lower function: {:?}", self.f);

        self.vcode.init_retval_area(&mut self.vregs)?;

        // Get the pinned reg here (we only parameterize this function on `B`,
        // not the whole `Lower` impl).
        self.pinned_reg = backend.maybe_pinned_reg();

        self.vcode.set_entry(BlockIndex::new(0));

        // Reused vectors for branch lowering.
        let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();

        // get a copy of the lowered order; we hold this separately because we
        // need a mut ref to the vcode to mutate it below.
        let lowered_order: SmallVec<[LoweredBlock; 64]> = self
            .vcode
            .block_order()
            .lowered_order()
            .iter()
            .cloned()
            .collect();

        // Main lowering loop over lowered blocks.
        for (bindex, lb) in lowered_order.iter().enumerate().rev() {
            let bindex = BlockIndex::new(bindex);

            // Lower the block body in reverse order (see comment in
            // `lower_clif_block()` for rationale).

            // End branch. (Lowered first because we emit backwards:
            // within a block, the terminator is lowered before the body.)
            if let Some(bb) = lb.orig_block() {
                if let Some(branch) = self.collect_branch_and_targets(bindex, bb, &mut targets) {
                    let branch_start = self.vcode.vcode.num_insts();
                    self.lower_clif_branch(backend, bindex, bb, branch, &targets)?;
                    self.finish_ir_inst(self.srcloc(branch));

                    // Branch instructions like try_call can also be safepoints
                    // that need stack maps. Forward the stack map from the CLIF
                    // branch to the VCode safepoint, just like we do for
                    // non-branch instructions in `lower_clif_block`.
                    if let Some(entries) = self.f.dfg.user_stack_map_entries(branch) {
                        let branch_end = self.vcode.vcode.num_insts();
                        for i in branch_start..branch_end {
                            let iix = InsnIndex::new(i);
                            if self.vcode.vcode[iix].is_safepoint() {
                                self.vcode.add_user_stack_map(
                                    BackwardsInsnIndex::new(iix.index()),
                                    entries,
                                );
                                break;
                            }
                        }
                    }
                }
            } else {
                // If no orig block, this must be a pure edge block;
                // get the successor and emit a jump. This block has
                // no block params; and this jump's block-call args
                // will be filled in by
                // `lower_branch_blockparam_args`.
                let succ = self.vcode.block_order().succ_indices(bindex).1[0];
                self.emit(I::gen_jump(MachLabel::from_block(succ)));
                self.finish_ir_inst(Default::default());
                self.lower_branch_blockparam_args(bindex);
            }

            // Original block body.
            if let Some(bb) = lb.orig_block() {
                self.lower_clif_block(backend, bb, ctrl_plane)?;
                self.emit_value_label_live_range_start_for_block_args(bb);
            }

            if bindex.index() == 0 {
                // Set up the function with arg vreg inits.
                self.gen_arg_setup();
                self.finish_ir_inst(Default::default());
            }

            self.finish_bb();

            // Check for any deferred vreg-temp allocation errors, and
            // bubble one up at this time if it exists.
            if let Some(e) = self.vregs.take_deferred_error() {
                return Err(e);
            }
        }

        // Now that we've emitted all instructions into the
        // VCodeBuilder, let's build the VCode.
        trace!(
            "built vcode:\n{:?}Backwards {:?}",
            &self.vregs, &self.vcode.vcode
        );
        let vcode = self.vcode.build(self.vregs);

        Ok(vcode)
    }
1219
1220    pub fn value_is_unused(&self, val: Value) -> bool {
1221        match self.value_ir_uses[val] {
1222            ValueUseState::Unused => true,
1223            _ => false,
1224        }
1225    }
1226
1227    pub fn block_successor_label(&self, block: Block, succ: usize) -> MachLabel {
1228        trace!("block_successor_label: block {block} succ {succ}");
1229        let lowered = self
1230            .vcode
1231            .block_order()
1232            .lowered_index_for_block(block)
1233            .expect("Unreachable block");
1234        trace!(" -> lowered block {lowered:?}");
1235        let (_, succs) = self.vcode.block_order().succ_indices(lowered);
1236        trace!(" -> succs {succs:?}");
1237        let succ_block = *succs.get(succ).expect("Successor index out of range");
1238        MachLabel::from_block(succ_block)
1239    }
1240}
1241
1242/// Pre-analysis: compute `value_ir_uses`. See comment on
1243/// `ValueUseState` for a description of what this analysis
1244/// computes.
fn compute_use_states(
    f: &Function,
    sret_param: Option<Value>,
) -> SecondaryMap<Value, ValueUseState> {
    // We perform the analysis without recursion, so we don't
    // overflow the stack on long chains of ops in the input.
    //
    // This is sort of a hybrid of a "shallow use-count" pass and
    // a DFS. We iterate over all instructions and mark their args
    // as used. However when we increment a use-count to
    // "Multiple" we push its args onto the stack and do a DFS,
    // immediately marking the whole dependency tree as
    // Multiple. Doing both (shallow use-counting over all insts,
    // and deep Multiple propagation) lets us trim both
    // traversals, stopping recursion when a node is already at
    // the appropriate state.
    //
    // In particular, note that the *coarsening* into {Unused,
    // Once, Multiple} is part of what makes this pass more
    // efficient than a full indirect-use-counting pass.

    let mut value_ir_uses = SecondaryMap::with_default(ValueUseState::Unused);

    if let Some(sret_param) = sret_param {
        // There's an implicit use of the struct-return parameter in each
        // copy of the function epilogue, which we count here.
        value_ir_uses[sret_param] = ValueUseState::Multiple;
    }

    // Stack of iterators over Values as we do DFS to mark
    // Multiple-state subtrees. The iterator type is whatever is
    // returned by `uses` below.
    let mut stack: SmallVec<[_; 16]> = smallvec![];

    // Find the args for the inst corresponding to the given value.
    //
    // Note that "root" instructions are skipped here. This means that multiple
    // uses of any result of a multi-result instruction are not considered
    // multiple uses of the operands of a multi-result instruction. This
    // requires tight coupling with `get_value_as_source_or_const` above which
    // is the consumer of the map that this function is producing.
    let uses = |value| {
        trace!(" -> pushing args for {} onto stack", value);
        if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
            if is_value_use_root(f, src_inst) {
                None
            } else {
                Some(f.dfg.inst_values(src_inst))
            }
        } else {
            None
        }
    };

    // Do a DFS through `value_ir_uses` to mark a subtree as
    // Multiple.
    for inst in f
        .layout
        .blocks()
        .flat_map(|block| f.layout.block_insts(block))
    {
        // Iterate over all values used by all instructions, noting an
        // additional use on each operand.
        for arg in f.dfg.inst_values(inst) {
            debug_assert!(f.dfg.value_is_real(arg));
            let old = value_ir_uses[arg];
            value_ir_uses[arg].inc();
            let new = value_ir_uses[arg];
            trace!("arg {} used, old state {:?}, new {:?}", arg, old, new);

            // On transition to Multiple, do DFS. (Only when this
            // increment was exactly the edge *into* `Multiple`: i.e.,
            // the old state was not `Multiple` and the new one is;
            // anything else was either already propagated or is still
            // at most `Once`.)
            if old == ValueUseState::Multiple || new != ValueUseState::Multiple {
                continue;
            }
            if let Some(iter) = uses(arg) {
                stack.push(iter);
            }
            while let Some(iter) = stack.last_mut() {
                if let Some(value) = iter.next() {
                    debug_assert!(f.dfg.value_is_real(value));
                    trace!(" -> DFS reaches {}", value);
                    if value_ir_uses[value] == ValueUseState::Multiple {
                        // Truncate DFS here: no need to go further,
                        // as whole subtree must already be Multiple.
                        // With debug asserts, check one level of
                        // that invariant at least.
                        debug_assert!(uses(value).into_iter().flatten().all(|arg| {
                            debug_assert!(f.dfg.value_is_real(arg));
                            value_ir_uses[arg] == ValueUseState::Multiple
                        }));
                        continue;
                    }
                    value_ir_uses[value] = ValueUseState::Multiple;
                    trace!(" -> became Multiple");
                    if let Some(iter) = uses(value) {
                        stack.push(iter);
                    }
                } else {
                    // Empty iterator, discard.
                    stack.pop();
                }
            }
        }
    }

    value_ir_uses
}
1352
1353/// Definition of a "root" instruction for the calculation of `ValueUseState`.
1354///
1355/// This function calculates whether `inst` is considered a "root" for value-use
1356/// information. This concept is used to forcibly prevent looking-through the
1357/// instruction during `get_value_as_source_or_const` as it additionally
1358/// prevents propagating `Multiple`-used results of the `inst` here to the
1359/// operands of the instruction.
1360///
1361/// Currently this is defined as multi-result instructions. That means that
1362/// lowerings are never allowed to look through a multi-result instruction to
1363/// generate patterns. Note that this isn't possible in ISLE today anyway so
1364/// this isn't currently much of a loss.
1365///
1366/// The main purpose of this function is to prevent the operands of a
1367/// multi-result instruction from being forcibly considered `Multiple`-used
1368/// regardless of circumstances.
1369fn is_value_use_root(f: &Function, inst: Inst) -> bool {
1370    f.dfg.inst_results(inst).len() > 1
1371}
1372
1373/// Function-level queries.
1374impl<'func, I: VCodeInst> Lower<'func, I> {
1375    pub fn dfg(&self) -> &DataFlowGraph {
1376        &self.f.dfg
1377    }
1378
1379    /// Get the `Callee`.
1380    pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
1381        self.vcode.abi()
1382    }
1383
1384    /// Get the `Callee`.
1385    pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
1386        self.vcode.abi_mut()
1387    }
1388}
1389
1390/// Instruction input/output queries.
1391impl<'func, I: VCodeInst> Lower<'func, I> {
1392    /// Get the instdata for a given IR instruction.
1393    pub fn data(&self, ir_inst: Inst) -> &InstructionData {
1394        &self.f.dfg.insts[ir_inst]
1395    }
1396
1397    /// Likewise, but starting with a GlobalValue identifier.
1398    pub fn symbol_value_data<'b>(
1399        &'b self,
1400        global_value: GlobalValue,
1401    ) -> Option<(&'b ExternalName, RelocDistance, i64)> {
1402        let gvdata = &self.f.global_values[global_value];
1403        match gvdata {
1404            &GlobalValueData::Symbol {
1405                ref name,
1406                ref offset,
1407                colocated,
1408                ..
1409            } => {
1410                let offset = offset.bits();
1411                let dist = if colocated {
1412                    RelocDistance::Near
1413                } else {
1414                    RelocDistance::Far
1415                };
1416                Some((name, dist, offset))
1417            }
1418            _ => None,
1419        }
1420    }
1421
1422    /// Returns the memory flags of a given memory access.
1423    pub fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
1424        match &self.f.dfg.insts[ir_inst] {
1425            &InstructionData::AtomicCas { flags, .. } => Some(flags),
1426            &InstructionData::AtomicRmw { flags, .. } => Some(flags),
1427            &InstructionData::Load { flags, .. }
1428            | &InstructionData::LoadNoOffset { flags, .. }
1429            | &InstructionData::Store { flags, .. } => Some(flags),
1430            &InstructionData::StoreNoOffset { flags, .. } => Some(flags),
1431            _ => None,
1432        }
1433    }
1434
1435    /// Get the source location for a given instruction.
1436    pub fn srcloc(&self, ir_inst: Inst) -> RelSourceLoc {
1437        self.f.rel_srclocs()[ir_inst]
1438    }
1439
1440    /// Get the number of inputs to the given IR instruction. This is a count only of the Value
1441    /// arguments to the instruction: block arguments will not be included in this count.
1442    pub fn num_inputs(&self, ir_inst: Inst) -> usize {
1443        self.f.dfg.inst_args(ir_inst).len()
1444    }
1445
1446    /// Get the number of outputs to the given IR instruction.
1447    pub fn num_outputs(&self, ir_inst: Inst) -> usize {
1448        self.f.dfg.inst_results(ir_inst).len()
1449    }
1450
1451    /// Get the type for an instruction's input.
1452    pub fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
1453        self.value_ty(self.input_as_value(ir_inst, idx))
1454    }
1455
1456    /// Get the type for a value.
1457    pub fn value_ty(&self, val: Value) -> Type {
1458        self.f.dfg.value_type(val)
1459    }
1460
1461    /// Get the type for an instruction's output.
1462    pub fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
1463        self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
1464    }
1465
1466    /// Get the value of a constant instruction (`iconst`, etc.) as a 64-bit
1467    /// value, if possible.
1468    pub fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
1469        self.inst_constants.get(&ir_inst).map(|&c| {
1470            // The upper bits must be zero, enforced during legalization and by
1471            // the CLIF verifier.
1472            debug_assert_eq!(c, {
1473                let input_size = self.output_ty(ir_inst, 0).bits() as u64;
1474                let shift = 64 - input_size;
1475                (c << shift) >> shift
1476            });
1477            c
1478        })
1479    }
1480
1481    /// Get the input as one of two options other than a direct register:
1482    ///
1483    /// - An instruction, given that it is effect-free or able to sink its
1484    ///   effect to the current instruction being lowered, and given it has only
1485    ///   one output, and if effect-ful, given that this is the only use;
1486    /// - A constant, if the value is a constant.
1487    ///
1488    /// The instruction input may be available in either of these forms.  It may
1489    /// be available in neither form, if the conditions are not met; if so, use
1490    /// `put_input_in_regs()` instead to get it in a register.
1491    ///
1492    /// If the backend merges the effect of a side-effecting instruction, it
1493    /// must call `sink_inst()`. When this is called, it indicates that the
1494    /// effect has been sunk to the current scan location. The sunk
1495    /// instruction's result(s) must have *no* uses remaining, because it will
1496    /// not be codegen'd (it has been integrated into the current instruction).
1497    pub fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
1498        let val = self.f.dfg.inst_args(ir_inst)[idx];
1499        debug_assert!(self.f.dfg.value_is_real(val));
1500        val
1501    }
1502
1503    /// Resolves a particular input of an instruction to the `Value` that it is
1504    /// represented with.
1505    ///
1506    /// For more information see [`Lower::get_value_as_source_or_const`].
1507    pub fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
1508        let val = self.input_as_value(ir_inst, idx);
1509        self.get_value_as_source_or_const(val)
1510    }
1511
    /// Resolves a `Value` definition to the source instruction it came from
    /// plus whether it's a unique-use of that instruction.
    ///
    /// This function is the workhorse of pattern-matching in ISLE which enables
    /// combining multiple instructions together. This is used implicitly in
    /// patterns such as `(iadd x (iconst y))` where this function is used to
    /// extract the `(iconst y)` operand.
    ///
    /// At its core this function is a wrapper around
    /// [`DataFlowGraph::value_def`]. This function applies a filter on top of
    /// that, however, to determine when it is actually safe to "look through"
    /// the `val` definition here and view the underlying instruction. This
    /// protects against duplicating side effects, such as loads, for example.
    ///
    /// Internally this uses the data computed from `compute_use_states` along
    /// with other instruction properties to know what to return.
    pub fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
        trace!(
            "get_input_for_val: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
            val, self.cur_inst, self.cur_scan_entry_color,
        );
        let inst = match self.f.dfg.value_def(val) {
            // OK to merge source instruction if we have a source
            // instruction, and one of these two conditions hold:
            //
            // - It has no side-effects and this instruction is not a "value-use
            //   root" instruction. Instructions which are considered "roots"
            //   for value-use calculations do not have accurate information
            //   known about the `ValueUseState` of their operands. This is
            //   currently done for multi-result instructions to prevent a use
            //   of each result from forcing all operands of the multi-result
            //   instruction to also be `Multiple`. This in turn means that the
            //   `ValueUseState` for operands of a "root" instruction to be a
            //   lie if pattern matching were to look through the multi-result
            //   instruction. As a result the "look through this instruction"
            //   logic only succeeds if it's not a root instruction.
            //
            // - It has a side-effect, has one output value, that one
            //   output has only one use, directly or indirectly (so
            //   cannot be duplicated -- see comment on
            //   `ValueUseState`), and the instruction's color is *one
            //   less than* the current scan color.
            //
            //   This latter set of conditions is testing whether a
            //   side-effecting instruction can sink to the current scan
            //   location; this is possible if the in-color of this inst is
            //   equal to the out-color of the producing inst, so no other
            //   side-effecting ops occur between them (which will only be true
            //   if they are in the same BB, because color increments at each BB
            //   start).
            //
            //   If it is actually sunk, then in `merge_inst()`, we update the
            //   scan color so that as we scan over the range past which the
            //   instruction was sunk, we allow other instructions (that came
            //   prior to the sunk instruction) to sink.
            ValueDef::Result(src_inst, result_idx) => {
                let src_side_effect = has_lowering_side_effect(self.f, src_inst);
                trace!(" -> src inst {}", self.f.dfg.display_inst(src_inst));
                trace!(" -> has lowering side effect: {}", src_side_effect);
                if is_value_use_root(self.f, src_inst) {
                    // If this instruction is a "root instruction" then it's
                    // required that we can't look through it to see the
                    // definition. This means that the `ValueUseState` for the
                    // operands of this result assume that this instruction is
                    // generated exactly once which might get violated were we
                    // to allow looking through it.
                    trace!(" -> is a root instruction");
                    InputSourceInst::None
                } else if !src_side_effect {
                    // Otherwise if this instruction has no side effects and the
                    // value is used only once then we can look through it with
                    // a "unique" tag. A non-unique `Use` can be shown for other
                    // values ensuring consumers know how it's computed but that
                    // it's not available to omit.
                    if self.value_ir_uses[val] == ValueUseState::Once {
                        InputSourceInst::UniqueUse(src_inst, result_idx)
                    } else {
                        InputSourceInst::Use(src_inst, result_idx)
                    }
                } else {
                    // Side-effect: test whether this is the only use of the
                    // only result of the instruction, and whether colors allow
                    // the code-motion.
                    trace!(
                        " -> side-effecting op {} for val {}: use state {:?}",
                        src_inst, val, self.value_ir_uses[val]
                    );
                    if self.cur_scan_entry_color.is_some()
                        && self.value_ir_uses[val] == ValueUseState::Once
                        && self.num_outputs(src_inst) == 1
                        && self
                            .side_effect_inst_entry_colors
                            .get(&src_inst)
                            .unwrap()
                            .get()
                            + 1
                            == self.cur_scan_entry_color.unwrap().get()
                    {
                        InputSourceInst::UniqueUse(src_inst, 0)
                    } else {
                        InputSourceInst::None
                    }
                }
            }
            // `val` is not the result of an instruction (e.g. a block
            // parameter): there is no producing instruction to look through.
            _ => InputSourceInst::None,
        };
        // If the producing instruction is visible, additionally surface its
        // constant value (if any) so patterns can match immediates directly.
        let constant = inst.as_inst().and_then(|(inst, _)| self.get_constant(inst));

        NonRegInput { inst, constant }
    }
1622
1623    /// Increment the reference count for the Value, ensuring that it gets lowered.
1624    pub fn increment_lowered_uses(&mut self, val: Value) {
1625        self.value_lowered_uses[val] += 1
1626    }
1627
1628    /// Put the `idx`th input into register(s) and return the assigned register.
1629    pub fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
1630        let val = self.f.dfg.inst_args(ir_inst)[idx];
1631        self.put_value_in_regs(val)
1632    }
1633
1634    /// Put the given value into register(s) and return the assigned register.
1635    pub fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
1636        debug_assert!(self.f.dfg.value_is_real(val));
1637        trace!("put_value_in_regs: val {}", val);
1638
1639        if let Some(inst) = self.f.dfg.value_def(val).inst() {
1640            assert!(!self.inst_sunk.contains(&inst));
1641        }
1642
1643        let regs = self.value_regs[val];
1644        trace!(" -> regs {:?}", regs);
1645        assert!(regs.is_valid());
1646
1647        self.value_lowered_uses[val] += 1;
1648
1649        regs
1650    }
1651}
1652
1653/// Codegen primitives: allocate temps, emit instructions, set result registers,
1654/// ask for an input to be gen'd into a register.
1655impl<'func, I: VCodeInst> Lower<'func, I> {
1656    /// Get a new temp.
1657    pub fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
1658        writable_value_regs(self.vregs.alloc_with_deferred_error(ty))
1659    }
1660
1661    /// Emit a machine instruction.
1662    pub fn emit(&mut self, mach_inst: I) {
1663        trace!("emit: {:?}", mach_inst);
1664        self.ir_insts.push(mach_inst);
1665    }
1666
1667    /// Indicate that the side-effect of an instruction has been sunk to the
1668    /// current scan location. This should only be done with the instruction's
1669    /// original results are not used (i.e., `put_input_in_regs` is not invoked
1670    /// for the input produced by the sunk instruction), otherwise the
1671    /// side-effect will occur twice.
1672    pub fn sink_inst(&mut self, ir_inst: Inst) {
1673        assert!(has_lowering_side_effect(self.f, ir_inst));
1674        assert!(self.cur_scan_entry_color.is_some());
1675
1676        for result in self.dfg().inst_results(ir_inst) {
1677            assert!(self.value_lowered_uses[*result] == 0);
1678        }
1679
1680        let sunk_inst_entry_color = self
1681            .side_effect_inst_entry_colors
1682            .get(&ir_inst)
1683            .cloned()
1684            .unwrap();
1685        let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
1686        assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
1687        self.cur_scan_entry_color = Some(sunk_inst_entry_color);
1688        self.inst_sunk.insert(ir_inst);
1689    }
1690
1691    /// Retrieve immediate data given a handle.
1692    pub fn get_immediate_data(&self, imm: Immediate) -> &ConstantData {
1693        self.f.dfg.immediates.get(imm).unwrap()
1694    }
1695
1696    /// Retrieve constant data given a handle.
1697    pub fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
1698        self.f.dfg.constants.get(constant_handle)
1699    }
1700
1701    /// Indicate that a constant should be emitted.
1702    pub fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
1703        self.vcode.constants().insert(constant)
1704    }
1705}
1706
#[cfg(test)]
mod tests {
    use super::ValueUseState;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::types;
    use crate::ir::{Function, InstBuilder};

    /// Build the function shape shared by these tests:
    ///
    /// ```text
    /// v1 = iconst.i64 0
    /// v2 = iconst.i64 1
    /// v3 = iconcat v1, v2
    /// v4, v5 = isplit v3
    /// return <rets>
    /// ```
    ///
    /// Returns the function together with `[v1, v2, v3, v4, v5]`. When
    /// `duplicate_return` is set the terminator is `return v4, v4`; otherwise
    /// it is `return v4, v5`.
    fn build_concat_split(duplicate_return: bool) -> (Function, [crate::ir::Value; 5]) {
        let mut func = Function::new();
        let values;
        {
            let block0 = func.dfg.make_block();
            let mut pos = FuncCursor::new(&mut func);
            pos.insert_block(block0);
            let v1 = pos.ins().iconst(types::I64, 0);
            let v2 = pos.ins().iconst(types::I64, 1);
            let v3 = pos.ins().iconcat(v1, v2);
            let (v4, v5) = pos.ins().isplit(v3);
            if duplicate_return {
                pos.ins().return_(&[v4, v4]);
            } else {
                pos.ins().return_(&[v4, v5]);
            }
            values = [v1, v2, v3, v4, v5];
        }
        (func, values)
    }

    #[test]
    fn multi_result_use_once() {
        // Each value flows into exactly one use, so everything is `Once` —
        // including the operands of the multi-result `isplit`.
        let (func, [v1, v2, v3, v4, v5]) = build_concat_split(false);
        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Once);
        assert_eq!(uses[v5], ValueUseState::Once);
    }

    #[test]
    fn results_used_twice_but_not_operands() {
        // `v4` is returned twice and `v5` not at all, but because the
        // multi-result `isplit` is a value-use root, its operands (and theirs)
        // stay `Once` rather than being forced to `Multiple`.
        let (func, [v1, v2, v3, v4, v5]) = build_concat_split(true);
        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Multiple);
        assert_eq!(uses[v5], ValueUseState::Unused);
    }
}
1755}