// cranelift_codegen/machinst/lower.rs

//! This module implements lowering (instruction selection) from Cranelift IR
//! to machine instructions with virtual registers. This is *almost* the final
//! machine code, except for register allocation.

// TODO: separate the IR-query core of `Lower` from the lowering logic built on
// top of it, e.g. the side-effect/coloring analysis and the scan support.

use crate::entity::SecondaryMap;
use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
use crate::ir::pcc::{Fact, FactContext, PccError, PccResult};
use crate::ir::{
    ArgumentPurpose, Block, BlockArg, Constant, ConstantData, DataFlowGraph, ExternalName,
    Function, GlobalValue, GlobalValueData, Immediate, Inst, InstructionData, MemFlags,
    RelSourceLoc, Type, Value, ValueDef, ValueLabelAssignments, ValueLabelStart,
};
use crate::machinst::valueregs::InvalidSentinel;
use crate::machinst::{
    writable_value_regs, ABIMachineSpec, BackwardsInsnIndex, BlockIndex, BlockLoweringOrder,
    Callee, InsnIndex, LoweredBlock, MachLabel, Reg, SigSet, VCode, VCodeBuilder, VCodeConstant,
    VCodeConstantData, VCodeConstants, VCodeInst, ValueRegs, Writable,
};
use crate::settings::Flags;
use crate::{trace, CodegenError, CodegenResult};
use alloc::vec::Vec;
use cranelift_control::ControlPlane;
use rustc_hash::{FxHashMap, FxHashSet};
use smallvec::{smallvec, SmallVec};
use std::fmt::Debug;

use super::{VCodeBuildDirection, VRegAllocator};

/// A vector of ValueRegs, used to represent the outputs of an instruction.
pub type InstOutput = SmallVec<[ValueRegs<Reg>; 2]>;

/// An "instruction color" partitions CLIF instructions by side-effecting ops.
/// All instructions with the same "color" are guaranteed not to be separated by
/// any side-effecting op (for this purpose, loads are also considered
/// side-effecting, to avoid subtle questions w.r.t. the memory model), and
/// furthermore, it is guaranteed that for any two instructions A and B such
/// that color(A) == color(B), either A dominates B and B postdominates A, or
/// vice-versa. (For now, in practice, only ops in the same basic block can ever
/// have the same color, trivially providing the second condition.) Intuitively,
/// this means that the ops of the same color must always execute "together", as
/// part of one atomic contiguous section of the dynamic execution trace, and
/// they can be freely permuted (modulo true dataflow dependencies) without
/// affecting program behavior.
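///
/// A minimal illustration (hypothetical CLIF-like sequence; `k` is an
/// arbitrary color value):
///
/// ```text
/// v1 = iadd v0, v0   ;; pure: color k
/// v2 = load.i64 v1   ;; side-effecting: entry color k, exit color k+1
/// v3 = iadd v2, v2   ;; pure: color k+1
/// ```
///
/// The `iadd` defining `v1` shares color `k` with the load's entry point, so
/// it may be sunk to the load's location; the `iadd` defining `v3` has color
/// `k+1` and must stay after the load.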
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct InstColor(u32);
impl InstColor {
    fn new(n: u32) -> InstColor {
        InstColor(n)
    }

    /// Get an arbitrary index representing this color. The index is unique
    /// *within a single function compilation*, but indices may be reused across
    /// functions.
    pub fn get(self) -> u32 {
        self.0
    }
}

/// A representation of all of the ways in which a value is available, aside
/// from as a direct register.
///
/// - An instruction, if it would be allowed to occur at the current location
///   instead (see [Lower::get_input_as_source_or_const()] for more details).
///
/// - A constant, if the value is known to be a constant.
#[derive(Clone, Copy, Debug)]
pub struct NonRegInput {
    /// An instruction produces this value (as the given output), and its
    /// computation (and side-effect if applicable) could occur at the
    /// current instruction's location instead.
    ///
    /// If this instruction's operation is merged into the current instruction,
    /// the backend must call [Lower::sink_inst()].
    ///
    /// This enum indicates whether this use of the source instruction
    /// is unique or not.
    pub inst: InputSourceInst,
    /// The value is a known constant.
    pub constant: Option<u64>,
}

/// When examining an input to an instruction, this enum provides one
/// of several options: there is or isn't a single instruction (that
/// we can see and merge with) that produces that input's value, and
/// we are or aren't the single user of that instruction.
#[derive(Clone, Copy, Debug)]
pub enum InputSourceInst {
    /// The input in question is the single, unique use of the given
    /// instruction and output index, and it can be sunk to the
    /// location of this input.
    UniqueUse(Inst, usize),
    /// The input in question is one of multiple uses of the given
    /// instruction. It can still be sunk to the location of this
    /// input.
    Use(Inst, usize),
    /// We cannot determine which instruction produced the input, or
    /// it is one of several instructions (e.g., due to a control-flow
    /// merge and blockparam), or the source instruction cannot be
    /// allowed to sink to the current location due to side-effects.
    None,
}

impl InputSourceInst {
    /// Get the instruction and output index for this source, whether
    /// we are its single or one of many users.
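    ///
    /// A minimal usage sketch (hypothetical backend code; `input` is assumed
    /// to be a `NonRegInput` obtained from the lowering context):
    ///
    /// ```ignore
    /// if let Some((src_inst, output_idx)) = input.inst.as_inst() {
    ///     // Output `output_idx` of `src_inst` produces this value; we may
    ///     // try to merge that operation into the current lowering.
    /// }
    /// ```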
    pub fn as_inst(&self) -> Option<(Inst, usize)> {
        match self {
            &InputSourceInst::UniqueUse(inst, output_idx)
            | &InputSourceInst::Use(inst, output_idx) => Some((inst, output_idx)),
            &InputSourceInst::None => None,
        }
    }
}

/// A machine backend.
pub trait LowerBackend {
    /// The machine instruction type.
    type MInst: VCodeInst;

    /// Lower a single instruction.
    ///
    /// For a branch, this function should not generate the actual branch
    /// instruction. However, it must force any values it needs for the branch
    /// edge (block-param actuals) into registers, because the actual branch
    /// generation (`lower_branch()`) happens *after* any possible merged
    /// out-edge.
    ///
    /// Returns `None` if no lowering for the instruction was found.
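    ///
    /// A minimal sketch of the expected shape (hypothetical backend; the
    /// machine instructions emitted in the body are elided):
    ///
    /// ```ignore
    /// fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> Option<InstOutput> {
    ///     let dst = ctx.alloc_tmp(ctx.output_ty(inst, 0));
    ///     // ... ctx.emit(..) machine instructions that define `dst` ...
    ///     Some(smallvec![dst.map(|r| r.to_reg())])
    /// }
    /// ```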
    fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> Option<InstOutput>;

    /// Lower a block-terminating group of branches (which together can be seen
    /// as one N-way branch), given a vcode MachLabel for each target.
    ///
    /// Returns `None` if no lowering for the branch was found.
    fn lower_branch(
        &self,
        ctx: &mut Lower<Self::MInst>,
        inst: Inst,
        targets: &[MachLabel],
    ) -> Option<()>;

    /// A bit of a hack: give a fixed register that always holds the result of a
    /// `get_pinned_reg` instruction, if known. This allows elision of moves
    /// into the associated vreg, instead using the real reg directly.
    fn maybe_pinned_reg(&self) -> Option<Reg> {
        None
    }

    /// The type of state carried between `check_fact` invocations.
    type FactFlowState: Default + Clone + Debug;

    /// Check any facts about an instruction, given VCode with facts
    /// on VRegs. Takes mutable `VCode` so that it can propagate some
    /// kinds of facts automatically.
    fn check_fact(
        &self,
        _ctx: &FactContext<'_>,
        _vcode: &mut VCode<Self::MInst>,
        _inst: InsnIndex,
        _state: &mut Self::FactFlowState,
    ) -> PccResult<()> {
        Err(PccError::UnimplementedBackend)
    }
}

/// Machine-independent lowering driver / machine-instruction container. Maintains a correspondence
/// from original Inst to MachInsts.
pub struct Lower<'func, I: VCodeInst> {
    /// The function to lower.
    pub(crate) f: &'func Function,

    /// Lowered machine instructions.
    vcode: VCodeBuilder<I>,

    /// VReg allocation context, given to the vcode field at build time to finalize the vcode.
    vregs: VRegAllocator<I>,

    /// Mapping from `Value` (SSA value in IR) to virtual register.
    value_regs: SecondaryMap<Value, ValueRegs<Reg>>,

    /// sret registers, if needed.
    sret_reg: Option<ValueRegs<Reg>>,

    /// Instruction colors at block exits. From this map, we can recover all
    /// instruction colors by scanning backward from the block end and
    /// decrementing on any color-changing (side-effecting) instruction.
    block_end_colors: SecondaryMap<Block, InstColor>,

    /// Instruction colors at side-effecting ops. This is the *entry* color,
    /// i.e., the version of global state that exists before an instruction
    /// executes. For each side-effecting instruction, the *exit* color is its
    /// entry color plus one.
    side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,

    /// Current color as we scan during lowering. While we are lowering an
    /// instruction, this is equal to the color *at entry to* the instruction.
    cur_scan_entry_color: Option<InstColor>,

    /// Current instruction as we scan during lowering.
    cur_inst: Option<Inst>,

    /// Instruction constant values, if known.
    inst_constants: FxHashMap<Inst, u64>,

    /// Use-counts per SSA value, as counted in the input IR. These
    /// are "coarsened", in the abstract-interpretation sense: we only
    /// care about "0, 1, many" states, as this is all we need and
    /// this lets us do an efficient fixpoint analysis.
    ///
    /// See doc comment on `ValueUseState` for more details.
    value_ir_uses: SecondaryMap<Value, ValueUseState>,

    /// Actual uses of each SSA value so far, incremented while lowering.
    value_lowered_uses: SecondaryMap<Value, u32>,

    /// Effectful instructions that have been sunk; they are not codegen'd at
    /// their original locations.
    inst_sunk: FxHashSet<Inst>,

    /// Instructions collected for the CLIF inst in progress, in forward order.
    ir_insts: Vec<I>,

    /// Try-call block arg normal-return values, indexed by instruction.
    try_call_rets: FxHashMap<Inst, SmallVec<[ValueRegs<Writable<Reg>>; 2]>>,

    /// Try-call block arg exceptional-return payloads, indexed by
    /// instruction. Payloads are carried in registers per the ABI and
    /// can only be one register each.
    try_call_payloads: FxHashMap<Inst, SmallVec<[Writable<Reg>; 2]>>,

    /// The register to use for GetPinnedReg, if any, on this architecture.
    pinned_reg: Option<Reg>,

    /// Compilation flags.
    flags: Flags,
}

/// How is a value used in the IR?
///
/// This can be seen as a coarsening of an integer count. We only need
/// distinct states for zero, one, or many.
///
/// This analysis deserves further explanation. The basic idea is that
/// we want to allow instruction lowering to know whether a value that
/// an instruction references is *only* referenced by that one use, or
/// by others as well. This is necessary to know when we might want to
/// move a side-effect: we cannot, for example, duplicate a load, so
/// we cannot let instruction lowering match a load as part of a
/// subpattern and potentially incorporate it.
///
/// Note that a lot of subtlety comes into play once we have
/// *indirect* uses. The classical example of this in our development
/// history was the x86 compare instruction, which is incorporated
/// into flags users (e.g. `selectif`, `trueif`, branches) and can
/// subsequently incorporate loads, or at least we would like it
/// to. However, danger awaits: the compare might be the only user of
/// a load, so we might think we can just move the load (and nothing
/// is duplicated -- success!), except that the compare itself is
/// codegen'd in multiple places, where it is incorporated as a
/// subpattern itself.
///
/// So we really want a notion of "unique all the way along the
/// matching path". Rust's `&T` and `&mut T` offer a partial analogy
/// to the semantics that we want here: we want to know when we've
/// matched a unique use of an instruction, and that instruction's
/// unique use of another instruction, etc, just as `&mut T` can only
/// be obtained by going through a chain of `&mut T`. If one has a
/// `&T` to a struct containing `&mut T` (one of several uses of an
/// instruction that itself has a unique use of an instruction), one
/// can only get a `&T` (one can only get a "I am one of several users
/// of this instruction" result).
///
/// We could track these paths, either dynamically as one "looks up the operand
/// tree" or precomputed. But the former requires state and means that the
/// `Lower` API carries that state implicitly, which we'd like to avoid if we
/// can. And the latter implies O(n^2) storage: it is an all-pairs property (is
/// inst `i` unique from the point of view of inst `j`?).
///
/// To make matters even a little more complex still, a value that is
/// not uniquely used when initially viewing the IR can *become*
/// uniquely used, at least as a root allowing further unique uses of
/// e.g. loads to merge, if no other instruction actually merges
/// it. To be more concrete, if we have `v1 := load; v2 := op v1; v3
/// := op v2; v4 := op v2` then `v2` is non-uniquely used, so from the
/// point of view of lowering `v4` or `v3`, we cannot merge the load
/// at `v1`. But if we decide just to use the assigned register for
/// `v2` at both `v3` and `v4`, then we only actually codegen `v2`
/// once, so it *is* a unique root at that point and we *can* merge
/// the load.
///
/// Note also that the color scheme is not sufficient to give us this
/// information, for various reasons: reasoning about side-effects
/// does not tell us about potential duplication of uses through pure
/// ops.
///
/// To keep things simple and avoid error-prone lowering APIs that
/// would extract more information about whether instruction merging
/// happens or not (we don't have that info now, and it would be
/// difficult to refactor to get it and make that refactor 100%
/// correct), we give up on the above "can become unique if not
/// actually merged" point. Instead, we compute a
/// transitive-uniqueness. That is what this enum represents.
///
/// There is one final caveat as well to the result of this analysis. Notably,
/// we define some instructions to be "root" instructions, which means that we
/// assume they will always be codegen'd at the root of a matching tree, and not
/// matched. (This comes with the caveat that we actually enforce this property
/// by making them "opaque" to subtree matching in
/// `get_value_as_source_or_const`). Because they will always be codegen'd once,
/// they in some sense "reset" multiplicity: these root instructions can be used
/// many times, but because their result(s) are only computed once, they only
/// use their inputs once.
///
/// We currently define all multi-result instructions to be "root" instructions,
/// because it is too complex to reason about matching through them, and they
/// cause too-coarse-grained approximation of multiplicity otherwise: the
/// analysis would have to assume (as it used to!) that they are always
/// multiply-used, simply because they have multiple outputs even if those
/// outputs are used only once.
///
/// In the future we could define other instructions to be "root" instructions
/// as well, if we make the corresponding change to
/// `get_value_as_source_or_const`.
///
/// To define `ValueUseState` more plainly: a value is `Unused` if no references
/// exist to it; `Once` if only one other op refers to it, *and* that other op
/// is `Unused` or `Once`; and `Multiple` otherwise. In other words, `Multiple`
/// is contagious (except through root instructions): even if an op's result
/// value is directly used only once in the CLIF, that value is `Multiple` if
/// the op that uses it is itself used multiple times (hence could be codegen'd
/// multiple times). In brief, this analysis tells us whether, if every op
/// merged all of its operand tree, a given op could be codegen'd in more than
/// one place.
///
/// To compute this, we first consider direct uses. At this point
/// `Unused` answers are correct, `Multiple` answers are correct, but
/// some `Once`s may change to `Multiple`s. Then we propagate
/// `Multiple` transitively using a workqueue/fixpoint algorithm.
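///
/// A minimal illustration (hypothetical CLIF-like sequence):
///
/// ```text
/// v2 = ineg v1      ;; v1: Once (this is its only direct use)
/// v3 = iadd v2, v2  ;; v2: Multiple (two direct uses)
/// ```
///
/// The transitive propagation then marks `v1` as `Multiple` too: if `v2`'s
/// `ineg` were merged as a subpattern at both of its uses, the producer of
/// `v1` could end up codegen'd twice.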
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ValueUseState {
    /// Not used at all.
    Unused,
    /// Used exactly once.
    Once,
    /// Used multiple times.
    Multiple,
}

impl ValueUseState {
    /// Add one use.
    fn inc(&mut self) {
        let new = match self {
            Self::Unused => Self::Once,
            Self::Once | Self::Multiple => Self::Multiple,
        };
        *self = new;
    }
}

/// Notion of "relocation distance". This gives an estimate of how far away a symbol will be from a
/// reference.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RelocDistance {
    /// Target of relocation is "nearby". The threshold for this is fuzzy but should be interpreted
    /// as approximately "within the compiled output of one module"; e.g., within AArch64's +/-
    /// 128MB offset. If unsure, use `Far` instead.
    Near,
    /// Target of relocation could be anywhere in the address space.
    Far,
}

impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Prepare a new lowering context for the given IR function.
    pub fn new(
        f: &'func Function,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        sigs: SigSet,
        flags: Flags,
    ) -> CodegenResult<Self> {
        let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
        let vcode = VCodeBuilder::new(
            sigs,
            abi,
            emit_info,
            block_order,
            constants,
            VCodeBuildDirection::Backward,
            flags.log2_min_function_alignment(),
        );

        // We usually need two VRegs per instruction result, plus extras for
        // various temporaries, but two per Value is a good starting point.
        let mut vregs = VRegAllocator::with_capacity(f.dfg.num_values() * 2);

        let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
        let mut try_call_rets = FxHashMap::default();
        let mut try_call_payloads = FxHashMap::default();

        // Assign a vreg to each block param, each inst result, and
        // each edge-defined block-call arg.
        for bb in f.layout.blocks() {
            for &param in f.dfg.block_params(bb) {
                let ty = f.dfg.value_type(param);
                if value_regs[param].is_invalid() {
                    let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[param].clone())?;
                    value_regs[param] = regs;
                    trace!("bb {} param {}: regs {:?}", bb, param, regs);
                }
            }
            for inst in f.layout.block_insts(bb) {
                for &result in f.dfg.inst_results(inst) {
                    let ty = f.dfg.value_type(result);
                    if value_regs[result].is_invalid() && !ty.is_invalid() {
                        let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[result].clone())?;
                        value_regs[result] = regs;
                        trace!(
                            "bb {} inst {} ({:?}): result {} regs {:?}",
                            bb,
                            inst,
                            f.dfg.insts[inst],
                            result,
                            regs,
                        );
                    }
                }

                if let Some(et) = f.dfg.insts[inst].exception_table() {
                    let exdata = &f.dfg.exception_tables[et];
                    let sig = &f.dfg.signatures[exdata.signature()];

                    let mut rets = smallvec![];
                    for ty in sig.returns.iter().map(|ret| ret.value_type) {
                        rets.push(vregs.alloc(ty)?.map(|r| Writable::from_reg(r)));
                    }
                    try_call_rets.insert(inst, rets);

                    let mut payloads = smallvec![];
                    for &ty in sig
                        .call_conv
                        .exception_payload_types(I::ABIMachineSpec::word_type())
                    {
                        payloads.push(Writable::from_reg(vregs.alloc(ty)?.only_reg().unwrap()));
                    }
                    try_call_payloads.insert(inst, payloads);
                }
            }
        }

        // Find the sret register, if it's used.
        let mut sret_param = None;
        for ret in vcode.abi().signature().returns.iter() {
            if ret.purpose == ArgumentPurpose::StructReturn {
                let entry_bb = f.stencil.layout.entry_block().unwrap();
                for (&param, sig_param) in f
                    .dfg
                    .block_params(entry_bb)
                    .iter()
                    .zip(vcode.abi().signature().params.iter())
                {
                    if sig_param.purpose == ArgumentPurpose::StructReturn {
                        assert!(sret_param.is_none());
                        sret_param = Some(param);
                    }
                }

                assert!(sret_param.is_some());
            }
        }

        let sret_reg = sret_param.map(|param| {
            let regs = value_regs[param];
            assert!(regs.len() == 1);
            regs
        });

        // Compute instruction colors, find constant instructions, and find instructions with
        // side-effects, in one combined pass.
        let mut cur_color = 0;
        let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
        let mut side_effect_inst_entry_colors = FxHashMap::default();
        let mut inst_constants = FxHashMap::default();
        for bb in f.layout.blocks() {
            cur_color += 1;
            for inst in f.layout.block_insts(bb) {
                let side_effect = has_lowering_side_effect(f, inst);

                trace!("bb {} inst {} has color {}", bb, inst, cur_color);
                if side_effect {
                    side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
                    trace!(" -> side-effecting; incrementing color for next inst");
                    cur_color += 1;
                }

                // Determine if this is a constant; if so, add to the table.
                if let Some(c) = is_constant_64bit(f, inst) {
                    trace!(" -> constant: {}", c);
                    inst_constants.insert(inst, c);
                }
            }

            block_end_colors[bb] = InstColor::new(cur_color);
        }

        let value_ir_uses = compute_use_states(f, sret_param);

        Ok(Lower {
            f,
            vcode,
            vregs,
            value_regs,
            sret_reg,
            block_end_colors,
            side_effect_inst_entry_colors,
            inst_constants,
            value_ir_uses,
            value_lowered_uses: SecondaryMap::default(),
            inst_sunk: FxHashSet::default(),
            cur_scan_entry_color: None,
            cur_inst: None,
            ir_insts: vec![],
            try_call_rets,
            try_call_payloads,
            pinned_reg: None,
            flags,
        })
    }

    /// Get the ABI signature set.
    pub fn sigs(&self) -> &SigSet {
        self.vcode.sigs()
    }

    /// Get the ABI signature set, mutably.
    pub fn sigs_mut(&mut self) -> &mut SigSet {
        self.vcode.sigs_mut()
    }

    /// Get the vreg allocation context, mutably.
    pub fn vregs_mut(&mut self) -> &mut VRegAllocator<I> {
        &mut self.vregs
    }

    fn gen_arg_setup(&mut self) {
        if let Some(entry_bb) = self.f.layout.entry_block() {
            trace!(
                "gen_arg_setup: entry BB {} args are:\n{:?}",
                entry_bb,
                self.f.dfg.block_params(entry_bb)
            );

            for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
                if self.value_ir_uses[*param] == ValueUseState::Unused {
                    continue;
                }
                let regs = writable_value_regs(self.value_regs[*param]);
                for insn in self
                    .vcode
                    .vcode
                    .abi
                    .gen_copy_arg_to_regs(&self.vcode.vcode.sigs, i, regs, &mut self.vregs)
                    .into_iter()
                {
                    self.emit(insn);
                }
            }
            if let Some(insn) = self
                .vcode
                .vcode
                .abi
                .gen_retval_area_setup(&self.vcode.vcode.sigs, &mut self.vregs)
            {
                self.emit(insn);
            }

            // The `args` instruction below must come first. Finish
            // the current "IR inst" (with a default source location,
            // as for other special instructions inserted during
            // lowering) and continue the scan backward.
            self.finish_ir_inst(Default::default());

            if let Some(insn) = self.vcode.vcode.abi.take_args() {
                self.emit(insn);
            }
        }
    }

    /// Generate the return instruction.
    pub fn gen_return(&mut self, rets: Vec<ValueRegs<Reg>>) {
        let mut out_rets = vec![];

        let mut rets = rets.into_iter();
        for (i, ret) in self
            .abi()
            .signature()
            .returns
            .clone()
            .into_iter()
            .enumerate()
        {
            let regs = if ret.purpose == ArgumentPurpose::StructReturn {
                self.sret_reg.unwrap()
            } else {
                rets.next().unwrap()
            };

            let (regs, insns) = self.vcode.abi().gen_copy_regs_to_retval(
                self.vcode.sigs(),
                i,
                regs,
                &mut self.vregs,
            );
            out_rets.extend(regs);
            for insn in insns {
                self.emit(insn);
            }
        }

        // Hack: generate a virtual instruction that uses vmctx in
        // order to keep it alive for the duration of the function,
        // for the benefit of debuginfo.
        if self.f.dfg.values_labels.is_some() {
            if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
                if self.value_ir_uses[vmctx_val] != ValueUseState::Unused {
                    let vmctx_reg = self.value_regs[vmctx_val].only_reg().unwrap();
                    self.emit(I::gen_dummy_use(vmctx_reg));
                }
            }
        }

        let inst = self.abi().gen_rets(out_rets);
        self.emit(inst);
    }

    /// Has this instruction been sunk to a use-site (i.e., away from its
    /// original location)?
    fn is_inst_sunk(&self, inst: Inst) -> bool {
        self.inst_sunk.contains(&inst)
    }

    /// Is any result of this instruction needed?
    fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
        self.f
            .dfg
            .inst_results(inst)
            .iter()
            .any(|&result| self.value_lowered_uses[result] > 0)
    }

    fn lower_clif_block<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        block: Block,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<()> {
        self.cur_scan_entry_color = Some(self.block_end_colors[block]);
        // Lowering loop:
        // - For each non-branch instruction, in reverse order:
        //   - If side-effecting (load, store, branch/call/return,
        //     possible trap), or if used outside of this block, or if
        //     demanded by another inst, then lower.
        //
        // That's it! Lowering of side-effecting ops will force all *needed*
        // (live) non-side-effecting ops to be lowered at the right places, via
        // the `use_input_reg()` callback on the `Lower` (that's us). That's
        // because `use_input_reg()` sets the eager/demand bit for any insts
        // whose result registers are used.
        //
        // We set the VCodeBuilder to "backward" mode, so we emit
        // blocks in reverse order wrt the BlockIndex sequence, and
        // emit instructions in reverse order within blocks. Because
        // the machine backend calls `ctx.emit()` in forward order, we
        // collect per-IR-inst lowered instructions in `ir_insts`,
        // then reverse these and append to the VCode at the end of
        // each IR instruction.
        for inst in self.f.layout.block_insts(block).rev() {
            let data = &self.f.dfg.insts[inst];
            let has_side_effect = has_lowering_side_effect(self.f, inst);
            // If this inst has been sunk to another location, skip it.
            if self.is_inst_sunk(inst) {
                continue;
            }
            // Are any outputs used at least once?
            let value_needed = self.is_any_inst_result_needed(inst);
            trace!(
                "lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
                block,
                inst,
                data,
                data.opcode().is_branch(),
                has_side_effect,
                value_needed,
            );

            // Update scan state to color prior to this inst (as we are scanning
            // backward).
            self.cur_inst = Some(inst);
            if has_side_effect {
                let entry_color = *self
                    .side_effect_inst_entry_colors
                    .get(&inst)
                    .expect("every side-effecting inst should have a color-map entry");
                self.cur_scan_entry_color = Some(entry_color);
            }

            // Skip lowering branches; these are handled separately
            // (see `lower_clif_branches()` below).
            if self.f.dfg.insts[inst].opcode().is_branch() {
                continue;
            }

            // Value defined by "inst" becomes live after it in normal
            // order, and therefore **before** in reversed order.
            self.emit_value_label_live_range_start_for_inst(inst);

            // Normal instruction: codegen if the instruction is side-effecting
            // or any of its outputs is used.
            if has_side_effect || value_needed {
                trace!("lowering: inst {}: {}", inst, self.f.dfg.display_inst(inst));
                let temp_regs = match backend.lower(self, inst) {
                    Some(regs) => regs,
                    None => {
                        let ty = if self.num_outputs(inst) > 0 {
                            Some(self.output_ty(inst, 0))
                        } else {
                            None
                        };
                        return Err(CodegenError::Unsupported(format!(
                            "should be implemented in ISLE: inst = `{}`, type = `{:?}`",
                            self.f.dfg.display_inst(inst),
                            ty
                        )));
                    }
                };

                // The ISLE generated code emits its own registers to define the
                // instruction's lowered values in. However, other instructions
                // that use this SSA value will be lowered assuming that the value
                // is generated into a pre-assigned, different, register.
                //
                // To connect the two, we set up "aliases" in the VCodeBuilder
                // that apply when it is building the Operand table for the
                // regalloc to use. These aliases effectively rewrite any use of
                // the pre-assigned register to the register that was returned by
                // the ISLE lowering logic.
                let results = self.f.dfg.inst_results(inst);
                debug_assert_eq!(temp_regs.len(), results.len());
                for (regs, &result) in temp_regs.iter().zip(results) {
                    let dsts = self.value_regs[result];
                    let mut regs = regs.regs().iter();
                    for &dst in dsts.regs().iter() {
                        let temp = regs.next().copied().unwrap_or(Reg::invalid_sentinel());
                        trace!("set vreg alias: {result:?} = {dst:?}, lowering = {temp:?}");
                        self.vregs.set_vreg_alias(dst, temp);
                    }
                }
            }

            let start = self.vcode.vcode.num_insts();
            let loc = self.srcloc(inst);
            self.finish_ir_inst(loc);

            // If the instruction had a user stack map, forward it from the CLIF
            // to the vcode.
            if let Some(entries) = self.f.dfg.user_stack_map_entries(inst) {
                let end = self.vcode.vcode.num_insts();
                debug_assert!(end > start);
                debug_assert_eq!(
                    (start..end)
                        .filter(|i| self.vcode.vcode[InsnIndex::new(*i)].is_safepoint())
                        .count(),
                    1
                );
                for i in start..end {
                    let iix = InsnIndex::new(i);
                    if self.vcode.vcode[iix].is_safepoint() {
                        trace!(
                            "Adding user stack map from clif\n\n\
                                 {inst:?} `{}`\n\n\
                             to vcode\n\n\
                                 {iix:?} `{}`",
                            self.f.dfg.display_inst(inst),
                            &self.vcode.vcode[iix].pretty_print_inst(&mut Default::default()),
                        );
                        self.vcode
                            .add_user_stack_map(BackwardsInsnIndex::new(iix.index()), entries);
                        break;
                    }
                }
            }
            // Maybe insert a random instruction, as directed by the control plane.
            if ctrl_plane.get_decision() {
                if ctrl_plane.get_decision() {
                    let imm: u64 = ctrl_plane.get_arbitrary();
                    let reg = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    I::gen_imm_u64(imm, reg).map(|inst| self.emit(inst));
                } else {
                    let imm: f64 = ctrl_plane.get_arbitrary();
                    let tmp = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    let reg = self.alloc_tmp(crate::ir::types::F64).regs()[0];
                    for inst in I::gen_imm_f64(imm, tmp, reg) {
                        self.emit(inst);
                    }
                }
            }
        }

        // Add the block params to this block.
        self.add_block_params(block)?;

        self.cur_scan_entry_color = None;
        Ok(())
    }

    fn add_block_params(&mut self, block: Block) -> CodegenResult<()> {
        for &param in self.f.dfg.block_params(block) {
            for &reg in self.value_regs[param].regs() {
                let vreg = reg.to_virtual_reg().unwrap();
                self.vcode.add_block_param(vreg);
            }
        }
        Ok(())
    }

    fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
        if let Some(ref values_labels) = self.f.dfg.values_labels {
            debug_assert!(self.f.dfg.value_is_real(val));
            trace!(
                "get_value_labels: val {} -> {:?}",
                val,
                values_labels.get(&val)
            );
            match values_labels.get(&val) {
                Some(&ValueLabelAssignments::Starts(ref list)) => Some(&list[..]),
                Some(&ValueLabelAssignments::Alias { value, .. }) if depth < 10 => {
                    self.get_value_labels(value, depth + 1)
                }
                _ => None,
            }
        } else {
            None
        }
    }

    fn emit_value_label_marks_for_value(&mut self, val: Value) {
        let regs = self.value_regs[val];
        if regs.len() > 1 {
            return;
        }
        let reg = regs.only_reg().unwrap();

        if let Some(label_starts) = self.get_value_labels(val, 0) {
            let labels = label_starts
                .iter()
                .map(|&ValueLabelStart { label, .. }| label)
                .collect::<FxHashSet<_>>();
            for label in labels {
                trace!(
                    "value labeling: defines val {:?} -> reg {:?} -> label {:?}",
                    val,
                    reg,
                    label,
                );
                self.vcode.add_value_label(reg, label);
            }
        }
    }

    fn emit_value_label_live_range_start_for_inst(&mut self, inst: Inst) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!(
            "value labeling: srcloc {}: inst {}",
            self.srcloc(inst),
            inst
        );
        for &val in self.f.dfg.inst_results(inst) {
            self.emit_value_label_marks_for_value(val);
        }
    }

    fn emit_value_label_live_range_start_for_block_args(&mut self, block: Block) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!("value labeling: block {}", block);
        for &arg in self.f.dfg.block_params(block) {
            self.emit_value_label_marks_for_value(arg);
        }
        self.finish_ir_inst(Default::default());
    }

    fn finish_ir_inst(&mut self, loc: RelSourceLoc) {
        // The VCodeBuilder builds in reverse order (and reverses at
        // the end), but `ir_insts` is in forward order, so reverse
        // it.
        for inst in self.ir_insts.drain(..).rev() {
            self.vcode.push(inst, loc);
        }
    }

    fn finish_bb(&mut self) {
        self.vcode.end_bb();
    }

    fn lower_clif_branch<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        // Lowered block index:
        bindex: BlockIndex,
        // Original CLIF block:
        block: Block,
        branch: Inst,
        targets: &[MachLabel],
    ) -> CodegenResult<()> {
        trace!(
            "lower_clif_branch: block {} branch {:?} targets {:?}",
            block,
            branch,
            targets,
        );
        // When considering code-motion opportunities, consider the current
        // program point to be this branch.
        self.cur_inst = Some(branch);

        // Lower the branch in ISLE.
        backend
            .lower_branch(self, branch, targets)
            .unwrap_or_else(|| {
                panic!(
                    "should be implemented in ISLE: branch = `{}`",
                    self.f.dfg.display_inst(branch),
                )
            });
        let loc = self.srcloc(branch);
        self.finish_ir_inst(loc);
        // Add block param outputs for current block.
        self.lower_branch_blockparam_args(bindex);
        Ok(())
    }

    fn lower_branch_blockparam_args(&mut self, block: BlockIndex) {
        let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];

        // TODO: why not make `block_order` public?
        for succ_idx in 0..self.vcode.block_order().succ_indices(block).1.len() {
            branch_arg_vregs.clear();
            let (succ, args) = self.collect_block_call(block, succ_idx, &mut branch_arg_vregs);
            self.vcode.add_succ(succ, args);
        }
    }

    fn collect_branch_and_targets(
        &self,
        bindex: BlockIndex,
        _bb: Block,
        targets: &mut SmallVec<[MachLabel; 2]>,
    ) -> Option<Inst> {
        targets.clear();
        let (opt_inst, succs) = self.vcode.block_order().succ_indices(bindex);
        targets.extend(succs.iter().map(|succ| MachLabel::from_block(*succ)));
        opt_inst
    }

    /// Collect the outgoing block-call arguments for a given edge out
    /// of a lowered block.
    fn collect_block_call<'a>(
        &mut self,
        block: BlockIndex,
        succ_idx: usize,
        buffer: &'a mut SmallVec<[Reg; 16]>,
    ) -> (BlockIndex, &'a [Reg]) {
        let block_order = self.vcode.block_order();
        let (_, succs) = block_order.succ_indices(block);
        let succ = succs[succ_idx];
        let this_lb = block_order.lowered_order()[block.index()];
        let succ_lb = block_order.lowered_order()[succ.index()];

        let (branch_inst, succ_idx) = match (this_lb, succ_lb) {
            (_, LoweredBlock::CriticalEdge { .. }) => {
                // The successor is a split-critical-edge block. In this
                // case, this block-call has no arguments, and the
                // arguments go on the critical edge block's unconditional
                // branch instead.
                return (succ, &[]);
            }
            (LoweredBlock::CriticalEdge { pred, succ_idx, .. }, _) => {
                // This is a split-critical-edge block. In this case, our
                // block-call has the arguments that in the CLIF appear in
                // the predecessor's branch to this edge.
                let branch_inst = self.f.layout.last_inst(pred).unwrap();
                (branch_inst, succ_idx as usize)
            }

            (this, _) => {
                let block = this.orig_block().unwrap();
                // Ordinary block, with an ordinary block as
                // successor. Take the arguments from the branch.
                let branch_inst = self.f.layout.last_inst(block).unwrap();
                (branch_inst, succ_idx)
            }
        };

        let block_call = self.f.dfg.insts[branch_inst]
            .branch_destination(&self.f.dfg.jump_tables, &self.f.dfg.exception_tables)[succ_idx];
        for arg in block_call.args(&self.f.dfg.value_lists) {
            match arg {
                BlockArg::Value(arg) => {
                    debug_assert!(self.f.dfg.value_is_real(arg));
                    let regs = self.put_value_in_regs(arg);
                    buffer.extend_from_slice(regs.regs());
                }
                BlockArg::TryCallRet(i) => {
                    let regs = self.try_call_rets.get(&branch_inst).unwrap()[i as usize]
                        .map(|r| r.to_reg());
                    buffer.extend_from_slice(regs.regs());
                }
                BlockArg::TryCallExn(i) => {
                    let reg =
                        self.try_call_payloads.get(&branch_inst).unwrap()[i as usize].to_reg();
                    buffer.push(reg);
                }
            }
        }
        (succ, &buffer[..])
    }

    /// Lower the function.
    pub fn lower<B: LowerBackend<MInst = I>>(
        mut self,
        backend: &B,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<VCode<I>> {
        trace!("about to lower function: {:?}", self.f);

        self.vcode.init_retval_area(&mut self.vregs)?;

        // Get the pinned reg here (we only parameterize this function on `B`,
        // not the whole `Lower` impl).
        self.pinned_reg = backend.maybe_pinned_reg();

        self.vcode.set_entry(BlockIndex::new(0));

        // Reused vector for branch lowering.
        let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();

        // Get a copy of the lowered order; we hold this separately because we
        // need a mut ref to the vcode to mutate it below.
        let lowered_order: SmallVec<[LoweredBlock; 64]> = self
            .vcode
            .block_order()
            .lowered_order()
            .iter()
            .cloned()
            .collect();

        // Main lowering loop over lowered blocks.
        for (bindex, lb) in lowered_order.iter().enumerate().rev() {
            let bindex = BlockIndex::new(bindex);

            // Lower the block body in reverse order (see comment in
            // `lower_clif_block()` for rationale).

            // End branch.
            if let Some(bb) = lb.orig_block() {
                if let Some(branch) = self.collect_branch_and_targets(bindex, bb, &mut targets) {
                    self.lower_clif_branch(backend, bindex, bb, branch, &targets)?;
                    self.finish_ir_inst(self.srcloc(branch));
                }
            } else {
                // If no orig block, this must be a pure edge block;
                // get the successor and emit a jump. This block has
                // no block params; and this jump's block-call args
                // will be filled in by
                // `lower_branch_blockparam_args`.
                let succ = self.vcode.block_order().succ_indices(bindex).1[0];
                self.emit(I::gen_jump(MachLabel::from_block(succ)));
                self.finish_ir_inst(Default::default());
                self.lower_branch_blockparam_args(bindex);
            }

            // Original block body.
            if let Some(bb) = lb.orig_block() {
                self.lower_clif_block(backend, bb, ctrl_plane)?;
                self.emit_value_label_live_range_start_for_block_args(bb);
            }

            if bindex.index() == 0 {
                // Set up the function with arg vreg inits.
                self.gen_arg_setup();
                self.finish_ir_inst(Default::default());
            }

            self.finish_bb();

            // Check for any deferred vreg-temp allocation errors, and
            // bubble one up at this time if it exists.
            if let Some(e) = self.vregs.take_deferred_error() {
                return Err(e);
            }
        }

        // Now that we've emitted all instructions into the
        // VCodeBuilder, let's build the VCode.
        trace!(
            "built vcode:\n{:?}Backwards {:?}",
            &self.vregs,
            &self.vcode.vcode
        );
        let vcode = self.vcode.build(self.vregs);

        Ok(vcode)
    }

    /// Returns whether the given value is entirely unused in the IR.
    pub fn value_is_unused(&self, val: Value) -> bool {
        match self.value_ir_uses[val] {
            ValueUseState::Unused => true,
            _ => false,
        }
    }
}

/// Pre-analysis: compute `value_ir_uses`. See comment on
/// `ValueUseState` for a description of what this analysis
/// computes.
fn compute_use_states(
    f: &Function,
    sret_param: Option<Value>,
) -> SecondaryMap<Value, ValueUseState> {
    // We perform the analysis without recursion, so we don't
    // overflow the stack on long chains of ops in the input.
    //
    // This is sort of a hybrid of a "shallow use-count" pass and
    // a DFS. We iterate over all instructions and mark their args
    // as used. However when we increment a use-count to
    // "Multiple" we push its args onto the stack and do a DFS,
    // immediately marking the whole dependency tree as
    // Multiple. Doing both (shallow use-counting over all insts,
    // and deep Multiple propagation) lets us trim both
    // traversals, stopping recursion when a node is already at
    // the appropriate state.
    //
    // In particular, note that the *coarsening* into {Unused,
    // Once, Multiple} is part of what makes this pass more
    // efficient than a full indirect-use-counting pass.
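    //
    // A small worked sketch (hypothetical CLIF-like input): given
    //
    //   v11 = ineg v10
    //   v12 = iadd v11, v11
    //
    // the shallow pass counts v10 as Once, and moves v11 from Once to
    // Multiple on its second use; that transition triggers the DFS, which
    // visits v11's operand v10 and marks it Multiple as well (the `ineg`
    // could be duplicated at each merge site of v11).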

    let mut value_ir_uses = SecondaryMap::with_default(ValueUseState::Unused);

    if let Some(sret_param) = sret_param {
        // There's an implicit use of the struct-return parameter in each
        // copy of the function epilogue, which we count here.
        value_ir_uses[sret_param] = ValueUseState::Multiple;
    }

    // Stack of iterators over Values as we do DFS to mark
    // Multiple-state subtrees. The iterator type is whatever is
    // returned by `uses` below.
    let mut stack: SmallVec<[_; 16]> = smallvec![];

    // Find the args for the inst corresponding to the given value.
    //
    // Note that "root" instructions are skipped here. This means that multiple
    // uses of any result of a multi-result instruction are not considered
    // multiple uses of the operands of a multi-result instruction. This
    // requires tight coupling with `get_value_as_source_or_const` above which
    // is the consumer of the map that this function is producing.
    let uses = |value| {
        trace!(" -> pushing args for {} onto stack", value);
        if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
            if is_value_use_root(f, src_inst) {
                None
            } else {
                Some(f.dfg.inst_values(src_inst))
            }
        } else {
            None
        }
    };

    // Do a DFS through `value_ir_uses` to mark a subtree as
    // Multiple.
    for inst in f
        .layout
        .blocks()
        .flat_map(|block| f.layout.block_insts(block))
    {
        // Iterate over all values used by all instructions, noting an
        // additional use on each operand.
        for arg in f.dfg.inst_values(inst) {
            debug_assert!(f.dfg.value_is_real(arg));
            let old = value_ir_uses[arg];
            value_ir_uses[arg].inc();
            let new = value_ir_uses[arg];
            trace!("arg {} used, old state {:?}, new {:?}", arg, old, new);

            // On transition to Multiple, do DFS.
            if old == ValueUseState::Multiple || new != ValueUseState::Multiple {
                continue;
            }
            if let Some(iter) = uses(arg) {
                stack.push(iter);
            }
            while let Some(iter) = stack.last_mut() {
                if let Some(value) = iter.next() {
                    debug_assert!(f.dfg.value_is_real(value));
                    trace!(" -> DFS reaches {}", value);
                    if value_ir_uses[value] == ValueUseState::Multiple {
                        // Truncate DFS here: no need to go further,
                        // as whole subtree must already be Multiple.
                        // With debug asserts, check one level of
                        // that invariant at least.
                        debug_assert!(uses(value).into_iter().flatten().all(|arg| {
                            debug_assert!(f.dfg.value_is_real(arg));
                            value_ir_uses[arg] == ValueUseState::Multiple
                        }));
                        continue;
                    }
                    value_ir_uses[value] = ValueUseState::Multiple;
                    trace!(" -> became Multiple");
                    if let Some(iter) = uses(value) {
                        stack.push(iter);
                    }
                } else {
                    // Empty iterator, discard.
                    stack.pop();
                }
            }
        }
    }

    value_ir_uses
}

/// Definition of a "root" instruction for the calculation of `ValueUseState`.
///
/// This function calculates whether `inst` is considered a "root" for value-use
/// information. This concept is used to forcibly prevent looking-through the
/// instruction during `get_value_as_source_or_const` as it additionally
/// prevents propagating `Multiple`-used results of the `inst` here to the
/// operands of the instruction.
///
/// Currently this is defined as multi-result instructions. That means that
/// lowerings are never allowed to look through a multi-result instruction to
/// generate patterns. Note that this isn't possible in ISLE today anyway so
/// this isn't currently much of a loss.
///
/// The main purpose of this function is to prevent the operands of a
/// multi-result instruction from being forcibly considered `Multiple`-used
/// regardless of circumstances.
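///
/// For example (hypothetical): if `v5, v6 = some_two_result_op v4` and both
/// `v5` and `v6` are used many times, the op itself is still codegen'd once,
/// so `v4` sees only a single use from it and may remain `Once`.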
fn is_value_use_root(f: &Function, inst: Inst) -> bool {
    f.dfg.inst_results(inst).len() > 1
}

/// Function-level queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get the `DataFlowGraph` of the function being lowered.
    pub fn dfg(&self) -> &DataFlowGraph {
        &self.f.dfg
    }

    /// Get the `Callee`.
    pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
        self.vcode.abi()
    }

    /// Get the `Callee`, mutably.
    pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
        self.vcode.abi_mut()
    }
}

/// Instruction input/output queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get the instdata for a given IR instruction.
    pub fn data(&self, ir_inst: Inst) -> &InstructionData {
        &self.f.dfg.insts[ir_inst]
    }

    /// Get the symbol name, relocation distance estimate, and offset for a
    /// `GlobalValue`, if the global value is a symbol reference.
1284    pub fn symbol_value_data<'b>(
1285        &'b self,
1286        global_value: GlobalValue,
1287    ) -> Option<(&'b ExternalName, RelocDistance, i64)> {
1288        let gvdata = &self.f.global_values[global_value];
1289        match gvdata {
1290            &GlobalValueData::Symbol {
1291                ref name,
1292                ref offset,
1293                colocated,
1294                ..
1295            } => {
1296                let offset = offset.bits();
1297                let dist = if colocated {
1298                    RelocDistance::Near
1299                } else {
1300                    RelocDistance::Far
1301                };
1302                Some((name, dist, offset))
1303            }
1304            _ => None,
1305        }
1306    }

    /// Returns the memory flags of a given memory access.
    pub fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
        match &self.f.dfg.insts[ir_inst] {
            &InstructionData::AtomicCas { flags, .. }
            | &InstructionData::AtomicRmw { flags, .. }
            | &InstructionData::Load { flags, .. }
            | &InstructionData::LoadNoOffset { flags, .. }
            | &InstructionData::Store { flags, .. }
            | &InstructionData::StoreNoOffset { flags, .. } => Some(flags),
            _ => None,
        }
    }

    /// Get the source location for a given instruction.
    pub fn srcloc(&self, ir_inst: Inst) -> RelSourceLoc {
        self.f.rel_srclocs()[ir_inst]
    }

    /// Get the number of inputs to the given IR instruction. This counts only
    /// the `Value` arguments to the instruction; block arguments are not
    /// included.
    pub fn num_inputs(&self, ir_inst: Inst) -> usize {
        self.f.dfg.inst_args(ir_inst).len()
    }

    /// Get the number of outputs of the given IR instruction.
    pub fn num_outputs(&self, ir_inst: Inst) -> usize {
        self.f.dfg.inst_results(ir_inst).len()
    }

    /// Get the type for an instruction's input.
    pub fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
        self.value_ty(self.input_as_value(ir_inst, idx))
    }

    /// Get the type for a value.
    pub fn value_ty(&self, val: Value) -> Type {
        self.f.dfg.value_type(val)
    }

    /// Get the type for an instruction's output.
    pub fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
        self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
    }

    /// Get the value of a constant instruction (`iconst`, etc.) as a 64-bit
    /// value, if possible.
    pub fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
        self.inst_constants.get(&ir_inst).map(|&c| {
            // The upper bits must be zero, enforced during legalization and by
            // the CLIF verifier.
            debug_assert_eq!(c, {
                let input_size = self.output_ty(ir_inst, 0).bits() as u64;
                let shift = 64 - input_size;
                (c << shift) >> shift
            });
            c
        })
    }
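
    // A worked example of the masking check above: an `iconst.i32` constant is
    // stored zero-extended in a `u64`, so shifting the upper `64 - 32` bits
    // out and back in must be the identity.
    //
    //     let c: u64 = 0x8000_0000; // 32-bit constant, upper 32 bits zero
    //     let shift = 64 - 32;
    //     assert_eq!(c, (c << shift) >> shift); // holds: upper bits are zero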

    /// Resolves a particular input of an instruction to the `Value` that it is
    /// represented with.
    pub fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        debug_assert!(self.f.dfg.value_is_real(val));
        val
    }

    /// Get the input as one of two options other than a direct register:
    ///
    /// - An instruction, given that it is effect-free or able to sink its
    ///   effect to the current instruction being lowered, and given it has only
    ///   one output, and if effect-ful, given that this is the only use;
    /// - A constant, if the value is a constant.
    ///
    /// The instruction input may be available in either of these forms. It may
    /// be available in neither form, if the conditions are not met; if so, use
    /// `put_input_in_regs()` instead to get it in a register.
    ///
    /// If the backend merges the effect of a side-effecting instruction, it
    /// must call `sink_inst()`. When this is called, it indicates that the
    /// effect has been sunk to the current scan location. The sunk
    /// instruction's result(s) must have *no* uses remaining, because it will
    /// not be codegen'd (it has been integrated into the current instruction).
    ///
    /// For more information see [`Lower::get_value_as_source_or_const`].
    pub fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
        let val = self.input_as_value(ir_inst, idx);
        self.get_value_as_source_or_const(val)
    }

    /// Resolves a `Value` definition to the source instruction it came from
    /// plus whether it's a unique-use of that instruction.
    ///
    /// This function is the workhorse of pattern-matching in ISLE which enables
    /// combining multiple instructions together. This is used implicitly in
    /// patterns such as `(iadd x (iconst y))` where this function is used to
    /// extract the `(iconst y)` operand.
    ///
    /// At its core this function is a wrapper around
    /// [`DataFlowGraph::value_def`]. This function applies a filter on top of
    /// that, however, to determine when it is actually safe to "look through"
    /// the `val` definition here and view the underlying instruction. This
    /// protects against duplicating side effects, such as loads, for example.
    ///
    /// Internally this uses the data computed from `compute_use_states` along
    /// with other instruction properties to know what to return.
    pub fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
        trace!(
            "get_value_as_source_or_const: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
            val,
            self.cur_inst,
            self.cur_scan_entry_color,
        );
        let inst = match self.f.dfg.value_def(val) {
            // OK to merge source instruction if we have a source
            // instruction, and one of these two conditions holds:
            //
            // - It has no side-effects and this instruction is not a "value-use
            //   root" instruction. Instructions which are considered "roots"
            //   for value-use calculations do not have accurate information
            //   known about the `ValueUseState` of their operands. This is
            //   currently done for multi-result instructions to prevent a use
            //   of each result from forcing all operands of the multi-result
            //   instruction to also be `Multiple`. This in turn means that the
            //   `ValueUseState` for operands of a "root" instruction would be
            //   a lie if pattern matching were to look through the multi-result
            //   instruction. As a result, the "look through this instruction"
            //   logic only succeeds if it's not a root instruction.
            //
            // - It has a side-effect, has one output value, that one
            //   output has only one use, directly or indirectly (so
            //   cannot be duplicated -- see comment on
            //   `ValueUseState`), and the instruction's color is *one
            //   less than* the current scan color.
            //
            //   This latter set of conditions is testing whether a
            //   side-effecting instruction can sink to the current scan
            //   location; this is possible if the in-color of this inst is
            //   equal to the out-color of the producing inst, so no other
            //   side-effecting ops occur between them (which will only be true
            //   if they are in the same BB, because color increments at each BB
            //   start).
            //
            //   If it is actually sunk, then in `sink_inst()`, we update the
            //   scan color so that as we scan over the range past which the
            //   instruction was sunk, we allow other instructions (that came
            //   prior to the sunk instruction) to sink.
            ValueDef::Result(src_inst, result_idx) => {
                let src_side_effect = has_lowering_side_effect(self.f, src_inst);
                trace!(" -> src inst {}", src_inst);
                trace!(" -> has lowering side effect: {}", src_side_effect);
                if is_value_use_root(self.f, src_inst) {
                    // If this instruction is a "root instruction" then we must
                    // not look through it to the definition: the
                    // `ValueUseState` for the operands of this instruction
                    // assumes that the instruction is generated exactly once,
                    // which might be violated were we to allow looking through
                    // it.
                    trace!(" -> is a root instruction");
                    InputSourceInst::None
                } else if !src_side_effect {
                    // Otherwise, if this instruction has no side effects and
                    // the value is used only once, then we can look through it
                    // with a "unique" tag. A non-unique `Use` is returned for
                    // all other values, so that consumers can see how the value
                    // is computed but know that the producing instruction
                    // cannot be elided.
                    if self.value_ir_uses[val] == ValueUseState::Once {
                        InputSourceInst::UniqueUse(src_inst, result_idx)
                    } else {
                        InputSourceInst::Use(src_inst, result_idx)
                    }
                } else {
                    // Side-effect: test whether this is the only use of the
                    // only result of the instruction, and whether colors allow
                    // the code-motion.
                    trace!(
                        " -> side-effecting op {} for val {}: use state {:?}",
                        src_inst,
                        val,
                        self.value_ir_uses[val]
                    );
                    if self.cur_scan_entry_color.is_some()
                        && self.value_ir_uses[val] == ValueUseState::Once
                        && self.num_outputs(src_inst) == 1
                        && self
                            .side_effect_inst_entry_colors
                            .get(&src_inst)
                            .unwrap()
                            .get()
                            + 1
                            == self.cur_scan_entry_color.unwrap().get()
                    {
                        InputSourceInst::UniqueUse(src_inst, 0)
                    } else {
                        InputSourceInst::None
                    }
                }
            }
            _ => InputSourceInst::None,
        };
        let constant = inst.as_inst().and_then(|(inst, _)| self.get_constant(inst));

        NonRegInput { inst, constant }
    }
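
    // A minimal sketch (with hypothetical names) of how an `(iadd x (iconst
    // y))` pattern could be lowered using this query: if the second operand
    // resolves to a constant, it can be folded into an add-immediate form
    // rather than materialized in a register.
    //
    //     let rhs = ctx.get_input_as_source_or_const(iadd_inst, 1);
    //     if let Some(imm) = rhs.constant {
    //         let x = ctx.put_input_in_regs(iadd_inst, 0).only_reg().unwrap();
    //         let dst = ctx.alloc_tmp(types::I64).only_reg().unwrap();
    //         // `MyInst::AddImm` stands in for a backend-specific encoding.
    //         ctx.emit(MyInst::AddImm { dst, src: x, imm });
    //     }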

    /// Increment the reference count for the `Value`, ensuring that it gets lowered.
    pub fn increment_lowered_uses(&mut self, val: Value) {
        self.value_lowered_uses[val] += 1
    }

    /// Put the `idx`th input into register(s) and return the assigned register.
    pub fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        self.put_value_in_regs(val)
    }

    /// Put the given value into register(s) and return the assigned register.
    pub fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
        debug_assert!(self.f.dfg.value_is_real(val));
        trace!("put_value_in_regs: val {}", val);

        if let Some(inst) = self.f.dfg.value_def(val).inst() {
            assert!(!self.inst_sunk.contains(&inst));
        }

        let regs = self.value_regs[val];
        trace!(" -> regs {:?}", regs);
        assert!(regs.is_valid());

        self.value_lowered_uses[val] += 1;

        regs
    }

    /// Get the `ValueRegs` for the edge-defined values for special
    /// try-call-return block arguments.
    pub fn try_call_return_defs(&mut self, ir_inst: Inst) -> &[ValueRegs<Writable<Reg>>] {
        &self.try_call_rets.get(&ir_inst).unwrap()[..]
    }

    /// Get the `Reg`s for the edge-defined values for special
    /// try-call-return exception payload arguments.
    pub fn try_call_exception_defs(&mut self, ir_inst: Inst) -> &[Writable<Reg>] {
        &self.try_call_payloads.get(&ir_inst).unwrap()[..]
    }
}

/// Codegen primitives: allocate temps, emit instructions, set result registers,
/// ask for an input to be gen'd into a register.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get a new temp.
    pub fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
        writable_value_regs(self.vregs.alloc_with_deferred_error(ty))
    }

    /// Get the current root instruction that we are lowering.
    pub fn cur_inst(&self) -> Inst {
        self.cur_inst.unwrap()
    }

    /// Emit a machine instruction.
    pub fn emit(&mut self, mach_inst: I) {
        trace!("emit: {:?}", mach_inst);
        self.ir_insts.push(mach_inst);
    }

    /// Indicate that the side-effect of an instruction has been sunk to the
    /// current scan location. This should only be done when the instruction's
    /// original results are not used (i.e., `put_input_in_regs` is not invoked
    /// for any value produced by the sunk instruction); otherwise the
    /// side-effect will occur twice.
    pub fn sink_inst(&mut self, ir_inst: Inst) {
        assert!(has_lowering_side_effect(self.f, ir_inst));
        assert!(self.cur_scan_entry_color.is_some());

        for result in self.dfg().inst_results(ir_inst) {
            assert!(self.value_lowered_uses[*result] == 0);
        }

        let sunk_inst_entry_color = self
            .side_effect_inst_entry_colors
            .get(&ir_inst)
            .cloned()
            .unwrap();
        let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
        assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
        self.cur_scan_entry_color = Some(sunk_inst_entry_color);
        self.inst_sunk.insert(ir_inst);
    }
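
    // A minimal sketch (with hypothetical names) of the sinking protocol: if
    // an operand is the unique use of a load and the colors permit, a backend
    // may fold the load into a load-op form, then mark the load as sunk so it
    // is not emitted separately.
    //
    //     let input = ctx.get_input_as_source_or_const(add_inst, 1);
    //     if let InputSourceInst::UniqueUse(load_inst, 0) = input.inst {
    //         // (A real backend would also check that `load_inst` is a load.)
    //         let addr = ctx.put_input_in_regs(load_inst, 0).only_reg().unwrap();
    //         let dst = ctx.alloc_tmp(types::I64).only_reg().unwrap();
    //         ctx.emit(MyInst::AddFromMem { dst, src: addr });
    //         ctx.sink_inst(load_inst); // the load itself is no longer emitted
    //     }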

    /// Retrieve immediate data given a handle.
    pub fn get_immediate_data(&self, imm: Immediate) -> &ConstantData {
        self.f.dfg.immediates.get(imm).unwrap()
    }

    /// Retrieve constant data given a handle.
    pub fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
        self.f.dfg.constants.get(constant_handle)
    }

    /// Indicate that a constant should be emitted.
    pub fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
        self.vcode.constants().insert(constant)
    }

    /// Cause the value in `reg` to be in a virtual reg, by copying it into a
    /// new virtual reg if `reg` is a real reg. `ty` describes the type of the
    /// value in `reg`.
    pub fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg {
        if reg.to_virtual_reg().is_some() {
            reg
        } else {
            let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
            self.emit(I::gen_move(new_reg, reg, ty));
            new_reg.to_reg()
        }
    }

    /// Add a range fact to a register, if no other fact is present.
    pub fn add_range_fact(&mut self, reg: Reg, bit_width: u16, min: u64, max: u64) {
        if self.flags.enable_pcc() {
            self.vregs.set_fact_if_missing(
                reg.to_virtual_reg().unwrap(),
                Fact::Range {
                    bit_width,
                    min,
                    max,
                },
            );
        }
    }
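
    // A minimal sketch: after emitting code that masks `dst` down to its low
    // eight bits, a backend could record a proof-carrying-code fact that the
    // 64-bit register value lies in `[0, 0xff]`.
    //
    //     ctx.add_range_fact(dst.to_reg(), 64, 0, 0xff);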
}

#[cfg(test)]
mod tests {
    use super::ValueUseState;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::types;
    use crate::ir::{Function, InstBuilder};

    #[test]
    fn multi_result_use_once() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v5]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Once);
        assert_eq!(uses[v5], ValueUseState::Once);
    }

    #[test]
    fn results_used_twice_but_not_operands() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v4]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Multiple);
        assert_eq!(uses[v5], ValueUseState::Unused);
    }
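
    // A further sketch, assuming the documented `ValueUseState` semantics: a
    // value with two uses (even from a single instruction) becomes `Multiple`.
    #[test]
    fn single_result_used_twice() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iadd(v1, v1);
        pos.ins().return_(&[v2]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Multiple);
        assert_eq!(uses[v2], ValueUseState::Once);
    }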
}