cranelift_codegen/machinst/lower.rs
//! This module implements lowering (instruction selection) from Cranelift IR
//! to machine instructions with virtual registers. This is *almost* the final
//! machine code, except for register allocation.

// TODO: separate the IR-query core of `Lower` from the lowering logic built on
// top of it, e.g. the side-effect/coloring analysis and the scan support.

use crate::entity::SecondaryMap;
use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
use crate::ir::pcc::{Fact, FactContext, PccError, PccResult};
use crate::ir::{
    ArgumentPurpose, Block, BlockArg, Constant, ConstantData, DataFlowGraph, ExternalName,
    Function, GlobalValue, GlobalValueData, Immediate, Inst, InstructionData, MemFlags,
    RelSourceLoc, SigRef, Signature, Type, Value, ValueDef, ValueLabelAssignments, ValueLabelStart,
};
use crate::machinst::valueregs::InvalidSentinel;
use crate::machinst::{
    ABIMachineSpec, BackwardsInsnIndex, BlockIndex, BlockLoweringOrder, CallArgList, CallInfo,
    CallRetList, Callee, InsnIndex, LoweredBlock, MachLabel, Reg, Sig, SigSet, TryCallInfo, VCode,
    VCodeBuilder, VCodeConstant, VCodeConstantData, VCodeConstants, VCodeInst, ValueRegs, Writable,
    writable_value_regs,
};
use crate::settings::Flags;
use crate::{CodegenError, CodegenResult, trace};
use crate::{FxHashMap, FxHashSet};
use alloc::vec::Vec;
use core::fmt::Debug;
use cranelift_control::ControlPlane;
use smallvec::{SmallVec, smallvec};

use super::{VCodeBuildDirection, VRegAllocator};

/// A vector of ValueRegs, used to represent the outputs of an instruction.
pub type InstOutput = SmallVec<[ValueRegs<Reg>; 2]>;

/// An "instruction color" partitions CLIF instructions by side-effecting ops.
/// All instructions with the same "color" are guaranteed not to be separated by
/// any side-effecting op (for this purpose, loads are also considered
/// side-effecting, to avoid subtle questions w.r.t. the memory model), and
/// furthermore, it is guaranteed that for any two instructions A and B such
/// that color(A) == color(B), either A dominates B and B postdominates A, or
/// vice-versa. (For now, in practice, only ops in the same basic block can ever
/// have the same color, trivially providing the second condition.) Intuitively,
/// this means that the ops of the same color must always execute "together", as
/// part of one atomic contiguous section of the dynamic execution trace, and
/// they can be freely permuted (modulo true dataflow dependencies) without
/// affecting program behavior.
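///
/// As a hypothetical illustration (entry colors shown for one block near the
/// start of a function; the color is bumped after each side-effecting
/// instruction, and again at each block start):
///
/// ```text
/// v0 = load.i64 ...    ; color 1 (a load counts as side-effecting)
/// v1 = iadd v0, v0     ; color 2
/// v2 = imul v1, v1     ; color 2 (same color: no effect between them)
/// store v2, ...        ; color 2 (the next instruction would get color 3)
/// ```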
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct InstColor(u32);
impl InstColor {
    fn new(n: u32) -> InstColor {
        InstColor(n)
    }

    /// Get an arbitrary index representing this color. The index is unique
    /// *within a single function compilation*, but indices may be reused across
    /// functions.
    pub fn get(self) -> u32 {
        self.0
    }
}

/// A representation of all of the ways in which a value is available, aside
/// from as a direct register.
///
/// - An instruction, if it would be allowed to occur at the current location
///   instead (see [Lower::get_input_as_source_or_const()] for more details).
///
/// - A constant, if the value is known to be a constant.
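///
/// As a hypothetical illustration: when lowering `v2 = iadd v0, v1` where
/// `v1 = iconst.i64 16` and `v1` has no other use, the query for input 1
/// could yield both forms at once:
///
/// ```text
/// NonRegInput { inst: UniqueUse(<inst defining v1>, 0), constant: Some(16) }
/// ```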
#[derive(Clone, Copy, Debug)]
pub struct NonRegInput {
    /// An instruction produces this value (as the given output), and its
    /// computation (and side-effect if applicable) could occur at the
    /// current instruction's location instead.
    ///
    /// If this instruction's operation is merged into the current instruction,
    /// the backend must call [Lower::sink_inst()].
    ///
    /// This enum indicates whether this use of the source instruction
    /// is unique or not.
    pub inst: InputSourceInst,
    /// The value is a known constant.
    pub constant: Option<u64>,
}

/// When examining an input to an instruction, this enum provides one
/// of several options: there is or isn't a single instruction (that
/// we can see and merge with) that produces that input's value, and
/// we are or aren't the single user of that instruction.
#[derive(Clone, Copy, Debug)]
pub enum InputSourceInst {
    /// The input in question is the single, unique use of the given
    /// instruction and output index, and it can be sunk to the
    /// location of this input.
    UniqueUse(Inst, usize),
    /// The input in question is one of multiple uses of the given
    /// instruction. It can still be sunk to the location of this
    /// input.
    Use(Inst, usize),
    /// We cannot determine which instruction produced the input, or
    /// it is one of several instructions (e.g., due to a control-flow
    /// merge and blockparam), or the source instruction cannot be
    /// allowed to sink to the current location due to side-effects.
    None,
}

impl InputSourceInst {
    /// Get the instruction and output index for this source, whether
    /// we are its single or one of many users.
    pub fn as_inst(&self) -> Option<(Inst, usize)> {
        match self {
            &InputSourceInst::UniqueUse(inst, output_idx)
            | &InputSourceInst::Use(inst, output_idx) => Some((inst, output_idx)),
            &InputSourceInst::None => None,
        }
    }
}

/// A machine backend.
pub trait LowerBackend {
    /// The machine instruction type.
    type MInst: VCodeInst;

    /// Lower a single instruction.
    ///
    /// For a branch, this function should not generate the actual branch
    /// instruction. However, it must force any values it needs for the branch
    /// edge (block-param actuals) into registers, because the actual branch
    /// generation (`lower_branch()`) happens *after* any possible merged
    /// out-edge.
    ///
    /// Returns `None` if no lowering for the instruction was found.
    fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> Option<InstOutput>;

    /// Lower a block-terminating group of branches (which together can be seen
    /// as one N-way branch), given a vcode MachLabel for each target.
    ///
    /// Returns `None` if no lowering for the branch was found.
    fn lower_branch(
        &self,
        ctx: &mut Lower<Self::MInst>,
        inst: Inst,
        targets: &[MachLabel],
    ) -> Option<()>;

    /// A bit of a hack: give a fixed register that always holds the result of a
    /// `get_pinned_reg` instruction, if known. This allows elision of moves
    /// into the associated vreg, instead using the real reg directly.
    fn maybe_pinned_reg(&self) -> Option<Reg> {
        None
    }

    /// The type of state carried between `check_fact` invocations.
    type FactFlowState: Default + Clone + Debug;

    /// Check any facts about an instruction, given VCode with facts
    /// on VRegs. Takes mutable `VCode` so that it can propagate some
    /// kinds of facts automatically.
    fn check_fact(
        &self,
        _ctx: &FactContext<'_>,
        _vcode: &mut VCode<Self::MInst>,
        _inst: InsnIndex,
        _state: &mut Self::FactFlowState,
    ) -> PccResult<()> {
        Err(PccError::UnimplementedBackend)
    }
}

/// Machine-independent lowering driver / machine-instruction container. Maintains a correspondence
/// from original Inst to MachInsts.
pub struct Lower<'func, I: VCodeInst> {
    /// The function to lower.
    pub(crate) f: &'func Function,

    /// Lowered machine instructions.
    vcode: VCodeBuilder<I>,

    /// VReg allocation context, given to the vcode field at build time to finalize the vcode.
    vregs: VRegAllocator<I>,

    /// Mapping from `Value` (SSA value in IR) to virtual register.
    value_regs: SecondaryMap<Value, ValueRegs<Reg>>,

    /// sret registers, if needed.
    sret_reg: Option<ValueRegs<Reg>>,

    /// Instruction colors at block exits. From this map, we can recover all
    /// instruction colors by scanning backward from the block end and
    /// decrementing on any color-changing (side-effecting) instruction.
    block_end_colors: SecondaryMap<Block, InstColor>,

    /// Instruction colors at side-effecting ops. This is the *entry* color,
    /// i.e., the version of global state that exists before an instruction
    /// executes. For each side-effecting instruction, the *exit* color is its
    /// entry color plus one.
    side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,

    /// Current color as we scan during lowering. While we are lowering an
    /// instruction, this is equal to the color *at entry to* the instruction.
    cur_scan_entry_color: Option<InstColor>,

    /// Current instruction as we scan during lowering.
    cur_inst: Option<Inst>,

    /// Instruction constant values, if known.
    inst_constants: FxHashMap<Inst, u64>,

    /// Use-counts per SSA value, as counted in the input IR. These
    /// are "coarsened", in the abstract-interpretation sense: we only
    /// care about "0, 1, many" states, as this is all we need and
    /// this lets us do an efficient fixpoint analysis.
    ///
    /// See doc comment on `ValueUseState` for more details.
    value_ir_uses: SecondaryMap<Value, ValueUseState>,

    /// Actual uses of each SSA value so far, incremented while lowering.
    value_lowered_uses: SecondaryMap<Value, u32>,

    /// Effectful instructions that have been sunk; they are not codegen'd at
    /// their original locations.
    inst_sunk: FxHashSet<Inst>,

    /// Instructions collected for the CLIF inst in progress, in forward order.
    ir_insts: Vec<I>,

    /// Try-call block arg normal-return values, indexed by instruction.
    try_call_rets: FxHashMap<Inst, SmallVec<[ValueRegs<Writable<Reg>>; 2]>>,

    /// Try-call block arg exceptional-return payloads, indexed by
    /// instruction. Payloads are carried in registers per the ABI and
    /// can only be one register each.
    try_call_payloads: FxHashMap<Inst, SmallVec<[Writable<Reg>; 2]>>,

    /// The register to use for GetPinnedReg, if any, on this architecture.
    pinned_reg: Option<Reg>,

    /// Compilation flags.
    flags: Flags,
}

/// How is a value used in the IR?
///
/// This can be seen as a coarsening of an integer count. We only need
/// distinct states for zero, one, or many.
///
/// This analysis deserves further explanation. The basic idea is that
/// we want to allow instruction lowering to know whether a value that
/// an instruction references is *only* referenced by that one use, or
/// by others as well. This is necessary to know when we might want to
/// move a side-effect: we cannot, for example, duplicate a load, so
/// we cannot let instruction lowering match a load as part of a
/// subpattern and potentially incorporate it.
///
/// Note that a lot of subtlety comes into play once we have
/// *indirect* uses. The classical example of this in our development
/// history was the x86 compare instruction, which is incorporated
/// into flags users (e.g. `selectif`, `trueif`, branches) and can
/// subsequently incorporate loads, or at least we would like it
/// to. However, danger awaits: the compare might be the only user of
/// a load, so we might think we can just move the load (and nothing
/// is duplicated -- success!), except that the compare itself is
/// codegen'd in multiple places, where it is incorporated as a
/// subpattern itself.
///
/// So we really want a notion of "unique all the way along the
/// matching path". Rust's `&T` and `&mut T` offer a partial analogy
/// to the semantics that we want here: we want to know when we've
/// matched a unique use of an instruction, and that instruction's
/// unique use of another instruction, etc, just as `&mut T` can only
/// be obtained by going through a chain of `&mut T`. If one has a
/// `&T` to a struct containing `&mut T` (one of several uses of an
/// instruction that itself has a unique use of an instruction), one
/// can only get a `&T` (one can only get a "I am one of several users
/// of this instruction" result).
///
/// We could track these paths, either dynamically as one "looks up the operand
/// tree" or precomputed. But the former requires state and means that the
/// `Lower` API carries that state implicitly, which we'd like to avoid if we
/// can. And the latter implies O(n^2) storage: it is an all-pairs property (is
/// inst `i` unique from the point of view of `j`).
///
/// To make matters even a little more complex still, a value that is
/// not uniquely used when initially viewing the IR can *become*
/// uniquely used, at least as a root allowing further unique uses of
/// e.g. loads to merge, if no other instruction actually merges
/// it. To be more concrete, if we have `v1 := load; v2 := op v1; v3
/// := op v2; v4 := op v2` then `v2` is non-uniquely used, so from the
/// point of view of lowering `v4` or `v3`, we cannot merge the load
/// at `v1`. But if we decide just to use the assigned register for
/// `v2` at both `v3` and `v4`, then we only actually codegen `v2`
/// once, so it *is* a unique root at that point and we *can* merge
/// the load.
///
/// Note also that the color scheme is not sufficient to give us this
/// information, for various reasons: reasoning about side-effects
/// does not tell us about potential duplication of uses through pure
/// ops.
///
/// To keep things simple and avoid error-prone lowering APIs that
/// would extract more information about whether instruction merging
/// happens or not (we don't have that info now, and it would be
/// difficult to refactor to get it and make that refactor 100%
/// correct), we give up on the above "can become unique if not
/// actually merged" point. Instead, we compute a
/// transitive-uniqueness. That is what this enum represents.
///
/// There is one final caveat as well to the result of this analysis. Notably,
/// we define some instructions to be "root" instructions, which means that we
/// assume they will always be codegen'd at the root of a matching tree, and not
/// matched. (This comes with the caveat that we actually enforce this property
/// by making them "opaque" to subtree matching in
/// `get_value_as_source_or_const`). Because they will always be codegen'd once,
/// they in some sense "reset" multiplicity: these root instructions can be used
/// many times, but because their result(s) are only computed once, they only
/// use their inputs once.
///
/// We currently define all multi-result instructions to be "root" instructions,
/// because it is too complex to reason about matching through them, and they
/// cause too-coarse-grained approximation of multiplicity otherwise: the
/// analysis would have to assume (as it used to!) that they are always
/// multiply-used, simply because they have multiple outputs even if those
/// outputs are used only once.
///
/// In the future we could define other instructions to be "root" instructions
/// as well, provided we make the corresponding change to
/// `get_value_as_source_or_const`.
///
/// To define `ValueUseState` more plainly: a value is `Unused` if no references
/// exist to it; `Once` if only one other op refers to it, *and* that other op
/// is `Unused` or `Once`; and `Multiple` otherwise. In other words, `Multiple`
/// is contagious (except through root instructions): even if an op's result
/// value is directly used only once in the CLIF, that value is `Multiple` if
/// the op that uses it is itself used multiple times (hence could be codegen'd
/// multiple times). In brief, this analysis tells us whether, if every op
/// merged all of its operand tree, a given op could be codegen'd in more than
/// one place.
///
/// To compute this, we first consider direct uses. At this point
/// `Unused` answers are correct, `Multiple` answers are correct, but
/// some `Once`s may change to `Multiple`s. Then we propagate
/// `Multiple` transitively using a workqueue/fixpoint algorithm.
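///
/// For the example given above (`v1 := load; v2 := op v1; v3 := op v2;
/// v4 := op v2`), and assuming `v3` and `v4` are themselves unused, the
/// analysis settles on:
///
/// ```text
/// v1 -> Multiple   (one direct use, but its user v2 is Multiple: contagion)
/// v2 -> Multiple   (two direct uses, by v3 and v4)
/// v3 -> Unused
/// v4 -> Unused
/// ```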
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ValueUseState {
    /// Not used at all.
    Unused,
    /// Used exactly once.
    Once,
    /// Used multiple times.
    Multiple,
}

impl ValueUseState {
    /// Add one use.
    fn inc(&mut self) {
        let new = match self {
            Self::Unused => Self::Once,
            Self::Once | Self::Multiple => Self::Multiple,
        };
        *self = new;
    }
}

/// Notion of "relocation distance". This gives an estimate of how far away a symbol will be from a
/// reference.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RelocDistance {
    /// Target of relocation is "nearby". The threshold for this is fuzzy but should be interpreted
    /// as approximately "within the compiled output of one module"; e.g., within AArch64's +/-
    /// 128MB offset. If unsure, use `Far` instead.
    Near,
    /// Target of relocation could be anywhere in the address space.
    Far,
}

impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Prepare a new lowering context for the given IR function.
    pub fn new(
        f: &'func Function,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        sigs: SigSet,
        flags: Flags,
    ) -> CodegenResult<Self> {
        let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
        let vcode = VCodeBuilder::new(
            sigs,
            abi,
            emit_info,
            block_order,
            constants,
            VCodeBuildDirection::Backward,
            flags.log2_min_function_alignment(),
        );

        // We usually need two VRegs per instruction result, plus extras for
        // various temporaries, but two per Value is a good starting point.
        let mut vregs = VRegAllocator::with_capacity(f.dfg.num_values() * 2);

        let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
        let mut try_call_rets = FxHashMap::default();
        let mut try_call_payloads = FxHashMap::default();

        // Assign a vreg to each block param, each inst result, and
        // each edge-defined block-call arg.
        for bb in f.layout.blocks() {
            for &param in f.dfg.block_params(bb) {
                let ty = f.dfg.value_type(param);
                if value_regs[param].is_invalid() {
                    let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[param].clone())?;
                    value_regs[param] = regs;
                    trace!("bb {} param {}: regs {:?}", bb, param, regs);
                }
            }
            for inst in f.layout.block_insts(bb) {
                for &result in f.dfg.inst_results(inst) {
                    let ty = f.dfg.value_type(result);
                    if value_regs[result].is_invalid() && !ty.is_invalid() {
                        let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[result].clone())?;
                        value_regs[result] = regs;
                        trace!(
                            "bb {} inst {} ({:?}): result {} regs {:?}",
                            bb, inst, f.dfg.insts[inst], result, regs,
                        );
                    }
                }

                if let Some(et) = f.dfg.insts[inst].exception_table() {
                    let exdata = &f.dfg.exception_tables[et];
                    let sig = &f.dfg.signatures[exdata.signature()];

                    let mut rets = smallvec![];
                    for ty in sig.returns.iter().map(|ret| ret.value_type) {
                        rets.push(vregs.alloc(ty)?.map(|r| Writable::from_reg(r)));
                    }
                    try_call_rets.insert(inst, rets);

                    let mut payloads = smallvec![];
                    // Note that this is intentionally using the calling
                    // convention of the callee to determine what payload types
                    // are available. The callee defines that, not the calling
                    // convention of the caller.
                    for &ty in sig
                        .call_conv
                        .exception_payload_types(I::ABIMachineSpec::word_type())
                    {
                        payloads.push(Writable::from_reg(vregs.alloc(ty)?.only_reg().unwrap()));
                    }
                    try_call_payloads.insert(inst, payloads);
                }
            }
        }

        // Find the sret register, if it's used.
        let mut sret_param = None;
        for ret in vcode.abi().signature().returns.iter() {
            if ret.purpose == ArgumentPurpose::StructReturn {
                let entry_bb = f.stencil.layout.entry_block().unwrap();
                for (&param, sig_param) in f
                    .dfg
                    .block_params(entry_bb)
                    .iter()
                    .zip(vcode.abi().signature().params.iter())
                {
                    if sig_param.purpose == ArgumentPurpose::StructReturn {
                        assert!(sret_param.is_none());
                        sret_param = Some(param);
                    }
                }

                assert!(sret_param.is_some());
            }
        }

        let sret_reg = sret_param.map(|param| {
            let regs = value_regs[param];
            assert!(regs.len() == 1);
            regs
        });

        // Compute instruction colors, find constant instructions, and find instructions with
        // side-effects, in one combined pass.
        let mut cur_color = 0;
        let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
        let mut side_effect_inst_entry_colors = FxHashMap::default();
        let mut inst_constants = FxHashMap::default();
        for bb in f.layout.blocks() {
            cur_color += 1;
            for inst in f.layout.block_insts(bb) {
                let side_effect = has_lowering_side_effect(f, inst);

                trace!("bb {} inst {} has color {}", bb, inst, cur_color);
                if side_effect {
                    side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
                    trace!(" -> side-effecting; incrementing color for next inst");
                    cur_color += 1;
                }

                // Determine if this is a constant; if so, add to the table.
                if let Some(c) = is_constant_64bit(f, inst) {
                    trace!(" -> constant: {}", c);
                    inst_constants.insert(inst, c);
                }
            }

            block_end_colors[bb] = InstColor::new(cur_color);
        }

        let value_ir_uses = compute_use_states(f, sret_param);

        Ok(Lower {
            f,
            vcode,
            vregs,
            value_regs,
            sret_reg,
            block_end_colors,
            side_effect_inst_entry_colors,
            inst_constants,
            value_ir_uses,
            value_lowered_uses: SecondaryMap::default(),
            inst_sunk: FxHashSet::default(),
            cur_scan_entry_color: None,
            cur_inst: None,
            ir_insts: vec![],
            try_call_rets,
            try_call_payloads,
            pinned_reg: None,
            flags,
        })
    }

    pub fn sigs(&self) -> &SigSet {
        self.vcode.sigs()
    }

    pub fn sigs_mut(&mut self) -> &mut SigSet {
        self.vcode.sigs_mut()
    }

    pub fn vregs_mut(&mut self) -> &mut VRegAllocator<I> {
        &mut self.vregs
    }

    fn gen_arg_setup(&mut self) {
        if let Some(entry_bb) = self.f.layout.entry_block() {
            trace!(
                "gen_arg_setup: entry BB {} args are:\n{:?}",
                entry_bb,
                self.f.dfg.block_params(entry_bb)
            );

            for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
                if self.value_ir_uses[*param] == ValueUseState::Unused {
                    continue;
                }
                let regs = writable_value_regs(self.value_regs[*param]);
                for insn in self
                    .vcode
                    .vcode
                    .abi
                    .gen_copy_arg_to_regs(&self.vcode.vcode.sigs, i, regs, &mut self.vregs)
                    .into_iter()
                {
                    self.emit(insn);
                }
            }
            if let Some(insn) = self
                .vcode
                .vcode
                .abi
                .gen_retval_area_setup(&self.vcode.vcode.sigs, &mut self.vregs)
            {
                self.emit(insn);
            }

            // The `args` instruction below must come first. Finish
            // the current "IR inst" (with a default source location,
            // as for other special instructions inserted during
            // lowering) and continue the scan backward.
            self.finish_ir_inst(Default::default());

            if let Some(insn) = self.vcode.vcode.abi.take_args() {
                self.emit(insn);
            }
        }
    }

    /// Generate the return instruction.
    pub fn gen_return(&mut self, rets: &[ValueRegs<Reg>]) {
        let mut out_rets = vec![];

        let mut rets = rets.into_iter();
        for (i, ret) in self
            .abi()
            .signature()
            .returns
            .clone()
            .into_iter()
            .enumerate()
        {
            let regs = if ret.purpose == ArgumentPurpose::StructReturn {
                self.sret_reg.unwrap()
            } else {
                *rets.next().unwrap()
            };

            let (regs, insns) = self.vcode.abi().gen_copy_regs_to_retval(
                self.vcode.sigs(),
                i,
                regs,
                &mut self.vregs,
            );
            out_rets.extend(regs);
            for insn in insns {
                self.emit(insn);
            }
        }

        // Hack: generate a virtual instruction that uses vmctx in
        // order to keep it alive for the duration of the function,
        // for the benefit of debuginfo.
        if self.f.dfg.values_labels.is_some() {
            if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
                if self.value_ir_uses[vmctx_val] != ValueUseState::Unused {
                    let vmctx_reg = self.value_regs[vmctx_val].only_reg().unwrap();
                    self.emit(I::gen_dummy_use(vmctx_reg));
                }
            }
        }

        let inst = self.abi().gen_rets(out_rets);
        self.emit(inst);
    }

    /// Generate list of registers to hold the output of a call with
    /// signature `sig`.
    pub fn gen_call_output(&mut self, sig: &Signature) -> InstOutput {
        let mut rets = smallvec![];
        for ty in sig.returns.iter().map(|ret| ret.value_type) {
            rets.push(self.vregs.alloc_with_deferred_error(ty));
        }
        rets
    }

    /// Likewise, but for a `SigRef` instead.
    pub fn gen_call_output_from_sig_ref(&mut self, sig_ref: SigRef) -> InstOutput {
        self.gen_call_output(&self.f.dfg.signatures[sig_ref])
    }

    /// Set up argument values `args` for a call with signature `sig`.
    pub fn gen_call_args(&mut self, sig: Sig, args: &[ValueRegs<Reg>]) -> CallArgList {
        let (uses, insts) = self.vcode.abi().gen_call_args(
            self.vcode.sigs(),
            sig,
            args,
            /* is_tail_call */ false,
            &self.flags,
            &mut self.vregs,
        );
        for insn in insts {
            self.emit(insn);
        }
        uses
    }

    /// Likewise, but for a `return_call`.
    pub fn gen_return_call_args(&mut self, sig: Sig, args: &[ValueRegs<Reg>]) -> CallArgList {
        let (uses, insts) = self.vcode.abi().gen_call_args(
            self.vcode.sigs(),
            sig,
            args,
            /* is_tail_call */ true,
            &self.flags,
            &mut self.vregs,
        );
        for insn in insts {
            self.emit(insn);
        }
        uses
    }

    /// Set up return values `outputs` for a call with signature `sig`.
    pub fn gen_call_rets(&mut self, sig: Sig, outputs: &[ValueRegs<Reg>]) -> CallRetList {
        self.vcode
            .abi()
            .gen_call_rets(self.vcode.sigs(), sig, outputs, None, &mut self.vregs)
    }

    /// Likewise, but for a `try_call`.
    pub fn gen_try_call_rets(&mut self, sig: Sig) -> CallRetList {
        let ir_inst = self.cur_inst.unwrap();
        let mut outputs: SmallVec<[ValueRegs<Reg>; 2]> = smallvec![];
        for return_def in self.try_call_rets.get(&ir_inst).unwrap() {
            outputs.push(return_def.map(|r| r.to_reg()));
        }
        let payloads = Some(&self.try_call_payloads.get(&ir_inst).unwrap()[..]);

        self.vcode
            .abi()
            .gen_call_rets(self.vcode.sigs(), sig, &outputs, payloads, &mut self.vregs)
    }

    /// Populate a `CallInfo` for a call with signature `sig`.
    pub fn gen_call_info<T>(
        &mut self,
        sig: Sig,
        dest: T,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> CallInfo<T> {
        self.vcode.abi().gen_call_info(
            self.vcode.sigs(),
            sig,
            dest,
            uses,
            defs,
            try_call_info,
            patchable,
        )
    }

    /// Has this instruction been sunk to a use-site (i.e., away from its
    /// original location)?
    fn is_inst_sunk(&self, inst: Inst) -> bool {
        self.inst_sunk.contains(&inst)
    }

    // Is any result of this instruction needed?
    fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
        self.f
            .dfg
            .inst_results(inst)
            .iter()
            .any(|&result| self.value_lowered_uses[result] > 0)
    }

    fn lower_clif_block<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        block: Block,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<()> {
        self.cur_scan_entry_color = Some(self.block_end_colors[block]);
        // Lowering loop:
        // - For each non-branch instruction, in reverse order:
        //   - If side-effecting (load, store, branch/call/return,
        //     possible trap), or if used outside of this block, or if
        //     demanded by another inst, then lower.
        //
        // That's it! Lowering of side-effecting ops will force all *needed*
        // (live) non-side-effecting ops to be lowered at the right places, via
        // the `use_input_reg()` callback on the `Lower` (that's us). That's
        // because `use_input_reg()` sets the eager/demand bit for any insts
        // whose result registers are used.
        //
        // We set the VCodeBuilder to "backward" mode, so we emit
        // blocks in reverse order wrt the BlockIndex sequence, and
        // emit instructions in reverse order within blocks. Because
        // the machine backend calls `ctx.emit()` in forward order, we
        // collect per-IR-inst lowered instructions in `ir_insts`,
        // then reverse these and append to the VCode at the end of
        // each IR instruction.
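        //
        // As a hypothetical illustration, if one CLIF inst lowers to the
        // machine insts [m1, m2, m3]:
        //
        //   collected forward in `ir_insts`:  m1, m2, m3
        //   pushed by `finish_ir_inst`:       m3, m2, m1  (backward VCode)
        //   after the final VCode reversal:   m1, m2, m3  (emission order)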
        for inst in self.f.layout.block_insts(block).rev() {
            let data = &self.f.dfg.insts[inst];
            let has_side_effect = has_lowering_side_effect(self.f, inst);
            // If inst has been sunk to another location, skip it.
            if self.is_inst_sunk(inst) {
                continue;
            }
            // Are any outputs used at least once?
            let value_needed = self.is_any_inst_result_needed(inst);
            trace!(
                "lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
                block,
                inst,
                data,
                data.opcode().is_branch(),
                has_side_effect,
                value_needed,
            );

            // Update scan state to color prior to this inst (as we are scanning
            // backward).
            self.cur_inst = Some(inst);
            if has_side_effect {
                let entry_color = *self
                    .side_effect_inst_entry_colors
                    .get(&inst)
                    .expect("every side-effecting inst should have a color-map entry");
                self.cur_scan_entry_color = Some(entry_color);
            }

            // Skip lowering branches; these are handled separately
            // (see `lower_clif_branches()` below).
            if self.f.dfg.insts[inst].opcode().is_branch() {
                continue;
            }

            // Value defined by "inst" becomes live after it in normal
            // order, and therefore **before** in reversed order.
            // Only emit value label aliases if the instruction will be lowered
            // (otherwise we want to keep using the earlier label instead).
            self.emit_value_label_live_range_start_for_inst(inst, has_side_effect || value_needed);

            // Normal instruction: codegen if the instruction is side-effecting
            // or any of its outputs is used.
            if has_side_effect || value_needed {
                trace!("lowering: inst {}: {}", inst, self.f.dfg.display_inst(inst));
                let temp_regs = match backend.lower(self, inst) {
                    Some(regs) => regs,
                    None => {
                        let ty = if self.num_outputs(inst) > 0 {
                            Some(self.output_ty(inst, 0))
                        } else {
                            None
                        };
                        return Err(CodegenError::Unsupported(format!(
                            "should be implemented in ISLE: inst = `{}`, type = `{:?}`",
                            self.f.dfg.display_inst(inst),
                            ty
                        )));
                    }
                };

                // The ISLE generated code emits its own registers to define the
                // instruction's lowered values in. However, other instructions
                // that use this SSA value will be lowered assuming that the value
                // is generated into a pre-assigned, different, register.
                //
                // To connect the two, we set up "aliases" in the VCodeBuilder
                // that apply when it is building the Operand table for the
                // regalloc to use. These aliases effectively rewrite any use of
                // the pre-assigned register to the register that was returned by
                // the ISLE lowering logic.
                let results = self.f.dfg.inst_results(inst);
                debug_assert_eq!(temp_regs.len(), results.len());
                for (regs, &result) in temp_regs.iter().zip(results) {
                    let dsts = self.value_regs[result];
                    let mut regs = regs.regs().iter();
                    for &dst in dsts.regs().iter() {
                        let temp = regs.next().copied().unwrap_or(Reg::invalid_sentinel());
                        trace!("set vreg alias: {result:?} = {dst:?}, lowering = {temp:?}");
                        self.vregs.set_vreg_alias(dst, temp);
                    }
                }
            }

            let start = self.vcode.vcode.num_insts();
            let loc = self.srcloc(inst);
            self.finish_ir_inst(loc);

            // If the instruction had a user stack map, forward it from the CLIF
            // to the vcode.
            if let Some(entries) = self.f.dfg.user_stack_map_entries(inst) {
                let end = self.vcode.vcode.num_insts();
                debug_assert!(end > start);
                debug_assert_eq!(
                    (start..end)
                        .filter(|i| self.vcode.vcode[InsnIndex::new(*i)].is_safepoint())
                        .count(),
                    1
                );
                for i in start..end {
                    let iix = InsnIndex::new(i);
                    if self.vcode.vcode[iix].is_safepoint() {
                        trace!(
                            "Adding user stack map from clif\n\n\
                             {inst:?} `{}`\n\n\
                             to vcode\n\n\
                             {iix:?} `{}`",
                            self.f.dfg.display_inst(inst),
                            &self.vcode.vcode[iix].pretty_print_inst(&mut Default::default()),
                        );
                        self.vcode
                            .add_user_stack_map(BackwardsInsnIndex::new(iix.index()), entries);
                        break;
                    }
                }
            }

            // If the CLIF instruction had debug tags, copy them to
            // the VCode. Place on all VCode instructions lowered from
            // this CLIF instruction.
            let debug_tags = self.f.debug_tags.get(inst);
            if !debug_tags.is_empty() && self.vcode.vcode.num_insts() > 0 {
                let end = self.vcode.vcode.num_insts();
                for i in start..end {
                    let backwards_index = BackwardsInsnIndex::new(i);
                    log::trace!(
                        "debug tags on {inst}; associating {debug_tags:?} with {backwards_index:?}"
                    );
                    self.vcode.add_debug_tags(backwards_index, debug_tags);
                }
            }

            // maybe insert random instruction
            if ctrl_plane.get_decision() {
                if ctrl_plane.get_decision() {
                    let imm: u64 = ctrl_plane.get_arbitrary();
                    let reg = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    I::gen_imm_u64(imm, reg).map(|inst| self.emit(inst));
                } else {
                    let imm: f64 = ctrl_plane.get_arbitrary();
                    let tmp = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    let reg = self.alloc_tmp(crate::ir::types::F64).regs()[0];
                    for inst in I::gen_imm_f64(imm, tmp, reg) {
                        self.emit(inst);
                    }
                }
            }
        }

        // Add the block params to this block.
        self.add_block_params(block)?;

        self.cur_scan_entry_color = None;
        Ok(())
    }

    fn add_block_params(&mut self, block: Block) -> CodegenResult<()> {
        for &param in self.f.dfg.block_params(block) {
            for &reg in self.value_regs[param].regs() {
                let vreg = reg.to_virtual_reg().unwrap();
                self.vcode.add_block_param(vreg);
            }
        }
        Ok(())
    }

    fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
        if let Some(ref values_labels) = self.f.dfg.values_labels {
            debug_assert!(self.f.dfg.value_is_real(val));
            trace!(
                "get_value_labels: val {} -> {:?}",
                val,
                values_labels.get(&val)
            );
            match values_labels.get(&val) {
                Some(&ValueLabelAssignments::Starts(ref list)) => Some(&list[..]),
                Some(&ValueLabelAssignments::Alias { value, .. }) if depth < 10 => {
                    self.get_value_labels(value, depth + 1)
                }
                _ => None,
            }
        } else {
            None
        }
    }

    fn emit_value_label_marks_for_value(&mut self, val: Value, allow_alias: bool) {
        let regs = self.value_regs[val];
        if regs.len() > 1 {
            return;
        }
        let reg = regs.only_reg().unwrap();

        if let Some(label_starts) = self.get_value_labels(val, if allow_alias { 0 } else { !0 }) {
            let labels = label_starts
                .iter()
                .map(|&ValueLabelStart { label, .. }| label)
                .collect::<FxHashSet<_>>();
            for label in labels {
                trace!(
                    "value labeling: defines val {:?} -> reg {:?} -> label {:?}",
                    val, reg, label,
                );
                self.vcode.add_value_label(reg, label);
            }
        }
    }

    fn emit_value_label_live_range_start_for_inst(&mut self, inst: Inst, allow_alias: bool) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!(
            "value labeling: srcloc {}: inst {}",
            self.srcloc(inst),
            inst
        );
        for &val in self.f.dfg.inst_results(inst) {
            self.emit_value_label_marks_for_value(val, allow_alias);
        }
    }

    fn emit_value_label_live_range_start_for_block_args(&mut self, block: Block) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!("value labeling: block {}", block);
        for &arg in self.f.dfg.block_params(block) {
            self.emit_value_label_marks_for_value(arg, true);
        }
        self.finish_ir_inst(Default::default());
    }

    fn finish_ir_inst(&mut self, loc: RelSourceLoc) {
        // The VCodeBuilder builds in reverse order (and reverses at
        // the end), but `ir_insts` is in forward order, so reverse
        // it.
        for inst in self.ir_insts.drain(..).rev() {
            self.vcode.push(inst, loc);
        }
    }

    fn finish_bb(&mut self) {
        self.vcode.end_bb();
    }

    fn lower_clif_branch<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        // Lowered block index:
        bindex: BlockIndex,
        // Original CLIF block:
        block: Block,
        branch: Inst,
        targets: &[MachLabel],
    ) -> CodegenResult<()> {
        trace!(
            "lower_clif_branch: block {} branch {:?} targets {:?}",
            block, branch, targets,
        );
        // When considering code-motion opportunities, consider the current
        // program point to be this branch.
        self.cur_inst = Some(branch);

        // Lower the branch in ISLE.
        backend
            .lower_branch(self, branch, targets)
            .unwrap_or_else(|| {
                panic!(
                    "should be implemented in ISLE: branch = `{}`",
                    self.f.dfg.display_inst(branch),
                )
            });
        let loc = self.srcloc(branch);
        self.finish_ir_inst(loc);
        // Add block param outputs for current block.
        self.lower_branch_blockparam_args(bindex);
        Ok(())
    }

    fn lower_branch_blockparam_args(&mut self, block: BlockIndex) {
        let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];

        // TODO: why not make `block_order` public?
        for succ_idx in 0..self.vcode.block_order().succ_indices(block).1.len() {
            branch_arg_vregs.clear();
            let (succ, args) = self.collect_block_call(block, succ_idx, &mut branch_arg_vregs);
            self.vcode.add_succ(succ, args);
        }
    }

    fn collect_branch_and_targets(
        &self,
        bindex: BlockIndex,
        _bb: Block,
        targets: &mut SmallVec<[MachLabel; 2]>,
    ) -> Option<Inst> {
        targets.clear();
        let (opt_inst, succs) = self.vcode.block_order().succ_indices(bindex);
        targets.extend(succs.iter().map(|succ| MachLabel::from_block(*succ)));
        opt_inst
    }

    /// Collect the outgoing block-call arguments for a given edge out
    /// of a lowered block.
    fn collect_block_call<'a>(
        &mut self,
        block: BlockIndex,
        succ_idx: usize,
        buffer: &'a mut SmallVec<[Reg; 16]>,
    ) -> (BlockIndex, &'a [Reg]) {
        let block_order = self.vcode.block_order();
        let (_, succs) = block_order.succ_indices(block);
        let succ = succs[succ_idx];
        let this_lb = block_order.lowered_order()[block.index()];
        let succ_lb = block_order.lowered_order()[succ.index()];

        let (branch_inst, succ_idx) = match (this_lb, succ_lb) {
            (_, LoweredBlock::CriticalEdge { .. }) => {
                // The successor is a split-critical-edge block. In this
                // case, this block-call has no arguments, and the
                // arguments go on the critical edge block's unconditional
                // branch instead.
                return (succ, &[]);
            }
            (LoweredBlock::CriticalEdge { pred, succ_idx, .. }, _) => {
                // This is a split-critical-edge block. In this case, our
                // block-call has the arguments that in the CLIF appear in
                // the predecessor's branch to this edge.
                let branch_inst = self.f.layout.last_inst(pred).unwrap();
                (branch_inst, succ_idx as usize)
            }

            (this, _) => {
                let block = this.orig_block().unwrap();
                // Ordinary block, with an ordinary block as
                // successor. Take the arguments from the branch.
                let branch_inst = self.f.layout.last_inst(block).unwrap();
                (branch_inst, succ_idx)
            }
        };

        let block_call = self.f.dfg.insts[branch_inst]
            .branch_destination(&self.f.dfg.jump_tables, &self.f.dfg.exception_tables)[succ_idx];
        for arg in block_call.args(&self.f.dfg.value_lists) {
            match arg {
                BlockArg::Value(arg) => {
                    debug_assert!(self.f.dfg.value_is_real(arg));
                    let regs = self.put_value_in_regs(arg);
                    buffer.extend_from_slice(regs.regs());
                }
                BlockArg::TryCallRet(i) => {
                    let regs = self.try_call_rets.get(&branch_inst).unwrap()[i as usize]
                        .map(|r| r.to_reg());
                    buffer.extend_from_slice(regs.regs());
                }
                BlockArg::TryCallExn(i) => {
                    let reg =
                        self.try_call_payloads.get(&branch_inst).unwrap()[i as usize].to_reg();
                    buffer.push(reg);
                }
            }
        }
        (succ, &buffer[..])
    }

    /// Lower the function.
    pub fn lower<B: LowerBackend<MInst = I>>(
        mut self,
        backend: &B,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<VCode<I>> {
        trace!("about to lower function: {:?}", self.f);

        self.vcode.init_retval_area(&mut self.vregs)?;

        // Get the pinned reg here (we only parameterize this function on `B`,
        // not the whole `Lower` impl).
        self.pinned_reg = backend.maybe_pinned_reg();

        self.vcode.set_entry(BlockIndex::new(0));

        // Reused vectors for branch lowering.
        let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();

        // get a copy of the lowered order; we hold this separately because we
        // need a mut ref to the vcode to mutate it below.
        let lowered_order: SmallVec<[LoweredBlock; 64]> = self
            .vcode
            .block_order()
            .lowered_order()
            .iter()
            .cloned()
            .collect();

        // Main lowering loop over lowered blocks.
        for (bindex, lb) in lowered_order.iter().enumerate().rev() {
            let bindex = BlockIndex::new(bindex);

            // Lower the block body in reverse order (see comment in
            // `lower_clif_block()` for rationale).

            // End branch.
            if let Some(bb) = lb.orig_block() {
                if let Some(branch) = self.collect_branch_and_targets(bindex, bb, &mut targets) {
                    self.lower_clif_branch(backend, bindex, bb, branch, &targets)?;
                    self.finish_ir_inst(self.srcloc(branch));
                }
            } else {
                // If no orig block, this must be a pure edge block;
                // get the successor and emit a jump. This block has
                // no block params; and this jump's block-call args
                // will be filled in by
                // `lower_branch_blockparam_args`.
                let succ = self.vcode.block_order().succ_indices(bindex).1[0];
                self.emit(I::gen_jump(MachLabel::from_block(succ)));
                self.finish_ir_inst(Default::default());
                self.lower_branch_blockparam_args(bindex);
            }

            // Original block body.
            if let Some(bb) = lb.orig_block() {
                self.lower_clif_block(backend, bb, ctrl_plane)?;
                self.emit_value_label_live_range_start_for_block_args(bb);
            }

            if bindex.index() == 0 {
                // Set up the function with arg vreg inits.
                self.gen_arg_setup();
                self.finish_ir_inst(Default::default());
            }

            self.finish_bb();

            // Check for any deferred vreg-temp allocation errors, and
            // bubble one up at this time if it exists.
            if let Some(e) = self.vregs.take_deferred_error() {
                return Err(e);
            }
        }

        // Now that we've emitted all instructions into the
        // VCodeBuilder, let's build the VCode.
        trace!(
            "built vcode:\n{:?}Backwards {:?}",
            &self.vregs, &self.vcode.vcode
        );
        let vcode = self.vcode.build(self.vregs);

        Ok(vcode)
    }

    pub fn value_is_unused(&self, val: Value) -> bool {
        match self.value_ir_uses[val] {
            ValueUseState::Unused => true,
            _ => false,
        }
    }

    pub fn block_successor_label(&self, block: Block, succ: usize) -> MachLabel {
        trace!("block_successor_label: block {block} succ {succ}");
        let lowered = self
            .vcode
            .block_order()
            .lowered_index_for_block(block)
            .expect("Unreachable block");
        trace!(" -> lowered block {lowered:?}");
        let (_, succs) = self.vcode.block_order().succ_indices(lowered);
        trace!(" -> succs {succs:?}");
        let succ_block = *succs.get(succ).expect("Successor index out of range");
        MachLabel::from_block(succ_block)
    }
}

/// Pre-analysis: compute `value_ir_uses`. See comment on
/// `ValueUseState` for a description of what this analysis
/// computes.
fn compute_use_states(
    f: &Function,
    sret_param: Option<Value>,
) -> SecondaryMap<Value, ValueUseState> {
    // We perform the analysis without recursion, so we don't
    // overflow the stack on long chains of ops in the input.
    //
    // This is sort of a hybrid of a "shallow use-count" pass and
    // a DFS. We iterate over all instructions and mark their args
    // as used. However when we increment a use-count to
    // "Multiple" we push its args onto the stack and do a DFS,
    // immediately marking the whole dependency tree as
    // Multiple. Doing both (shallow use-counting over all insts,
    // and deep Multiple propagation) lets us trim both
    // traversals, stopping recursion when a node is already at
    // the appropriate state.
    //
    // In particular, note that the *coarsening* into {Unused,
    // Once, Multiple} is part of what makes this pass more
    // efficient than a full indirect-use-counting pass.
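    //
    // A hypothetical trace, for `v1 := iadd ...; v2 := op v1; v3 := op v2;
    // v4 := op v2`:
    //
    //   v3's arg v2: Unused -> Once      (no DFS triggered)
    //   v4's arg v2: Once -> Multiple    (DFS: v1 becomes Multiple, then
    //                                     v1's own args, truncating wherever
    //                                     a value is already Multiple)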

    let mut value_ir_uses = SecondaryMap::with_default(ValueUseState::Unused);

    if let Some(sret_param) = sret_param {
        // There's an implicit use of the struct-return parameter in each
        // copy of the function epilogue, which we count here.
        value_ir_uses[sret_param] = ValueUseState::Multiple;
    }

    // Stack of iterators over Values as we do DFS to mark
    // Multiple-state subtrees. The iterator type is whatever is
    // returned by `uses` below.
    let mut stack: SmallVec<[_; 16]> = smallvec![];

    // Find the args for the inst corresponding to the given value.
    //
    // Note that "root" instructions are skipped here. This means that multiple
    // uses of any result of a multi-result instruction are not considered
    // multiple uses of the operands of a multi-result instruction. This
    // requires tight coupling with `get_value_as_source_or_const` above which
    // is the consumer of the map that this function is producing.
    let uses = |value| {
        trace!(" -> pushing args for {} onto stack", value);
        if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
            if is_value_use_root(f, src_inst) {
                None
            } else {
                Some(f.dfg.inst_values(src_inst))
            }
        } else {
            None
        }
    };

    // Do a DFS through `value_ir_uses` to mark a subtree as
    // Multiple.
    for inst in f
        .layout
        .blocks()
        .flat_map(|block| f.layout.block_insts(block))
    {
        // Iterate over all values used by all instructions, noting an
        // additional use on each operand.
        for arg in f.dfg.inst_values(inst) {
            debug_assert!(f.dfg.value_is_real(arg));
            let old = value_ir_uses[arg];
            value_ir_uses[arg].inc();
            let new = value_ir_uses[arg];
            trace!("arg {} used, old state {:?}, new {:?}", arg, old, new);

            // On transition to Multiple, do DFS.
            if old == ValueUseState::Multiple || new != ValueUseState::Multiple {
                continue;
            }
            if let Some(iter) = uses(arg) {
                stack.push(iter);
            }
            while let Some(iter) = stack.last_mut() {
                if let Some(value) = iter.next() {
                    debug_assert!(f.dfg.value_is_real(value));
                    trace!(" -> DFS reaches {}", value);
                    if value_ir_uses[value] == ValueUseState::Multiple {
                        // Truncate DFS here: no need to go further,
                        // as whole subtree must already be Multiple.
                        // With debug asserts, check one level of
                        // that invariant at least.
                        debug_assert!(uses(value).into_iter().flatten().all(|arg| {
                            debug_assert!(f.dfg.value_is_real(arg));
                            value_ir_uses[arg] == ValueUseState::Multiple
                        }));
                        continue;
                    }
                    value_ir_uses[value] = ValueUseState::Multiple;
                    trace!(" -> became Multiple");
                    if let Some(iter) = uses(value) {
                        stack.push(iter);
                    }
                } else {
                    // Empty iterator, discard.
                    stack.pop();
                }
            }
        }
    }

    value_ir_uses
}

/// Definition of a "root" instruction for the calculation of `ValueUseState`.
///
/// This function calculates whether `inst` is considered a "root" for value-use
/// information. This concept is used to forcibly prevent looking-through the
/// instruction during `get_value_as_source_or_const` as it additionally
/// prevents propagating `Multiple`-used results of the `inst` here to the
/// operands of the instruction.
///
/// Currently this is defined as multi-result instructions. That means that
/// lowerings are never allowed to look through a multi-result instruction to
/// generate patterns. Note that this isn't possible in ISLE today anyway so
/// this isn't currently much of a loss.
///
/// The main purpose of this function is to prevent the operands of a
/// multi-result instruction from being forcibly considered `Multiple`-used
/// regardless of circumstances.
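///
/// As a hypothetical example:
///
/// ```text
/// v0, v1 = some_two_result_inst ...   ; root: two results
/// v2 = iadd v0, v0
/// v3 = iadd v1, v1
/// ```
///
/// Uses of `v0` and `v1` above do not force the root's own operands toward
/// `Multiple`, and the lowerings of `v2`/`v3` may not match through the root.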
fn is_value_use_root(f: &Function, inst: Inst) -> bool {
    f.dfg.inst_results(inst).len() > 1
}

/// Function-level queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    pub fn dfg(&self) -> &DataFlowGraph {
        &self.f.dfg
    }

    /// Get the `Callee`.
    pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
        self.vcode.abi()
    }

    /// Get the `Callee`.
    pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
        self.vcode.abi_mut()
    }
}

/// Instruction input/output queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get the instdata for a given IR instruction.
    pub fn data(&self, ir_inst: Inst) -> &InstructionData {
        &self.f.dfg.insts[ir_inst]
    }

    /// Likewise, but starting with a GlobalValue identifier.
    pub fn symbol_value_data<'b>(
        &'b self,
        global_value: GlobalValue,
    ) -> Option<(&'b ExternalName, RelocDistance, i64)> {
        let gvdata = &self.f.global_values[global_value];
        match gvdata {
            &GlobalValueData::Symbol {
                ref name,
                ref offset,
                colocated,
                ..
            } => {
                let offset = offset.bits();
                let dist = if colocated {
                    RelocDistance::Near
                } else {
                    RelocDistance::Far
                };
                Some((name, dist, offset))
            }
            _ => None,
        }
    }

    /// Returns the memory flags of a given memory access.
    pub fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
        match &self.f.dfg.insts[ir_inst] {
            &InstructionData::AtomicCas { flags, .. } => Some(flags),
            &InstructionData::AtomicRmw { flags, .. } => Some(flags),
            &InstructionData::Load { flags, .. }
            | &InstructionData::LoadNoOffset { flags, .. }
            | &InstructionData::Store { flags, .. } => Some(flags),
            &InstructionData::StoreNoOffset { flags, .. } => Some(flags),
            _ => None,
        }
    }

    /// Get the source location for a given instruction.
    pub fn srcloc(&self, ir_inst: Inst) -> RelSourceLoc {
        self.f.rel_srclocs()[ir_inst]
    }

    /// Get the number of inputs to the given IR instruction. This is a count only of the Value
    /// arguments to the instruction: block arguments will not be included in this count.
    pub fn num_inputs(&self, ir_inst: Inst) -> usize {
        self.f.dfg.inst_args(ir_inst).len()
    }

    /// Get the number of outputs to the given IR instruction.
    pub fn num_outputs(&self, ir_inst: Inst) -> usize {
        self.f.dfg.inst_results(ir_inst).len()
    }

    /// Get the type for an instruction's input.
    pub fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
        self.value_ty(self.input_as_value(ir_inst, idx))
    }

    /// Get the type for a value.
    pub fn value_ty(&self, val: Value) -> Type {
        self.f.dfg.value_type(val)
    }

    /// Get the type for an instruction's output.
    pub fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
        self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
    }

    /// Get the value of a constant instruction (`iconst`, etc.) as a 64-bit
    /// value, if possible.
    pub fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
        self.inst_constants.get(&ir_inst).map(|&c| {
            // The upper bits must be zero, enforced during legalization and by
            // the CLIF verifier.
            debug_assert_eq!(c, {
                let input_size = self.output_ty(ir_inst, 0).bits() as u64;
                let shift = 64 - input_size;
                (c << shift) >> shift
            });
            c
        })
    }

    /// Get the input as one of two options other than a direct register:
    ///
    /// - An instruction, given that it is effect-free or able to sink its
    ///   effect to the current instruction being lowered, and given it has only
    ///   one output, and if effect-ful, given that this is the only use;
    /// - A constant, if the value is a constant.
    ///
    /// The instruction input may be available in either of these forms. It may
    /// be available in neither form, if the conditions are not met; if so, use
    /// `put_input_in_regs()` instead to get it in a register.
    ///
    /// If the backend merges the effect of a side-effecting instruction, it
    /// must call `sink_inst()`. When this is called, it indicates that the
    /// effect has been sunk to the current scan location. The sunk
    /// instruction's result(s) must have *no* uses remaining, because it will
    /// not be codegen'd (it has been integrated into the current instruction).
    pub fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        debug_assert!(self.f.dfg.value_is_real(val));
        val
    }

    /// Resolves a particular input of an instruction to the `Value` that it is
    /// represented with.
    ///
    /// For more information see [`Lower::get_value_as_source_or_const`].
    pub fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
        let val = self.input_as_value(ir_inst, idx);
        self.get_value_as_source_or_const(val)
    }

    /// Resolves a `Value` definition to the source instruction it came from
    /// plus whether it's a unique-use of that instruction.
    ///
    /// This function is the workhorse of pattern-matching in ISLE which enables
    /// combining multiple instructions together. This is used implicitly in
    /// patterns such as `(iadd x (iconst y))` where this function is used to
    /// extract the `(iconst y)` operand.
    ///
    /// At its core this function is a wrapper around
    /// [`DataFlowGraph::value_def`]. This function applies a filter on top of
    /// that, however, to determine when it is actually safe to "look through"
    /// the `val` definition here and view the underlying instruction. This
    /// protects against duplicating side effects, such as loads, for example.
    ///
    /// Internally this uses the data computed from `compute_use_states` along
    /// with other instruction properties to know what to return.
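    ///
    /// A minimal hypothetical sketch of backend-side use (real backends reach
    /// this through generated ISLE code rather than calling it by hand):
    ///
    /// ```ignore
    /// let input = ctx.get_value_as_source_or_const(val);
    /// if let Some(imm) = input.constant {
    ///     // Emit a reg-immediate form.
    /// } else if let InputSourceInst::UniqueUse(src, 0) = input.inst {
    ///     // Merge `src` (e.g. a load) into the current instruction; if
    ///     // `src` is side-effecting, the backend must call
    ///     // `ctx.sink_inst(src)`.
    /// }
    /// ```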
    pub fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
        trace!(
            "get_input_for_val: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
            val, self.cur_inst, self.cur_scan_entry_color,
        );
        let inst = match self.f.dfg.value_def(val) {
            // OK to merge source instruction if we have a source
            // instruction, and one of these two conditions hold:
            //
            // - It has no side-effects and this instruction is not a "value-use
            //   root" instruction. Instructions which are considered "roots"
            //   for value-use calculations do not have accurate information
            //   known about the `ValueUseState` of their operands. This is
            //   currently done for multi-result instructions to prevent a use
            //   of each result from forcing all operands of the multi-result
            //   instruction to also be `Multiple`. This in turn means that the
            //   `ValueUseState` for operands of a "root" instruction can be a
1547 // lie if pattern matching were to look through the multi-result
1548 // instruction. As a result the "look through this instruction"
1549 // logic only succeeds if it's not a root instruction.
1550 //
1551 // - It has a side-effect, has one output value, that one
1552 // output has only one use, directly or indirectly (so
1553 // cannot be duplicated -- see comment on
1554 // `ValueUseState`), and the instruction's color is *one
1555 // less than* the current scan color.
1556 //
1557 // This latter set of conditions is testing whether a
1558 // side-effecting instruction can sink to the current scan
1559 // location; this is possible if the in-color of this inst is
1560 // equal to the out-color of the producing inst, so no other
1561 // side-effecting ops occur between them (which will only be true
1562 // if they are in the same BB, because color increments at each BB
1563 // start).
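            //
            //   For example (hypothetical colors): a load with entry color 4
            //   has exit color 5, so it may sink to a scan location whose
            //   entry color is 5; if a store intervened, the scan entry color
            //   would be 6 and the load could no longer sink past it.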
            //
            // If it is actually sunk, then in `sink_inst()` we update the
            // scan color so that as we scan over the range past which the
            // instruction was sunk, we allow other instructions (that came
            // prior to the sunk instruction) to sink.
            ValueDef::Result(src_inst, result_idx) => {
                let src_side_effect = has_lowering_side_effect(self.f, src_inst);
                trace!(" -> src inst {}", self.f.dfg.display_inst(src_inst));
                trace!(" -> has lowering side effect: {}", src_side_effect);
                if is_value_use_root(self.f, src_inst) {
                    // If this instruction is a "root instruction" then we
                    // must not look through it to see the definition. The
                    // `ValueUseState` for the operands of this result assumes
                    // that this instruction is generated exactly once, which
                    // might be violated were we to allow looking through it.
                    trace!(" -> is a root instruction");
                    InputSourceInst::None
                } else if !src_side_effect {
                    // Otherwise, if this instruction has no side effects and
                    // the value is used only once, we can look through it
                    // with a "unique" tag. For other values a non-unique
                    // `Use` is returned, so consumers can still see how the
                    // value is computed but know that the producing
                    // instruction cannot be omitted.
                    if self.value_ir_uses[val] == ValueUseState::Once {
                        InputSourceInst::UniqueUse(src_inst, result_idx)
                    } else {
                        InputSourceInst::Use(src_inst, result_idx)
                    }
                } else {
                    // Side-effect: test whether this is the only use of the
                    // only result of the instruction, and whether colors allow
                    // the code-motion.
                    trace!(
                        " -> side-effecting op {} for val {}: use state {:?}",
                        src_inst, val, self.value_ir_uses[val]
                    );
                    if self.cur_scan_entry_color.is_some()
                        && self.value_ir_uses[val] == ValueUseState::Once
                        && self.num_outputs(src_inst) == 1
                        && self
                            .side_effect_inst_entry_colors
                            .get(&src_inst)
                            .unwrap()
                            .get()
                            + 1
                            == self.cur_scan_entry_color.unwrap().get()
                    {
                        InputSourceInst::UniqueUse(src_inst, 0)
                    } else {
                        InputSourceInst::None
                    }
                }
            }
            _ => InputSourceInst::None,
        };
        let constant = inst.as_inst().and_then(|(inst, _)| self.get_constant(inst));

        NonRegInput { inst, constant }
    }

    /// Increment the reference count for the Value, ensuring that it gets lowered.
    pub fn increment_lowered_uses(&mut self, val: Value) {
        self.value_lowered_uses[val] += 1
    }

    /// Put the `idx`th input into register(s) and return the assigned register.
    pub fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        self.put_value_in_regs(val)
    }

    /// Put the given value into register(s) and return the assigned register.
    pub fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
        debug_assert!(self.f.dfg.value_is_real(val));
        trace!("put_value_in_regs: val {}", val);

        if let Some(inst) = self.f.dfg.value_def(val).inst() {
            assert!(!self.inst_sunk.contains(&inst));
        }

        let regs = self.value_regs[val];
        trace!(" -> regs {:?}", regs);
        assert!(regs.is_valid());

        self.value_lowered_uses[val] += 1;

        regs
    }

    /// Get the ValueRegs for the edge-defined values for special
    /// try-call-return block arguments.
    pub fn try_call_return_defs(&mut self, ir_inst: Inst) -> &[ValueRegs<Writable<Reg>>] {
        &self.try_call_rets.get(&ir_inst).unwrap()[..]
    }

    /// Get the Regs for the edge-defined values for special
    /// try-call-return exception payload arguments.
    pub fn try_call_exception_defs(&mut self, ir_inst: Inst) -> &[Writable<Reg>] {
        &self.try_call_payloads.get(&ir_inst).unwrap()[..]
    }
}

/// Codegen primitives: allocate temps, emit instructions, set result registers,
/// ask for an input to be gen'd into a register.
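///
/// A hedged sketch of how these primitives combine in a lowering rule
/// (`ctx` is the `Lower` context; the operand index and the use of
/// `gen_move` here are illustrative, not a prescribed pattern):
///
/// ```ignore
/// let ty = types::I64;
/// let src = ctx.put_input_in_regs(ir_inst, 0).only_reg().unwrap();
/// let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
/// ctx.emit(I::gen_move(tmp, src, ty));
/// ```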
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get a new temp.
    pub fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
        writable_value_regs(self.vregs.alloc_with_deferred_error(ty))
    }

    /// Get the current root instruction that we are lowering.
    pub fn cur_inst(&self) -> Inst {
        self.cur_inst.unwrap()
    }

    /// Emit a machine instruction.
    pub fn emit(&mut self, mach_inst: I) {
        trace!("emit: {:?}", mach_inst);
        self.ir_insts.push(mach_inst);
    }

    /// Indicate that the side-effect of an instruction has been sunk to the
    /// current scan location. This should only be done when the instruction's
    /// original results are not used (i.e., `put_input_in_regs` is not invoked
    /// for the input produced by the sunk instruction); otherwise, the
    /// side-effect would occur twice.
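    ///
    /// A hedged sketch of the merge-then-sink pattern (the fused load-op
    /// emission is hypothetical; only the `Lower` calls are real):
    ///
    /// ```ignore
    /// let input = ctx.get_input_as_source_or_const(ir_inst, 0);
    /// if let InputSourceInst::UniqueUse(load_inst, 0) = input.inst {
    ///     // ... emit a fused load-op using `load_inst`'s address ...
    ///     ctx.sink_inst(load_inst); // the load itself is no longer emitted
    /// }
    /// ```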
    pub fn sink_inst(&mut self, ir_inst: Inst) {
        assert!(has_lowering_side_effect(self.f, ir_inst));
        assert!(self.cur_scan_entry_color.is_some());

        for result in self.dfg().inst_results(ir_inst) {
            assert!(self.value_lowered_uses[*result] == 0);
        }

        let sunk_inst_entry_color = self
            .side_effect_inst_entry_colors
            .get(&ir_inst)
            .cloned()
            .unwrap();
        let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
        assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
        self.cur_scan_entry_color = Some(sunk_inst_entry_color);
        self.inst_sunk.insert(ir_inst);
    }

    /// Retrieve immediate data given a handle.
    pub fn get_immediate_data(&self, imm: Immediate) -> &ConstantData {
        self.f.dfg.immediates.get(imm).unwrap()
    }

    /// Retrieve constant data given a handle.
    pub fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
        self.f.dfg.constants.get(constant_handle)
    }

    /// Indicate that a constant should be emitted.
    pub fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
        self.vcode.constants().insert(constant)
    }

    /// Cause the value in `reg` to be in a virtual reg, by copying it into a
    /// new virtual reg if `reg` is a real reg. `ty` describes the type of the
    /// value in `reg`.
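    ///
    /// For example (hedged; `real_ret_reg` is a hypothetical fixed physical
    /// register produced by an ABI sequence):
    ///
    /// ```ignore
    /// let v = ctx.ensure_in_vreg(real_ret_reg, types::I64); // `v` is virtual
    /// ```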
    pub fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg {
        if reg.to_virtual_reg().is_some() {
            reg
        } else {
            let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
            self.emit(I::gen_move(new_reg, reg, ty));
            new_reg.to_reg()
        }
    }

    /// Add a range fact to a register, if no other fact is present.
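    ///
    /// For example (a hedged sketch): recording that a value zero-extended
    /// from `i8` fits in the range `0..=255` of a 64-bit register:
    ///
    /// ```ignore
    /// ctx.add_range_fact(result_reg, 64, 0, u8::MAX as u64);
    /// ```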
    pub fn add_range_fact(&mut self, reg: Reg, bit_width: u16, min: u64, max: u64) {
        if self.flags.enable_pcc() {
            self.vregs.set_fact_if_missing(
                reg.to_virtual_reg().unwrap(),
                Fact::Range {
                    bit_width,
                    min,
                    max,
                },
            );
        }
    }
}

#[cfg(test)]
mod tests {
    use super::ValueUseState;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::types;
    use crate::ir::{Function, InstBuilder};

    #[test]
    fn multi_result_use_once() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v5]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Once);
        assert_eq!(uses[v5], ValueUseState::Once);
    }

    #[test]
    fn results_used_twice_but_not_operands() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v4]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Multiple);
        assert_eq!(uses[v5], ValueUseState::Unused);
    }
}