cranelift_codegen/machinst/lower.rs

//! This module implements lowering (instruction selection) from Cranelift IR
//! to machine instructions with virtual registers. This is *almost* the final
//! machine code, except for register allocation.

// TODO: separate the IR-query core of `Lower` from the lowering logic built on
// top of it, e.g. the side-effect/coloring analysis and the scan support.

use crate::entity::SecondaryMap;
use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
use crate::ir::pcc::{Fact, FactContext, PccError, PccResult};
use crate::ir::{
    ArgumentPurpose, Block, Constant, ConstantData, DataFlowGraph, ExternalName, Function,
    GlobalValue, GlobalValueData, Immediate, Inst, InstructionData, MemFlags, RelSourceLoc, Type,
    Value, ValueDef, ValueLabelAssignments, ValueLabelStart,
};
use crate::machinst::{
    writable_value_regs, BackwardsInsnIndex, BlockIndex, BlockLoweringOrder, Callee, InsnIndex,
    LoweredBlock, MachLabel, Reg, SigSet, VCode, VCodeBuilder, VCodeConstant, VCodeConstantData,
    VCodeConstants, VCodeInst, ValueRegs, Writable,
};
use crate::settings::Flags;
use crate::{trace, CodegenResult};
use alloc::vec::Vec;
use cranelift_control::ControlPlane;
use rustc_hash::{FxHashMap, FxHashSet};
use smallvec::{smallvec, SmallVec};
use std::fmt::Debug;

use super::{VCodeBuildDirection, VRegAllocator};

/// A vector of ValueRegs, used to represent the outputs of an instruction.
pub type InstOutput = SmallVec<[ValueRegs<Reg>; 2]>;

/// An "instruction color" partitions CLIF instructions by side-effecting ops.
/// All instructions with the same "color" are guaranteed not to be separated by
/// any side-effecting op (for this purpose, loads are also considered
/// side-effecting, to avoid subtle questions w.r.t. the memory model), and
/// furthermore, it is guaranteed that for any two instructions A and B such
/// that color(A) == color(B), either A dominates B and B postdominates A, or
/// vice-versa. (For now, in practice, only ops in the same basic block can ever
/// have the same color, trivially providing the second condition.) Intuitively,
/// this means that the ops of the same color must always execute "together", as
/// part of one atomic contiguous section of the dynamic execution trace, and
/// they can be freely permuted (modulo true dataflow dependencies) without
/// affecting program behavior.
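///
/// As an illustrative sketch (hypothetical CLIF; colors as the coloring
/// pass in `Lower::new()` would assign them):
///
/// ```text
/// v1 = iconst.i64 16   ; color 1
/// v2 = iadd v0, v1     ; color 1
/// v3 = load.i64 v2     ; side-effecting: entry color 1, exit color 2
/// v4 = iadd v3, v1     ; color 2
/// ```
///
/// The load's exit color (2) equals the color of the `iadd` producing
/// `v4`, so no other side-effecting op separates them: the load may be
/// sunk into a lowering of that `iadd`. A consumer further down, past
/// another load or store, would see a color gap and could not merge it.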
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct InstColor(u32);
impl InstColor {
    fn new(n: u32) -> InstColor {
        InstColor(n)
    }

    /// Get an arbitrary index representing this color. The index is unique
    /// *within a single function compilation*, but indices may be reused across
    /// functions.
    pub fn get(self) -> u32 {
        self.0
    }
}

/// A representation of all of the ways in which a value is available, aside
/// from as a direct register.
///
/// - An instruction, if it would be allowed to occur at the current location
///   instead (see [Lower::get_input_as_source_or_const()] for more details).
///
/// - A constant, if the value is known to be a constant.
#[derive(Clone, Copy, Debug)]
pub struct NonRegInput {
    /// An instruction produces this value (as the given output), and its
    /// computation (and side-effect if applicable) could occur at the
    /// current instruction's location instead.
    ///
    /// If this instruction's operation is merged into the current instruction,
    /// the backend must call [Lower::sink_inst()].
    ///
    /// This enum indicates whether this use of the source instruction
    /// is unique or not.
    pub inst: InputSourceInst,
    /// The value is a known constant.
    pub constant: Option<u64>,
}

/// When examining an input to an instruction, this enum provides one
/// of several options: there is or isn't a single instruction (that
/// we can see and merge with) that produces that input's value, and
/// we are or aren't the single user of that instruction.
#[derive(Clone, Copy, Debug)]
pub enum InputSourceInst {
    /// The input in question is the single, unique use of the given
    /// instruction and output index, and it can be sunk to the
    /// location of this input.
    UniqueUse(Inst, usize),
    /// The input in question is one of multiple uses of the given
    /// instruction. It can still be sunk to the location of this
    /// input.
    Use(Inst, usize),
    /// We cannot determine which instruction produced the input, or
    /// it is one of several instructions (e.g., due to a control-flow
    /// merge and blockparam), or the source instruction cannot be
    /// allowed to sink to the current location due to side-effects.
    None,
}

impl InputSourceInst {
    /// Get the instruction and output index for this source, whether
    /// we are its single or one of many users.
    pub fn as_inst(&self) -> Option<(Inst, usize)> {
        match self {
            &InputSourceInst::UniqueUse(inst, output_idx)
            | &InputSourceInst::Use(inst, output_idx) => Some((inst, output_idx)),
            &InputSourceInst::None => None,
        }
    }
}

/// A machine backend.
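///
/// Backends typically implement this trait as a thin shim over their
/// ISLE-generated lowering code. A minimal sketch (the backend type, inst
/// type, and ISLE module names here are hypothetical):
///
/// ```ignore
/// impl LowerBackend for MyBackend {
///     type MInst = MyInst;
///
///     fn lower(&self, ctx: &mut Lower<MyInst>, inst: Inst) -> Option<InstOutput> {
///         // Dispatch to the generated ISLE rules.
///         isle::lower(ctx, self, inst)
///     }
///
///     fn lower_branch(
///         &self,
///         ctx: &mut Lower<MyInst>,
///         inst: Inst,
///         targets: &[MachLabel],
///     ) -> Option<()> {
///         isle::lower_branch(ctx, self, inst, targets)
///     }
///
///     // No fact-flow state needed unless `check_fact` is overridden.
///     type FactFlowState = ();
/// }
/// ```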
pub trait LowerBackend {
    /// The machine instruction type.
    type MInst: VCodeInst;

    /// Lower a single instruction.
    ///
    /// For a branch, this function should not generate the actual branch
    /// instruction. However, it must force any values it needs for the branch
    /// edge (block-param actuals) into registers, because the actual branch
    /// generation (`lower_branch()`) happens *after* any possible merged
    /// out-edge.
    ///
    /// Returns `None` if no lowering for the instruction was found.
    fn lower(&self, ctx: &mut Lower<Self::MInst>, inst: Inst) -> Option<InstOutput>;

    /// Lower a block-terminating group of branches (which together can be seen
    /// as one N-way branch), given a vcode MachLabel for each target.
    ///
    /// Returns `None` if no lowering for the branch was found.
    fn lower_branch(
        &self,
        ctx: &mut Lower<Self::MInst>,
        inst: Inst,
        targets: &[MachLabel],
    ) -> Option<()>;

    /// A bit of a hack: give a fixed register that always holds the result of a
    /// `get_pinned_reg` instruction, if known. This allows elision of moves
    /// into the associated vreg, instead using the real reg directly.
    fn maybe_pinned_reg(&self) -> Option<Reg> {
        None
    }

    /// The type of state carried between `check_fact` invocations.
    type FactFlowState: Default + Clone + Debug;

    /// Check any facts about an instruction, given VCode with facts
    /// on VRegs. Takes mutable `VCode` so that it can propagate some
    /// kinds of facts automatically.
    fn check_fact(
        &self,
        _ctx: &FactContext<'_>,
        _vcode: &mut VCode<Self::MInst>,
        _inst: InsnIndex,
        _state: &mut Self::FactFlowState,
    ) -> PccResult<()> {
        Err(PccError::UnimplementedBackend)
    }
}

/// Machine-independent lowering driver / machine-instruction container. Maintains a correspondence
/// from original Inst to MachInsts.
pub struct Lower<'func, I: VCodeInst> {
    /// The function to lower.
    f: &'func Function,

    /// Lowered machine instructions.
    vcode: VCodeBuilder<I>,

    /// VReg allocation context, given to the vcode field at build time to finalize the vcode.
    vregs: VRegAllocator<I>,

    /// Mapping from `Value` (SSA value in IR) to virtual register.
    value_regs: SecondaryMap<Value, ValueRegs<Reg>>,

    /// sret registers, if needed.
    sret_reg: Option<ValueRegs<Reg>>,

    /// Instruction colors at block exits. From this map, we can recover all
    /// instruction colors by scanning backward from the block end and
    /// decrementing on any color-changing (side-effecting) instruction.
    block_end_colors: SecondaryMap<Block, InstColor>,

    /// Instruction colors at side-effecting ops. This is the *entry* color,
    /// i.e., the version of global state that exists before an instruction
    /// executes. For each side-effecting instruction, the *exit* color is its
    /// entry color plus one.
    side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,

    /// Current color as we scan during lowering. While we are lowering an
    /// instruction, this is equal to the color *at entry to* the instruction.
    cur_scan_entry_color: Option<InstColor>,

    /// Current instruction as we scan during lowering.
    cur_inst: Option<Inst>,

    /// Instruction constant values, if known.
    inst_constants: FxHashMap<Inst, u64>,

    /// Use-counts per SSA value, as counted in the input IR. These
    /// are "coarsened", in the abstract-interpretation sense: we only
    /// care about "0, 1, many" states, as this is all we need and
    /// this lets us do an efficient fixpoint analysis.
    ///
    /// See doc comment on `ValueUseState` for more details.
    value_ir_uses: SecondaryMap<Value, ValueUseState>,

    /// Actual uses of each SSA value so far, incremented while lowering.
    value_lowered_uses: SecondaryMap<Value, u32>,

    /// Effectful instructions that have been sunk; they are not codegen'd at
    /// their original locations.
    inst_sunk: FxHashSet<Inst>,

    /// Instructions collected for the CLIF inst in progress, in forward order.
    ir_insts: Vec<I>,

    /// The register to use for GetPinnedReg, if any, on this architecture.
    pinned_reg: Option<Reg>,

    /// Compilation flags.
    flags: Flags,
}

/// How is a value used in the IR?
///
/// This can be seen as a coarsening of an integer count. We only need
/// distinct states for zero, one, or many.
///
/// This analysis deserves further explanation. The basic idea is that
/// we want to allow instruction lowering to know whether a value that
/// an instruction references is *only* referenced by that one use, or
/// by others as well. This is necessary to know when we might want to
/// move a side-effect: we cannot, for example, duplicate a load, so
/// we cannot let instruction lowering match a load as part of a
/// subpattern and potentially incorporate it.
///
/// Note that a lot of subtlety comes into play once we have
/// *indirect* uses. The classical example of this in our development
/// history was the x86 compare instruction, which is incorporated
/// into flags users (e.g. `selectif`, `trueif`, branches) and can
/// subsequently incorporate loads, or at least we would like it
/// to. However, danger awaits: the compare might be the only user of
/// a load, so we might think we can just move the load (and nothing
/// is duplicated -- success!), except that the compare itself is
/// codegen'd in multiple places, where it is incorporated as a
/// subpattern itself.
///
/// So we really want a notion of "unique all the way along the
/// matching path". Rust's `&T` and `&mut T` offer a partial analogy
/// to the semantics that we want here: we want to know when we've
/// matched a unique use of an instruction, and that instruction's
/// unique use of another instruction, etc, just as `&mut T` can only
/// be obtained by going through a chain of `&mut T`. If one has a
/// `&T` to a struct containing `&mut T` (one of several uses of an
/// instruction that itself has a unique use of an instruction), one
/// can only get a `&T` (one can only get a "I am one of several users
/// of this instruction" result).
///
/// We could track these paths, either dynamically as one "looks up the operand
/// tree" or precomputed. But the former requires state and means that the
/// `Lower` API carries that state implicitly, which we'd like to avoid if we
/// can. And the latter implies O(n^2) storage: it is an all-pairs property (is
/// inst `i` unique from the point of view of `j`).
///
/// To make matters even a little more complex still, a value that is
/// not uniquely used when initially viewing the IR can *become*
/// uniquely used, at least as a root allowing further unique uses of
/// e.g. loads to merge, if no other instruction actually merges
/// it. To be more concrete, if we have `v1 := load; v2 := op v1; v3
/// := op v2; v4 := op v2` then `v2` is non-uniquely used, so from the
/// point of view of lowering `v4` or `v3`, we cannot merge the load
/// at `v1`. But if we decide just to use the assigned register for
/// `v2` at both `v3` and `v4`, then we only actually codegen `v2`
/// once, so it *is* a unique root at that point and we *can* merge
/// the load.
///
/// Note also that the color scheme is not sufficient to give us this
/// information, for various reasons: reasoning about side-effects
/// does not tell us about potential duplication of uses through pure
/// ops.
///
/// To keep things simple and avoid error-prone lowering APIs that
/// would extract more information about whether instruction merging
/// happens or not (we don't have that info now, and it would be
/// difficult to refactor to get it and make that refactor 100%
/// correct), we give up on the above "can become unique if not
/// actually merged" point. Instead, we compute a
/// transitive-uniqueness. That is what this enum represents.
///
/// There is one final caveat as well to the result of this analysis. Notably,
/// we define some instructions to be "root" instructions, which means that we
/// assume they will always be codegen'd at the root of a matching tree, and not
/// matched. (This comes with the caveat that we actually enforce this property
/// by making them "opaque" to subtree matching in
/// `get_value_as_source_or_const`). Because they will always be codegen'd once,
/// they in some sense "reset" multiplicity: these root instructions can be used
/// many times, but because their result(s) are only computed once, they only
/// use their inputs once.
///
/// We currently define all multi-result instructions to be "root" instructions,
/// because it is too complex to reason about matching through them, and they
/// cause too-coarse-grained approximation of multiplicity otherwise: the
/// analysis would have to assume (as it used to!) that they are always
/// multiply-used, simply because they have multiple outputs even if those
/// outputs are used only once.
///
/// In the future we could define other instructions to be "root" instructions
/// as well, if we make the corresponding change to get_value_as_source_or_const
/// as well.
///
/// To define `ValueUseState` more plainly: a value is `Unused` if no references
/// exist to it; `Once` if only one other op refers to it, *and* that other op
/// is `Unused` or `Once`; and `Multiple` otherwise. In other words, `Multiple`
/// is contagious (except through root instructions): even if an op's result
/// value is directly used only once in the CLIF, that value is `Multiple` if
/// the op that uses it is itself used multiple times (hence could be codegen'd
/// multiple times). In brief, this analysis tells us whether, if every op
/// merged all of its operand tree, a given op could be codegen'd in more than
/// one place.
///
/// To compute this, we first consider direct uses. At this point
/// `Unused` answers are correct, `Multiple` answers are correct, but
/// some `Once`s may change to `Multiple`s. Then we propagate
/// `Multiple` transitively using a workqueue/fixpoint algorithm.
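///
/// A small worked sketch (hypothetical CLIF):
///
/// ```text
/// v1 = load.i64 v0    ; one direct use (v2), yet Multiple transitively
/// v2 = iadd v1, v5    ; two direct uses (v3 and v4): Multiple
/// v3 = isub v2, v6
/// v4 = imul v2, v7
/// ```
///
/// `v2` is `Multiple` because it has two direct uses; since lowerings of
/// `v3` and `v4` could each merge the `iadd` (and, through it, the load),
/// `v1` is propagated to `Multiple` as well despite its single direct use.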
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ValueUseState {
    /// Not used at all.
    Unused,
    /// Used exactly once.
    Once,
    /// Used multiple times.
    Multiple,
}

impl ValueUseState {
    /// Add one use.
    fn inc(&mut self) {
        let new = match self {
            Self::Unused => Self::Once,
            Self::Once | Self::Multiple => Self::Multiple,
        };
        *self = new;
    }
}

/// Notion of "relocation distance". This gives an estimate of how far away a symbol will be from a
/// reference.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RelocDistance {
    /// Target of relocation is "nearby". The threshold for this is fuzzy but should be interpreted
    /// as approximately "within the compiled output of one module"; e.g., within AArch64's +/-
    /// 128MB offset. If unsure, use `Far` instead.
    Near,
    /// Target of relocation could be anywhere in the address space.
    Far,
}

impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Prepare a new lowering context for the given IR function.
    pub fn new(
        f: &'func Function,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        sigs: SigSet,
        flags: Flags,
    ) -> CodegenResult<Self> {
        let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
        let vcode = VCodeBuilder::new(
            sigs,
            abi,
            emit_info,
            block_order,
            constants,
            VCodeBuildDirection::Backward,
        );

        // We usually need two VRegs per instruction result, plus extras for
        // various temporaries, but two per Value is a good starting point.
        let mut vregs = VRegAllocator::with_capacity(f.dfg.num_values() * 2);

        let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());

        // Assign a vreg to each block param and each inst result.
        for bb in f.layout.blocks() {
            for &param in f.dfg.block_params(bb) {
                let ty = f.dfg.value_type(param);
                if value_regs[param].is_invalid() {
                    let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[param].clone())?;
                    value_regs[param] = regs;
                    trace!("bb {} param {}: regs {:?}", bb, param, regs);
                }
            }
            for inst in f.layout.block_insts(bb) {
                for &result in f.dfg.inst_results(inst) {
                    let ty = f.dfg.value_type(result);
                    if value_regs[result].is_invalid() && !ty.is_invalid() {
                        let regs = vregs.alloc_with_maybe_fact(ty, f.dfg.facts[result].clone())?;
                        value_regs[result] = regs;
                        trace!(
                            "bb {} inst {} ({:?}): result {} regs {:?}",
                            bb,
                            inst,
                            f.dfg.insts[inst],
                            result,
                            regs,
                        );
                    }
                }
            }
        }

        // Find the sret register, if it's used.
        let mut sret_param = None;
        for ret in vcode.abi().signature().returns.iter() {
            if ret.purpose == ArgumentPurpose::StructReturn {
                let entry_bb = f.stencil.layout.entry_block().unwrap();
                for (&param, sig_param) in f
                    .dfg
                    .block_params(entry_bb)
                    .iter()
                    .zip(vcode.abi().signature().params.iter())
                {
                    if sig_param.purpose == ArgumentPurpose::StructReturn {
                        assert!(sret_param.is_none());
                        sret_param = Some(param);
                    }
                }

                assert!(sret_param.is_some());
            }
        }

        let sret_reg = sret_param.map(|param| {
            let regs = value_regs[param];
            assert!(regs.len() == 1);
            regs
        });

        // Compute instruction colors, find constant instructions, and find instructions with
        // side-effects, in one combined pass.
        let mut cur_color = 0;
        let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
        let mut side_effect_inst_entry_colors = FxHashMap::default();
        let mut inst_constants = FxHashMap::default();
        for bb in f.layout.blocks() {
            cur_color += 1;
            for inst in f.layout.block_insts(bb) {
                let side_effect = has_lowering_side_effect(f, inst);

                trace!("bb {} inst {} has color {}", bb, inst, cur_color);
                if side_effect {
                    side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
                    trace!(" -> side-effecting; incrementing color for next inst");
                    cur_color += 1;
                }

                // Determine if this is a constant; if so, add to the table.
                if let Some(c) = is_constant_64bit(f, inst) {
                    trace!(" -> constant: {}", c);
                    inst_constants.insert(inst, c);
                }
            }

            block_end_colors[bb] = InstColor::new(cur_color);
        }

        let value_ir_uses = compute_use_states(f, sret_param);

        Ok(Lower {
            f,
            vcode,
            vregs,
            value_regs,
            sret_reg,
            block_end_colors,
            side_effect_inst_entry_colors,
            inst_constants,
            value_ir_uses,
            value_lowered_uses: SecondaryMap::default(),
            inst_sunk: FxHashSet::default(),
            cur_scan_entry_color: None,
            cur_inst: None,
            ir_insts: vec![],
            pinned_reg: None,
            flags,
        })
    }

    /// Get the ABI signature set.
    pub fn sigs(&self) -> &SigSet {
        self.vcode.sigs()
    }

    /// Get the ABI signature set, mutably.
    pub fn sigs_mut(&mut self) -> &mut SigSet {
        self.vcode.sigs_mut()
    }

    fn gen_arg_setup(&mut self) {
        if let Some(entry_bb) = self.f.layout.entry_block() {
            trace!(
                "gen_arg_setup: entry BB {} args are:\n{:?}",
                entry_bb,
                self.f.dfg.block_params(entry_bb)
            );

            for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
                if self.value_ir_uses[*param] == ValueUseState::Unused {
                    continue;
                }
                let regs = writable_value_regs(self.value_regs[*param]);
                for insn in self
                    .vcode
                    .vcode
                    .abi
                    .gen_copy_arg_to_regs(&self.vcode.vcode.sigs, i, regs, &mut self.vregs)
                    .into_iter()
                {
                    self.emit(insn);
                }
            }
            if let Some(insn) = self
                .vcode
                .vcode
                .abi
                .gen_retval_area_setup(&self.vcode.vcode.sigs, &mut self.vregs)
            {
                self.emit(insn);
            }

            // The `args` instruction below must come first. Finish
            // the current "IR inst" (with a default source location,
            // as for other special instructions inserted during
            // lowering) and continue the scan backward.
            self.finish_ir_inst(Default::default());

            if let Some(insn) = self.vcode.vcode.abi.take_args() {
                self.emit(insn);
            }
        }
    }

    /// Generate the return instruction.
    pub fn gen_return(&mut self, rets: Vec<ValueRegs<Reg>>) {
        let mut out_rets = vec![];

        let mut rets = rets.into_iter();
        for (i, ret) in self
            .abi()
            .signature()
            .returns
            .clone()
            .into_iter()
            .enumerate()
        {
            let regs = if ret.purpose == ArgumentPurpose::StructReturn {
                self.sret_reg.unwrap()
            } else {
                rets.next().unwrap()
            };

            let (regs, insns) = self.vcode.abi().gen_copy_regs_to_retval(
                self.vcode.sigs(),
                i,
                regs,
                &mut self.vregs,
            );
            out_rets.extend(regs);
            for insn in insns {
                self.emit(insn);
            }
        }

        // Hack: generate a virtual instruction that uses vmctx in
        // order to keep it alive for the duration of the function,
        // for the benefit of debuginfo.
        if self.f.dfg.values_labels.is_some() {
            if let Some(vmctx_val) = self.f.special_param(ArgumentPurpose::VMContext) {
                if self.value_ir_uses[vmctx_val] != ValueUseState::Unused {
                    let vmctx_reg = self.value_regs[vmctx_val].only_reg().unwrap();
                    self.emit(I::gen_dummy_use(vmctx_reg));
                }
            }
        }

        let inst = self.abi().gen_rets(out_rets);
        self.emit(inst);
    }

    /// Has this instruction been sunk to a use-site (i.e., away from its
    /// original location)?
    fn is_inst_sunk(&self, inst: Inst) -> bool {
        self.inst_sunk.contains(&inst)
    }

    // Is any result of this instruction needed?
    fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
        self.f
            .dfg
            .inst_results(inst)
            .iter()
            .any(|&result| self.value_lowered_uses[result] > 0)
    }

    fn lower_clif_block<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        block: Block,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<()> {
        self.cur_scan_entry_color = Some(self.block_end_colors[block]);
        // Lowering loop:
        // - For each non-branch instruction, in reverse order:
        //   - If side-effecting (load, store, branch/call/return,
        //     possible trap), or if used outside of this block, or if
        //     demanded by another inst, then lower.
        //
        // That's it! Lowering of side-effecting ops will force all *needed*
        // (live) non-side-effecting ops to be lowered at the right places, via
        // the `use_input_reg()` callback on the `Lower` (that's us). That's
        // because `use_input_reg()` sets the eager/demand bit for any insts
        // whose result registers are used.
        //
        // We set the VCodeBuilder to "backward" mode, so we emit
        // blocks in reverse order wrt the BlockIndex sequence, and
        // emit instructions in reverse order within blocks.  Because
        // the machine backend calls `ctx.emit()` in forward order, we
        // collect per-IR-inst lowered instructions in `ir_insts`,
        // then reverse these and append to the VCode at the end of
        // each IR instruction.
        for inst in self.f.layout.block_insts(block).rev() {
            let data = &self.f.dfg.insts[inst];
            let has_side_effect = has_lowering_side_effect(self.f, inst);
            // If this inst has been sunk to another location, skip it.
            if self.is_inst_sunk(inst) {
                continue;
            }
            // Are any outputs used at least once?
            let value_needed = self.is_any_inst_result_needed(inst);
            trace!(
                "lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
                block,
                inst,
                data,
                data.opcode().is_branch(),
                has_side_effect,
                value_needed,
            );

            // Update scan state to color prior to this inst (as we are scanning
            // backward).
            self.cur_inst = Some(inst);
            if has_side_effect {
                let entry_color = *self
                    .side_effect_inst_entry_colors
                    .get(&inst)
                    .expect("every side-effecting inst should have a color-map entry");
                self.cur_scan_entry_color = Some(entry_color);
            }

            // Skip lowering branches; these are handled separately
            // (see `lower_clif_branches()` below).
            if self.f.dfg.insts[inst].opcode().is_branch() {
                continue;
            }

            // Normal instruction: codegen if the instruction is side-effecting
            // or any of its outputs is used.
            if has_side_effect || value_needed {
                trace!("lowering: inst {}: {}", inst, self.f.dfg.display_inst(inst));
                let temp_regs = backend.lower(self, inst).unwrap_or_else(|| {
                    let ty = if self.num_outputs(inst) > 0 {
                        Some(self.output_ty(inst, 0))
                    } else {
                        None
                    };
                    panic!(
                        "should be implemented in ISLE: inst = `{}`, type = `{:?}`",
                        self.f.dfg.display_inst(inst),
                        ty
                    )
                });

                // The ISLE generated code emits its own registers to define the
                // instruction's lowered values in. However, other instructions
                // that use this SSA value will be lowered assuming that the value
                // is generated into a pre-assigned, different, register.
                //
                // To connect the two, we set up "aliases" in the VCodeBuilder
                // that apply when it is building the Operand table for the
                // regalloc to use. These aliases effectively rewrite any use of
                // the pre-assigned register to the register that was returned by
                // the ISLE lowering logic.
                let results = self.f.dfg.inst_results(inst);
                debug_assert_eq!(temp_regs.len(), results.len());
                for (regs, &result) in temp_regs.iter().zip(results) {
                    let dsts = self.value_regs[result];
                    debug_assert_eq!(regs.len(), dsts.len());
                    for (&dst, &temp) in dsts.regs().iter().zip(regs.regs()) {
                        trace!("set vreg alias: {result:?} = {dst:?}, lowering = {temp:?}");
                        self.vregs.set_vreg_alias(dst, temp);
                    }
                }
            }

            let start = self.vcode.vcode.num_insts();
            let loc = self.srcloc(inst);
            self.finish_ir_inst(loc);

            // If the instruction had a user stack map, forward it from the CLIF
            // to the vcode.
            if let Some(entries) = self.f.dfg.user_stack_map_entries(inst) {
                let end = self.vcode.vcode.num_insts();
                debug_assert!(end > start);
                debug_assert_eq!(
                    (start..end)
                        .filter(|i| self.vcode.vcode[InsnIndex::new(*i)].is_safepoint())
                        .count(),
                    1
                );
                for i in start..end {
                    let iix = InsnIndex::new(i);
                    if self.vcode.vcode[iix].is_safepoint() {
                        trace!(
                            "Adding user stack map from clif\n\n\
                                 {inst:?} `{}`\n\n\
                             to vcode\n\n\
                                 {iix:?} `{}`",
                            self.f.dfg.display_inst(inst),
                            &self.vcode.vcode[iix].pretty_print_inst(&mut Default::default()),
                        );
                        self.vcode
                            .add_user_stack_map(BackwardsInsnIndex::new(iix.index()), entries);
                        break;
                    }
                }
            }

            // maybe insert random instruction
            if ctrl_plane.get_decision() {
                if ctrl_plane.get_decision() {
                    let imm: u64 = ctrl_plane.get_arbitrary();
                    let reg = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    I::gen_imm_u64(imm, reg).map(|inst| self.emit(inst));
                } else {
                    let imm: f64 = ctrl_plane.get_arbitrary();
                    let tmp = self.alloc_tmp(crate::ir::types::I64).regs()[0];
                    let reg = self.alloc_tmp(crate::ir::types::F64).regs()[0];
                    for inst in I::gen_imm_f64(imm, tmp, reg) {
                        self.emit(inst);
                    }
                }
            }

            // Emit value-label markers if needed, to later recover
            // debug mappings. This must happen before the instruction
            // (so after we emit, in bottom-to-top pass).
            self.emit_value_label_markers_for_inst(inst);
        }

        // Add the block params to this block.
        self.add_block_params(block)?;

        self.cur_scan_entry_color = None;
        Ok(())
    }

    fn add_block_params(&mut self, block: Block) -> CodegenResult<()> {
        for &param in self.f.dfg.block_params(block) {
            for &reg in self.value_regs[param].regs() {
                let vreg = reg.to_virtual_reg().unwrap();
                self.vcode.add_block_param(vreg);
            }
        }
        Ok(())
    }

    fn get_value_labels<'a>(&'a self, val: Value, depth: usize) -> Option<&'a [ValueLabelStart]> {
        if let Some(ref values_labels) = self.f.dfg.values_labels {
            debug_assert!(self.f.dfg.value_is_real(val));
            trace!(
                "get_value_labels: val {} -> {:?}",
                val,
                values_labels.get(&val)
            );
            match values_labels.get(&val) {
                Some(&ValueLabelAssignments::Starts(ref list)) => Some(&list[..]),
                Some(&ValueLabelAssignments::Alias { value, .. }) if depth < 10 => {
                    self.get_value_labels(value, depth + 1)
                }
                _ => None,
            }
        } else {
            None
        }
    }

    fn emit_value_label_marks_for_value(&mut self, val: Value) {
        let regs = self.value_regs[val];
        if regs.len() > 1 {
            return;
        }
        let reg = regs.only_reg().unwrap();

        if let Some(label_starts) = self.get_value_labels(val, 0) {
            let labels = label_starts
                .iter()
                .map(|&ValueLabelStart { label, .. }| label)
                .collect::<FxHashSet<_>>();
            for label in labels {
                trace!(
                    "value labeling: defines val {:?} -> reg {:?} -> label {:?}",
                    val,
                    reg,
                    label,
                );
                self.vcode.add_value_label(reg, label);
            }
        }
    }

    fn emit_value_label_markers_for_inst(&mut self, inst: Inst) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!(
            "value labeling: srcloc {}: inst {}",
            self.srcloc(inst),
            inst
        );
        for &val in self.f.dfg.inst_results(inst) {
            self.emit_value_label_marks_for_value(val);
        }
    }

    fn emit_value_label_markers_for_block_args(&mut self, block: Block) {
        if self.f.dfg.values_labels.is_none() {
            return;
        }

        trace!("value labeling: block {}", block);
        for &arg in self.f.dfg.block_params(block) {
            self.emit_value_label_marks_for_value(arg);
        }
        self.finish_ir_inst(Default::default());
    }

    fn finish_ir_inst(&mut self, loc: RelSourceLoc) {
        // The VCodeBuilder builds in reverse order (and reverses at
        // the end), but `ir_insts` is in forward order, so reverse
        // it.
        for inst in self.ir_insts.drain(..).rev() {
            self.vcode.push(inst, loc);
        }
    }

    fn finish_bb(&mut self) {
        self.vcode.end_bb();
    }

    fn lower_clif_branches<B: LowerBackend<MInst = I>>(
        &mut self,
        backend: &B,
        // Lowered block index:
        bindex: BlockIndex,
        // Original CLIF block:
        block: Block,
        branch: Inst,
        targets: &[MachLabel],
    ) -> CodegenResult<()> {
        trace!(
            "lower_clif_branches: block {} branch {:?} targets {:?}",
            block,
            branch,
            targets,
        );
        // When considering code-motion opportunities, consider the current
        // program point to be this branch.
        self.cur_inst = Some(branch);

        // Lower the branch in ISLE.
        backend
            .lower_branch(self, branch, targets)
            .unwrap_or_else(|| {
                panic!(
                    "should be implemented in ISLE: branch = `{}`",
                    self.f.dfg.display_inst(branch),
                )
            });
        let loc = self.srcloc(branch);
        self.finish_ir_inst(loc);
        // Add block param outputs for current block.
        self.lower_branch_blockparam_args(bindex);
        Ok(())
    }

    fn lower_branch_blockparam_args(&mut self, block: BlockIndex) {
        // TODO: why not make `block_order` public?
        for succ_idx in 0..self.vcode.block_order().succ_indices(block).1.len() {
            // Avoid immutable borrow by explicitly indexing.
            let (opt_inst, succs) = self.vcode.block_order().succ_indices(block);
            let inst = opt_inst.expect("lower_branch_blockparam_args called on a critical edge!");
            let succ = succs[succ_idx];

            // The use of `succ_idx` to index `branch_destination` is valid on the assumption that
            // the traversal order defined in `visit_block_succs` mirrors the order returned by
            // `branch_destination`. If that assumption is violated, the branch targets returned
            // here will not match the clif.
            let branches = self.f.dfg.insts[inst].branch_destination(&self.f.dfg.jump_tables);
            let branch_args = branches[succ_idx].args_slice(&self.f.dfg.value_lists);

            let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];
            for &arg in branch_args {
                debug_assert!(self.f.dfg.value_is_real(arg));
                let regs = self.put_value_in_regs(arg);
                branch_arg_vregs.extend_from_slice(regs.regs());
            }
            self.vcode.add_succ(succ, &branch_arg_vregs[..]);
        }
    }

    fn collect_branches_and_targets(
        &self,
        bindex: BlockIndex,
        _bb: Block,
        targets: &mut SmallVec<[MachLabel; 2]>,
    ) -> Option<Inst> {
        targets.clear();
        let (opt_inst, succs) = self.vcode.block_order().succ_indices(bindex);
        targets.extend(succs.iter().map(|succ| MachLabel::from_block(*succ)));
        opt_inst
    }

    /// Lower the function.
    pub fn lower<B: LowerBackend<MInst = I>>(
        mut self,
        backend: &B,
        ctrl_plane: &mut ControlPlane,
    ) -> CodegenResult<VCode<I>> {
        trace!("about to lower function: {:?}", self.f);

        self.vcode.init_retval_area(&mut self.vregs)?;

        // Get the pinned reg here (we only parameterize this function on `B`,
        // not the whole `Lower` impl).
        self.pinned_reg = backend.maybe_pinned_reg();

        self.vcode.set_entry(BlockIndex::new(0));

        // Reused vectors for branch lowering.
        let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();

        // get a copy of the lowered order; we hold this separately because we
        // need a mut ref to the vcode to mutate it below.
        let lowered_order: SmallVec<[LoweredBlock; 64]> = self
            .vcode
            .block_order()
            .lowered_order()
            .iter()
            .cloned()
            .collect();

        // Main lowering loop over lowered blocks.
        for (bindex, lb) in lowered_order.iter().enumerate().rev() {
            let bindex = BlockIndex::new(bindex);

            // Lower the block body in reverse order (see comment in
            // `lower_clif_block()` for rationale).

            // End branches.
            if let Some(bb) = lb.orig_block() {
                if let Some(branch) = self.collect_branches_and_targets(bindex, bb, &mut targets) {
                    self.lower_clif_branches(backend, bindex, bb, branch, &targets)?;
                    self.finish_ir_inst(self.srcloc(branch));
                }
            } else {
                // If no orig block, this must be a pure edge block;
                // get the successor and emit a jump. Add block params
                // according to the one successor, and pass them
                // through; note that the successor must have an
                // original block.
                let (_, succs) = self.vcode.block_order().succ_indices(bindex);
                let succ = succs[0];

                let orig_succ = lowered_order[succ.index()];
                let orig_succ = orig_succ
                    .orig_block()
                    .expect("Edge block succ must be body block");

                let mut branch_arg_vregs: SmallVec<[Reg; 16]> = smallvec![];
                for ty in self.f.dfg.block_param_types(orig_succ) {
                    let regs = self.vregs.alloc(ty)?;
                    for &reg in regs.regs() {
                        branch_arg_vregs.push(reg);
                        let vreg = reg.to_virtual_reg().unwrap();
                        self.vcode.add_block_param(vreg);
                    }
                }
                self.vcode.add_succ(succ, &branch_arg_vregs[..]);

                self.emit(I::gen_jump(MachLabel::from_block(succ)));
                self.finish_ir_inst(Default::default());
            }

            // Original block body.
            if let Some(bb) = lb.orig_block() {
                self.lower_clif_block(backend, bb, ctrl_plane)?;
                self.emit_value_label_markers_for_block_args(bb);
            }

            if bindex.index() == 0 {
                // Set up the function with arg vreg inits.
                self.gen_arg_setup();
                self.finish_ir_inst(Default::default());
            }

            self.finish_bb();

            // Check for any deferred vreg-temp allocation errors, and
            // bubble one up at this time if it exists.
            if let Some(e) = self.vregs.take_deferred_error() {
                return Err(e);
            }
        }

        // Now that we've emitted all instructions into the
        // VCodeBuilder, let's build the VCode.
        trace!(
            "built vcode:\n{:?}Backwards {:?}",
            &self.vregs,
            &self.vcode.vcode
        );
        let vcode = self.vcode.build(self.vregs);

        Ok(vcode)
    }
}

/// Pre-analysis: compute `value_ir_uses`. See comment on
/// `ValueUseState` for a description of what this analysis
/// computes.
fn compute_use_states(
    f: &Function,
    sret_param: Option<Value>,
) -> SecondaryMap<Value, ValueUseState> {
    // We perform the analysis without recursion, so we don't
    // overflow the stack on long chains of ops in the input.
    //
    // This is sort of a hybrid of a "shallow use-count" pass and
    // a DFS. We iterate over all instructions and mark their args
    // as used. However when we increment a use-count to
    // "Multiple" we push its args onto the stack and do a DFS,
    // immediately marking the whole dependency tree as
    // Multiple. Doing both (shallow use-counting over all insts,
    // and deep Multiple propagation) lets us trim both
    // traversals, stopping recursion when a node is already at
    // the appropriate state.
    //
    // In particular, note that the *coarsening* into {Unused,
    // Once, Multiple} is part of what makes this pass more
    // efficient than a full indirect-use-counting pass.
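    //
    // As a sketch of the transition handling (hypothetical CLIF): in
    //
    //   v1 = load.i64 v0
    //   v2 = iadd v1, v1
    //
    // the second use of `v1` moves it Once -> Multiple; at that point we
    // push the operands of `v1`'s defining load (just `v0`) and DFS
    // downward, marking the whole operand tree Multiple.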

    let mut value_ir_uses = SecondaryMap::with_default(ValueUseState::Unused);

    if let Some(sret_param) = sret_param {
        // There's an implicit use of the struct-return parameter in each
        // copy of the function epilogue, which we count here.
        value_ir_uses[sret_param] = ValueUseState::Multiple;
    }

    // Stack of iterators over Values as we do DFS to mark
    // Multiple-state subtrees. The iterator type is whatever is
    // returned by `uses` below.
    let mut stack: SmallVec<[_; 16]> = smallvec![];

    // Find the args for the inst corresponding to the given value.
    //
    // Note that "root" instructions are skipped here. This means that multiple
    // uses of any result of a multi-result instruction are not considered
    // multiple uses of the operands of a multi-result instruction. This
    // requires tight coupling with `get_value_as_source_or_const` above which
    // is the consumer of the map that this function is producing.
    let uses = |value| {
        trace!(" -> pushing args for {} onto stack", value);
        if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
            if is_value_use_root(f, src_inst) {
                None
            } else {
                Some(f.dfg.inst_values(src_inst))
            }
        } else {
            None
        }
    };

    // Do a DFS through `value_ir_uses` to mark a subtree as
    // Multiple.
    for inst in f
        .layout
        .blocks()
        .flat_map(|block| f.layout.block_insts(block))
    {
        // Iterate over all values used by all instructions, noting an
        // additional use on each operand.
        for arg in f.dfg.inst_values(inst) {
            debug_assert!(f.dfg.value_is_real(arg));
            let old = value_ir_uses[arg];
            value_ir_uses[arg].inc();
            let new = value_ir_uses[arg];
            trace!("arg {} used, old state {:?}, new {:?}", arg, old, new);

            // On transition to Multiple, do DFS.
            if old == ValueUseState::Multiple || new != ValueUseState::Multiple {
                continue;
            }
            if let Some(iter) = uses(arg) {
                stack.push(iter);
            }
            while let Some(iter) = stack.last_mut() {
                if let Some(value) = iter.next() {
                    debug_assert!(f.dfg.value_is_real(value));
                    trace!(" -> DFS reaches {}", value);
                    if value_ir_uses[value] == ValueUseState::Multiple {
                        // Truncate DFS here: no need to go further,
                        // as whole subtree must already be Multiple.
                        // With debug asserts, check one level of
                        // that invariant at least.
                        debug_assert!(uses(value).into_iter().flatten().all(|arg| {
                            debug_assert!(f.dfg.value_is_real(arg));
                            value_ir_uses[arg] == ValueUseState::Multiple
                        }));
                        continue;
                    }
                    value_ir_uses[value] = ValueUseState::Multiple;
                    trace!(" -> became Multiple");
                    if let Some(iter) = uses(value) {
                        stack.push(iter);
                    }
                } else {
                    // Empty iterator, discard.
                    stack.pop();
                }
            }
        }
    }

    value_ir_uses
}

/// Definition of a "root" instruction for the calculation of `ValueUseState`.
///
/// This function calculates whether `inst` is considered a "root" for value-use
/// information. This concept is used to forcibly prevent looking-through the
/// instruction during `get_value_as_source_or_const` as it additionally
/// prevents propagating `Multiple`-used results of the `inst` here to the
/// operands of the instruction.
///
/// Currently this is defined as multi-result instructions. That means that
/// lowerings are never allowed to look through a multi-result instruction to
/// generate patterns. Note that this isn't possible in ISLE today anyway so
/// this isn't currently much of a loss.
///
/// The main purpose of this function is to prevent the operands of a
/// multi-result instruction from being forcibly considered `Multiple`-used
/// regardless of circumstances.
fn is_value_use_root(f: &Function, inst: Inst) -> bool {
    f.dfg.inst_results(inst).len() > 1
}

/// Function-level queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get the `DataFlowGraph` of the function being lowered.
    pub fn dfg(&self) -> &DataFlowGraph {
        &self.f.dfg
    }

    /// Get the `Callee`.
    pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
        self.vcode.abi()
    }

    /// Get the `Callee`, mutably.
    pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
        self.vcode.abi_mut()
    }
}

/// Instruction input/output queries.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get the instdata for a given IR instruction.
    pub fn data(&self, ir_inst: Inst) -> &InstructionData {
        &self.f.dfg.insts[ir_inst]
    }

    /// Get the symbol name, relocation distance estimate, and offset for a
    /// `GlobalValue` defined as a symbol reference.
1203    pub fn symbol_value_data<'b>(
1204        &'b self,
1205        global_value: GlobalValue,
1206    ) -> Option<(&'b ExternalName, RelocDistance, i64)> {
1207        let gvdata = &self.f.global_values[global_value];
1208        match gvdata {
1209            &GlobalValueData::Symbol {
1210                ref name,
1211                ref offset,
1212                colocated,
1213                ..
1214            } => {
1215                let offset = offset.bits();
1216                let dist = if colocated {
1217                    RelocDistance::Near
1218                } else {
1219                    RelocDistance::Far
1220                };
1221                Some((name, dist, offset))
1222            }
1223            _ => None,
1224        }
1225    }
1226
1227    /// Returns the memory flags of a given memory access.
1228    pub fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
1229        match &self.f.dfg.insts[ir_inst] {
1230            &InstructionData::AtomicCas { flags, .. } => Some(flags),
1231            &InstructionData::AtomicRmw { flags, .. } => Some(flags),
1232            &InstructionData::Load { flags, .. }
1233            | &InstructionData::LoadNoOffset { flags, .. }
1234            | &InstructionData::Store { flags, .. } => Some(flags),
1235            &InstructionData::StoreNoOffset { flags, .. } => Some(flags),
1236            _ => None,
1237        }
1238    }
1239
1240    /// Get the source location for a given instruction.
1241    pub fn srcloc(&self, ir_inst: Inst) -> RelSourceLoc {
1242        self.f.rel_srclocs()[ir_inst]
1243    }
1244
1245    /// Get the number of inputs to the given IR instruction. This is a count only of the Value
1246    /// arguments to the instruction: block arguments will not be included in this count.
1247    pub fn num_inputs(&self, ir_inst: Inst) -> usize {
1248        self.f.dfg.inst_args(ir_inst).len()
1249    }
1250
1251    /// Get the number of outputs to the given IR instruction.
1252    pub fn num_outputs(&self, ir_inst: Inst) -> usize {
1253        self.f.dfg.inst_results(ir_inst).len()
1254    }
1255
1256    /// Get the type for an instruction's input.
1257    pub fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
1258        self.value_ty(self.input_as_value(ir_inst, idx))
1259    }
1260
1261    /// Get the type for a value.
1262    pub fn value_ty(&self, val: Value) -> Type {
1263        self.f.dfg.value_type(val)
1264    }
1265
1266    /// Get the type for an instruction's output.
1267    pub fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
1268        self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
1269    }
1270
1271    /// Get the value of a constant instruction (`iconst`, etc.) as a 64-bit
1272    /// value, if possible.
1273    pub fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
1274        self.inst_constants.get(&ir_inst).map(|&c| {
1275            // The upper bits must be zero, enforced during legalization and by
1276            // the CLIF verifier.
1277            debug_assert_eq!(c, {
1278                let input_size = self.output_ty(ir_inst, 0).bits() as u64;
1279                let shift = 64 - input_size;
1280                (c << shift) >> shift
1281            });
1282            c
1283        })
1284    }
1285
    /// Get the `Value` corresponding to the `idx`th input of the given IR
    /// instruction.
    pub fn input_as_value(&self, ir_inst: Inst, idx: usize) -> Value {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        debug_assert!(self.f.dfg.value_is_real(val));
        val
    }

    /// Get the input as one of two options other than a direct register:
    ///
    /// - An instruction, if it is effect-free or able to sink its effect to
    ///   the current instruction being lowered, has only one output, and, if
    ///   effectful, has this as its only use;
    /// - A constant, if the value is a constant.
    ///
    /// The input may be available in either of these forms, or in neither if
    /// the conditions are not met; in the latter case, use
    /// `put_input_in_regs()` instead to get it in a register.
    ///
    /// If the backend merges the effect of a side-effecting instruction, it
    /// must call `sink_inst()`. When this is called, it indicates that the
    /// effect has been sunk to the current scan location. The sunk
    /// instruction's result(s) must have *no* uses remaining, because it will
    /// not be codegen'd (it has been integrated into the current instruction).
    ///
    /// For more information see [`Lower::get_value_as_source_or_const`].
    pub fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
        let val = self.input_as_value(ir_inst, idx);
        self.get_value_as_source_or_const(val)
    }

    /// Resolves a `Value` definition to the source instruction it came from,
    /// along with whether this is a unique use of that instruction.
    ///
    /// This function is the workhorse of pattern-matching in ISLE, enabling
    /// multiple instructions to be combined into one. It is used implicitly in
    /// patterns such as `(iadd x (iconst y))`, where this function extracts
    /// the `(iconst y)` operand.
    ///
    /// At its core this function is a wrapper around
    /// [`DataFlowGraph::value_def`]. This function applies a filter on top of
    /// that, however, to determine when it is actually safe to "look through"
    /// the `val` definition here and view the underlying instruction. This
    /// protects against duplicating side effects, such as loads, for example.
    ///
    /// Internally this uses the data computed from `compute_use_states` along
    /// with other instruction properties to know what to return.
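    ///
    /// As an illustrative sketch only (the `ctx` and `add_inst` names are
    /// assumed for the example, not taken from this file):
    ///
    /// ```ignore
    /// // While lowering `add_inst`, try to match `(iadd x (iconst y))`.
    /// let rhs = ctx.get_input_as_source_or_const(add_inst, 1);
    /// if let Some(imm) = rhs.constant {
    ///     // The operand is a known constant: emit an add-immediate form.
    /// } else {
    ///     // Otherwise, force the operand into a register.
    ///     let rhs_regs = ctx.put_input_in_regs(add_inst, 1);
    /// }
    /// ```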
    pub fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
        trace!(
            "get_value_as_source_or_const: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
            val,
            self.cur_inst,
            self.cur_scan_entry_color,
        );
        let inst = match self.f.dfg.value_def(val) {
            // OK to merge source instruction if we have a source
            // instruction, and one of these two conditions holds:
            //
            // - It has no side-effects and this instruction is not a "value-use
            //   root" instruction. Instructions which are considered "roots"
            //   for value-use calculations do not have accurate information
            //   known about the `ValueUseState` of their operands. This is
            //   currently done for multi-result instructions to prevent a use
            //   of each result from forcing all operands of the multi-result
            //   instruction to also be `Multiple`. This in turn means that the
            //   `ValueUseState` for operands of a "root" instruction would be
            //   a lie if pattern matching were to look through the multi-result
            //   instruction. As a result, the "look through this instruction"
            //   logic succeeds only if it's not a root instruction.
            //
            // - It has a side-effect and a single output value; that one
            //   output has only one use, directly or indirectly (so it
            //   cannot be duplicated -- see the comment on
            //   `ValueUseState`); and the instruction's color is *one
            //   less than* the current scan color.
            //
            //   This latter set of conditions is testing whether a
            //   side-effecting instruction can sink to the current scan
            //   location; this is possible if the in-color of this inst is
            //   equal to the out-color of the producing inst, so no other
            //   side-effecting ops occur between them (which will only be true
            //   if they are in the same BB, because the color increments at
            //   each BB start).
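            //
            //   For example (colors chosen purely for illustration): if a
            //   `load` has entry color 4 and hence exit color 5, and the
            //   current instruction's scan entry color is also 5, then
            //   4 + 1 == 5 holds and no other side-effecting op lies between
            //   the two, so the load may sink; any intervening side-effecting
            //   op would have incremented the color and broken the equality.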
            //
            //   If it is actually sunk, then in `merge_inst()`, we update the
            //   scan color so that as we scan over the range past which the
            //   instruction was sunk, we allow other instructions (that came
            //   prior to the sunk instruction) to sink.
            ValueDef::Result(src_inst, result_idx) => {
                let src_side_effect = has_lowering_side_effect(self.f, src_inst);
                trace!(" -> src inst {}", src_inst);
                trace!(" -> has lowering side effect: {}", src_side_effect);
                if is_value_use_root(self.f, src_inst) {
                    // If this instruction is a "root instruction" then we
                    // must not look through it to see the definition: the
                    // `ValueUseState` for its operands assumes that this
                    // instruction is generated exactly once, an assumption
                    // that could be violated were we to look through it.
                    trace!(" -> is a root instruction");
                    InputSourceInst::None
                } else if !src_side_effect {
                    // Otherwise, if this instruction has no side effects and
                    // the value is used only once, we can look through it with
                    // a "unique" tag. A non-unique `Use` is returned for other
                    // values so that consumers know how the value is computed,
                    // but also know that the producing instruction cannot be
                    // omitted.
                    if self.value_ir_uses[val] == ValueUseState::Once {
                        InputSourceInst::UniqueUse(src_inst, result_idx)
                    } else {
                        InputSourceInst::Use(src_inst, result_idx)
                    }
                } else {
                    // Side-effect: test whether this is the only use of the
                    // only result of the instruction, and whether colors allow
                    // the code-motion.
                    trace!(
                        " -> side-effecting op {} for val {}: use state {:?}",
                        src_inst,
                        val,
                        self.value_ir_uses[val]
                    );
                    if self.cur_scan_entry_color.is_some()
                        && self.value_ir_uses[val] == ValueUseState::Once
                        && self.num_outputs(src_inst) == 1
                        && self
                            .side_effect_inst_entry_colors
                            .get(&src_inst)
                            .unwrap()
                            .get()
                            + 1
                            == self.cur_scan_entry_color.unwrap().get()
                    {
                        InputSourceInst::UniqueUse(src_inst, 0)
                    } else {
                        InputSourceInst::None
                    }
                }
            }
            _ => InputSourceInst::None,
        };
        let constant = inst.as_inst().and_then(|(inst, _)| self.get_constant(inst));

        NonRegInput { inst, constant }
    }

    /// Increment the reference count for the Value, ensuring that it gets lowered.
    pub fn increment_lowered_uses(&mut self, val: Value) {
        self.value_lowered_uses[val] += 1;
    }

    /// Put the `idx`th input into register(s) and return the assigned register.
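    ///
    /// A minimal sketch (the `ctx` and `add_inst` names are illustrative):
    ///
    /// ```ignore
    /// // Lower a simple `iadd` whose operands each fit in a single register.
    /// let lhs = ctx.put_input_in_regs(add_inst, 0).only_reg().unwrap();
    /// let rhs = ctx.put_input_in_regs(add_inst, 1).only_reg().unwrap();
    /// ```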
    pub fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
        let val = self.f.dfg.inst_args(ir_inst)[idx];
        self.put_value_in_regs(val)
    }

    /// Put the given value into register(s) and return the assigned register.
    pub fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
        debug_assert!(self.f.dfg.value_is_real(val));
        trace!("put_value_in_regs: val {}", val);

        if let Some(inst) = self.f.dfg.value_def(val).inst() {
            assert!(!self.inst_sunk.contains(&inst));
        }

        let regs = self.value_regs[val];
        trace!(" -> regs {:?}", regs);
        assert!(regs.is_valid());

        self.value_lowered_uses[val] += 1;

        regs
    }
}

/// Codegen primitives: allocate temps, emit instructions, set result registers,
/// ask for an input to be gen'd into a register.
impl<'func, I: VCodeInst> Lower<'func, I> {
    /// Get a new temp.
    pub fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
        writable_value_regs(self.vregs.alloc_with_deferred_error(ty))
    }

    /// Emit a machine instruction.
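    ///
    /// A minimal sketch combining `alloc_tmp` and `emit` (assuming a backend
    /// whose `I::gen_move` is in scope, as used later in this file; `ctx`,
    /// `src`, and `ty` are illustrative):
    ///
    /// ```ignore
    /// let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
    /// ctx.emit(I::gen_move(tmp, src, ty));
    /// ```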
    pub fn emit(&mut self, mach_inst: I) {
        trace!("emit: {:?}", mach_inst);
        self.ir_insts.push(mach_inst);
    }

    /// Indicate that the side-effect of an instruction has been sunk to the
    /// current scan location. This should only be done when the instruction's
    /// original results are not used (i.e., `put_input_in_regs` is not invoked
    /// for the input produced by the sunk instruction); otherwise the
    /// side-effect would occur twice.
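    ///
    /// A hypothetical sketch (names illustrative): merging a `load` into a
    /// combined load-and-op machine instruction, then sinking the load:
    ///
    /// ```ignore
    /// let input = ctx.get_input_as_source_or_const(add_inst, 0);
    /// if let InputSourceInst::UniqueUse(load_inst, 0) = input.inst {
    ///     // ... emit a machine instruction that performs the load itself ...
    ///     ctx.sink_inst(load_inst);
    /// }
    /// ```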
    pub fn sink_inst(&mut self, ir_inst: Inst) {
        assert!(has_lowering_side_effect(self.f, ir_inst));
        assert!(self.cur_scan_entry_color.is_some());

        for result in self.dfg().inst_results(ir_inst) {
            assert!(self.value_lowered_uses[*result] == 0);
        }

        let sunk_inst_entry_color = self
            .side_effect_inst_entry_colors
            .get(&ir_inst)
            .cloned()
            .unwrap();
        let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
        assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
        self.cur_scan_entry_color = Some(sunk_inst_entry_color);
        self.inst_sunk.insert(ir_inst);
    }

    /// Retrieve immediate data given a handle.
    pub fn get_immediate_data(&self, imm: Immediate) -> &ConstantData {
        self.f.dfg.immediates.get(imm).unwrap()
    }

    /// Retrieve constant data given a handle.
    pub fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
        self.f.dfg.constants.get(constant_handle)
    }

    /// Indicate that a constant should be emitted.
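    ///
    /// A sketch of typical use (the `VCodeConstantData::Generated` variant
    /// and the `ctx`/`handle` names are assumptions for illustration):
    ///
    /// ```ignore
    /// let data = ctx.get_constant_data(handle).clone();
    /// let vconst = ctx.use_constant(VCodeConstantData::Generated(data));
    /// // ... emit a constant-pool load referencing `vconst` ...
    /// ```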
    pub fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
        self.vcode.constants().insert(constant)
    }

    /// Ensure that the value in `reg` is in a virtual reg, copying it into a
    /// new virtual reg if `reg` is a real reg. `ty` describes the type of the
    /// value in `reg`.
    pub fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg {
        if reg.to_virtual_reg().is_some() {
            reg
        } else {
            let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
            self.emit(I::gen_move(new_reg, reg, ty));
            new_reg.to_reg()
        }
    }

    /// Add a range fact to a register, if no other fact is present.
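    ///
    /// For example, a backend that has just zero-extended an 8-bit value into
    /// a 64-bit register might record that the register's value fits in
    /// [0, 255] (sketch; the `ctx` and `extended_reg` names are illustrative):
    ///
    /// ```ignore
    /// ctx.add_range_fact(extended_reg, 64, 0, 0xff);
    /// ```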
    pub fn add_range_fact(&mut self, reg: Reg, bit_width: u16, min: u64, max: u64) {
        if self.flags.enable_pcc() {
            self.vregs.set_fact_if_missing(
                reg.to_virtual_reg().unwrap(),
                Fact::Range {
                    bit_width,
                    min,
                    max,
                },
            );
        }
    }
}

#[cfg(test)]
mod tests {
    use super::ValueUseState;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::types;
    use crate::ir::{Function, InstBuilder};

    #[test]
    fn multi_result_use_once() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v5]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Once);
        assert_eq!(uses[v5], ValueUseState::Once);
    }

    #[test]
    fn results_used_twice_but_not_operands() {
        let mut func = Function::new();
        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        let v1 = pos.ins().iconst(types::I64, 0);
        let v2 = pos.ins().iconst(types::I64, 1);
        let v3 = pos.ins().iconcat(v1, v2);
        let (v4, v5) = pos.ins().isplit(v3);
        pos.ins().return_(&[v4, v4]);
        let func = pos.func;

        let uses = super::compute_use_states(&func, None);
        assert_eq!(uses[v1], ValueUseState::Once);
        assert_eq!(uses[v2], ValueUseState::Once);
        assert_eq!(uses[v3], ValueUseState::Once);
        assert_eq!(uses[v4], ValueUseState::Multiple);
        assert_eq!(uses[v5], ValueUseState::Unused);
    }
}