cranelift_codegen/machinst/vcode.rs
//! This implements the VCode container: a CFG of Insts that have been lowered.
//!
//! VCode is virtual-register code. An instruction in VCode is almost a machine
//! instruction; however, its register slots can refer to virtual registers in
//! addition to real machine registers.
//!
//! VCode is structured with traditional basic blocks, and
//! each block must be terminated by an unconditional branch (one target), a
//! conditional branch (two targets), or a return (no targets). Note that this
//! slightly differs from the machine code of most ISAs: in most ISAs, a
//! conditional branch has one target (and the not-taken case falls through).
//! However, we expect that machine backends will elide branches to the following
//! block (i.e., zero-offset jumps), and will be able to codegen a branch-cond /
//! branch-uncond pair if *both* targets are not fallthrough. This allows us to
//! play with layout prior to final binary emission, as well, if we want.
//!
//! See the main module comment in `mod.rs` for more details on the VCode-based
//! backend pipeline.
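//!
//! As a schematic illustration of the terminator rules above (not tied to
//! any particular ISA), a lowered function might look like:
//!
//! ```text
//! block0:                     ; entry
//!   inst ...
//!   cond_br block1, block2    ; conditional branch: two targets
//! block1:
//!   inst ...
//!   jump block3               ; unconditional branch: one target
//! block2:
//!   inst ...
//!   jump block3
//! block3:
//!   ret                       ; return: no targets
//! ```
//!
//! At emission time, the `jump block3` that ends `block2` can be elided,
//! since `block3` immediately follows it.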

use crate::ir::pcc::*;
use crate::ir::{self, types, Constant, ConstantData, ValueLabel};
use crate::machinst::*;
use crate::ranges::Ranges;
use crate::timing;
use crate::trace;
use crate::CodegenError;
use crate::{LabelValueLoc, ValueLocRange};
use regalloc2::{
    Edit, Function as RegallocFunction, InstOrEdit, InstRange, MachineEnv, Operand,
    OperandConstraint, OperandKind, PRegSet, RegClass,
};
use rustc_hash::FxHashMap;

use core::mem::take;
use cranelift_entity::{entity_impl, Keys};
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt;

/// Index referring to an instruction in VCode.
pub type InsnIndex = regalloc2::Inst;

/// Extension trait for `InsnIndex` to allow conversion to a
/// `BackwardsInsnIndex`.
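///
/// For example, with `num_insts = 5` the mapping defined by
/// `num_insts - index - 1` is:
///
/// ```text
/// forward index:   0 1 2 3 4
/// backward index:  4 3 2 1 0
/// ```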
trait ToBackwardsInsnIndex {
    fn to_backwards_insn_index(&self, num_insts: usize) -> BackwardsInsnIndex;
}

impl ToBackwardsInsnIndex for InsnIndex {
    fn to_backwards_insn_index(&self, num_insts: usize) -> BackwardsInsnIndex {
        BackwardsInsnIndex::new(num_insts - self.index() - 1)
    }
}

/// An index referring to an instruction in the VCode while it is still
/// ordered backwards, during VCode construction.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "enable-serde",
    derive(::serde::Serialize, ::serde::Deserialize)
)]
pub struct BackwardsInsnIndex(InsnIndex);

impl BackwardsInsnIndex {
    pub fn new(i: usize) -> Self {
        BackwardsInsnIndex(InsnIndex::new(i))
    }
}

/// Index referring to a basic block in VCode.
pub type BlockIndex = regalloc2::Block;

/// VCodeInst wraps all requirements for a MachInst to be in VCode: it must be
/// a `MachInst` and it must be able to emit itself at least to a `SizeCodeSink`.
pub trait VCodeInst: MachInst + MachInstEmit {}
impl<I: MachInst + MachInstEmit> VCodeInst for I {}

/// A function in "VCode" (virtualized-register code) form, after
/// lowering. This is essentially a standard CFG of basic blocks,
/// where each basic block consists of lowered instructions produced
/// by the machine-specific backend.
///
/// Note that the VCode is immutable once produced, and is not
/// modified by register allocation in particular. Rather, register
/// allocation on the `VCode` produces a separate `regalloc2::Output`
/// struct, and this can be passed to `emit`. `emit` in turn does not
/// modify the vcode, but produces an `EmitResult`, which contains the
/// machine code itself, and the associated disassembly and/or
/// metadata as requested.
pub struct VCode<I: VCodeInst> {
    /// VReg IR-level types.
    vreg_types: Vec<Type>,

    /// Lowered machine instructions in order corresponding to the original IR.
    insts: Vec<I>,

    /// A map from backwards instruction index to the user stack map for that
    /// instruction.
    ///
    /// This is a sparse side table that only has entries for instructions that
    /// are safepoints, and, of those, only the ones that have an associated
    /// user stack map.
    user_stack_maps: FxHashMap<BackwardsInsnIndex, ir::UserStackMap>,

    /// Operands: pre-regalloc references to virtual registers with
    /// constraints, in one flattened array. This allows the regalloc
    /// to efficiently access all operands without requiring expensive
    /// matches or method invocations on insts.
    operands: Vec<Operand>,

    /// Operand index ranges: for each instruction in `insts`, there
    /// is a tuple here providing the range in `operands` for that
    /// instruction's operands.
    operand_ranges: Ranges,

    /// Clobbers: a sparse map from instruction indices to clobber masks.
    clobbers: FxHashMap<InsnIndex, PRegSet>,

    /// Source locations for each instruction. (`SourceLoc` is a `u32`, so it is
    /// reasonable to keep one of these per instruction.)
    srclocs: Vec<RelSourceLoc>,

    /// Entry block.
    entry: BlockIndex,

    /// Block instruction indices.
    block_ranges: Ranges,

    /// Block successors: index range in the `block_succs` list.
    block_succ_range: Ranges,

    /// Block successor lists, concatenated into one vec. The
    /// `block_succ_range` list of tuples above gives (start, end)
    /// ranges within this list that correspond to each basic block's
    /// successors.
    block_succs: Vec<regalloc2::Block>,

    /// Block predecessors: index range in the `block_preds` list.
    block_pred_range: Ranges,

    /// Block predecessor lists, concatenated into one vec. The
    /// `block_pred_range` list of tuples above gives (start, end)
    /// ranges within this list that correspond to each basic block's
    /// predecessors.
    block_preds: Vec<regalloc2::Block>,

    /// Block parameters: index range in `block_params` below.
    block_params_range: Ranges,

    /// Block parameter lists, concatenated into one vec. The
    /// `block_params_range` list of tuples above gives (start, end)
    /// ranges within this list that correspond to each basic block's
    /// blockparam vregs.
    block_params: Vec<regalloc2::VReg>,

    /// Outgoing block arguments on branch instructions, concatenated
    /// into one list.
    ///
    /// Note that this is conceptually a 3D array: we have a VReg list
    /// per block, per successor. We flatten those three dimensions
    /// into this 1D vec, then store index ranges in two levels of
    /// indirection.
    ///
    /// Indexed by the ranges in `branch_block_arg_range`.
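    ///
    /// For example (schematic): if block0 branches to block1 with args
    /// `[v1, v2]` and to block2 with args `[v3]`, then this vec is
    /// `[v1, v2, v3]`, `branch_block_arg_range` holds the tuples
    /// `(0, 2)` and `(2, 3)`, and `branch_block_arg_succ_range` maps
    /// block0 to those two tuples.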
    branch_block_args: Vec<regalloc2::VReg>,

    /// Array of sequences of (start, end) tuples in
    /// `branch_block_args`, one for each successor; these sequences
    /// for each block are concatenated.
    ///
    /// Indexed by the indices in `branch_block_arg_succ_range`.
    branch_block_arg_range: Ranges,

    /// For a given block, indices in `branch_block_arg_range`
    /// corresponding to all of its successors.
    branch_block_arg_succ_range: Ranges,

    /// Block-order information.
    block_order: BlockLoweringOrder,

    /// ABI object.
    pub(crate) abi: Callee<I::ABIMachineSpec>,

    /// Constant information used during code emission. This should be
    /// immutable across function compilations within the same module.
    emit_info: I::Info,

    /// Constants.
    pub(crate) constants: VCodeConstants,

    /// Value labels for debuginfo attached to vregs.
    debug_value_labels: Vec<(VReg, InsnIndex, InsnIndex, u32)>,

    pub(crate) sigs: SigSet,

    /// Facts on VRegs, for proof-carrying code verification.
    facts: Vec<Option<Fact>>,
}

/// The result of `VCode::emit`. Contains all information computed
/// during emission: actual machine code, optionally a disassembly,
/// and optionally metadata about the code layout.
pub struct EmitResult {
    /// The MachBuffer containing the machine code.
    pub buffer: MachBufferFinalized<Stencil>,

    /// Offsets of each basic block, recorded during emission. Computed
    /// only if `machine_code_cfg_info` is enabled.
    pub bb_offsets: Vec<CodeOffset>,

    /// Final basic-block edges, in terms of code offsets of
    /// bb-starts. Computed only if `machine_code_cfg_info` is enabled.
    pub bb_edges: Vec<(CodeOffset, CodeOffset)>,

    /// Final length of function body.
    pub func_body_len: CodeOffset,

    /// The pretty-printed disassembly, if any. This uses the same
    /// pretty-printing for MachInsts as the pre-regalloc VCode Debug
    /// implementation, but additionally includes the prologue and
    /// epilogue(s), and makes use of the regalloc results.
    pub disasm: Option<String>,

    /// Offsets of sized stackslots.
    pub sized_stackslot_offsets: PrimaryMap<StackSlot, u32>,

    /// Offsets of dynamic stackslots.
    pub dynamic_stackslot_offsets: PrimaryMap<DynamicStackSlot, u32>,

    /// Value-labels information (debug metadata).
    pub value_labels_ranges: ValueLabelsRanges,

    /// Stack frame size.
    pub frame_size: u32,
}

/// A builder for a VCode function body.
///
/// This builder has the ability to accept instructions in either
/// forward or reverse order, depending on the pass direction that
/// produces the VCode. The lowering from CLIF to VCode<MachInst>
/// ordinarily occurs in reverse order (in order to allow instructions
/// to be lowered only if used, and not merged) so a reversal will
/// occur at the end of lowering to ensure the VCode is in machine
/// order.
///
/// If built in reverse, block and instruction indices used once the
/// VCode is built are relative to the final (reversed) order, not the
/// order of construction. Note that this means we do not know the
/// final block or instruction indices when building, so we do not
/// hand them out. (The user is assumed to know them when appending
/// terminator instructions with successor blocks.)
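///
/// For example (schematic): if a block's instructions are pushed in the
/// order `C, B, A` during backward lowering, the built VCode lists them
/// as `A, B, C`, and all indices handed out afterward refer to that
/// final forward order.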
pub struct VCodeBuilder<I: VCodeInst> {
    /// In-progress VCode.
    pub(crate) vcode: VCode<I>,

    /// In what direction is the build occurring?
    direction: VCodeBuildDirection,

    /// Debug-value label in-progress map, keyed by label. For each
    /// label, we keep disjoint ranges mapping to vregs. We'll flatten
    /// this into (vreg, range, label) tuples when done.
    debug_info: FxHashMap<ValueLabel, Vec<(InsnIndex, InsnIndex, VReg)>>,
}

/// Direction in which a VCodeBuilder builds VCode.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VCodeBuildDirection {
    // TODO: add `Forward` once we need it and can test it adequately.
    /// Backward-build pass: we expect the producer to call `emit()`
    /// with instructions in reverse program order within each block.
    Backward,
}

impl<I: VCodeInst> VCodeBuilder<I> {
    /// Create a new VCodeBuilder.
    pub fn new(
        sigs: SigSet,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        constants: VCodeConstants,
        direction: VCodeBuildDirection,
    ) -> Self {
        let vcode = VCode::new(sigs, abi, emit_info, block_order, constants);

        VCodeBuilder {
            vcode,
            direction,
            debug_info: FxHashMap::default(),
        }
    }

    pub fn init_retval_area(&mut self, vregs: &mut VRegAllocator<I>) -> CodegenResult<()> {
        self.vcode.abi.init_retval_area(&self.vcode.sigs, vregs)
    }

    /// Access the ABI object.
    pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
        &self.vcode.abi
    }

    /// Access the ABI object.
    pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
        &mut self.vcode.abi
    }

    pub fn sigs(&self) -> &SigSet {
        &self.vcode.sigs
    }

    pub fn sigs_mut(&mut self) -> &mut SigSet {
        &mut self.vcode.sigs
    }

    /// Access to the BlockLoweringOrder object.
    pub fn block_order(&self) -> &BlockLoweringOrder {
        &self.vcode.block_order
    }

    /// Set the current block as the entry block.
    pub fn set_entry(&mut self, block: BlockIndex) {
        self.vcode.entry = block;
    }

    /// End the current basic block. Must be called after emitting vcode insts
    /// for IR insts and prior to ending the function (building the VCode).
    pub fn end_bb(&mut self) {
        let end_idx = self.vcode.insts.len();
        // Add the instruction index range to the list of blocks.
        self.vcode.block_ranges.push_end(end_idx);
        // End the successors list.
        let succ_end = self.vcode.block_succs.len();
        self.vcode.block_succ_range.push_end(succ_end);
        // End the blockparams list.
        let block_params_end = self.vcode.block_params.len();
        self.vcode.block_params_range.push_end(block_params_end);
        // End the branch blockparam args list.
        let branch_block_arg_succ_end = self.vcode.branch_block_arg_range.len();
        self.vcode
            .branch_block_arg_succ_range
            .push_end(branch_block_arg_succ_end);
    }

    pub fn add_block_param(&mut self, param: VirtualReg) {
        self.vcode.block_params.push(param.into());
    }

    fn add_branch_args_for_succ(&mut self, args: &[Reg]) {
        self.vcode
            .branch_block_args
            .extend(args.iter().map(|&arg| VReg::from(arg)));
        let end = self.vcode.branch_block_args.len();
        self.vcode.branch_block_arg_range.push_end(end);
    }

    /// Push an instruction for the current BB and current IR inst
    /// within the BB.
    pub fn push(&mut self, insn: I, loc: RelSourceLoc) {
        self.vcode.insts.push(insn);
        self.vcode.srclocs.push(loc);
    }

    /// Add a successor block with branch args.
    pub fn add_succ(&mut self, block: BlockIndex, args: &[Reg]) {
        self.vcode.block_succs.push(block);
        self.add_branch_args_for_succ(args);
    }

    /// Add a debug value label to a register.
    pub fn add_value_label(&mut self, reg: Reg, label: ValueLabel) {
        // We'll fix up labels in reverse(). Because we're generating
        // code bottom-to-top, the liverange of the label goes *from*
        // the last index at which it was defined (or 0, which is the
        // end of the eventual function) *to* just this instruction,
        // and no further.
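        //
        // For example (backward indices): if this label was last
        // recorded at backward index 10 and we are now at backward
        // index 25, we push the range (10, 25); `reverse_and_finalize`
        // later translates these endpoints into forward indices.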
        let inst = InsnIndex::new(self.vcode.insts.len());
        let labels = self.debug_info.entry(label).or_insert_with(|| vec![]);
        let last = labels
            .last()
            .map(|(_start, end, _vreg)| *end)
            .unwrap_or(InsnIndex::new(0));
        labels.push((last, inst, reg.into()));
    }

    /// Access the constants.
    pub fn constants(&mut self) -> &mut VCodeConstants {
        &mut self.vcode.constants
    }

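    /// Compute predecessor lists from the successor lists, by inverting them.
    ///
    /// For example (schematic): if block0's successors are `[block1, block2]`
    /// and block1's successors are `[block2]`, then `block_succs` is
    /// `[1, 2, 2]` and the computed predecessor lists are: block1: `[0]`;
    /// block2: `[0, 1]`.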
    fn compute_preds_from_succs(&mut self) {
        // Do a linear-time counting sort: first determine how many
        // times each block appears as a successor.
        let mut starts = vec![0u32; self.vcode.num_blocks()];
        for succ in &self.vcode.block_succs {
            starts[succ.index()] += 1;
        }

        // Determine for each block the starting index where that
        // block's predecessors should go. This is equivalent to the
        // ranges we need to store in block_pred_range.
        self.vcode.block_pred_range.reserve(starts.len());
        let mut end = 0;
        for count in starts.iter_mut() {
            let start = end;
            end += *count;
            *count = start;
            self.vcode.block_pred_range.push_end(end as usize);
        }
        let end = end as usize;
        debug_assert_eq!(end, self.vcode.block_succs.len());

        // Walk over the successors again, this time grouped by
        // predecessor, and push the predecessor at the current
        // starting position of each of its successors. We build
        // each group of predecessors in whatever order Ranges::iter
        // returns them; regalloc2 doesn't care.
        self.vcode.block_preds.resize(end, BlockIndex::invalid());
        for (pred, range) in self.vcode.block_succ_range.iter() {
            let pred = BlockIndex::new(pred);
            for succ in &self.vcode.block_succs[range] {
                let pos = &mut starts[succ.index()];
                self.vcode.block_preds[*pos as usize] = pred;
                *pos += 1;
            }
        }
        debug_assert!(self.vcode.block_preds.iter().all(|pred| pred.is_valid()));
    }

    /// Called once, when a build in Backward order is complete, to
    /// perform the overall reversal (into final forward order) and
    /// finalize metadata accordingly.
    fn reverse_and_finalize(&mut self, vregs: &VRegAllocator<I>) {
        let n_insts = self.vcode.insts.len();
        if n_insts == 0 {
            return;
        }

        // Reverse the per-block and per-inst sequences.
        self.vcode.block_ranges.reverse_index();
        self.vcode.block_ranges.reverse_target(n_insts);
        // block_params_range is indexed by block (and blocks were
        // traversed in reverse) so we reverse it; but block-param
        // sequences in the concatenated vec can remain in reverse
        // order (it is effectively an arena of arbitrarily-placed
        // referenced sequences).
        self.vcode.block_params_range.reverse_index();
        // Likewise, we reverse block_succ_range, but the block_succ
        // concatenated array can remain as-is.
        self.vcode.block_succ_range.reverse_index();
        self.vcode.insts.reverse();
        self.vcode.srclocs.reverse();
        // Likewise, branch_block_arg_succ_range is indexed by block
        // so must be reversed.
        self.vcode.branch_block_arg_succ_range.reverse_index();

        // To translate an instruction index *endpoint* in reversed
        // order to forward order, compute `n_insts - i`.
        //
        // Why not `n_insts - 1 - i`? That would be correct to
        // translate an individual instruction index (for ten insts 0
        // to 9 inclusive, inst 0 becomes 9, and inst 9 becomes
        // 0). But for the usual inclusive-start, exclusive-end range
        // idiom, inclusive starts become exclusive ends and
        // vice-versa, so e.g. an (inclusive) start of 0 becomes an
        // (exclusive) end of 10.
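        //
        // Concretely: with `n_insts = 10`, a backward half-open range
        // `[2, 5)` becomes the forward range `[10 - 5, 10 - 2) = [5, 8)`.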
        let translate = |inst: InsnIndex| InsnIndex::new(n_insts - inst.index());

        // Generate debug-value labels based on per-label maps.
        for (label, tuples) in &self.debug_info {
            for &(start, end, vreg) in tuples {
                let vreg = vregs.resolve_vreg_alias(vreg);
                let fwd_start = translate(end);
                let fwd_end = translate(start);
                self.vcode
                    .debug_value_labels
                    .push((vreg, fwd_start, fwd_end, label.as_u32()));
            }
        }

        // Now sort debug value labels by VReg, as required
        // by regalloc2.
        self.vcode
            .debug_value_labels
            .sort_unstable_by_key(|(vreg, _, _, _)| *vreg);
    }

    fn collect_operands(&mut self, vregs: &VRegAllocator<I>) {
        let allocatable = PRegSet::from(self.vcode.machine_env());
        for (i, insn) in self.vcode.insts.iter_mut().enumerate() {
            // Push operands from the instruction onto the operand list.
            //
            // We rename through the vreg alias table as we collect
            // the operands. This is better than a separate post-pass
            // over operands, because it has more cache locality:
            // operands only need to pass through L1 once. This is
            // also better than renaming instructions'
            // operands/registers while lowering, because here we only
            // need to do the `match` over the instruction to visit
            // its register fields (which is slow, branchy code) once.

            let mut op_collector =
                OperandCollector::new(&mut self.vcode.operands, allocatable, |vreg| {
                    vregs.resolve_vreg_alias(vreg)
                });
            insn.get_operands(&mut op_collector);
            let (ops, clobbers) = op_collector.finish();
            self.vcode.operand_ranges.push_end(ops);

            if clobbers != PRegSet::default() {
                self.vcode.clobbers.insert(InsnIndex::new(i), clobbers);
            }

            if let Some((dst, src)) = insn.is_move() {
                // We should never see non-virtual registers present in move
                // instructions.
                assert!(
                    src.is_virtual(),
                    "the real register {src:?} was used as the source of a move instruction"
                );
                assert!(
                    dst.to_reg().is_virtual(),
                    "the real register {:?} was used as the destination of a move instruction",
                    dst.to_reg()
                );
            }
        }

        // Translate blockparam args via the vreg aliases table as well.
        for arg in &mut self.vcode.branch_block_args {
            let new_arg = vregs.resolve_vreg_alias(*arg);
            trace!("operandcollector: block arg {:?} -> {:?}", arg, new_arg);
            *arg = new_arg;
        }
    }

    /// Build the final VCode.
    pub fn build(mut self, mut vregs: VRegAllocator<I>) -> VCode<I> {
        self.vcode.vreg_types = take(&mut vregs.vreg_types);
        self.vcode.facts = take(&mut vregs.facts);

        if self.direction == VCodeBuildDirection::Backward {
            self.reverse_and_finalize(&vregs);
        }
        self.collect_operands(&vregs);

        self.compute_preds_from_succs();
        self.vcode.debug_value_labels.sort_unstable();

        // At this point, nothing in the vcode should mention any
        // VReg which has been aliased. All the appropriate rewriting
        // should have happened above. Just to be sure, let's
        // double-check each field which has vregs.
        // Note: can't easily check vcode.insts, resolved in collect_operands.
        // Operands are resolved in collect_operands.
        vregs.debug_assert_no_vreg_aliases(self.vcode.operands.iter().map(|op| op.vreg()));
        // Currently block params are never aliased to another vreg.
        vregs.debug_assert_no_vreg_aliases(self.vcode.block_params.iter().copied());
        // Branch block args are resolved in collect_operands.
        vregs.debug_assert_no_vreg_aliases(self.vcode.branch_block_args.iter().copied());
        // Debug value labels are resolved in reverse_and_finalize.
        vregs.debug_assert_no_vreg_aliases(
            self.vcode.debug_value_labels.iter().map(|&(vreg, ..)| vreg),
        );
        // Facts are resolved eagerly during set_vreg_alias.
        vregs.debug_assert_no_vreg_aliases(
            self.vcode
                .facts
                .iter()
                .zip(&vregs.vreg_types)
                .enumerate()
                .filter(|(_, (fact, _))| fact.is_some())
                .map(|(vreg, (_, &ty))| {
                    let (regclasses, _) = I::rc_for_type(ty).unwrap();
                    VReg::new(vreg, regclasses[0])
                }),
        );

        self.vcode
    }

    /// Add a user stack map for the associated instruction.
    pub fn add_user_stack_map(
        &mut self,
        inst: BackwardsInsnIndex,
        entries: &[ir::UserStackMapEntry],
    ) {
        let stack_map = ir::UserStackMap::new(entries, self.vcode.abi.sized_stackslot_offsets());
        let old_entry = self.vcode.user_stack_maps.insert(inst, stack_map);
        debug_assert!(old_entry.is_none());
    }
}

const NO_INST_OFFSET: CodeOffset = u32::MAX;

impl<I: VCodeInst> VCode<I> {
    /// New empty VCode.
    fn new(
        sigs: SigSet,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        constants: VCodeConstants,
    ) -> Self {
        let n_blocks = block_order.lowered_order().len();
        VCode {
            sigs,
            vreg_types: vec![],
            insts: Vec::with_capacity(10 * n_blocks),
            user_stack_maps: FxHashMap::default(),
            operands: Vec::with_capacity(30 * n_blocks),
            operand_ranges: Ranges::with_capacity(10 * n_blocks),
            clobbers: FxHashMap::default(),
            srclocs: Vec::with_capacity(10 * n_blocks),
            entry: BlockIndex::new(0),
            block_ranges: Ranges::with_capacity(n_blocks),
            block_succ_range: Ranges::with_capacity(n_blocks),
            block_succs: Vec::with_capacity(n_blocks),
            block_pred_range: Ranges::default(),
            block_preds: Vec::new(),
            block_params_range: Ranges::with_capacity(n_blocks),
            block_params: Vec::with_capacity(5 * n_blocks),
            branch_block_args: Vec::with_capacity(10 * n_blocks),
            branch_block_arg_range: Ranges::with_capacity(2 * n_blocks),
            branch_block_arg_succ_range: Ranges::with_capacity(n_blocks),
            block_order,
            abi,
            emit_info,
            constants,
            debug_value_labels: vec![],
            facts: vec![],
        }
    }

    /// Get the ABI-dependent MachineEnv for managing register allocation.
    pub fn machine_env(&self) -> &MachineEnv {
        self.abi.machine_env(&self.sigs)
    }

    /// Get the number of blocks. Block indices will be in the range
    /// `0 .. self.num_blocks()`.
    pub fn num_blocks(&self) -> usize {
        self.block_ranges.len()
    }

    /// The number of lowered instructions.
    pub fn num_insts(&self) -> usize {
        self.insts.len()
    }

    fn compute_clobbers(&self, regalloc: &regalloc2::Output) -> Vec<Writable<RealReg>> {
        let mut clobbered = PRegSet::default();

        // All moves are included in clobbers.
        for (_, Edit::Move { to, .. }) in &regalloc.edits {
            if let Some(preg) = to.as_reg() {
                clobbered.add(preg);
            }
        }

        for (i, range) in self.operand_ranges.iter() {
            // Skip this instruction if not "included in clobbers" as
            // per the MachInst. (Some backends use this to implement
            // ABI specifics; e.g., excluding calls of the same ABI as
            // the current function from clobbers, because by
            // definition everything clobbered by the call can be
            // clobbered by this function without saving as well.)
            if !self.insts[i].is_included_in_clobbers() {
                continue;
            }

            let operands = &self.operands[range.clone()];
            let allocs = &regalloc.allocs[range];
            for (operand, alloc) in operands.iter().zip(allocs.iter()) {
                if operand.kind() == OperandKind::Def {
                    if let Some(preg) = alloc.as_reg() {
                        clobbered.add(preg);
                    }
                }
            }

            // Also add explicitly-clobbered registers.
            if let Some(&inst_clobbered) = self.clobbers.get(&InsnIndex::new(i)) {
                clobbered.union_from(inst_clobbered);
            }
        }

        clobbered
            .into_iter()
            .map(|preg| Writable::from_reg(RealReg::from(preg)))
            .collect()
    }

    /// Emit the instructions to a `MachBuffer`, containing fixed-up
    /// code and external reloc/trap/etc. records ready for use. Takes
    /// the regalloc results as well.
    ///
    /// Returns the machine code itself, and optionally metadata
    /// and/or a disassembly, as an `EmitResult`. The `VCode` itself
    /// is consumed by the emission process.
    pub fn emit(
        mut self,
        regalloc: &regalloc2::Output,
        want_disasm: bool,
        flags: &settings::Flags,
        ctrl_plane: &mut ControlPlane,
    ) -> EmitResult
    where
        I: VCodeInst,
    {
        // To write into disasm string.
        use core::fmt::Write;

        let _tt = timing::vcode_emit();
        let mut buffer = MachBuffer::new();
        let mut bb_starts: Vec<Option<CodeOffset>> = vec![];

        // The first `num_blocks()` MachLabels are reserved for block indices.
        buffer.reserve_labels_for_blocks(self.num_blocks());

        // Register all allocated constants with the `MachBuffer` to ensure that
        // any references to the constants during instructions can be handled
        // correctly.
        buffer.register_constants(&self.constants);

        // Construct the final order we emit code in: cold blocks at the end.
        let mut final_order: SmallVec<[BlockIndex; 16]> = smallvec![];
        let mut cold_blocks: SmallVec<[BlockIndex; 16]> = smallvec![];
        for block in 0..self.num_blocks() {
            let block = BlockIndex::new(block);
            if self.block_order.is_cold(block) {
                cold_blocks.push(block);
            } else {
                final_order.push(block);
            }
        }
        final_order.extend(cold_blocks.clone());

        // Compute/save info we need for the prologue: clobbers and
        // number of spillslots.
        //
        // `emit` consumes `self`, so we are free to mutate `self.abi`
        // here as we compute the frame layout and set other info; the
        // rest of the VCode is left unmodified.
        let clobbers = self.compute_clobbers(regalloc);
        self.abi
            .compute_frame_layout(&self.sigs, regalloc.num_spillslots, clobbers);

        // Emit blocks.
        let mut cur_srcloc = None;
        let mut last_offset = None;
        let mut inst_offsets = vec![];
        let mut state = I::State::new(&self.abi, std::mem::take(ctrl_plane));

        let mut disasm = String::new();

        if !self.debug_value_labels.is_empty() {
            inst_offsets.resize(self.insts.len(), NO_INST_OFFSET);
        }

        // Count edits per block ahead of time; this is needed for
        // lookahead island emission. (We could derive it per-block
        // with binary search in the edit list, but it's more
        // efficient to do it in one pass here.)
        let mut ra_edits_per_block: SmallVec<[u32; 64]> = smallvec![];
        let mut edit_idx = 0;
        for block in 0..self.num_blocks() {
            let end_inst = InsnIndex::new(self.block_ranges.get(block).end);
            let start_edit_idx = edit_idx;
            while edit_idx < regalloc.edits.len() && regalloc.edits[edit_idx].0.inst() < end_inst {
                edit_idx += 1;
            }
            let end_edit_idx = edit_idx;
            ra_edits_per_block.push((end_edit_idx - start_edit_idx) as u32);
        }

        let is_forward_edge_cfi_enabled = self.abi.is_forward_edge_cfi_enabled();
        let mut bb_padding = match flags.bb_padding_log2_minus_one() {
            0 => Vec::new(),
            n => vec![0; 1 << (n - 1)],
        };
        let mut total_bb_padding = 0;

        for (block_order_idx, &block) in final_order.iter().enumerate() {
            trace!("emitting block {:?}", block);

            // Call the new block hook for state
            state.on_new_block();

            // Emit NOPs to align the block.
            let new_offset = I::align_basic_block(buffer.cur_offset());
            while new_offset > buffer.cur_offset() {
                // Pad with NOPs up to the aligned block offset.
                let nop = I::gen_nop((new_offset - buffer.cur_offset()) as usize);
                nop.emit(&mut buffer, &self.emit_info, &mut Default::default());
            }
            assert_eq!(buffer.cur_offset(), new_offset);

            let do_emit = |inst: &I,
                           disasm: &mut String,
                           buffer: &mut MachBuffer<I>,
                           state: &mut I::State| {
                if want_disasm && !inst.is_args() {
                    let mut s = state.clone();
                    writeln!(disasm, " {}", inst.pretty_print_inst(&mut s)).unwrap();
                }
                inst.emit(buffer, &self.emit_info, state);
            };

            // Is this the first block? Emit the prologue directly if so.
            if block == self.entry {
                trace!(" -> entry block");
                buffer.start_srcloc(Default::default());
                for inst in &self.abi.gen_prologue() {
                    do_emit(&inst, &mut disasm, &mut buffer, &mut state);
                }
                buffer.end_srcloc();
            }

            // Now emit the regular block body.

            buffer.bind_label(MachLabel::from_block(block), state.ctrl_plane_mut());

            if want_disasm {
                writeln!(&mut disasm, "block{}:", block.index()).unwrap();
            }

            if flags.machine_code_cfg_info() {
                // Track BB starts. If we have backed up due to MachBuffer
                // branch opts, note that the removed blocks were removed.
                let cur_offset = buffer.cur_offset();
                if last_offset.is_some() && cur_offset <= last_offset.unwrap() {
                    for i in (0..bb_starts.len()).rev() {
                        if bb_starts[i].is_some() && cur_offset > bb_starts[i].unwrap() {
                            break;
                        }
                        bb_starts[i] = None;
                    }
                }
                bb_starts.push(Some(cur_offset));
                last_offset = Some(cur_offset);
            }

            if let Some(block_start) = I::gen_block_start(
                self.block_order.is_indirect_branch_target(block),
                is_forward_edge_cfi_enabled,
            ) {
                do_emit(&block_start, &mut disasm, &mut buffer, &mut state);
            }

            for inst_or_edit in regalloc.block_insts_and_edits(&self, block) {
                match inst_or_edit {
                    InstOrEdit::Inst(iix) => {
                        if !self.debug_value_labels.is_empty() {
                            // If we need to produce debug info,
                            // record the offset of each instruction
                            // so that we can translate value-label
                            // ranges to machine-code offsets.

                            // Cold blocks violate monotonicity
                            // assumptions elsewhere (that
                            // instructions in inst-index order are in
                            // order in machine code), so we omit
                            // their offsets here. Value-label range
                            // generation below will skip empty ranges
                            // and ranges with to-offsets of zero.
                            if !self.block_order.is_cold(block) {
                                inst_offsets[iix.index()] = buffer.cur_offset();
                            }
                        }

                        // Update the srcloc at this point in the buffer.
                        let srcloc = self.srclocs[iix.index()];
                        if cur_srcloc != Some(srcloc) {
                            if cur_srcloc.is_some() {
                                buffer.end_srcloc();
                            }
                            buffer.start_srcloc(srcloc);
                            cur_srcloc = Some(srcloc);
                        }

                        // If this is a safepoint, compute a stack map
                        // and pass it to the emit state.
                        let stack_map_disasm = if self.insts[iix.index()].is_safepoint() {
                            let (user_stack_map, user_stack_map_disasm) = {
                                // The `user_stack_maps` is keyed by reverse
                                // instruction index, so we must flip the
                                // index. We can't put this into a helper method
                                // due to borrowck issues because parts of
                                // `self` are borrowed mutably elsewhere in this
                                // function.
                                let index = iix.to_backwards_insn_index(self.num_insts());
                                let user_stack_map = self.user_stack_maps.remove(&index);
                                let user_stack_map_disasm =
                                    user_stack_map.as_ref().map(|m| format!(" ; {m:?}"));
                                (user_stack_map, user_stack_map_disasm)
                            };

                            state.pre_safepoint(user_stack_map);

                            user_stack_map_disasm
                        } else {
                            None
                        };

                        // If the instruction we are about to emit is
                        // a return, place an epilogue at this point
                        // (and don't emit the return; the actual
                        // epilogue will contain it).
                        if self.insts[iix.index()].is_term() == MachTerminator::Ret {
                            for inst in self.abi.gen_epilogue() {
                                do_emit(&inst, &mut disasm, &mut buffer, &mut state);
                            }
                        } else {
                            // Update the operands for this inst using the
                            // allocations from the regalloc result.
                            let mut allocs = regalloc.inst_allocs(iix).iter();
                            self.insts[iix.index()].get_operands(
                                &mut |reg: &mut Reg, constraint, _kind, _pos| {
                                    let alloc = allocs
                                        .next()
                                        .expect("enough allocations for all operands")
                                        .as_reg()
                                        .expect("only register allocations, not stack allocations")
                                        .into();

                                    if let OperandConstraint::FixedReg(rreg) = constraint {
                                        debug_assert_eq!(Reg::from(rreg), alloc);
                                    }
                                    *reg = alloc;
                                },
                            );
                            debug_assert!(allocs.next().is_none());

                            // Emit the instruction!
                            do_emit(
                                &self.insts[iix.index()],
                                &mut disasm,
                                &mut buffer,
                                &mut state,
                            );
                            if let Some(stack_map_disasm) = stack_map_disasm {
                                disasm.push_str(&stack_map_disasm);
                                disasm.push('\n');
                            }
                        }
                    }

                    InstOrEdit::Edit(Edit::Move { from, to }) => {
                        // Create a move/spill/reload instruction and
                        // immediately emit it.
                        match (from.as_reg(), to.as_reg()) {
                            (Some(from), Some(to)) => {
                                // Reg-to-reg move.
                                let from_rreg = Reg::from(from);
                                let to_rreg = Writable::from_reg(Reg::from(to));
                                debug_assert_eq!(from.class(), to.class());
                                let ty = I::canonical_type_for_rc(from.class());
                                let mv = I::gen_move(to_rreg, from_rreg, ty);
                                do_emit(&mv, &mut disasm, &mut buffer, &mut state);
                            }
                            (Some(from), None) => {
                                // Spill from register to spillslot.
                                let to = to.as_stack().unwrap();
                                let from_rreg = RealReg::from(from);
                                let spill = self.abi.gen_spill(to, from_rreg);
                                do_emit(&spill, &mut disasm, &mut buffer, &mut state);
                            }
                            (None, Some(to)) => {
                                // Load from spillslot to register.
                                let from = from.as_stack().unwrap();
                                let to_rreg = Writable::from_reg(RealReg::from(to));
                                let reload = self.abi.gen_reload(to_rreg, from);
                                do_emit(&reload, &mut disasm, &mut buffer, &mut state);
                            }
                            (None, None) => {
                                panic!("regalloc2 should have eliminated stack-to-stack moves!");
                            }
                        }
                    }
                }
            }

            if cur_srcloc.is_some() {
                buffer.end_srcloc();
                cur_srcloc = None;
            }

            // Do we need an island? Get the worst-case size of the next BB, add
            // it to the optional padding behind the block, and pass this to the
            // `MachBuffer` to determine if an island is necessary.
            let worst_case_next_bb = if block_order_idx < final_order.len() - 1 {
                let next_block = final_order[block_order_idx + 1];
                let next_block_range = self.block_ranges.get(next_block.index());
                let next_block_size = next_block_range.len() as u32;
                let next_block_ra_insertions = ra_edits_per_block[next_block.index()];
                I::worst_case_size() * (next_block_size + next_block_ra_insertions)
            } else {
                0
            };
            let padding = if bb_padding.is_empty() {
                0
            } else {
                bb_padding.len() as u32 + I::LabelUse::ALIGN - 1
            };
            if buffer.island_needed(padding + worst_case_next_bb) {
                buffer.emit_island(padding + worst_case_next_bb, ctrl_plane);
            }

            // Insert padding, if configured, to stress the `MachBuffer`'s
            // relocation and island calculations.
            //
            // Padding can get quite large during fuzzing, though, so place a
            // total cap on it: when a per-function threshold is exceeded, the
            // padding is turned back down to zero. This avoids, for example, a
            // small-ish test case generating a GB+ memory footprint in
            // Cranelift.
            if !bb_padding.is_empty() {
                buffer.put_data(&bb_padding);
                buffer.align_to(I::LabelUse::ALIGN);
                total_bb_padding += bb_padding.len();
                if total_bb_padding > (150 << 20) {
                    bb_padding = Vec::new();
                }
            }
        }

        debug_assert!(
            self.user_stack_maps.is_empty(),
            "any stack maps should have been consumed by instruction emission, still have: {:#?}",
            self.user_stack_maps,
        );

        // Do any optimizations on branches at tail of buffer, as if we had
        // bound one last label.
        buffer.optimize_branches(ctrl_plane);

        // Emission state is not needed anymore; move the control plane back out.
        *ctrl_plane = state.take_ctrl_plane();

        let func_body_len = buffer.cur_offset();

        // Create `bb_edges` and final (filtered) `bb_starts`.
        let mut bb_edges = vec![];
        let mut bb_offsets = vec![];
        if flags.machine_code_cfg_info() {
            for block in 0..self.num_blocks() {
                if bb_starts[block].is_none() {
                    // Block was deleted by MachBuffer; skip.
                    continue;
                }
                let from = bb_starts[block].unwrap();

                bb_offsets.push(from);
                // Resolve each `succ` label and add edges.
                let succs = self.block_succs(BlockIndex::new(block));
                for &succ in succs.iter() {
                    let to = buffer.resolve_label_offset(MachLabel::from_block(succ));
                    bb_edges.push((from, to));
                }
            }
        }

        self.monotonize_inst_offsets(&mut inst_offsets[..], func_body_len);
        let value_labels_ranges =
            self.compute_value_labels_ranges(regalloc, &inst_offsets[..], func_body_len);
        let frame_size = self.abi.frame_size();

        EmitResult {
            buffer: buffer.finish(&self.constants, ctrl_plane),
            bb_offsets,
            bb_edges,
            func_body_len,
            disasm: if want_disasm { Some(disasm) } else { None },
            sized_stackslot_offsets: self.abi.sized_stackslot_offsets().clone(),
            dynamic_stackslot_offsets: self.abi.dynamic_stackslot_offsets().clone(),
            value_labels_ranges,
            frame_size,
        }
    }

    fn monotonize_inst_offsets(&self, inst_offsets: &mut [CodeOffset], func_body_len: u32) {
        if self.debug_value_labels.is_empty() {
            return;
        }

        // During emission, branch removal can make offsets of instructions incorrect.
        // Consider the following sequence: [insi][jmp0][jmp1][jmp2][insj]
        // It will be recorded as (say):    [30]  [34]  [38]  [42]  [<would be 46>]
        // When the jumps get removed we are left with (in "inst_offsets"):
        // [insi][jmp0][jmp1][jmp2][insj][...]
        // [30]  [34]  [38]  [42]  [34]
        // Which violates the monotonicity invariant. This method sets offsets of these
        // removed instructions so as to make them appear zero-sized:
        // [insi][jmp0][jmp1][jmp2][insj][...]
        // [30]  [34]  [34]  [34]  [34]
        //
        let mut next_offset = func_body_len;
        for inst_index in (0..(inst_offsets.len() - 1)).rev() {
            let inst_offset = inst_offsets[inst_index];

            // Not all instructions get their offsets recorded.
            if inst_offset == NO_INST_OFFSET {
                continue;
            }

            if inst_offset > next_offset {
                trace!(
                    "Fixing code offset of the removed Inst {}: {} -> {}",
                    inst_index,
                    inst_offset,
                    next_offset
                );
                inst_offsets[inst_index] = next_offset;
                continue;
            }

            next_offset = inst_offset;
        }
    }

    fn compute_value_labels_ranges(
        &self,
        regalloc: &regalloc2::Output,
        inst_offsets: &[CodeOffset],
        func_body_len: u32,
    ) -> ValueLabelsRanges {
        if self.debug_value_labels.is_empty() {
            return ValueLabelsRanges::default();
        }

        let mut value_labels_ranges: ValueLabelsRanges = HashMap::new();
        for &(label, from, to, alloc) in &regalloc.debug_locations {
            let ranges = value_labels_ranges
                .entry(ValueLabel::from_u32(label))
                .or_insert_with(|| vec![]);
            let from_offset = inst_offsets[from.inst().index()];
            let to_offset = if to.inst().index() == inst_offsets.len() {
                func_body_len
            } else {
                inst_offsets[to.inst().index()]
            };

            // Empty ranges or unavailable offsets can happen
            // due to cold blocks and branch removal (see above).
            if from_offset == NO_INST_OFFSET
                || to_offset == NO_INST_OFFSET
                || from_offset == to_offset
            {
                continue;
            }

            let loc = if let Some(preg) = alloc.as_reg() {
                LabelValueLoc::Reg(Reg::from(preg))
            } else {
                let slot = alloc.as_stack().unwrap();
                let slot_offset = self.abi.get_spillslot_offset(slot);
                let slot_base_to_caller_sp_offset = self.abi.slot_base_to_caller_sp_offset();
                let caller_sp_to_cfa_offset =
                    crate::isa::unwind::systemv::caller_sp_to_cfa_offset();
                // NOTE: this is a negative offset because it's relative to the caller's SP
                let cfa_to_sp_offset =
                    -((slot_base_to_caller_sp_offset + caller_sp_to_cfa_offset) as i64);
                LabelValueLoc::CFAOffset(cfa_to_sp_offset + slot_offset)
            };

            // ValueLocRanges are recorded by *instruction-end
            // offset*. `from_offset` is the *start* of the
            // instruction; that is the same as the end of another
            // instruction, so we only want to begin coverage once
            // we are past the previous instruction's end.
            let start = from_offset + 1;

            // Likewise, `end` is exclusive, but we want to
            // *include* the end of the last
            // instruction. `to_offset` is the start of the
            // `to`-instruction, which is the exclusive end, i.e.,
            // the first instruction not covered. That
            // instruction's start is the same as the end of the
            // last instruction that is included, so we go one
            // byte further to be sure to include it.
            let end = to_offset + 1;
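
            // For example: with `from_offset = 30` and `to_offset = 42`, the
            // recorded range is `[31, 43)`, covering every instruction-end
            // offset in `31..=42`.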

            // Coalesce adjacent ranges for the same location to
            // minimize output size here and for the consumers.
            if let Some(last_loc_range) = ranges.last_mut() {
                if last_loc_range.loc == loc && last_loc_range.end == start {
                    trace!(
                        "Extending debug range for VL{} in {:?} to {}",
                        label,
                        loc,
                        end
                    );
                    last_loc_range.end = end;
                    continue;
                }
            }

            trace!(
                "Recording debug range for VL{} in {:?}: [Inst {}..Inst {}) [{}..{})",
                label,
                loc,
                from.inst().index(),
                to.inst().index(),
                start,
                end
            );

            ranges.push(ValueLocRange { loc, start, end });
        }

        value_labels_ranges
    }

    /// Get the IR block for a BlockIndex, if one exists.
    pub fn bindex_to_bb(&self, block: BlockIndex) -> Option<ir::Block> {
        self.block_order.lowered_order()[block.index()].orig_block()
    }

    /// Get the type of a VReg.
    pub fn vreg_type(&self, vreg: VReg) -> Type {
        self.vreg_types[vreg.vreg()]
    }

    /// Get the fact, if any, for a given VReg.
    pub fn vreg_fact(&self, vreg: VReg) -> Option<&Fact> {
        self.facts[vreg.vreg()].as_ref()
    }

    /// Set the fact for a given VReg.
    pub fn set_vreg_fact(&mut self, vreg: VReg, fact: Fact) {
        trace!("set fact on {}: {:?}", vreg, fact);
        self.facts[vreg.vreg()] = Some(fact);
    }

    /// Does a given instruction define any facts?
    pub fn inst_defines_facts(&self, inst: InsnIndex) -> bool {
        self.inst_operands(inst)
            .iter()
            .filter(|o| o.kind() == OperandKind::Def)
            .map(|o| o.vreg())
            .any(|vreg| self.facts[vreg.vreg()].is_some())
    }

    /// Get the user stack map associated with the given forward instruction index.
    pub fn get_user_stack_map(&self, inst: InsnIndex) -> Option<&ir::UserStackMap> {
        let index = inst.to_backwards_insn_index(self.num_insts());
        self.user_stack_maps.get(&index)
    }
}

impl<I: VCodeInst> std::ops::Index<InsnIndex> for VCode<I> {
    type Output = I;
    fn index(&self, idx: InsnIndex) -> &Self::Output {
        &self.insts[idx.index()]
    }
}

impl<I: VCodeInst> RegallocFunction for VCode<I> {
    fn num_insts(&self) -> usize {
        self.insts.len()
    }

    fn num_blocks(&self) -> usize {
        self.block_ranges.len()
    }

    fn entry_block(&self) -> BlockIndex {
        self.entry
    }

    fn block_insns(&self, block: BlockIndex) -> InstRange {
        let range = self.block_ranges.get(block.index());
        InstRange::new(InsnIndex::new(range.start), InsnIndex::new(range.end))
    }

    fn block_succs(&self, block: BlockIndex) -> &[BlockIndex] {
        let range = self.block_succ_range.get(block.index());
        &self.block_succs[range]
    }

    fn block_preds(&self, block: BlockIndex) -> &[BlockIndex] {
        let range = self.block_pred_range.get(block.index());
        &self.block_preds[range]
    }

    fn block_params(&self, block: BlockIndex) -> &[VReg] {
        // As a special case we don't return block params for the entry block, as all the arguments
        // will be defined by the `Inst::Args` instruction.
        if block == self.entry {
            return &[];
        }

        let range = self.block_params_range.get(block.index());
        &self.block_params[range]
    }

    fn branch_blockparams(&self, block: BlockIndex, _insn: InsnIndex, succ_idx: usize) -> &[VReg] {
        let succ_range = self.branch_block_arg_succ_range.get(block.index());
        debug_assert!(succ_idx < succ_range.len());
        let branch_block_args = self.branch_block_arg_range.get(succ_range.start + succ_idx);
        &self.branch_block_args[branch_block_args]
    }

    fn is_ret(&self, insn: InsnIndex) -> bool {
        match self.insts[insn.index()].is_term() {
            // We treat blocks terminated by an unconditional trap like a return for regalloc.
            MachTerminator::None => self.insts[insn.index()].is_trap(),
            MachTerminator::Ret | MachTerminator::RetCall => true,
            MachTerminator::Uncond | MachTerminator::Cond | MachTerminator::Indirect => false,
        }
    }

    fn is_branch(&self, insn: InsnIndex) -> bool {
        match self.insts[insn.index()].is_term() {
            MachTerminator::Cond | MachTerminator::Uncond | MachTerminator::Indirect => true,
            _ => false,
        }
    }

    fn inst_operands(&self, insn: InsnIndex) -> &[Operand] {
        let range = self.operand_ranges.get(insn.index());
        &self.operands[range]
    }

    fn inst_clobbers(&self, insn: InsnIndex) -> PRegSet {
        self.clobbers.get(&insn).cloned().unwrap_or_default()
    }

    fn num_vregs(&self) -> usize {
        self.vreg_types.len()
    }

    fn debug_value_labels(&self) -> &[(VReg, InsnIndex, InsnIndex, u32)] {
        &self.debug_value_labels
    }

    fn spillslot_size(&self, regclass: RegClass) -> usize {
        self.abi.get_spillslot_size(regclass) as usize
    }

    fn allow_multiple_vreg_defs(&self) -> bool {
        // At least the s390x backend requires this, because the
        // `Loop` pseudo-instruction aggregates all Operands so pinned
        // vregs (RealRegs) may occur more than once.
        true
    }
}

impl<I: VCodeInst> fmt::Debug for VRegAllocator<I> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "VRegAllocator {{")?;

        let mut alias_keys = self.vreg_aliases.keys().cloned().collect::<Vec<_>>();
        alias_keys.sort_unstable();
        for key in alias_keys {
            let dest = self.vreg_aliases.get(&key).unwrap();
            writeln!(f, " {:?} := {:?}", Reg::from(key), Reg::from(*dest))?;
        }

        for (vreg, fact) in self.facts.iter().enumerate() {
            if let Some(fact) = fact {
                writeln!(f, " v{vreg} ! {fact}")?;
            }
        }

        writeln!(f, "}}")
    }
}

impl<I: VCodeInst> fmt::Debug for VCode<I> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "VCode {{")?;
        writeln!(f, " Entry block: {}", self.entry.index())?;

        let mut state = Default::default();

        for block in 0..self.num_blocks() {
            let block = BlockIndex::new(block);
            writeln!(
                f,
                "Block {}({:?}):",
                block.index(),
                self.block_params(block)
            )?;
            if let Some(bb) = self.bindex_to_bb(block) {
                writeln!(f, " (original IR block: {bb})")?;
            }
            for (succ_idx, succ) in self.block_succs(block).iter().enumerate() {
                writeln!(
                    f,
                    " (successor: Block {}({:?}))",
                    succ.index(),
                    self.branch_blockparams(block, InsnIndex::new(0) /* dummy */, succ_idx)
                )?;
            }
            for inst in self.block_ranges.get(block.index()) {
                writeln!(
                    f,
                    " Inst {}: {}",
                    inst,
                    self.insts[inst].pretty_print_inst(&mut state)
                )?;
                if !self.operands.is_empty() {
                    for operand in self.inst_operands(InsnIndex::new(inst)) {
                        if operand.kind() == OperandKind::Def {
                            if let Some(fact) = &self.facts[operand.vreg().vreg()] {
                                writeln!(f, " v{} ! {}", operand.vreg().vreg(), fact)?;
                            }
                        }
                    }
                }
                if let Some(user_stack_map) = self.get_user_stack_map(InsnIndex::new(inst)) {
                    writeln!(f, " {user_stack_map:?}")?;
                }
            }
        }

        writeln!(f, "}}")?;
        Ok(())
    }
}

/// This structure manages VReg allocation during the lifetime of the VCodeBuilder.
pub struct VRegAllocator<I> {
    /// VReg IR-level types.
    vreg_types: Vec<Type>,

    /// VReg aliases. When the final VCode is built we rewrite all
    /// uses of the keys in this table to their replacement values.
    ///
    /// We use these aliases to rename an instruction's expected
    /// result vregs to the returned vregs from lowering, which are
    /// usually freshly-allocated temps.
    vreg_aliases: FxHashMap<regalloc2::VReg, regalloc2::VReg>,

    /// A deferred error, to be bubbled up to the top level of the
    /// lowering algorithm. We take this approach because we cannot
    /// currently propagate a `Result` upward through ISLE code (the
    /// lowering rules) or some ABI code.
    deferred_error: Option<CodegenError>,

    /// Facts on VRegs, for proof-carrying code.
    facts: Vec<Option<Fact>>,

    /// The type of instruction that this allocator makes registers for.
    _inst: core::marker::PhantomData<I>,
}

impl<I: VCodeInst> VRegAllocator<I> {
    /// Make a new VRegAllocator.
    pub fn with_capacity(capacity: usize) -> Self {
        let capacity = first_user_vreg_index() + capacity;
        let mut vreg_types = Vec::with_capacity(capacity);
        vreg_types.resize(first_user_vreg_index(), types::INVALID);
        Self {
            vreg_types,
            vreg_aliases: FxHashMap::with_capacity_and_hasher(capacity, Default::default()),
            deferred_error: None,
            facts: Vec::with_capacity(capacity),
            _inst: core::marker::PhantomData::default(),
        }
    }

    /// Allocate a fresh ValueRegs.
    pub fn alloc(&mut self, ty: Type) -> CodegenResult<ValueRegs<Reg>> {
        if self.deferred_error.is_some() {
            return Err(CodegenError::CodeTooLarge);
        }
        let v = self.vreg_types.len();
        let (regclasses, tys) = I::rc_for_type(ty)?;
        if v + regclasses.len() >= VReg::MAX {
            return Err(CodegenError::CodeTooLarge);
        }

        let regs: ValueRegs<Reg> = match regclasses {
            &[rc0] => ValueRegs::one(VReg::new(v, rc0).into()),
            &[rc0, rc1] => ValueRegs::two(VReg::new(v, rc0).into(), VReg::new(v + 1, rc1).into()),
            // We can extend this if/when we support 32-bit targets; e.g.,
            // an i128 on a 32-bit machine will need up to four machine regs
            // for a `Value`.
            _ => panic!("Value must reside in 1 or 2 registers"),
        };
        for (&reg_ty, &reg) in tys.iter().zip(regs.regs().iter()) {
            let vreg = reg.to_virtual_reg().unwrap();
            debug_assert_eq!(self.vreg_types.len(), vreg.index());
            self.vreg_types.push(reg_ty);
        }

        // Create empty facts for each allocated vreg.
        self.facts.resize(self.vreg_types.len(), None);

        Ok(regs)
    }

    /// Allocate a fresh ValueRegs, deferring any out-of-vregs
    /// errors. This is useful in places where we cannot bubble a
    /// `CodegenResult` upward easily, and which are known to be
    /// invoked from within the lowering loop that checks the deferred
    /// error status below.
    pub fn alloc_with_deferred_error(&mut self, ty: Type) -> ValueRegs<Reg> {
        match self.alloc(ty) {
            Ok(x) => x,
            Err(e) => {
                self.deferred_error = Some(e);
                self.bogus_for_deferred_error(ty)
            }
        }
    }

    /// Take any deferred error that was accumulated by `alloc_with_deferred_error`.
    pub fn take_deferred_error(&mut self) -> Option<CodegenError> {
        self.deferred_error.take()
    }

    /// Produce a bogus VReg placeholder with the proper number of
    /// registers for the given type. This is meant to be used with
    /// deferred allocation errors (see `Lower::alloc_tmp()`).
    fn bogus_for_deferred_error(&self, ty: Type) -> ValueRegs<Reg> {
        let (regclasses, _tys) = I::rc_for_type(ty).expect("must have valid type");
        match regclasses {
            &[rc0] => ValueRegs::one(VReg::new(0, rc0).into()),
            &[rc0, rc1] => ValueRegs::two(VReg::new(0, rc0).into(), VReg::new(1, rc1).into()),
            _ => panic!("Value must reside in 1 or 2 registers"),
        }
    }

    /// Rewrite any mention of `from` into `to`.
    pub fn set_vreg_alias(&mut self, from: Reg, to: Reg) {
        let from = from.into();
        let resolved_to = self.resolve_vreg_alias(to.into());
        // Disallow cycles (see below).
        assert_ne!(resolved_to, from);

        // Maintain the invariant that PCC facts only exist on vregs
        // which aren't aliases. We want to preserve whatever was
        // stated about the vreg before its producer was lowered.
        if let Some(fact) = self.facts[from.vreg()].take() {
            self.set_fact(resolved_to, fact);
        }

        let old_alias = self.vreg_aliases.insert(from, resolved_to);
        debug_assert_eq!(old_alias, None);
    }

    fn resolve_vreg_alias(&self, mut vreg: regalloc2::VReg) -> regalloc2::VReg {
        // We prevent cycles from existing by resolving targets of
        // aliases eagerly before setting them. If the target resolves
        // to the origin of the alias, then a cycle would be created
        // and the alias is disallowed. Because of the structure of
        // SSA code (one instruction can refer to another's defs but
        // not vice-versa, except indirectly through
        // phis/blockparams), cycles should not occur as we use
        // aliases to redirect vregs to the temps that actually define
        // them.
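        //
        // For example: if the table maps v1 -> v2 and v2 -> v5, then
        // resolving v1 follows the chain to v5. (Eager resolution in
        // `set_vreg_alias` keeps such chains short in practice.)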
        while let Some(to) = self.vreg_aliases.get(&vreg) {
            vreg = *to;
        }
        vreg
    }

    #[inline]
    fn debug_assert_no_vreg_aliases(&self, mut list: impl Iterator<Item = VReg>) {
        debug_assert!(list.all(|vreg| !self.vreg_aliases.contains_key(&vreg)));
    }

    /// Set the proof-carrying code fact on a given virtual register.
    ///
    /// Returns the old fact, if any (only one fact can be stored).
    fn set_fact(&mut self, vreg: regalloc2::VReg, fact: Fact) -> Option<Fact> {
        trace!("vreg {:?} has fact: {:?}", vreg, fact);
        debug_assert!(!self.vreg_aliases.contains_key(&vreg));
        self.facts[vreg.vreg()].replace(fact)
    }

    /// Set a fact only if one doesn't already exist.
    pub fn set_fact_if_missing(&mut self, vreg: VirtualReg, fact: Fact) {
        let vreg = self.resolve_vreg_alias(vreg.into());
        if self.facts[vreg.vreg()].is_none() {
            self.set_fact(vreg, fact);
        }
    }

    /// Allocate a fresh ValueRegs, with a given fact to apply if
    /// the value fits in one VReg.
    pub fn alloc_with_maybe_fact(
        &mut self,
        ty: Type,
        fact: Option<Fact>,
    ) -> CodegenResult<ValueRegs<Reg>> {
        let result = self.alloc(ty)?;

        // Ensure that we don't lose a fact on a value that splits
        // into multiple VRegs.
        assert!(result.len() == 1 || fact.is_none());
        if let Some(fact) = fact {
            self.set_fact(result.regs()[0].into(), fact);
        }

        Ok(result)
    }
}

/// This structure tracks the large constants used in VCode that will be emitted separately by the
/// [MachBuffer].
///
/// First, during the lowering phase, constants are inserted using
/// [VCodeConstants::insert]; an intermediate handle, `VCodeConstant`, tracks what constants are
/// used in this phase. Some deduplication is performed, when possible, as constant
/// values are inserted.
///
/// Secondly, during the emission phase, the [MachBuffer] assigns [MachLabel]s for each of the
/// constants so that instructions can refer to the value's memory location. The [MachBuffer]
/// then writes the constant values to the buffer.
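///
/// ```ignore
/// // Hypothetical usage sketch: deduplicable variants (e.g. `Pool`)
/// // return the same handle when inserted twice, while `Generated`
/// // data always gets a fresh handle. `constant` and `data` here are
/// // assumed to come from the IR's constant pool.
/// let mut constants = VCodeConstants::with_capacity(4);
/// let a = constants.insert(VCodeConstantData::Pool(constant, data.clone()));
/// let b = constants.insert(VCodeConstantData::Pool(constant, data));
/// assert_eq!(a, b);
/// ```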
#[derive(Default)]
pub struct VCodeConstants {
    constants: PrimaryMap<VCodeConstant, VCodeConstantData>,
    pool_uses: HashMap<Constant, VCodeConstant>,
    well_known_uses: HashMap<*const [u8], VCodeConstant>,
    u64s: HashMap<[u8; 8], VCodeConstant>,
}
impl VCodeConstants {
    /// Initialize the structure with the expected number of constants.
    pub fn with_capacity(expected_num_constants: usize) -> Self {
        Self {
            constants: PrimaryMap::with_capacity(expected_num_constants),
            pool_uses: HashMap::with_capacity(expected_num_constants),
            well_known_uses: HashMap::new(),
            u64s: HashMap::new(),
        }
    }

    /// Insert a constant; using this method indicates that a constant value will be used and thus
    /// will be emitted to the `MachBuffer`. The current implementation can deduplicate constants
    /// that are [VCodeConstantData::Pool] or [VCodeConstantData::WellKnown] but not
    /// [VCodeConstantData::Generated].
    pub fn insert(&mut self, data: VCodeConstantData) -> VCodeConstant {
        match data {
            VCodeConstantData::Generated(_) => self.constants.push(data),
            VCodeConstantData::Pool(constant, _) => match self.pool_uses.get(&constant) {
                None => {
                    let vcode_constant = self.constants.push(data);
                    self.pool_uses.insert(constant, vcode_constant);
                    vcode_constant
                }
                Some(&vcode_constant) => vcode_constant,
            },
            VCodeConstantData::WellKnown(data_ref) => {
                match self.well_known_uses.entry(data_ref as *const [u8]) {
                    Entry::Vacant(v) => {
                        let vcode_constant = self.constants.push(data);
                        v.insert(vcode_constant);
                        vcode_constant
                    }
                    Entry::Occupied(o) => *o.get(),
                }
            }
            VCodeConstantData::U64(value) => match self.u64s.entry(value) {
                Entry::Vacant(v) => {
                    let vcode_constant = self.constants.push(data);
                    v.insert(vcode_constant);
                    vcode_constant
                }
                Entry::Occupied(o) => *o.get(),
            },
        }
    }

    /// Return the number of constants inserted.
    pub fn len(&self) -> usize {
        self.constants.len()
    }

    /// Iterate over the `VCodeConstant` keys inserted in this structure.
    pub fn keys(&self) -> Keys<VCodeConstant> {
        self.constants.keys()
    }

    /// Iterate over the `VCodeConstant` keys and their associated data
    /// inserted in this structure.
    pub fn iter(&self) -> impl Iterator<Item = (VCodeConstant, &VCodeConstantData)> {
        self.constants.iter()
    }

    /// Returns the data associated with the specified constant.
    pub fn get(&self, c: VCodeConstant) -> &VCodeConstantData {
        &self.constants[c]
    }

    /// Checks if the given [VCodeConstantData] is registered as
    /// used by the pool.
    pub fn pool_uses(&self, constant: &VCodeConstantData) -> bool {
        match constant {
            VCodeConstantData::Pool(c, _) => self.pool_uses.contains_key(c),
            _ => false,
        }
    }
}

/// A use of a constant by one or more VCode instructions; see [VCodeConstants].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct VCodeConstant(u32);
entity_impl!(VCodeConstant);

/// Identify the different types of constant that can be inserted into [VCodeConstants]. Tracking
/// these separately instead of as raw byte buffers allows us to avoid some duplication.
pub enum VCodeConstantData {
    /// A constant already present in the Cranelift IR
    /// [ConstantPool](crate::ir::constant::ConstantPool).
    Pool(Constant, ConstantData),
    /// A reference to a well-known constant value that is statically encoded within the compiler.
    WellKnown(&'static [u8]),
    /// A constant value generated during lowering; the value may depend on the instruction context
    /// which makes it difficult to de-duplicate--if possible, use other variants.
    Generated(ConstantData),
    /// A constant of at most 64 bits. These are deduplicated as
    /// well. Stored as a fixed-size array of `u8` so that we do not
    /// encounter endianness problems when cross-compiling.
    U64([u8; 8]),
}
impl VCodeConstantData {
    /// Retrieve the constant data as a byte slice.
    pub fn as_slice(&self) -> &[u8] {
        match self {
            VCodeConstantData::Pool(_, d) | VCodeConstantData::Generated(d) => d.as_slice(),
            VCodeConstantData::WellKnown(d) => d,
            VCodeConstantData::U64(value) => &value[..],
        }
    }

    /// Calculate the alignment of the constant data.
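    ///
    /// ```ignore
    /// // Sketch: constants that fit in 8 bytes are 8-byte aligned;
    /// // anything larger is 16-byte aligned.
    /// assert_eq!(VCodeConstantData::U64(42u64.to_le_bytes()).alignment(), 8);
    /// ```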
    pub fn alignment(&self) -> u32 {
        if self.as_slice().len() <= 8 {
            8
        } else {
            16
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::mem::size_of;

    #[test]
    fn size_of_constant_structs() {
        assert_eq!(size_of::<Constant>(), 4);
        assert_eq!(size_of::<VCodeConstant>(), 4);
        assert_eq!(size_of::<ConstantData>(), 24);
        assert_eq!(size_of::<VCodeConstantData>(), 32);
        assert_eq!(
            size_of::<PrimaryMap<VCodeConstant, VCodeConstantData>>(),
            24
        );
        // TODO: The VCodeConstants structure's memory size could be further optimized.
        // With certain versions of Rust, each `HashMap` in `VCodeConstants` occupied at
        // least 48 bytes, making an empty `VCodeConstants` cost 120 bytes.
    }
}