//! `rustc_mir_transform`: MIR transformation passes and the queries that drive them.

1// tidy-alphabetical-start
2#![feature(array_windows)]
3#![feature(assert_matches)]
4#![feature(box_patterns)]
5#![feature(const_type_name)]
6#![feature(cow_is_borrowed)]
7#![feature(file_buffered)]
8#![feature(if_let_guard)]
9#![feature(impl_trait_in_assoc_type)]
10#![feature(try_blocks)]
11#![feature(yeet_expr)]
12// tidy-alphabetical-end
13
14use hir::ConstContext;
15use required_consts::RequiredConstsVisitor;
16use rustc_const_eval::check_consts::{self, ConstCx};
17use rustc_const_eval::util;
18use rustc_data_structures::fx::FxIndexSet;
19use rustc_data_structures::steal::Steal;
20use rustc_hir as hir;
21use rustc_hir::def::{CtorKind, DefKind};
22use rustc_hir::def_id::LocalDefId;
23use rustc_index::IndexVec;
24use rustc_middle::mir::{
25    AnalysisPhase, Body, CallSource, ClearCrossCrate, ConstOperand, ConstQualifs, LocalDecl,
26    MirPhase, Operand, Place, ProjectionElem, Promoted, RuntimePhase, Rvalue, START_BLOCK,
27    SourceInfo, Statement, StatementKind, TerminatorKind,
28};
29use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
30use rustc_middle::util::Providers;
31use rustc_middle::{bug, query, span_bug};
32use rustc_mir_build::builder::build_mir;
33use rustc_span::source_map::Spanned;
34use rustc_span::{DUMMY_SP, sym};
35use tracing::debug;
36
37#[macro_use]
38mod pass_manager;
39
40use std::sync::LazyLock;
41
42use pass_manager::{self as pm, Lint, MirLint, MirPass, WithMinOptLevel};
43
44mod check_pointers;
45mod cost_checker;
46mod cross_crate_inline;
47mod deduce_param_attrs;
48mod elaborate_drop;
49mod errors;
50mod ffi_unwind_calls;
51mod lint;
52mod lint_tail_expr_drop_order;
53mod patch;
54mod shim;
55mod ssa;
56
/// We import passes via this macro so that we can have a static list of pass names
/// (used to verify CLI arguments). It takes a list of modules, followed by the passes
/// declared within them.
/// ```ignore,macro-test
/// declare_passes! {
///     // Declare a single pass from the module `abort_unwinding_calls`
///     mod abort_unwinding_calls : AbortUnwindingCalls;
///     // When passes are grouped together as an enum, declare the two constituent passes
///     mod add_call_guards : AddCallGuards {
///         AllCallEdges,
///         CriticalCallEdges
///     };
///     // Declares multiple pass groups, each containing their own constituent passes
///     mod simplify : SimplifyCfg {
///         Initial,
///         /* omitted */
///     }, SimplifyLocals {
///         BeforeConstProp,
///         /* omitted */
///     };
/// }
/// ```
macro_rules! declare_passes {
    (
        // Each entry: optional visibility, a module name, and one or more pass
        // type names, each optionally followed by a braced list of the
        // enum variants that act as distinct sub-passes.
        $(
            $vis:vis mod $mod_name:ident : $($pass_name:ident $( { $($ident:ident),* } )?),+ $(,)?;
        )*
    ) => {
        $(
            // Emit the module declaration itself, with the requested visibility.
            $vis mod $mod_name;
            $(
                // Make sure the type name is correct
                #[allow(unused_imports)]
                use $mod_name::$pass_name as _;
            )+
        )*

        // Static set of every declared pass name, built lazily on first use.
        // This is what CLI argument verification checks pass names against.
        static PASS_NAMES: LazyLock<FxIndexSet<&str>> = LazyLock::new(|| [
            // Fake marker pass
            "PreCodegen",
            $(
                $(
                    stringify!($pass_name),
                    $(
                        $(
                            // Variant-grouped passes contribute each variant's own name.
                            $mod_name::$pass_name::$ident.name(),
                        )*
                    )?
                )+
            )*
        ].into_iter().collect());
    };
}
110
// Registry of every pass module and pass type in this crate; also populates
// `PASS_NAMES`. Note that the order here is only a declaration order — the
// actual execution order of passes is given by the explicit pass lists in the
// `run_*_passes` functions below.
declare_passes! {
    mod abort_unwinding_calls : AbortUnwindingCalls;
    mod add_call_guards : AddCallGuards { AllCallEdges, CriticalCallEdges };
    mod add_moves_for_packed_drops : AddMovesForPackedDrops;
    mod add_retag : AddRetag;
    mod add_subtyping_projections : Subtyper;
    mod check_inline : CheckForceInline;
    mod check_call_recursion : CheckCallRecursion, CheckDropRecursion;
    mod check_alignment : CheckAlignment;
    mod check_const_item_mutation : CheckConstItemMutation;
    mod check_null : CheckNull;
    mod check_packed_ref : CheckPackedRef;
    // This pass is public to allow external drivers to perform MIR cleanup
    pub mod cleanup_post_borrowck : CleanupPostBorrowck;

    mod copy_prop : CopyProp;
    mod coroutine : StateTransform;
    mod coverage : InstrumentCoverage;
    mod ctfe_limit : CtfeLimit;
    mod dataflow_const_prop : DataflowConstProp;
    mod dead_store_elimination : DeadStoreElimination {
        Initial,
        Final
    };
    mod deref_separator : Derefer;
    mod dest_prop : DestinationPropagation;
    pub mod dump_mir : Marker;
    mod early_otherwise_branch : EarlyOtherwiseBranch;
    mod elaborate_box_derefs : ElaborateBoxDerefs;
    mod elaborate_drops : ElaborateDrops;
    mod function_item_references : FunctionItemReferences;
    mod gvn : GVN;
    // Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
    // by custom rustc drivers, running all the steps by themselves. See #114628.
    pub mod inline : Inline, ForceInline;
    mod impossible_predicates : ImpossiblePredicates;
    mod instsimplify : InstSimplify { BeforeInline, AfterSimplifyCfg };
    mod jump_threading : JumpThreading;
    mod known_panics_lint : KnownPanicsLint;
    mod large_enums : EnumSizeOpt;
    mod lower_intrinsics : LowerIntrinsics;
    mod lower_slice_len : LowerSliceLenCalls;
    mod match_branches : MatchBranchSimplification;
    mod mentioned_items : MentionedItems;
    mod multiple_return_terminators : MultipleReturnTerminators;
    mod nrvo : RenameReturnPlace;
    mod post_drop_elaboration : CheckLiveDrops;
    mod prettify : ReorderBasicBlocks, ReorderLocals;
    mod promote_consts : PromoteTemps;
    mod ref_prop : ReferencePropagation;
    mod remove_noop_landing_pads : RemoveNoopLandingPads;
    mod remove_place_mention : RemovePlaceMention;
    mod remove_storage_markers : RemoveStorageMarkers;
    mod remove_uninit_drops : RemoveUninitDrops;
    mod remove_unneeded_drops : RemoveUnneededDrops;
    mod remove_zsts : RemoveZsts;
    mod required_consts : RequiredConstsVisitor;
    mod post_analysis_normalize : PostAnalysisNormalize;
    mod sanity_check : SanityCheck;
    // This pass is public to allow external drivers to perform MIR cleanup
    pub mod simplify :
        SimplifyCfg {
            Initial,
            PromoteConsts,
            RemoveFalseEdges,
            PostAnalysis,
            PreOptimizations,
            Final,
            MakeShim,
            AfterUnreachableEnumBranching
        },
        SimplifyLocals {
            BeforeConstProp,
            AfterGVN,
            Final
        };
    mod simplify_branches : SimplifyConstCondition {
        AfterConstProp,
        Final
    };
    mod simplify_comparison_integral : SimplifyComparisonIntegral;
    mod single_use_consts : SingleUseConsts;
    mod sroa : ScalarReplacementOfAggregates;
    mod strip_debuginfo : StripDebugInfo;
    mod unreachable_enum_branching : UnreachableEnumBranching;
    mod unreachable_prop : UnreachablePropagation;
    mod validate : Validator;
}
199
200rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
201
/// Installs this crate's query providers (MIR building, promotion, drop
/// elaboration, optimization, shims, inlining helpers, etc.) into `providers`.
pub fn provide(providers: &mut Providers) {
    // Sub-modules that install their own providers first.
    coverage::query::provide(providers);
    ffi_unwind_calls::provide(providers);
    shim::provide(providers);
    cross_crate_inline::provide(providers);
    providers.queries = query::Providers {
        mir_keys,
        mir_built,
        mir_const_qualif,
        mir_promoted,
        mir_drops_elaborated_and_const_checked,
        mir_for_ctfe,
        mir_coroutine_witnesses: coroutine::mir_coroutine_witnesses,
        optimized_mir,
        is_mir_available,
        // CTFE MIR availability coincides with plain MIR availability.
        is_ctfe_mir_available: is_mir_available,
        mir_callgraph_reachable: inline::cycle::mir_callgraph_reachable,
        mir_inliner_callees: inline::cycle::mir_inliner_callees,
        promoted_mir,
        deduced_param_attrs: deduce_param_attrs::deduced_param_attrs,
        coroutine_by_move_body_def_id: coroutine::coroutine_by_move_body_def_id,
        // Keep whatever providers were already installed for all other queries.
        ..providers.queries
    };
}
226
/// Rewrites every call to the `const_eval_select` intrinsic in `body` into a
/// direct call to one of its two callee arguments: the compile-time one when
/// `context` is `Const`, the runtime one otherwise. The intrinsic's tupled
/// arguments are untupled into individual call arguments via field projections.
fn remap_mir_for_const_eval_select<'tcx>(
    tcx: TyCtxt<'tcx>,
    mut body: Body<'tcx>,
    context: hir::Constness,
) -> Body<'tcx> {
    for bb in body.basic_blocks.as_mut().iter_mut() {
        let terminator = bb.terminator.as_mut().expect("invalid terminator");
        match terminator.kind {
            // Only direct calls to the `const_eval_select` intrinsic are rewritten.
            TerminatorKind::Call {
                func: Operand::Constant(box ConstOperand { ref const_, .. }),
                ref mut args,
                destination,
                target,
                unwind,
                fn_span,
                ..
            } if let ty::FnDef(def_id, _) = *const_.ty().kind()
                && tcx.is_intrinsic(def_id, sym::const_eval_select) =>
            {
                // The intrinsic takes exactly three arguments: the tupled
                // arguments plus the two candidate callees.
                let Ok([tupled_args, called_in_const, called_at_rt]) = take_array(args) else {
                    unreachable!()
                };
                let ty = tupled_args.node.ty(&body.local_decls, tcx);
                let fields = ty.tuple_fields();
                let num_args = fields.len();
                // Pick the callee that matches the evaluation context.
                let func =
                    if context == hir::Constness::Const { called_in_const } else { called_at_rt };
                // `method` records whether projections out of the tuple should be
                // `Move` or `Copy` operands, matching the original tuple operand.
                let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) =
                    match tupled_args.node {
                        Operand::Constant(_) => {
                            // There is no good way of extracting a tuple arg from a constant
                            // (const generic stuff) so we just create a temporary and deconstruct
                            // that.
                            let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
                            bb.statements.push(Statement {
                                source_info: SourceInfo::outermost(fn_span),
                                kind: StatementKind::Assign(Box::new((
                                    local.into(),
                                    Rvalue::Use(tupled_args.node.clone()),
                                ))),
                            });
                            (Operand::Move, local.into())
                        }
                        Operand::Move(place) => (Operand::Move, place),
                        Operand::Copy(place) => (Operand::Copy, place),
                    };
                // Untuple: one field projection per tuple element becomes one
                // argument of the rewritten call.
                let place_elems = place.projection;
                let arguments = (0..num_args)
                    .map(|x| {
                        let mut place_elems = place_elems.to_vec();
                        place_elems.push(ProjectionElem::Field(x.into(), fields[x]));
                        let projection = tcx.mk_place_elems(&place_elems);
                        let place = Place { local: place.local, projection };
                        Spanned { node: method(place), span: DUMMY_SP }
                    })
                    .collect();
                terminator.kind = TerminatorKind::Call {
                    func: func.node,
                    args: arguments,
                    destination,
                    target,
                    unwind,
                    call_source: CallSource::Misc,
                    fn_span,
                };
            }
            _ => {}
        }
    }
    body
}
298
/// Moves the contents of `b` out as a fixed-size array of length `N`.
///
/// On success, the `N` elements are returned by value and `b` is left holding
/// an empty boxed slice. If the slice's length is not exactly `N`, the
/// (already-taken) original box is handed back in `Err`; note that `b` itself
/// remains empty in that case as well.
fn take_array<T, const N: usize>(b: &mut Box<[T]>) -> Result<[T; N], Box<[T]>> {
    match Box::<[T; N]>::try_from(std::mem::take(b)) {
        Ok(array) => Ok(*array),
        Err(original) => Err(original),
    }
}
303
/// Query provider: MIR exists for `def_id` iff it is in the `mir_keys` set.
/// Also installed verbatim as the `is_ctfe_mir_available` provider (see `provide`).
fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
    tcx.mir_keys(()).contains(&def_id)
}
307
/// Finds the full set of `DefId`s within the current crate that have
/// MIR associated with them.
fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
    // All body-owners have MIR associated with them.
    let mut set: FxIndexSet<_> = tcx.hir_body_owners().collect();

    // Remove the fake bodies for `global_asm!`, since they're not useful
    // to be emitted (`--emit=mir`) or encoded (in metadata).
    set.retain(|&def_id| !matches!(tcx.def_kind(def_id), DefKind::GlobalAsm));

    // Coroutine-closures (e.g. async closures) have an additional by-move MIR
    // body that isn't in the HIR.
    for body_owner in tcx.hir_body_owners() {
        if let DefKind::Closure = tcx.def_kind(body_owner)
            && tcx.needs_coroutine_by_move_body_def_id(body_owner.to_def_id())
        {
            set.insert(tcx.coroutine_by_move_body_def_id(body_owner).expect_local());
        }
    }

    // tuple struct/variant constructors have MIR, but they don't have a BodyId,
    // so we need to build them separately.
    for item in tcx.hir_crate_items(()).free_items() {
        if let DefKind::Struct | DefKind::Enum = tcx.def_kind(item.owner_id) {
            for variant in tcx.adt_def(item.owner_id).variants() {
                // Only function-like constructors (`CtorKind::Fn`) are collected here.
                if let Some((CtorKind::Fn, ctor_def_id)) = variant.ctor {
                    set.insert(ctor_def_id.expect_local());
                }
            }
        }
    }

    set
}
342
/// Const-checks the freshly built MIR of `def` and returns the qualifications
/// of its return place. Must only be called on const fns and const items.
fn mir_const_qualif(tcx: TyCtxt<'_>, def: LocalDefId) -> ConstQualifs {
    // N.B., this `borrow()` is guaranteed to be valid (i.e., the value
    // cannot yet be stolen), because `mir_promoted()`, which steals
    // from `mir_built()`, forces this query to execute before
    // performing the steal.
    let body = &tcx.mir_built(def).borrow();
    let ccx = check_consts::ConstCx::new(tcx, body);
    // No need to const-check a non-const `fn`.
    match ccx.const_kind {
        Some(ConstContext::Const { .. } | ConstContext::Static(_) | ConstContext::ConstFn) => {}
        None => span_bug!(
            tcx.def_span(def),
            "`mir_const_qualif` should only be called on const fns and const items"
        ),
    }

    if body.return_ty().references_error() {
        // It's possible to reach here without an error being emitted (#121103).
        tcx.dcx().span_delayed_bug(body.span, "mir_const_qualif: MIR had errors");
        return Default::default();
    }

    let mut validator = check_consts::check::Checker::new(&ccx);
    validator.check_body();

    // We return the qualifs in the return place for every MIR body, even though it is only used
    // when deciding to promote a reference to a `const` for now.
    validator.qualifs_in_return_place()
}
372
373fn mir_built(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
374    let mut body = build_mir(tcx, def);
375
376    pass_manager::dump_mir_for_phase_change(tcx, &body);
377
378    pm::run_passes(
379        tcx,
380        &mut body,
381        &[
382            // MIR-level lints.
383            &Lint(check_inline::CheckForceInline),
384            &Lint(check_call_recursion::CheckCallRecursion),
385            &Lint(check_packed_ref::CheckPackedRef),
386            &Lint(check_const_item_mutation::CheckConstItemMutation),
387            &Lint(function_item_references::FunctionItemReferences),
388            // What we need to do constant evaluation.
389            &simplify::SimplifyCfg::Initial,
390            &Lint(sanity_check::SanityCheck),
391        ],
392        None,
393        pm::Optimizations::Allowed,
394    );
395    tcx.alloc_steal_mir(body)
396}
397
/// Compute the main MIR body and the list of MIR bodies of the promoteds.
fn mir_promoted(
    tcx: TyCtxt<'_>,
    def: LocalDefId,
) -> (&Steal<Body<'_>>, &Steal<IndexVec<Promoted, Body<'_>>>) {
    // Ensure that we compute the `mir_const_qualif` for constants at
    // this point, before we steal the mir-const result.
    // Also this means promotion can rely on all const checks having been done.

    let const_qualifs = match tcx.def_kind(def) {
        // Fn-like items are only const-checked when they are (or act as) const.
        DefKind::Fn | DefKind::AssocFn | DefKind::Closure
            if tcx.constness(def) == hir::Constness::Const
                || tcx.is_const_default_method(def.to_def_id()) =>
        {
            tcx.mir_const_qualif(def)
        }
        // Const items and statics are always const-checked.
        DefKind::AssocConst
        | DefKind::Const
        | DefKind::Static { .. }
        | DefKind::InlineConst
        | DefKind::AnonConst => tcx.mir_const_qualif(def),
        _ => ConstQualifs::default(),
    };

    // the `has_ffi_unwind_calls` query uses the raw mir, so make sure it is run.
    tcx.ensure_done().has_ffi_unwind_calls(def);

    // the `by_move_body` query uses the raw mir, so make sure it is run.
    if tcx.needs_coroutine_by_move_body_def_id(def.to_def_id()) {
        tcx.ensure_done().coroutine_by_move_body_def_id(def);
    }

    let mut body = tcx.mir_built(def).steal();
    if let Some(error_reported) = const_qualifs.tainted_by_errors {
        body.tainted_by_errors = Some(error_reported);
    }

    // Collect `required_consts` *before* promotion, so if there are any consts being promoted
    // we still add them to the list in the outer MIR body.
    RequiredConstsVisitor::compute_required_consts(&mut body);

    // What we need to run borrowck etc.
    let promote_pass = promote_consts::PromoteTemps::default();
    pm::run_passes(
        tcx,
        &mut body,
        &[&promote_pass, &simplify::SimplifyCfg::PromoteConsts, &coverage::InstrumentCoverage],
        Some(MirPhase::Analysis(AnalysisPhase::Initial)),
        pm::Optimizations::Allowed,
    );

    lint_tail_expr_drop_order::run_lint(tcx, def, &body);

    // The promotion pass accumulated the promoted fragments as a side effect.
    let promoted = promote_pass.promoted_fragments.into_inner();
    (tcx.alloc_steal_mir(body), tcx.alloc_steal_promoted(promoted))
}
454
/// Compute the MIR that is used during CTFE (and thus has no optimizations run on it)
fn mir_for_ctfe(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &Body<'_> {
    // Arena-allocate so the query can hand out a reference to the body.
    tcx.arena.alloc(inner_mir_for_ctfe(tcx, def_id))
}
459
/// Shared implementation of `mir_for_ctfe`; returns the body by value so that
/// `mir_for_ctfe` can arena-allocate it.
fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
    // FIXME: don't duplicate this between the optimized_mir/mir_for_ctfe queries
    if tcx.is_constructor(def.to_def_id()) {
        // There's no reason to run all of the MIR passes on constructors when
        // we can just output the MIR we want directly. This also saves const
        // qualification and borrow checking the trouble of special casing
        // constructors.
        return shim::build_adt_ctor(tcx, def.to_def_id());
    }

    let body = tcx.mir_drops_elaborated_and_const_checked(def);
    let body = match tcx.hir_body_const_context(def) {
        // consts and statics do not have `optimized_mir`, so we can steal the body instead of
        // cloning it.
        Some(hir::ConstContext::Const { .. } | hir::ConstContext::Static(_)) => body.steal(),
        // const fns also need the body for `optimized_mir` (see `inner_optimized_mir`),
        // so only clone here.
        Some(hir::ConstContext::ConstFn) => body.borrow().clone(),
        None => bug!("`mir_for_ctfe` called on non-const {def:?}"),
    };

    // CTFE always evaluates in a const context.
    let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::Const);
    pm::run_passes(tcx, &mut body, &[&ctfe_limit::CtfeLimit], None, pm::Optimizations::Allowed);

    body
}
484
/// Obtain just the main MIR (no promoteds) and run some cleanups on it. This also runs
/// mir borrowck *before* doing so in order to ensure that borrowck can be run and doesn't
/// end up missing the source MIR due to stealing happening.
fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
    // Make sure coroutine witnesses are computed before the MIR is stolen below.
    if tcx.is_coroutine(def.to_def_id()) {
        tcx.ensure_done().mir_coroutine_witnesses(def);
    }

    // We only need to borrowck non-synthetic MIR.
    let tainted_by_errors = if !tcx.is_synthetic_mir(def) {
        tcx.mir_borrowck(tcx.typeck_root_def_id(def.to_def_id()).expect_local()).err()
    } else {
        None
    };

    let is_fn_like = tcx.def_kind(def).is_fn_like();
    if is_fn_like {
        // Do not compute the mir call graph without said call graph actually being used.
        if pm::should_run_pass(tcx, &inline::Inline, pm::Optimizations::Allowed)
            || inline::ForceInline::should_run_pass_for_callee(tcx, def.to_def_id())
        {
            tcx.ensure_done().mir_inliner_callees(ty::InstanceKind::Item(def.to_def_id()));
        }
    }

    let (body, _) = tcx.mir_promoted(def);
    let mut body = body.steal();

    if let Some(error_reported) = tainted_by_errors {
        body.tainted_by_errors = Some(error_reported);
    }

    // Also taint the body if it's within a top-level item that is not well formed.
    //
    // We do this check here and not during `mir_promoted` because that may result
    // in borrowck cycles if WF requires looking into an opaque hidden type.
    let root = tcx.typeck_root_def_id(def.to_def_id());
    match tcx.def_kind(root) {
        DefKind::Fn
        | DefKind::AssocFn
        | DefKind::Static { .. }
        | DefKind::Const
        | DefKind::AssocConst => {
            if let Err(guar) = tcx.ensure_ok().check_well_formed(root.expect_local()) {
                body.tainted_by_errors = Some(guar);
            }
        }
        _ => {}
    }

    // Lower the body all the way to the `Runtime(PostCleanup)` phase.
    run_analysis_to_runtime_passes(tcx, &mut body);

    tcx.alloc_steal_mir(body)
}
539
// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
// by custom rustc drivers, running all the steps by themselves. See #114628.
/// Lowers `body` from `Analysis(Initial)` to `Runtime(PostCleanup)`: analysis
/// cleanup, optional early drop elaboration with live-drop checking, runtime
/// lowering, then runtime cleanup. Each phase transition is asserted.
pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::Initial));
    let did = body.source.def_id();

    debug!("analysis_mir_cleanup({:?})", did);
    run_analysis_cleanup_passes(tcx, body);
    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::PostCleanup));

    // Do a little drop elaboration before const-checking if `const_precise_live_drops` is enabled.
    if check_consts::post_drop_elaboration::checking_enabled(&ConstCx::new(tcx, body)) {
        pm::run_passes(
            tcx,
            body,
            &[
                &remove_uninit_drops::RemoveUninitDrops,
                &simplify::SimplifyCfg::RemoveFalseEdges,
                &Lint(post_drop_elaboration::CheckLiveDrops),
            ],
            None,
            pm::Optimizations::Allowed,
        );
    }

    debug!("runtime_mir_lowering({:?})", did);
    run_runtime_lowering_passes(tcx, body);
    assert!(body.phase == MirPhase::Runtime(RuntimePhase::Initial));

    debug!("runtime_mir_cleanup({:?})", did);
    run_runtime_cleanup_passes(tcx, body);
    assert!(body.phase == MirPhase::Runtime(RuntimePhase::PostCleanup));
}
573
574// FIXME(JakobDegen): Can we make these lists of passes consts?
575
576/// After this series of passes, no lifetime analysis based on borrowing can be done.
577fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
578    let passes: &[&dyn MirPass<'tcx>] = &[
579        &impossible_predicates::ImpossiblePredicates,
580        &cleanup_post_borrowck::CleanupPostBorrowck,
581        &remove_noop_landing_pads::RemoveNoopLandingPads,
582        &simplify::SimplifyCfg::PostAnalysis,
583        &deref_separator::Derefer,
584    ];
585
586    pm::run_passes(
587        tcx,
588        body,
589        passes,
590        Some(MirPhase::Analysis(AnalysisPhase::PostCleanup)),
591        pm::Optimizations::Allowed,
592    );
593}
594
/// Runs the sequence of passes that lowers analysis to runtime MIR, leaving
/// the body in the `Runtime(Initial)` phase.
fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    let passes: &[&dyn MirPass<'tcx>] = &[
        // These next passes must be executed together.
        &add_call_guards::CriticalCallEdges,
        // Must be done before drop elaboration because we need to drop opaque types, too.
        &post_analysis_normalize::PostAnalysisNormalize,
        // Calling this after `PostAnalysisNormalize` ensures that we don't deal with opaque types.
        &add_subtyping_projections::Subtyper,
        &elaborate_drops::ElaborateDrops,
        // Needs to happen after drop elaboration.
        &Lint(check_call_recursion::CheckDropRecursion),
        // This will remove extraneous landing pads which are no longer
        // necessary as well as forcing any call in a non-unwinding
        // function calling a possibly-unwinding function to abort the process.
        &abort_unwinding_calls::AbortUnwindingCalls,
        // AddMovesForPackedDrops needs to run after drop
        // elaboration.
        &add_moves_for_packed_drops::AddMovesForPackedDrops,
        // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`.
        // Otherwise it should run fairly late, but before optimizations begin.
        &add_retag::AddRetag,
        &elaborate_box_derefs::ElaborateBoxDerefs,
        &coroutine::StateTransform,
        &Lint(known_panics_lint::KnownPanicsLint),
    ];
    // Note: validation is deliberately skipped for this phase transition.
    pm::run_passes_no_validate(tcx, body, passes, Some(MirPhase::Runtime(RuntimePhase::Initial)));
}
623
624/// Returns the sequence of passes that do the initial cleanup of runtime MIR.
625fn run_runtime_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
626    let passes: &[&dyn MirPass<'tcx>] = &[
627        &lower_intrinsics::LowerIntrinsics,
628        &remove_place_mention::RemovePlaceMention,
629        &simplify::SimplifyCfg::PreOptimizations,
630    ];
631
632    pm::run_passes(
633        tcx,
634        body,
635        passes,
636        Some(MirPhase::Runtime(RuntimePhase::PostCleanup)),
637        pm::Optimizations::Allowed,
638    );
639
640    // Clear this by anticipation. Optimizations and runtime MIR have no reason to look
641    // into this information, which is meant for borrowck diagnostics.
642    for decl in &mut body.local_decls {
643        decl.local_info = ClearCrossCrate::Clear;
644    }
645}
646
/// Runs the main MIR optimization pipeline on `body`, leaving it in the
/// `Runtime(Optimized)` phase. The pass order below is significant; see the
/// individual comments.
pub(crate) fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    // Shorthand: wrap a pass so that it only runs at mir-opt-level >= 1.
    fn o1<T>(x: T) -> WithMinOptLevel<T> {
        WithMinOptLevel(1, x)
    }

    let def_id = body.source.def_id();
    // Suppress the optimization passes when the function's codegen attributes
    // request no optimization.
    let optimizations = if tcx.def_kind(def_id).has_codegen_attrs()
        && tcx.codegen_fn_attrs(def_id).optimize.do_not_optimize()
    {
        pm::Optimizations::Suppressed
    } else {
        pm::Optimizations::Allowed
    };

    // The main optimizations that we do on MIR.
    pm::run_passes(
        tcx,
        body,
        &[
            // Add some UB checks before any UB gets optimized away.
            &check_alignment::CheckAlignment,
            &check_null::CheckNull,
            // Before inlining: trim down MIR with passes to reduce inlining work.

            // Has to be done before inlining, otherwise actual call will be almost always inlined.
            // Also simple, so can just do first.
            &lower_slice_len::LowerSliceLenCalls,
            // Perform instsimplify before inline to eliminate some trivial calls (like clone
            // shims).
            &instsimplify::InstSimplify::BeforeInline,
            // Perform inlining of `#[rustc_force_inline]`-annotated callees.
            &inline::ForceInline,
            // Perform inlining, which may add a lot of code.
            &inline::Inline,
            // Code from other crates may have storage markers, so this needs to happen after
            // inlining.
            &remove_storage_markers::RemoveStorageMarkers,
            // Inlining and instantiation may introduce ZST and useless drops.
            &remove_zsts::RemoveZsts,
            &remove_unneeded_drops::RemoveUnneededDrops,
            // Type instantiation may create uninhabited enums.
            // Also eliminates some unreachable branches based on variants of enums.
            &unreachable_enum_branching::UnreachableEnumBranching,
            &unreachable_prop::UnreachablePropagation,
            &o1(simplify::SimplifyCfg::AfterUnreachableEnumBranching),
            // Inlining may have introduced a lot of redundant code and a large move pattern.
            // Now, we need to shrink the generated MIR.
            &ref_prop::ReferencePropagation,
            &sroa::ScalarReplacementOfAggregates,
            &multiple_return_terminators::MultipleReturnTerminators,
            // After simplifycfg, it allows us to discover new opportunities for peephole
            // optimizations.
            &instsimplify::InstSimplify::AfterSimplifyCfg,
            &simplify::SimplifyLocals::BeforeConstProp,
            &dead_store_elimination::DeadStoreElimination::Initial,
            &gvn::GVN,
            &simplify::SimplifyLocals::AfterGVN,
            &match_branches::MatchBranchSimplification,
            &dataflow_const_prop::DataflowConstProp,
            &single_use_consts::SingleUseConsts,
            &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
            &jump_threading::JumpThreading,
            &early_otherwise_branch::EarlyOtherwiseBranch,
            &simplify_comparison_integral::SimplifyComparisonIntegral,
            &dest_prop::DestinationPropagation,
            &o1(simplify_branches::SimplifyConstCondition::Final),
            &o1(remove_noop_landing_pads::RemoveNoopLandingPads),
            &o1(simplify::SimplifyCfg::Final),
            // After the last SimplifyCfg, because this wants one-block functions.
            &strip_debuginfo::StripDebugInfo,
            &copy_prop::CopyProp,
            &dead_store_elimination::DeadStoreElimination::Final,
            &nrvo::RenameReturnPlace,
            &simplify::SimplifyLocals::Final,
            &multiple_return_terminators::MultipleReturnTerminators,
            &large_enums::EnumSizeOpt { discrepancy: 128 },
            // Some cleanup necessary at least for LLVM and potentially other codegen backends.
            &add_call_guards::CriticalCallEdges,
            // Cleanup for human readability, off by default.
            &prettify::ReorderBasicBlocks,
            &prettify::ReorderLocals,
            // Dump the end result for testing and debugging purposes.
            &dump_mir::Marker("PreCodegen"),
        ],
        Some(MirPhase::Runtime(RuntimePhase::Optimized)),
        optimizations,
    );
}
735
/// Optimize the MIR and prepare it for codegen.
fn optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> &Body<'_> {
    // Arena-allocate so the query can hand out a reference to the body.
    tcx.arena.alloc(inner_optimized_mir(tcx, did))
}
740
/// Shared implementation of `optimized_mir`; returns the body by value so that
/// `optimized_mir` can arena-allocate it.
fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
    if tcx.is_constructor(did.to_def_id()) {
        // There's no reason to run all of the MIR passes on constructors when
        // we can just output the MIR we want directly. This also saves const
        // qualification and borrow checking the trouble of special casing
        // constructors.
        return shim::build_adt_ctor(tcx, did.to_def_id());
    }

    match tcx.hir_body_const_context(did) {
        // Run the `mir_for_ctfe` query, which depends on `mir_drops_elaborated_and_const_checked`
        // which we are going to steal below. Thus we need to run `mir_for_ctfe` first, so it
        // computes and caches its result.
        Some(hir::ConstContext::ConstFn) => tcx.ensure_done().mir_for_ctfe(did),
        None => {}
        Some(other) => panic!("do not use `optimized_mir` for constants: {other:?}"),
    }
    debug!("about to call mir_drops_elaborated...");
    let body = tcx.mir_drops_elaborated_and_const_checked(did).steal();
    // Codegen uses runtime semantics, so pick the non-const branch of any
    // `const_eval_select` calls.
    let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::NotConst);

    // Don't bother optimizing a body that is already tainted by errors.
    if body.tainted_by_errors.is_some() {
        return body;
    }

    // Before doing anything, remember which items are being mentioned so that the set of items
    // visited does not depend on the optimization level.
    // We do not use `run_passes` for this as that might skip the pass if `injection_phase` is set.
    mentioned_items::MentionedItems.run_pass(tcx, &mut body);

    // If `mir_drops_elaborated_and_const_checked` found that the current body has unsatisfiable
    // predicates, it will shrink the MIR to a single `unreachable` terminator.
    // More generally, if MIR is a lone `unreachable`, there is nothing to optimize.
    if let TerminatorKind::Unreachable = body.basic_blocks[START_BLOCK].terminator().kind
        && body.basic_blocks[START_BLOCK].statements.is_empty()
    {
        return body;
    }

    run_optimization_passes(tcx, &mut body);

    body
}
784
785/// Fetch all the promoteds of an item and prepare their MIR bodies to be ready for
786/// constant evaluation once all generic parameters become known.
787fn promoted_mir(tcx: TyCtxt<'_>, def: LocalDefId) -> &IndexVec<Promoted, Body<'_>> {
788    if tcx.is_constructor(def.to_def_id()) {
789        return tcx.arena.alloc(IndexVec::new());
790    }
791
792    if !tcx.is_synthetic_mir(def) {
793        tcx.ensure_done().mir_borrowck(tcx.typeck_root_def_id(def.to_def_id()).expect_local());
794    }
795    let mut promoted = tcx.mir_promoted(def).1.steal();
796
797    for body in &mut promoted {
798        run_analysis_to_runtime_passes(tcx, body);
799    }
800
801    tcx.arena.alloc(promoted)
802}