clang 21.0.0git
CGExpr.cpp
Go to the documentation of this file.
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGObjCRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CGRecordLayout.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "TargetInfo.h"
27#include "clang/AST/ASTLambda.h"
28#include "clang/AST/Attr.h"
29#include "clang/AST/DeclObjC.h"
30#include "clang/AST/NSAPI.h"
35#include "llvm/ADT/STLExtras.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/ADT/StringExtras.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/LLVMContext.h"
41#include "llvm/IR/MDBuilder.h"
42#include "llvm/IR/MatrixBuilder.h"
43#include "llvm/Support/ConvertUTF.h"
44#include "llvm/Support/Endian.h"
45#include "llvm/Support/MathExtras.h"
46#include "llvm/Support/Path.h"
47#include "llvm/Support/xxhash.h"
48#include "llvm/Transforms/Utils/SanitizerStats.h"
49
50#include <numeric>
51#include <optional>
52#include <string>
53
54using namespace clang;
55using namespace CodeGen;
56
57namespace clang {
58// TODO: Introduce frontend options to enabled per sanitizers, similar to
59// `fsanitize-trap`.
60llvm::cl::opt<bool> ClSanitizeGuardChecks(
61 "ubsan-guard-checks", llvm::cl::Optional,
62 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
63} // namespace clang
64
65//===--------------------------------------------------------------------===//
66// Defines for metadata
67//===--------------------------------------------------------------------===//
68
69// Those values are crucial to be the SAME as in ubsan runtime library.
71 /// An integer type.
72 TK_Integer = 0x0000,
73 /// A floating-point type.
74 TK_Float = 0x0001,
75 /// An _BitInt(N) type.
76 TK_BitInt = 0x0002,
77 /// Any other type. The value representation is unspecified.
78 TK_Unknown = 0xffff
79};
80
81//===--------------------------------------------------------------------===//
82// Miscellaneous Helper Methods
83//===--------------------------------------------------------------------===//
84
85/// CreateTempAlloca - This creates a alloca and inserts it into the entry
86/// block.
88CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
89 const Twine &Name,
90 llvm::Value *ArraySize) {
91 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
92 Alloca->setAlignment(Align.getAsAlign());
93 return RawAddress(Alloca, Ty, Align, KnownNonNull);
94}
95
96/// CreateTempAlloca - This creates a alloca and inserts it into the entry
97/// block. The alloca is casted to default address space if necessary.
99 const Twine &Name,
100 llvm::Value *ArraySize,
101 RawAddress *AllocaAddr) {
102 auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
103 if (AllocaAddr)
104 *AllocaAddr = Alloca;
105 llvm::Value *V = Alloca.getPointer();
106 // Alloca always returns a pointer in alloca address space, which may
107 // be different from the type defined by the language. For example,
108 // in C++ the auto variables are in the default address space. Therefore
109 // cast alloca to the default address space when necessary.
111 auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
112 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
113 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
114 // otherwise alloca is inserted at the current insertion point of the
115 // builder.
116 if (!ArraySize)
117 Builder.SetInsertPoint(getPostAllocaInsertPoint());
120 Builder.getPtrTy(DestAddrSpace), /*non-null*/ true);
121 }
122
123 return RawAddress(V, Ty, Align, KnownNonNull);
124}
125
126/// CreateTempAlloca - This creates an alloca and inserts it into the entry
127/// block if \p ArraySize is nullptr, otherwise inserts it at the current
128/// insertion point of the builder.
129llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
130 const Twine &Name,
131 llvm::Value *ArraySize) {
132 llvm::AllocaInst *Alloca;
133 if (ArraySize)
134 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
135 else
136 Alloca =
137 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
138 ArraySize, Name, AllocaInsertPt->getIterator());
139 if (Allocas) {
140 Allocas->Add(Alloca);
141 }
142 return Alloca;
143}
144
145/// CreateDefaultAlignTempAlloca - This creates an alloca with the
146/// default alignment of the corresponding LLVM type, which is *not*
147/// guaranteed to be related in any way to the expected alignment of
148/// an AST type that might have been lowered to Ty.
150 const Twine &Name) {
151 CharUnits Align =
152 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
153 return CreateTempAlloca(Ty, Align, Name);
154}
155
158 return CreateTempAlloca(ConvertType(Ty), Align, Name);
159}
160
162 RawAddress *Alloca) {
163 // FIXME: Should we prefer the preferred type alignment here?
164 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
165}
166
168 const Twine &Name,
169 RawAddress *Alloca) {
171 /*ArraySize=*/nullptr, Alloca);
172
173 if (Ty->isConstantMatrixType()) {
174 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
175 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
176 ArrayTy->getNumElements());
177
178 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
180 }
181 return Result;
182}
183
185 CharUnits Align,
186 const Twine &Name) {
187 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
188}
189
191 const Twine &Name) {
192 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
193 Name);
194}
195
196/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
197/// expression and compare the result against zero, returning an Int1Ty value.
198llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
199 PGO.setCurrentStmt(E);
200 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
201 llvm::Value *MemPtr = EmitScalarExpr(E);
202 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
203 }
204
205 QualType BoolTy = getContext().BoolTy;
207 CGFPOptionsRAII FPOptsRAII(*this, E);
208 if (!E->getType()->isAnyComplexType())
209 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
210
212 Loc);
213}
214
215/// EmitIgnoredExpr - Emit code to compute the specified expression,
216/// ignoring the result.
218 if (E->isPRValue())
219 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
220
221 // if this is a bitfield-resulting conditional operator, we can special case
222 // emit this. The normal 'EmitLValue' version of this is particularly
223 // difficult to codegen for, since creating a single "LValue" for two
224 // different sized arguments here is not particularly doable.
225 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
227 if (CondOp->getObjectKind() == OK_BitField)
228 return EmitIgnoredConditionalOperator(CondOp);
229 }
230
231 // Just emit it as an l-value and drop the result.
232 EmitLValue(E);
233}
234
235/// EmitAnyExpr - Emit code to compute the specified expression which
236/// can have any type. The result is returned as an RValue struct.
237/// If this is an aggregate expression, AggSlot indicates where the
238/// result should be returned.
240 AggValueSlot aggSlot,
241 bool ignoreResult) {
242 switch (getEvaluationKind(E->getType())) {
243 case TEK_Scalar:
244 return RValue::get(EmitScalarExpr(E, ignoreResult));
245 case TEK_Complex:
246 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
247 case TEK_Aggregate:
248 if (!ignoreResult && aggSlot.isIgnored())
249 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
250 EmitAggExpr(E, aggSlot);
251 return aggSlot.asRValue();
252 }
253 llvm_unreachable("bad evaluation kind");
254}
255
256/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
257/// always be accessible even if no aggregate location is provided.
260
262 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
263 return EmitAnyExpr(E, AggSlot);
264}
265
266/// EmitAnyExprToMem - Evaluate an expression into a given memory
267/// location.
269 Address Location,
270 Qualifiers Quals,
271 bool IsInit) {
272 // FIXME: This function should take an LValue as an argument.
273 switch (getEvaluationKind(E->getType())) {
274 case TEK_Complex:
276 /*isInit*/ false);
277 return;
278
279 case TEK_Aggregate: {
280 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
285 return;
286 }
287
288 case TEK_Scalar: {
289 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
290 LValue LV = MakeAddrLValue(Location, E->getType());
292 return;
293 }
294 }
295 llvm_unreachable("bad evaluation kind");
296}
297
299 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
300 QualType Type = LV.getType();
301 switch (getEvaluationKind(Type)) {
302 case TEK_Complex:
303 EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
304 return;
305 case TEK_Aggregate:
309 AggValueSlot::MayOverlap, IsZeroed));
310 return;
311 case TEK_Scalar:
312 if (LV.isSimple())
313 EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
314 else
316 return;
317 }
318 llvm_unreachable("bad evaluation kind");
319}
320
321static void
323 const Expr *E, Address ReferenceTemporary) {
324 // Objective-C++ ARC:
325 // If we are binding a reference to a temporary that has ownership, we
326 // need to perform retain/release operations on the temporary.
327 //
328 // FIXME: This should be looking at E, not M.
329 if (auto Lifetime = M->getType().getObjCLifetime()) {
330 switch (Lifetime) {
333 // Carry on to normal cleanup handling.
334 break;
335
337 // Nothing to do; cleaned up by an autorelease pool.
338 return;
339
342 switch (StorageDuration Duration = M->getStorageDuration()) {
343 case SD_Static:
344 // Note: we intentionally do not register a cleanup to release
345 // the object on program termination.
346 return;
347
348 case SD_Thread:
349 // FIXME: We should probably register a cleanup in this case.
350 return;
351
352 case SD_Automatic:
356 if (Lifetime == Qualifiers::OCL_Strong) {
357 const ValueDecl *VD = M->getExtendingDecl();
358 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
359 VD->hasAttr<ObjCPreciseLifetimeAttr>();
363 } else {
364 // __weak objects always get EH cleanups; otherwise, exceptions
365 // could cause really nasty crashes instead of mere leaks.
368 }
369 if (Duration == SD_FullExpression)
370 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
371 M->getType(), *Destroy,
373 else
374 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
375 M->getType(),
376 *Destroy, CleanupKind & EHCleanup);
377 return;
378
379 case SD_Dynamic:
380 llvm_unreachable("temporary cannot have dynamic storage duration");
381 }
382 llvm_unreachable("unknown storage duration");
383 }
384 }
385
386 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
387 if (const RecordType *RT =
389 // Get the destructor for the reference temporary.
390 auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
391 if (!ClassDecl->hasTrivialDestructor())
392 ReferenceTemporaryDtor = ClassDecl->getDestructor();
393 }
394
395 if (!ReferenceTemporaryDtor)
396 return;
397
398 // Call the destructor for the temporary.
399 switch (M->getStorageDuration()) {
400 case SD_Static:
401 case SD_Thread: {
402 llvm::FunctionCallee CleanupFn;
403 llvm::Constant *CleanupArg;
404 if (E->getType()->isArrayType()) {
406 ReferenceTemporary, E->getType(),
408 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
409 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
410 } else {
411 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
412 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
413 CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
414 }
416 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
417 break;
418 }
419
421 CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
423 CGF.getLangOpts().Exceptions);
424 break;
425
426 case SD_Automatic:
428 ReferenceTemporary, E->getType(),
430 CGF.getLangOpts().Exceptions);
431 break;
432
433 case SD_Dynamic:
434 llvm_unreachable("temporary cannot have dynamic storage duration");
435 }
436}
437
440 const Expr *Inner,
441 RawAddress *Alloca = nullptr) {
442 auto &TCG = CGF.getTargetHooks();
443 switch (M->getStorageDuration()) {
445 case SD_Automatic: {
446 // If we have a constant temporary array or record try to promote it into a
447 // constant global under the same rules a normal constant would've been
448 // promoted. This is easier on the optimizer and generally emits fewer
449 // instructions.
450 QualType Ty = Inner->getType();
451 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
452 (Ty->isArrayType() || Ty->isRecordType()) &&
453 Ty.isConstantStorage(CGF.getContext(), true, false))
454 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
455 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
456 auto *GV = new llvm::GlobalVariable(
457 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
458 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
459 llvm::GlobalValue::NotThreadLocal,
461 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
462 GV->setAlignment(alignment.getAsAlign());
463 llvm::Constant *C = GV;
464 if (AS != LangAS::Default)
465 C = TCG.performAddrSpaceCast(
466 CGF.CGM, GV, AS, LangAS::Default,
467 llvm::PointerType::get(
468 CGF.getLLVMContext(),
470 // FIXME: Should we put the new global into a COMDAT?
471 return RawAddress(C, GV->getValueType(), alignment);
472 }
473 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
474 }
475 case SD_Thread:
476 case SD_Static:
477 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
478
479 case SD_Dynamic:
480 llvm_unreachable("temporary can't have dynamic storage duration");
481 }
482 llvm_unreachable("unknown storage duration");
483}
484
485/// Helper method to check if the underlying ABI is AAPCS
486static bool isAAPCS(const TargetInfo &TargetInfo) {
487 return TargetInfo.getABI().starts_with("aapcs");
488}
489
492 const Expr *E = M->getSubExpr();
493
494 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
495 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
496 "Reference should never be pseudo-strong!");
497
498 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
499 // as that will cause the lifetime adjustment to be lost for ARC
500 auto ownership = M->getType().getObjCLifetime();
501 if (ownership != Qualifiers::OCL_None &&
502 ownership != Qualifiers::OCL_ExplicitNone) {
504 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
505 llvm::Type *Ty = ConvertTypeForMem(E->getType());
506 Object = Object.withElementType(Ty);
507
508 // createReferenceTemporary will promote the temporary to a global with a
509 // constant initializer if it can. It can only do this to a value of
510 // ARC-manageable type if the value is global and therefore "immune" to
511 // ref-counting operations. Therefore we have no need to emit either a
512 // dynamic initialization or a cleanup and we can just return the address
513 // of the temporary.
514 if (Var->hasInitializer())
515 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
516
517 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
518 }
519 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
521
522 switch (getEvaluationKind(E->getType())) {
523 default: llvm_unreachable("expected scalar or aggregate expression");
524 case TEK_Scalar:
525 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
526 break;
527 case TEK_Aggregate: {
534 break;
535 }
536 }
537
538 pushTemporaryCleanup(*this, M, E, Object);
539 return RefTempDst;
540 }
541
544 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
545
546 for (const auto &Ignored : CommaLHSs)
547 EmitIgnoredExpr(Ignored);
548
549 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
550 if (opaque->getType()->isRecordType()) {
551 assert(Adjustments.empty());
552 return EmitOpaqueValueLValue(opaque);
553 }
554 }
555
556 // Create and initialize the reference temporary.
557 RawAddress Alloca = Address::invalid();
558 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
559 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
560 Object.getPointer()->stripPointerCasts())) {
561 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
562 Object = Object.withElementType(TemporaryType);
563 // If the temporary is a global and has a constant initializer or is a
564 // constant temporary that we promoted to a global, we may have already
565 // initialized it.
566 if (!Var->hasInitializer()) {
567 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
568 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
569 }
570 } else {
571 switch (M->getStorageDuration()) {
572 case SD_Automatic:
573 if (auto *Size = EmitLifetimeStart(
574 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
575 Alloca.getPointer())) {
576 pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
577 Alloca, Size);
578 }
579 break;
580
581 case SD_FullExpression: {
582 if (!ShouldEmitLifetimeMarkers)
583 break;
584
585 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
586 // marker. Instead, start the lifetime of a conditional temporary earlier
587 // so that it's unconditional. Don't do this with sanitizers which need
588 // more precise lifetime marks. However when inside an "await.suspend"
589 // block, we should always avoid conditional cleanup because it creates
590 // boolean marker that lives across await_suspend, which can destroy coro
591 // frame.
592 ConditionalEvaluation *OldConditional = nullptr;
593 CGBuilderTy::InsertPoint OldIP;
595 ((!SanOpts.has(SanitizerKind::HWAddress) &&
596 !SanOpts.has(SanitizerKind::Memory) &&
597 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
598 inSuspendBlock())) {
599 OldConditional = OutermostConditional;
600 OutermostConditional = nullptr;
601
602 OldIP = Builder.saveIP();
603 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
604 Builder.restoreIP(CGBuilderTy::InsertPoint(
605 Block, llvm::BasicBlock::iterator(Block->back())));
606 }
607
608 if (auto *Size = EmitLifetimeStart(
609 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
610 Alloca.getPointer())) {
611 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
612 Size);
613 }
614
615 if (OldConditional) {
616 OutermostConditional = OldConditional;
617 Builder.restoreIP(OldIP);
618 }
619 break;
620 }
621
622 default:
623 break;
624 }
625 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
626 }
627 pushTemporaryCleanup(*this, M, E, Object);
628
629 // Perform derived-to-base casts and/or field accesses, to get from the
630 // temporary object we created (and, potentially, for which we extended
631 // the lifetime) to the subobject we're binding the reference to.
632 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
633 switch (Adjustment.Kind) {
635 Object =
636 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
637 Adjustment.DerivedToBase.BasePath->path_begin(),
638 Adjustment.DerivedToBase.BasePath->path_end(),
639 /*NullCheckValue=*/ false, E->getExprLoc());
640 break;
641
644 LV = EmitLValueForField(LV, Adjustment.Field);
645 assert(LV.isSimple() &&
646 "materialized temporary field is not a simple lvalue");
647 Object = LV.getAddress();
648 break;
649 }
650
652 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
654 Adjustment.Ptr.MPT);
655 break;
656 }
657 }
658 }
659
660 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
661}
662
663RValue
665 // Emit the expression as an lvalue.
666 LValue LV = EmitLValue(E);
667 assert(LV.isSimple());
668 llvm::Value *Value = LV.getPointer(*this);
669
671 // C++11 [dcl.ref]p5 (as amended by core issue 453):
672 // If a glvalue to which a reference is directly bound designates neither
673 // an existing object or function of an appropriate type nor a region of
674 // storage of suitable size and alignment to contain an object of the
675 // reference's type, the behavior is undefined.
676 QualType Ty = E->getType();
678 }
679
680 return RValue::get(Value);
681}
682
683
684/// getAccessedFieldNo - Given an encoded value and a result number, return the
685/// input field number being accessed.
686unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
687 const llvm::Constant *Elts) {
688 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
689 ->getZExtValue();
690}
691
692static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
693 llvm::Value *Ptr) {
694 llvm::Value *A0 =
695 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
696 llvm::Value *A1 =
697 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
698 return Builder.CreateXor(Acc, A1);
699}
700
701bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
702 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
704}
705
706bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
708 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
709 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
712}
713
715 return SanOpts.has(SanitizerKind::Null) ||
716 SanOpts.has(SanitizerKind::Alignment) ||
717 SanOpts.has(SanitizerKind::ObjectSize) ||
718 SanOpts.has(SanitizerKind::Vptr);
719}
720
722 llvm::Value *Ptr, QualType Ty,
723 CharUnits Alignment,
724 SanitizerSet SkippedChecks,
725 llvm::Value *ArraySize) {
727 return;
728
729 // Don't check pointers outside the default address space. The null check
730 // isn't correct, the object-size check isn't supported by LLVM, and we can't
731 // communicate the addresses to the runtime handler for the vptr check.
732 if (Ptr->getType()->getPointerAddressSpace())
733 return;
734
735 // Don't check pointers to volatile data. The behavior here is implementation-
736 // defined.
737 if (Ty.isVolatileQualified())
738 return;
739
740 SanitizerScope SanScope(this);
741
743 Checks;
744 llvm::BasicBlock *Done = nullptr;
745
746 // Quickly determine whether we have a pointer to an alloca. It's possible
747 // to skip null checks, and some alignment checks, for these pointers. This
748 // can reduce compile-time significantly.
749 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
750
751 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
752 llvm::Value *IsNonNull = nullptr;
753 bool IsGuaranteedNonNull =
754 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
755 bool AllowNullPointers = isNullPointerAllowed(TCK);
756 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
757 !IsGuaranteedNonNull) {
758 // The glvalue must not be an empty glvalue.
759 IsNonNull = Builder.CreateIsNotNull(Ptr);
760
761 // The IR builder can constant-fold the null check if the pointer points to
762 // a constant.
763 IsGuaranteedNonNull = IsNonNull == True;
764
765 // Skip the null check if the pointer is known to be non-null.
766 if (!IsGuaranteedNonNull) {
767 if (AllowNullPointers) {
768 // When performing pointer casts, it's OK if the value is null.
769 // Skip the remaining checks in that case.
770 Done = createBasicBlock("null");
771 llvm::BasicBlock *Rest = createBasicBlock("not.null");
772 Builder.CreateCondBr(IsNonNull, Rest, Done);
773 EmitBlock(Rest);
774 } else {
775 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
776 }
777 }
778 }
779
780 if (SanOpts.has(SanitizerKind::ObjectSize) &&
781 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
782 !Ty->isIncompleteType()) {
784 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
785 if (ArraySize)
786 Size = Builder.CreateMul(Size, ArraySize);
787
788 // Degenerate case: new X[0] does not need an objectsize check.
789 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
790 if (!ConstantSize || !ConstantSize->isNullValue()) {
791 // The glvalue must refer to a large enough storage region.
792 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
793 // to check this.
794 // FIXME: Get object address space
795 llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
796 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
797 llvm::Value *Min = Builder.getFalse();
798 llvm::Value *NullIsUnknown = Builder.getFalse();
799 llvm::Value *Dynamic = Builder.getFalse();
800 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
801 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
802 Checks.push_back(
803 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
804 }
805 }
806
807 llvm::MaybeAlign AlignVal;
808 llvm::Value *PtrAsInt = nullptr;
809
810 if (SanOpts.has(SanitizerKind::Alignment) &&
811 !SkippedChecks.has(SanitizerKind::Alignment)) {
812 AlignVal = Alignment.getAsMaybeAlign();
813 if (!Ty->isIncompleteType() && !AlignVal)
814 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
815 /*ForPointeeType=*/true)
817
818 // The glvalue must be suitably aligned.
819 if (AlignVal && *AlignVal > llvm::Align(1) &&
820 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
821 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
822 llvm::Value *Align = Builder.CreateAnd(
823 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
824 llvm::Value *Aligned =
825 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
826 if (Aligned != True)
827 Checks.push_back(std::make_pair(Aligned, SanitizerKind::SO_Alignment));
828 }
829 }
830
831 if (Checks.size() > 0) {
832 llvm::Constant *StaticData[] = {
834 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
835 llvm::ConstantInt::get(Int8Ty, TCK)};
836 EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
837 PtrAsInt ? PtrAsInt : Ptr);
838 }
839
840 // If possible, check that the vptr indicates that there is a subobject of
841 // type Ty at offset zero within this object.
842 //
843 // C++11 [basic.life]p5,6:
844 // [For storage which does not refer to an object within its lifetime]
845 // The program has undefined behavior if:
846 // -- the [pointer or glvalue] is used to access a non-static data member
847 // or call a non-static member function
848 if (SanOpts.has(SanitizerKind::Vptr) &&
849 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
850 // Ensure that the pointer is non-null before loading it. If there is no
851 // compile-time guarantee, reuse the run-time null check or emit a new one.
852 if (!IsGuaranteedNonNull) {
853 if (!IsNonNull)
854 IsNonNull = Builder.CreateIsNotNull(Ptr);
855 if (!Done)
856 Done = createBasicBlock("vptr.null");
857 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
858 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
859 EmitBlock(VptrNotNull);
860 }
861
862 // Compute a deterministic hash of the mangled name of the type.
863 SmallString<64> MangledName;
864 llvm::raw_svector_ostream Out(MangledName);
866 Out);
867
868 // Contained in NoSanitizeList based on the mangled type.
869 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
870 Out.str())) {
871 // Load the vptr, and mix it with TypeHash.
872 llvm::Value *TypeHash =
873 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
874
875 llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
876 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
877 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
878 Ty->getAsCXXRecordDecl(),
880 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
881
882 llvm::Value *Hash =
883 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
884 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
885
886 // Look the hash up in our cache.
887 const int CacheSize = 128;
888 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
889 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
890 "__ubsan_vptr_type_cache");
891 llvm::Value *Slot = Builder.CreateAnd(Hash,
892 llvm::ConstantInt::get(IntPtrTy,
893 CacheSize-1));
894 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
895 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
896 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
898
899 // If the hash isn't in the cache, call a runtime handler to perform the
900 // hard work of checking whether the vptr is for an object of the right
901 // type. This will either fill in the cache and return, or produce a
902 // diagnostic.
903 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
904 llvm::Constant *StaticData[] = {
908 llvm::ConstantInt::get(Int8Ty, TCK)
909 };
910 llvm::Value *DynamicData[] = { Ptr, Hash };
911 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
912 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
913 DynamicData);
914 }
915 }
916
917 if (Done) {
918 Builder.CreateBr(Done);
919 EmitBlock(Done);
920 }
921}
922
924 QualType EltTy) {
926 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
927 if (!EltSize)
928 return nullptr;
929
930 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
931 if (!ArrayDeclRef)
932 return nullptr;
933
934 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
935 if (!ParamDecl)
936 return nullptr;
937
938 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
939 if (!POSAttr)
940 return nullptr;
941
942 // Don't load the size if it's a lower bound.
943 int POSType = POSAttr->getType();
944 if (POSType != 0 && POSType != 1)
945 return nullptr;
946
947 // Find the implicit size parameter.
948 auto PassedSizeIt = SizeArguments.find(ParamDecl);
949 if (PassedSizeIt == SizeArguments.end())
950 return nullptr;
951
952 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
953 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
954 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
955 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
956 C.getSizeType(), E->getExprLoc());
957 llvm::Value *SizeOfElement =
958 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
959 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
960}
961
962/// If Base is known to point to the start of an array, return the length of
963/// that array. Return 0 if the length cannot be determined.
964static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
965 const Expr *Base,
966 QualType &IndexedType,
968 StrictFlexArraysLevel) {
969 // For the vector indexing extension, the bound is the number of elements.
970 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
971 IndexedType = Base->getType();
972 return CGF.Builder.getInt32(VT->getNumElements());
973 }
974
975 Base = Base->IgnoreParens();
976
977 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
978 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
979 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
980 StrictFlexArraysLevel)) {
981 CodeGenFunction::SanitizerScope SanScope(&CGF);
982
983 IndexedType = CE->getSubExpr()->getType();
984 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
985 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
986 return CGF.Builder.getInt(CAT->getSize());
987
988 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
989 return CGF.getVLASize(VAT).NumElts;
990 // Ignore pass_object_size here. It's not applicable on decayed pointers.
991 }
992 }
993
994 CodeGenFunction::SanitizerScope SanScope(&CGF);
995
996 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
997 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
998 IndexedType = Base->getType();
999 return POS;
1000 }
1001
1002 return nullptr;
1003}
1004
1005namespace {
1006
1007/// \p StructAccessBase returns the base \p Expr of a field access. It returns
1008/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1009///
1010/// p in p-> a.b.c
1011///
1012/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1013/// looking for:
1014///
1015/// struct s {
1016/// struct s *ptr;
1017/// int count;
1018/// char array[] __attribute__((counted_by(count)));
1019/// };
1020///
1021/// If we have an expression like \p p->ptr->array[index], we want the
1022/// \p MemberExpr for \p p->ptr instead of \p p.
1023class StructAccessBase
1024 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1025 const RecordDecl *ExpectedRD;
1026
1027 bool IsExpectedRecordDecl(const Expr *E) const {
1028 QualType Ty = E->getType();
1029 if (Ty->isPointerType())
1030 Ty = Ty->getPointeeType();
1031 return ExpectedRD == Ty->getAsRecordDecl();
1032 }
1033
1034public:
1035 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1036
1037 //===--------------------------------------------------------------------===//
1038 // Visitor Methods
1039 //===--------------------------------------------------------------------===//
1040
1041 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1042 // horrors like this:
1043 //
1044 // struct S {
1045 // int x, y;
1046 // int blah[] __attribute__((counted_by(x)));
1047 // } s;
1048 //
1049 // int foo(int index, int val) {
1050 // int (S::*IHatePMDs)[] = &S::blah;
1051 // (s.*IHatePMDs)[index] = val;
1052 // }
1053
1054 const Expr *Visit(const Expr *E) {
1056 }
1057
1058 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1059
1060 // These are the types we expect to return (in order of most to least
1061 // likely):
1062 //
1063 // 1. DeclRefExpr - This is the expression for the base of the structure.
1064 // It's exactly what we want to build an access to the \p counted_by
1065 // field.
1066 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1067 // as the flexble array member's lexical enclosing \p RecordDecl. This
1068 // allows us to catch things like: "p->p->array"
1069 // 3. CompoundLiteralExpr - This is for people who create something
1070 // heretical like (struct foo has a flexible array member):
1071 //
1072 // (struct foo){ 1, 2 }.blah[idx];
1073 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1074 return IsExpectedRecordDecl(E) ? E : nullptr;
1075 }
1076 const Expr *VisitMemberExpr(const MemberExpr *E) {
1077 if (IsExpectedRecordDecl(E) && E->isArrow())
1078 return E;
1079 const Expr *Res = Visit(E->getBase());
1080 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1081 }
1082 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1083 return IsExpectedRecordDecl(E) ? E : nullptr;
1084 }
1085 const Expr *VisitCallExpr(const CallExpr *E) {
1086 return IsExpectedRecordDecl(E) ? E : nullptr;
1087 }
1088
1089 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1090 if (IsExpectedRecordDecl(E))
1091 return E;
1092 return Visit(E->getBase());
1093 }
1094 const Expr *VisitCastExpr(const CastExpr *E) {
1095 if (E->getCastKind() == CK_LValueToRValue)
1096 return IsExpectedRecordDecl(E) ? E : nullptr;
1097 return Visit(E->getSubExpr());
1098 }
1099 const Expr *VisitParenExpr(const ParenExpr *E) {
1100 return Visit(E->getSubExpr());
1101 }
1102 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1103 return Visit(E->getSubExpr());
1104 }
1105 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1106 return Visit(E->getSubExpr());
1107 }
1108};
1109
1110} // end anonymous namespace
1111
1113
1115 const FieldDecl *Field,
1116 RecIndicesTy &Indices) {
1117 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1118 int64_t FieldNo = -1;
1119 for (const FieldDecl *FD : RD->fields()) {
1120 if (!Layout.containsFieldDecl(FD))
1121 // This could happen if the field has a struct type that's empty. I don't
1122 // know why either.
1123 continue;
1124
1125 FieldNo = Layout.getLLVMFieldNo(FD);
1126 if (FD == Field) {
1127 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1128 return true;
1129 }
1130
1131 QualType Ty = FD->getType();
1132 if (Ty->isRecordType()) {
1133 if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1134 if (RD->isUnion())
1135 FieldNo = 0;
1136 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1137 return true;
1138 }
1139 }
1140 }
1141
1142 return false;
1143}
1144
1146 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1147 const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1148
1149 // Find the base struct expr (i.e. p in p->a.b.c.d).
1150 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1151 if (!StructBase || StructBase->HasSideEffects(getContext()))
1152 return nullptr;
1153
1154 llvm::Value *Res = nullptr;
1155 if (StructBase->getType()->isPointerType()) {
1156 LValueBaseInfo BaseInfo;
1157 TBAAAccessInfo TBAAInfo;
1158 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1159 Res = Addr.emitRawPointer(*this);
1160 } else if (StructBase->isLValue()) {
1161 LValue LV = EmitLValue(StructBase);
1162 Address Addr = LV.getAddress();
1163 Res = Addr.emitRawPointer(*this);
1164 } else {
1165 return nullptr;
1166 }
1167
1168 RecIndicesTy Indices;
1169 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1170 if (Indices.empty())
1171 return nullptr;
1172
1173 Indices.push_back(Builder.getInt32(0));
1175 ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
1176 RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
1177}
1178
1179/// This method is typically called in contexts where we can't generate
1180/// side-effects, like in __builtin_dynamic_object_size. When finding
1181/// expressions, only choose those that have either already been emitted or can
1182/// be loaded without side-effects.
1183///
1184/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1185/// within the top-level struct.
1186/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1188 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1189 if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1190 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1191 getIntAlign(), "counted_by.load");
1192 return nullptr;
1193}
1194
1195void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1196 llvm::Value *Index, QualType IndexType,
1197 bool Accessed) {
1198 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1199 "should not be called unless adding bounds checks");
1200 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1201 getLangOpts().getStrictFlexArraysLevel();
1202 QualType IndexedType;
1203 llvm::Value *Bound =
1204 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1205
1206 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1207}
1208
1209void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1210 llvm::Value *Index,
1211 QualType IndexType,
1212 QualType IndexedType, bool Accessed) {
1213 if (!Bound)
1214 return;
1215
1216 SanitizerScope SanScope(this);
1217
1218 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1219 llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1220 llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1221
1222 llvm::Constant *StaticData[] = {
1224 EmitCheckTypeDescriptor(IndexedType),
1225 EmitCheckTypeDescriptor(IndexType)
1226 };
1227 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1228 : Builder.CreateICmpULE(IndexVal, BoundVal);
1229 EmitCheck(std::make_pair(Check, SanitizerKind::SO_ArrayBounds),
1230 SanitizerHandler::OutOfBounds, StaticData, Index);
1231}
1232
1235 bool isInc, bool isPre) {
1237
1238 llvm::Value *NextVal;
1239 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1240 uint64_t AmountVal = isInc ? 1 : -1;
1241 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1242
1243 // Add the inc/dec to the real part.
1244 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1245 } else {
1246 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1247 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1248 if (!isInc)
1249 FVal.changeSign();
1250 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1251
1252 // Add the inc/dec to the real part.
1253 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1254 }
1255
1256 ComplexPairTy IncVal(NextVal, InVal.second);
1257
1258 // Store the updated result through the lvalue.
1259 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1260 if (getLangOpts().OpenMP)
1262 E->getSubExpr());
1263
1264 // If this is a postinc, return the value read from memory, otherwise use the
1265 // updated value.
1266 return isPre ? IncVal : InVal;
1267}
1268
1270 CodeGenFunction *CGF) {
1271 // Bind VLAs in the cast type.
1272 if (CGF && E->getType()->isVariablyModifiedType())
1274
1275 if (CGDebugInfo *DI = getModuleDebugInfo())
1276 DI->EmitExplicitCastType(E->getType());
1277}
1278
1279//===----------------------------------------------------------------------===//
1280// LValue Expression Emission
1281//===----------------------------------------------------------------------===//
1282
1284 TBAAAccessInfo *TBAAInfo,
1285 KnownNonNull_t IsKnownNonNull,
1286 CodeGenFunction &CGF) {
1287 // We allow this with ObjC object pointers because of fragile ABIs.
1288 assert(E->getType()->isPointerType() ||
1290 E = E->IgnoreParens();
1291
1292 // Casts:
1293 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1294 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1295 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1296
1297 switch (CE->getCastKind()) {
1298 // Non-converting casts (but not C's implicit conversion from void*).
1299 case CK_BitCast:
1300 case CK_NoOp:
1301 case CK_AddressSpaceConversion:
1302 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1303 if (PtrTy->getPointeeType()->isVoidType())
1304 break;
1305
1306 LValueBaseInfo InnerBaseInfo;
1307 TBAAAccessInfo InnerTBAAInfo;
1309 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1310 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1311 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1312
1313 if (isa<ExplicitCastExpr>(CE)) {
1314 LValueBaseInfo TargetTypeBaseInfo;
1315 TBAAAccessInfo TargetTypeTBAAInfo;
1317 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1318 if (TBAAInfo)
1319 *TBAAInfo =
1320 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1321 // If the source l-value is opaque, honor the alignment of the
1322 // casted-to type.
1323 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1324 if (BaseInfo)
1325 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1326 Addr.setAlignment(Align);
1327 }
1328 }
1329
1330 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1331 CE->getCastKind() == CK_BitCast) {
1332 if (auto PT = E->getType()->getAs<PointerType>())
1333 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1334 /*MayBeNull=*/true,
1336 CE->getBeginLoc());
1337 }
1338
1339 llvm::Type *ElemTy =
1341 Addr = Addr.withElementType(ElemTy);
1342 if (CE->getCastKind() == CK_AddressSpaceConversion)
1343 Addr = CGF.Builder.CreateAddrSpaceCast(
1344 Addr, CGF.ConvertType(E->getType()), ElemTy);
1345 return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1346 CE->getType());
1347 }
1348 break;
1349
1350 // Array-to-pointer decay.
1351 case CK_ArrayToPointerDecay:
1352 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1353
1354 // Derived-to-base conversions.
1355 case CK_UncheckedDerivedToBase:
1356 case CK_DerivedToBase: {
1357 // TODO: Support accesses to members of base classes in TBAA. For now, we
1358 // conservatively pretend that the complete object is of the base class
1359 // type.
1360 if (TBAAInfo)
1361 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1363 CE->getSubExpr(), BaseInfo, nullptr,
1364 (KnownNonNull_t)(IsKnownNonNull ||
1365 CE->getCastKind() == CK_UncheckedDerivedToBase));
1366 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1367 return CGF.GetAddressOfBaseClass(
1368 Addr, Derived, CE->path_begin(), CE->path_end(),
1369 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1370 }
1371
1372 // TODO: Is there any reason to treat base-to-derived conversions
1373 // specially?
1374 default:
1375 break;
1376 }
1377 }
1378
1379 // Unary &.
1380 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1381 if (UO->getOpcode() == UO_AddrOf) {
1382 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1383 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1384 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1385 return LV.getAddress();
1386 }
1387 }
1388
1389 // std::addressof and variants.
1390 if (auto *Call = dyn_cast<CallExpr>(E)) {
1391 switch (Call->getBuiltinCallee()) {
1392 default:
1393 break;
1394 case Builtin::BIaddressof:
1395 case Builtin::BI__addressof:
1396 case Builtin::BI__builtin_addressof: {
1397 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1398 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1399 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1400 return LV.getAddress();
1401 }
1402 }
1403 }
1404
1405 // TODO: conditional operators, comma.
1406
1407 // Otherwise, use the alignment of the type.
1410 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1411}
1412
1413/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1414/// derive a more accurate bound on the alignment of the pointer.
1416 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1417 KnownNonNull_t IsKnownNonNull) {
1418 Address Addr =
1419 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1420 if (IsKnownNonNull && !Addr.isKnownNonNull())
1421 Addr.setKnownNonNull();
1422 return Addr;
1423}
1424
1426 llvm::Value *V = RV.getScalarVal();
1427 if (auto MPT = T->getAs<MemberPointerType>())
1428 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1429 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1430}
1431
1433 if (Ty->isVoidType())
1434 return RValue::get(nullptr);
1435
1436 switch (getEvaluationKind(Ty)) {
1437 case TEK_Complex: {
1438 llvm::Type *EltTy =
1440 llvm::Value *U = llvm::UndefValue::get(EltTy);
1441 return RValue::getComplex(std::make_pair(U, U));
1442 }
1443
1444 // If this is a use of an undefined aggregate type, the aggregate must have an
1445 // identifiable address. Just because the contents of the value are undefined
1446 // doesn't mean that the address can't be taken and compared.
1447 case TEK_Aggregate: {
1448 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1449 return RValue::getAggregate(DestPtr);
1450 }
1451
1452 case TEK_Scalar:
1453 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1454 }
1455 llvm_unreachable("bad evaluation kind");
1456}
1457
1459 const char *Name) {
1460 ErrorUnsupported(E, Name);
1461 return GetUndefRValue(E->getType());
1462}
1463
1465 const char *Name) {
1466 ErrorUnsupported(E, Name);
1467 llvm::Type *ElTy = ConvertType(E->getType());
1468 llvm::Type *Ty = UnqualPtrTy;
1469 return MakeAddrLValue(
1470 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1471}
1472
1473bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1474 const Expr *Base = Obj;
1475 while (!isa<CXXThisExpr>(Base)) {
1476 // The result of a dynamic_cast can be null.
1477 if (isa<CXXDynamicCastExpr>(Base))
1478 return false;
1479
1480 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1481 Base = CE->getSubExpr();
1482 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1483 Base = PE->getSubExpr();
1484 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1485 if (UO->getOpcode() == UO_Extension)
1486 Base = UO->getSubExpr();
1487 else
1488 return false;
1489 } else {
1490 return false;
1491 }
1492 }
1493 return true;
1494}
1495
1496LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1497 LValue LV;
1498 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1499 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1500 else
1501 LV = EmitLValue(E);
1502 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1503 SanitizerSet SkippedChecks;
1504 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1505 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1506 if (IsBaseCXXThis)
1507 SkippedChecks.set(SanitizerKind::Alignment, true);
1508 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1509 SkippedChecks.set(SanitizerKind::Null, true);
1510 }
1511 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1512 }
1513 return LV;
1514}
1515
1516/// EmitLValue - Emit code to compute a designator that specifies the location
1517/// of the expression.
1518///
1519/// This can return one of two things: a simple address or a bitfield reference.
1520/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1521/// an LLVM pointer type.
1522///
1523/// If this returns a bitfield reference, nothing about the pointee type of the
1524/// LLVM value is known: For example, it may not be a pointer to an integer.
1525///
1526/// If this returns a normal address, and if the lvalue's C type is fixed size,
1527/// this method guarantees that the returned pointer type will point to an LLVM
1528/// type of the same size of the lvalue's type. If the lvalue has a variable
1529/// length type, this is not possible.
1530///
1532 KnownNonNull_t IsKnownNonNull) {
1533 // Running with sufficient stack space to avoid deeply nested expressions
1534 // cause a stack overflow.
1535 LValue LV;
1537 E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1538
1539 if (IsKnownNonNull && !LV.isKnownNonNull())
1540 LV.setKnownNonNull();
1541 return LV;
1542}
1543
1545 const ASTContext &Ctx) {
1546 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1547 if (isa<OpaqueValueExpr>(SE))
1548 return SE->getType();
1549 return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1550}
1551
1552LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1553 KnownNonNull_t IsKnownNonNull) {
1554 ApplyDebugLocation DL(*this, E);
1555 switch (E->getStmtClass()) {
1556 default: return EmitUnsupportedLValue(E, "l-value expression");
1557
1558 case Expr::ObjCPropertyRefExprClass:
1559 llvm_unreachable("cannot emit a property reference directly");
1560
1561 case Expr::ObjCSelectorExprClass:
1562 return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1563 case Expr::ObjCIsaExprClass:
1564 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1565 case Expr::BinaryOperatorClass:
1566 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1567 case Expr::CompoundAssignOperatorClass: {
1568 QualType Ty = E->getType();
1569 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1570 Ty = AT->getValueType();
1571 if (!Ty->isAnyComplexType())
1572 return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1573 return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1574 }
1575 case Expr::CallExprClass:
1576 case Expr::CXXMemberCallExprClass:
1577 case Expr::CXXOperatorCallExprClass:
1578 case Expr::UserDefinedLiteralClass:
1579 return EmitCallExprLValue(cast<CallExpr>(E));
1580 case Expr::CXXRewrittenBinaryOperatorClass:
1581 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1582 IsKnownNonNull);
1583 case Expr::VAArgExprClass:
1584 return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1585 case Expr::DeclRefExprClass:
1586 return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1587 case Expr::ConstantExprClass: {
1588 const ConstantExpr *CE = cast<ConstantExpr>(E);
1589 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1591 return MakeNaturalAlignAddrLValue(Result, RetType);
1592 }
1593 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1594 }
1595 case Expr::ParenExprClass:
1596 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1597 case Expr::GenericSelectionExprClass:
1598 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1599 IsKnownNonNull);
1600 case Expr::PredefinedExprClass:
1601 return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1602 case Expr::StringLiteralClass:
1603 return EmitStringLiteralLValue(cast<StringLiteral>(E));
1604 case Expr::ObjCEncodeExprClass:
1605 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1606 case Expr::PseudoObjectExprClass:
1607 return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1608 case Expr::InitListExprClass:
1609 return EmitInitListLValue(cast<InitListExpr>(E));
1610 case Expr::CXXTemporaryObjectExprClass:
1611 case Expr::CXXConstructExprClass:
1612 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1613 case Expr::CXXBindTemporaryExprClass:
1614 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1615 case Expr::CXXUuidofExprClass:
1616 return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1617 case Expr::LambdaExprClass:
1618 return EmitAggExprToLValue(E);
1619
1620 case Expr::ExprWithCleanupsClass: {
1621 const auto *cleanups = cast<ExprWithCleanups>(E);
1622 RunCleanupsScope Scope(*this);
1623 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1624 if (LV.isSimple()) {
1625 // Defend against branches out of gnu statement expressions surrounded by
1626 // cleanups.
1627 Address Addr = LV.getAddress();
1628 llvm::Value *V = Addr.getBasePointer();
1629 Scope.ForceCleanup({&V});
1630 Addr.replaceBasePointer(V);
1631 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1632 LV.getBaseInfo(), LV.getTBAAInfo());
1633 }
1634 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1635 // bitfield lvalue or some other non-simple lvalue?
1636 return LV;
1637 }
1638
1639 case Expr::CXXDefaultArgExprClass: {
1640 auto *DAE = cast<CXXDefaultArgExpr>(E);
1641 CXXDefaultArgExprScope Scope(*this, DAE);
1642 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1643 }
1644 case Expr::CXXDefaultInitExprClass: {
1645 auto *DIE = cast<CXXDefaultInitExpr>(E);
1646 CXXDefaultInitExprScope Scope(*this, DIE);
1647 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1648 }
1649 case Expr::CXXTypeidExprClass:
1650 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1651
1652 case Expr::ObjCMessageExprClass:
1653 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1654 case Expr::ObjCIvarRefExprClass:
1655 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1656 case Expr::StmtExprClass:
1657 return EmitStmtExprLValue(cast<StmtExpr>(E));
1658 case Expr::UnaryOperatorClass:
1659 return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1660 case Expr::ArraySubscriptExprClass:
1661 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1662 case Expr::MatrixSubscriptExprClass:
1663 return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1664 case Expr::ArraySectionExprClass:
1665 return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
1666 case Expr::ExtVectorElementExprClass:
1667 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1668 case Expr::CXXThisExprClass:
1670 case Expr::MemberExprClass:
1671 return EmitMemberExpr(cast<MemberExpr>(E));
1672 case Expr::CompoundLiteralExprClass:
1673 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1674 case Expr::ConditionalOperatorClass:
1675 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1676 case Expr::BinaryConditionalOperatorClass:
1677 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1678 case Expr::ChooseExprClass:
1679 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1680 case Expr::OpaqueValueExprClass:
1681 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1682 case Expr::SubstNonTypeTemplateParmExprClass:
1683 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1684 IsKnownNonNull);
1685 case Expr::ImplicitCastExprClass:
1686 case Expr::CStyleCastExprClass:
1687 case Expr::CXXFunctionalCastExprClass:
1688 case Expr::CXXStaticCastExprClass:
1689 case Expr::CXXDynamicCastExprClass:
1690 case Expr::CXXReinterpretCastExprClass:
1691 case Expr::CXXConstCastExprClass:
1692 case Expr::CXXAddrspaceCastExprClass:
1693 case Expr::ObjCBridgedCastExprClass:
1694 return EmitCastLValue(cast<CastExpr>(E));
1695
1696 case Expr::MaterializeTemporaryExprClass:
1697 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1698
1699 case Expr::CoawaitExprClass:
1700 return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1701 case Expr::CoyieldExprClass:
1702 return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1703 case Expr::PackIndexingExprClass:
1704 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1705 case Expr::HLSLOutArgExprClass:
1706 llvm_unreachable("cannot emit a HLSL out argument directly");
1707 }
1708}
1709
1710/// Given an object of the given canonical type, can we safely copy a
1711/// value out of it based on its initializer?
1713 assert(type.isCanonical());
1714 assert(!type->isReferenceType());
1715
1716 // Must be const-qualified but non-volatile.
1717 Qualifiers qs = type.getLocalQualifiers();
1718 if (!qs.hasConst() || qs.hasVolatile()) return false;
1719
1720 // Otherwise, all object types satisfy this except C++ classes with
1721 // mutable subobjects or non-trivial copy/destroy behavior.
1722 if (const auto *RT = dyn_cast<RecordType>(type))
1723 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1724 if (RD->hasMutableFields() || !RD->isTrivial())
1725 return false;
1726
1727 return true;
1728}
1729
1730/// Can we constant-emit a load of a reference to a variable of the
1731/// given type? This is different from predicates like
1732/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1733/// in situations that don't necessarily satisfy the language's rules
1734/// for this (e.g. C++'s ODR-use rules). For example, we want to able
1735/// to do this with const float variables even if those variables
1736/// aren't marked 'constexpr'.
1744 type = type.getCanonicalType();
1745 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1746 if (isConstantEmittableObjectType(ref->getPointeeType()))
1748 return CEK_AsReferenceOnly;
1749 }
1751 return CEK_AsValueOnly;
1752 return CEK_None;
1753}
1754
1755/// Try to emit a reference to the given value without producing it as
1756/// an l-value. This is just an optimization, but it avoids us needing
1757/// to emit global copies of variables if they're named without triggering
1758/// a formal use in a context where we can't emit a direct reference to them,
1759/// for instance if a block or lambda or a member of a local class uses a
1760/// const int variable or constexpr variable from an enclosing function.
1761CodeGenFunction::ConstantEmission
1763 ValueDecl *value = refExpr->getDecl();
1764
1765 // The value needs to be an enum constant or a constant variable.
1767 if (isa<ParmVarDecl>(value)) {
1768 CEK = CEK_None;
1769 } else if (auto *var = dyn_cast<VarDecl>(value)) {
1770 CEK = checkVarTypeForConstantEmission(var->getType());
1771 } else if (isa<EnumConstantDecl>(value)) {
1772 CEK = CEK_AsValueOnly;
1773 } else {
1774 CEK = CEK_None;
1775 }
1776 if (CEK == CEK_None) return ConstantEmission();
1777
1778 Expr::EvalResult result;
1779 bool resultIsReference;
1780 QualType resultType;
1781
1782 // It's best to evaluate all the way as an r-value if that's permitted.
1783 if (CEK != CEK_AsReferenceOnly &&
1784 refExpr->EvaluateAsRValue(result, getContext())) {
1785 resultIsReference = false;
1786 resultType = refExpr->getType();
1787
1788 // Otherwise, try to evaluate as an l-value.
1789 } else if (CEK != CEK_AsValueOnly &&
1790 refExpr->EvaluateAsLValue(result, getContext())) {
1791 resultIsReference = true;
1792 resultType = value->getType();
1793
1794 // Failure.
1795 } else {
1796 return ConstantEmission();
1797 }
1798
1799 // In any case, if the initializer has side-effects, abandon ship.
1800 if (result.HasSideEffects)
1801 return ConstantEmission();
1802
1803 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1804 // referencing a global host variable by copy. In this case the lambda should
1805 // make a copy of the value of the global host variable. The DRE of the
1806 // captured reference variable cannot be emitted as load from the host
1807 // global variable as compile time constant, since the host variable is not
1808 // accessible on device. The DRE of the captured reference variable has to be
1809 // loaded from captures.
1810 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1812 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1813 if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
1814 const APValue::LValueBase &base = result.Val.getLValueBase();
1815 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1816 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1817 if (!VD->hasAttr<CUDADeviceAttr>()) {
1818 return ConstantEmission();
1819 }
1820 }
1821 }
1822 }
1823 }
1824
1825 // Emit as a constant.
1826 auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1827 result.Val, resultType);
1828
1829 // Make sure we emit a debug reference to the global variable.
1830 // This should probably fire even for
1831 if (isa<VarDecl>(value)) {
1832 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1833 EmitDeclRefExprDbgValue(refExpr, result.Val);
1834 } else {
1835 assert(isa<EnumConstantDecl>(value));
1836 EmitDeclRefExprDbgValue(refExpr, result.Val);
1837 }
1838
1839 // If we emitted a reference constant, we need to dereference that.
1840 if (resultIsReference)
1842
1844}
1845
1847 const MemberExpr *ME) {
1848 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1849 // Try to emit static variable member expressions as DREs.
1850 return DeclRefExpr::Create(
1852 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1853 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1854 }
1855 return nullptr;
1856}
1857
1858CodeGenFunction::ConstantEmission
1861 return tryEmitAsConstant(DRE);
1862 return ConstantEmission();
1863}
1864
1866 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1867 assert(Constant && "not a constant");
1868 if (Constant.isReference())
1869 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1870 E->getExprLoc())
1871 .getScalarVal();
1872 return Constant.getValue();
1873}
1874
1875llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1877 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
1878 lvalue.getType(), Loc, lvalue.getBaseInfo(),
1879 lvalue.getTBAAInfo(), lvalue.isNontemporal());
1880}
1881
1883 if (Ty->isBooleanType())
1884 return true;
1885
1886 if (const EnumType *ET = Ty->getAs<EnumType>())
1887 return ET->getDecl()->getIntegerType()->isBooleanType();
1888
1889 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1890 return hasBooleanRepresentation(AT->getValueType());
1891
1892 return false;
1893}
1894
1896 llvm::APInt &Min, llvm::APInt &End,
1897 bool StrictEnums, bool IsBool) {
1898 const EnumType *ET = Ty->getAs<EnumType>();
1899 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1900 ET && !ET->getDecl()->isFixed();
1901 if (!IsBool && !IsRegularCPlusPlusEnum)
1902 return false;
1903
1904 if (IsBool) {
1905 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1906 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1907 } else {
1908 const EnumDecl *ED = ET->getDecl();
1909 ED->getValueRange(End, Min);
1910 }
1911 return true;
1912}
1913
1914llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1915 llvm::APInt Min, End;
1916 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1918 return nullptr;
1919
1920 llvm::MDBuilder MDHelper(getLLVMContext());
1921 return MDHelper.createRange(Min, End);
1922}
1923
1926 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1927 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1928 if (!HasBoolCheck && !HasEnumCheck)
1929 return false;
1930
1931 bool IsBool = hasBooleanRepresentation(Ty) ||
1933 bool NeedsBoolCheck = HasBoolCheck && IsBool;
1934 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1935 if (!NeedsBoolCheck && !NeedsEnumCheck)
1936 return false;
1937
1938 // Single-bit booleans don't need to be checked. Special-case this to avoid
1939 // a bit width mismatch when handling bitfield values. This is handled by
1940 // EmitFromMemory for the non-bitfield case.
1941 if (IsBool &&
1942 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1943 return false;
1944
1945 if (NeedsEnumCheck &&
1946 getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
1947 return false;
1948
1949 llvm::APInt Min, End;
1950 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1951 return true;
1952
1953 auto &Ctx = getLLVMContext();
1954 SanitizerScope SanScope(this);
1955 llvm::Value *Check;
1956 --End;
1957 if (!Min) {
1958 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1959 } else {
1960 llvm::Value *Upper =
1961 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1962 llvm::Value *Lower =
1963 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1964 Check = Builder.CreateAnd(Upper, Lower);
1965 }
1966 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1969 NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
1970 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1971 StaticArgs, EmitCheckValue(Value));
1972 return true;
1973}
1974
1975llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1976 QualType Ty,
1978 LValueBaseInfo BaseInfo,
1979 TBAAAccessInfo TBAAInfo,
1980 bool isNontemporal) {
1981 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
1982 if (GV->isThreadLocal())
1983 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1985
1986 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1987 // Boolean vectors use `iN` as storage type.
1988 if (ClangVecTy->isExtVectorBoolType()) {
1989 llvm::Type *ValTy = ConvertType(Ty);
1990 unsigned ValNumElems =
1991 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1992 // Load the `iP` storage object (P is the padded vector size).
1993 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1994 const auto *RawIntTy = RawIntV->getType();
1995 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1996 // Bitcast iP --> <P x i1>.
1997 auto *PaddedVecTy = llvm::FixedVectorType::get(
1998 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1999 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
2000 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2001 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2002
2003 return EmitFromMemory(V, Ty);
2004 }
2005
2006 // Handles vectors of sizes that are likely to be expanded to a larger size
2007 // to optimize performance.
2008 auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
2009 auto *NewVecTy =
2011
2012 if (VTy != NewVecTy) {
2013 Address Cast = Addr.withElementType(NewVecTy);
2014 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
2015 unsigned OldNumElements = VTy->getNumElements();
2016 SmallVector<int, 16> Mask(OldNumElements);
2017 std::iota(Mask.begin(), Mask.end(), 0);
2018 V = Builder.CreateShuffleVector(V, Mask, "extractVec");
2019 return EmitFromMemory(V, Ty);
2020 }
2021 }
2022
2023 // Atomic operations have to be done on integral types.
2024 LValue AtomicLValue =
2025 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2026 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2027 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2028 }
2029
2030 Addr =
2032
2033 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
2034 if (isNontemporal) {
2035 llvm::MDNode *Node = llvm::MDNode::get(
2036 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2037 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2038 }
2039
2040 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2041
2042 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2043 // In order to prevent the optimizer from throwing away the check, don't
2044 // attach range metadata to the load.
2045 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
2046 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2047 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2048 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2049 llvm::MDNode::get(getLLVMContext(), {}));
2050 }
2051
2052 return EmitFromMemory(Load, Ty);
2053}
2054
2055/// Converts a scalar value from its primary IR type (as returned
2056/// by ConvertType) to its load/store type (as returned by
2057/// convertTypeForLoadStore).
2058llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2059 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2060 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2062 return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2063 }
2064
2065 if (Ty->isExtVectorBoolType()) {
2066 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2067 // Expand to the memory bit width.
2068 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2069 // <N x i1> --> <P x i1>.
2070 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2071 // <P x i1> --> iP.
2072 Value = Builder.CreateBitCast(Value, StoreTy);
2073 }
2074
2075 return Value;
2076}
2077
2078/// Converts a scalar value from its load/store type (as returned
2079/// by convertTypeForLoadStore) to its primary IR type (as returned
2080/// by ConvertType).
2081llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2082 if (Ty->isExtVectorBoolType()) {
2083 const auto *RawIntTy = Value->getType();
2084 // Bitcast iP --> <P x i1>.
2085 auto *PaddedVecTy = llvm::FixedVectorType::get(
2086 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2087 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2088 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2089 llvm::Type *ValTy = ConvertType(Ty);
2090 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2091 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2092 }
2093
2094 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2095 llvm::Type *ResTy = ConvertType(Ty);
2096 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2097 }
2098
2099 return Value;
2100}
2101
2102// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2103// MatrixType), if it points to a array (the memory type of MatrixType).
2105 CodeGenFunction &CGF,
2106 bool IsVector = true) {
2107 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2108 if (ArrayTy && IsVector) {
2109 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2110 ArrayTy->getNumElements());
2111
2112 return Addr.withElementType(VectorTy);
2113 }
2114 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2115 if (VectorTy && !IsVector) {
2116 auto *ArrayTy = llvm::ArrayType::get(
2117 VectorTy->getElementType(),
2118 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2119
2120 return Addr.withElementType(ArrayTy);
2121 }
2122
2123 return Addr;
2124}
2125
2126// Emit a store of a matrix LValue. This may require casting the original
2127// pointer to memory address (ArrayType) to a pointer to the value type
2128// (VectorType).
2129static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2130 bool isInit, CodeGenFunction &CGF) {
2131 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2132 value->getType()->isVectorTy());
2133 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2134 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2135 lvalue.isNontemporal());
2136}
2137
2138void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2139 bool Volatile, QualType Ty,
2140 LValueBaseInfo BaseInfo,
2141 TBAAAccessInfo TBAAInfo,
2142 bool isInit, bool isNontemporal) {
2143 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2144 if (GV->isThreadLocal())
2145 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2147
2148 // Handles vectors of sizes that are likely to be expanded to a larger size
2149 // to optimize performance.
2150 llvm::Type *SrcTy = Value->getType();
2151 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2152 if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2153 auto *NewVecTy =
2155 if (!ClangVecTy->isExtVectorBoolType() && VecTy != NewVecTy) {
2156 SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1);
2157 std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
2158 Value = Builder.CreateShuffleVector(Value, Mask, "extractVec");
2159 SrcTy = NewVecTy;
2160 }
2161 if (Addr.getElementType() != SrcTy)
2162 Addr = Addr.withElementType(SrcTy);
2163 }
2164 }
2165
2166 Value = EmitToMemory(Value, Ty);
2167
2168 LValue AtomicLValue =
2169 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2170 if (Ty->isAtomicType() ||
2171 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2172 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2173 return;
2174 }
2175
2176 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2177 if (isNontemporal) {
2178 llvm::MDNode *Node =
2179 llvm::MDNode::get(Store->getContext(),
2180 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2181 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2182 }
2183
2184 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2185}
2186
2187void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2188 bool isInit) {
2189 if (lvalue.getType()->isConstantMatrixType()) {
2190 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2191 return;
2192 }
2193
2194 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2195 lvalue.getType(), lvalue.getBaseInfo(),
2196 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2197}
2198
2199// Emit a load of a LValue of matrix type. This may require casting the pointer
2200// to memory address (ArrayType) to a pointer to the value type (VectorType).
2202 CodeGenFunction &CGF) {
2203 assert(LV.getType()->isConstantMatrixType());
2205 LV.setAddress(Addr);
2206 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2207}
2208
2211 QualType Ty = LV.getType();
2212 switch (getEvaluationKind(Ty)) {
2213 case TEK_Scalar:
2214 return EmitLoadOfLValue(LV, Loc);
2215 case TEK_Complex:
2217 case TEK_Aggregate:
2218 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2219 return Slot.asRValue();
2220 }
2221 llvm_unreachable("bad evaluation kind");
2222}
2223
2224/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2225/// method emits the address of the lvalue, then loads the result as an rvalue,
2226/// returning the rvalue.
2228 if (LV.isObjCWeak()) {
2229 // load of a __weak object.
2230 Address AddrWeakObj = LV.getAddress();
2232 AddrWeakObj));
2233 }
2235 // In MRC mode, we do a load+autorelease.
2236 if (!getLangOpts().ObjCAutoRefCount) {
2238 }
2239
2240 // In ARC mode, we load retained and then consume the value.
2241 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2242 Object = EmitObjCConsumeObject(LV.getType(), Object);
2243 return RValue::get(Object);
2244 }
2245
2246 if (LV.isSimple()) {
2247 assert(!LV.getType()->isFunctionType());
2248
2249 if (LV.getType()->isConstantMatrixType())
2250 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2251
2252 // Everything needs a load.
2253 return RValue::get(EmitLoadOfScalar(LV, Loc));
2254 }
2255
2256 if (LV.isVectorElt()) {
2257 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2258 LV.isVolatileQualified());
2259 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2260 "vecext"));
2261 }
2262
2263 // If this is a reference to a subset of the elements of a vector, either
2264 // shuffle the input or extract/insert them as appropriate.
2265 if (LV.isExtVectorElt()) {
2267 }
2268
2269 // Global Register variables always invoke intrinsics
2270 if (LV.isGlobalReg())
2271 return EmitLoadOfGlobalRegLValue(LV);
2272
2273 if (LV.isMatrixElt()) {
2274 llvm::Value *Idx = LV.getMatrixIdx();
2275 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2276 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2277 llvm::MatrixBuilder MB(Builder);
2278 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2279 }
2280 llvm::LoadInst *Load =
2282 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2283 }
2284
2285 assert(LV.isBitField() && "Unknown LValue type!");
2286 return EmitLoadOfBitfieldLValue(LV, Loc);
2287}
2288
2291 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2292
2293 // Get the output type.
2294 llvm::Type *ResLTy = ConvertType(LV.getType());
2295
2296 Address Ptr = LV.getBitFieldAddress();
2297 llvm::Value *Val =
2298 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2299
2300 bool UseVolatile = LV.isVolatileQualified() &&
2301 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2302 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2303 const unsigned StorageSize =
2304 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2305 if (Info.IsSigned) {
2306 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2307 unsigned HighBits = StorageSize - Offset - Info.Size;
2308 if (HighBits)
2309 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2310 if (Offset + HighBits)
2311 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2312 } else {
2313 if (Offset)
2314 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2315 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2316 Val = Builder.CreateAnd(
2317 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2318 }
2319 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2320 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2321 return RValue::get(Val);
2322}
2323
2324// If this is a reference to a subset of the elements of a vector, create an
2325// appropriate shufflevector.
2327 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2328 LV.isVolatileQualified());
2329
2330 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2331 // IR value to a vector here allows the rest of codegen to behave as normal.
2332 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2333 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2334 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2335 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2336 }
2337
2338 const llvm::Constant *Elts = LV.getExtVectorElts();
2339
2340 // If the result of the expression is a non-vector type, we must be extracting
2341 // a single element. Just codegen as an extractelement.
2342 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2343 if (!ExprVT) {
2344 unsigned InIdx = getAccessedFieldNo(0, Elts);
2345 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2346 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2347 }
2348
2349 // Always use shuffle vector to try to retain the original program structure
2350 unsigned NumResultElts = ExprVT->getNumElements();
2351
2353 for (unsigned i = 0; i != NumResultElts; ++i)
2354 Mask.push_back(getAccessedFieldNo(i, Elts));
2355
2356 Vec = Builder.CreateShuffleVector(Vec, Mask);
2357 return RValue::get(Vec);
2358}
2359
2360/// Generates lvalue for partial ext_vector access.
2362 Address VectorAddress = LV.getExtVectorAddress();
2363 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2364 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2365
2366 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2367
2368 const llvm::Constant *Elts = LV.getExtVectorElts();
2369 unsigned ix = getAccessedFieldNo(0, Elts);
2370
2371 Address VectorBasePtrPlusIx =
2372 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2373 "vector.elt");
2374
2375 return VectorBasePtrPlusIx;
2376}
2377
2378/// Load of global named registers are always calls to intrinsics.
2380 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2381 "Bad type for register variable");
2382 llvm::MDNode *RegName = cast<llvm::MDNode>(
2383 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2384
2385 // We accept integer and pointer types only
2386 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2387 llvm::Type *Ty = OrigTy;
2388 if (OrigTy->isPointerTy())
2389 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2390 llvm::Type *Types[] = { Ty };
2391
2392 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2393 llvm::Value *Call = Builder.CreateCall(
2394 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2395 if (OrigTy->isPointerTy())
2396 Call = Builder.CreateIntToPtr(Call, OrigTy);
2397 return RValue::get(Call);
2398}
2399
2400/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2401/// lvalue, where both are guaranteed to the have the same type, and that type
2402/// is 'Ty'.
2404 bool isInit) {
2405 if (!Dst.isSimple()) {
2406 if (Dst.isVectorElt()) {
2407 // Read/modify/write the vector, inserting the new element.
2408 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2409 Dst.isVolatileQualified());
2410 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2411 if (IRStoreTy) {
2412 auto *IRVecTy = llvm::FixedVectorType::get(
2413 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2414 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2415 // iN --> <N x i1>.
2416 }
2417 llvm::Value *SrcVal = Src.getScalarVal();
2418 // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2419 // types which are mapped to vector LLVM IR types (e.g. for implementing
2420 // an ABI).
2421 if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType());
2422 EltTy && EltTy->getNumElements() == 1)
2423 SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType());
2424 Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(),
2425 "vecins");
2426 if (IRStoreTy) {
2427 // <N x i1> --> <iN>.
2428 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2429 }
2431 Dst.isVolatileQualified());
2432 return;
2433 }
2434
2435 // If this is an update of extended vector elements, insert them as
2436 // appropriate.
2437 if (Dst.isExtVectorElt())
2439
2440 if (Dst.isGlobalReg())
2441 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2442
2443 if (Dst.isMatrixElt()) {
2444 llvm::Value *Idx = Dst.getMatrixIdx();
2445 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2446 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2447 llvm::MatrixBuilder MB(Builder);
2448 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2449 }
2450 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2451 llvm::Value *Vec =
2452 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2454 Dst.isVolatileQualified());
2455 return;
2456 }
2457
2458 assert(Dst.isBitField() && "Unknown LValue type");
2459 return EmitStoreThroughBitfieldLValue(Src, Dst);
2460 }
2461
2462 // There's special magic for assigning into an ARC-qualified l-value.
2463 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2464 switch (Lifetime) {
2466 llvm_unreachable("present but none");
2467
2469 // nothing special
2470 break;
2471
2473 if (isInit) {
2474 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2475 break;
2476 }
2477 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2478 return;
2479
2481 if (isInit)
2482 // Initialize and then skip the primitive store.
2484 else
2486 /*ignore*/ true);
2487 return;
2488
2491 Src.getScalarVal()));
2492 // fall into the normal path
2493 break;
2494 }
2495 }
2496
2497 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2498 // load of a __weak object.
2499 Address LvalueDst = Dst.getAddress();
2500 llvm::Value *src = Src.getScalarVal();
2501 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2502 return;
2503 }
2504
2505 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2506 // load of a __strong object.
2507 Address LvalueDst = Dst.getAddress();
2508 llvm::Value *src = Src.getScalarVal();
2509 if (Dst.isObjCIvar()) {
2510 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2511 llvm::Type *ResultType = IntPtrTy;
2513 llvm::Value *RHS = dst.emitRawPointer(*this);
2514 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2515 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2516 ResultType, "sub.ptr.lhs.cast");
2517 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2518 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2519 } else if (Dst.isGlobalObjCRef()) {
2520 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2521 Dst.isThreadLocalRef());
2522 }
2523 else
2524 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2525 return;
2526 }
2527
2528 assert(Src.isScalar() && "Can't emit an agg store with this method");
2529 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2530}
2531
2533 llvm::Value **Result) {
2534 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2535 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2536 Address Ptr = Dst.getBitFieldAddress();
2537
2538 // Get the source value, truncated to the width of the bit-field.
2539 llvm::Value *SrcVal = Src.getScalarVal();
2540
2541 // Cast the source to the storage type and shift it into place.
2542 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2543 /*isSigned=*/false);
2544 llvm::Value *MaskedVal = SrcVal;
2545
2546 const bool UseVolatile =
2547 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2548 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2549 const unsigned StorageSize =
2550 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2551 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2552 // See if there are other bits in the bitfield's storage we'll need to load
2553 // and mask together with source before storing.
2554 if (StorageSize != Info.Size) {
2555 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2556 llvm::Value *Val =
2557 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2558
2559 // Mask the source value as needed.
2561 SrcVal = Builder.CreateAnd(
2562 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2563 "bf.value");
2564 MaskedVal = SrcVal;
2565 if (Offset)
2566 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2567
2568 // Mask out the original value.
2569 Val = Builder.CreateAnd(
2570 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2571 "bf.clear");
2572
2573 // Or together the unchanged values and the source value.
2574 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2575 } else {
2576 assert(Offset == 0);
2577 // According to the AACPS:
2578 // When a volatile bit-field is written, and its container does not overlap
2579 // with any non-bit-field member, its container must be read exactly once
2580 // and written exactly once using the access width appropriate to the type
2581 // of the container. The two accesses are not atomic.
2582 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2583 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2584 Builder.CreateLoad(Ptr, true, "bf.load");
2585 }
2586
2587 // Write the new value back out.
2588 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2589
2590 // Return the new value of the bit-field, if requested.
2591 if (Result) {
2592 llvm::Value *ResultVal = MaskedVal;
2593
2594 // Sign extend the value if needed.
2595 if (Info.IsSigned) {
2596 assert(Info.Size <= StorageSize);
2597 unsigned HighBits = StorageSize - Info.Size;
2598 if (HighBits) {
2599 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2600 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2601 }
2602 }
2603
2604 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2605 "bf.result.cast");
2606 *Result = EmitFromMemory(ResultVal, Dst.getType());
2607 }
2608}
2609
2611 LValue Dst) {
2612 // HLSL allows storing to scalar values through ExtVector component LValues.
2613 // To support this we need to handle the case where the destination address is
2614 // a scalar.
2615 Address DstAddr = Dst.getExtVectorAddress();
2616 if (!DstAddr.getElementType()->isVectorTy()) {
2617 assert(!Dst.getType()->isVectorType() &&
2618 "this should only occur for non-vector l-values");
2619 Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2620 return;
2621 }
2622
2623 // This access turns into a read/modify/write of the vector. Load the input
2624 // value now.
2625 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2626 const llvm::Constant *Elts = Dst.getExtVectorElts();
2627
2628 llvm::Value *SrcVal = Src.getScalarVal();
2629
2630 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2631 unsigned NumSrcElts = VTy->getNumElements();
2632 unsigned NumDstElts =
2633 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2634 if (NumDstElts == NumSrcElts) {
2635 // Use shuffle vector is the src and destination are the same number of
2636 // elements and restore the vector mask since it is on the side it will be
2637 // stored.
2638 SmallVector<int, 4> Mask(NumDstElts);
2639 for (unsigned i = 0; i != NumSrcElts; ++i)
2640 Mask[getAccessedFieldNo(i, Elts)] = i;
2641
2642 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2643 } else if (NumDstElts > NumSrcElts) {
2644 // Extended the source vector to the same length and then shuffle it
2645 // into the destination.
2646 // FIXME: since we're shuffling with undef, can we just use the indices
2647 // into that? This could be simpler.
2648 SmallVector<int, 4> ExtMask;
2649 for (unsigned i = 0; i != NumSrcElts; ++i)
2650 ExtMask.push_back(i);
2651 ExtMask.resize(NumDstElts, -1);
2652 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2653 // build identity
2655 for (unsigned i = 0; i != NumDstElts; ++i)
2656 Mask.push_back(i);
2657
2658 // When the vector size is odd and .odd or .hi is used, the last element
2659 // of the Elts constant array will be one past the size of the vector.
2660 // Ignore the last element here, if it is greater than the mask size.
2661 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2662 NumSrcElts--;
2663
2664 // modify when what gets shuffled in
2665 for (unsigned i = 0; i != NumSrcElts; ++i)
2666 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2667 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2668 } else {
2669 // We should never shorten the vector
2670 llvm_unreachable("unexpected shorten vector length");
2671 }
2672 } else {
2673 // If the Src is a scalar (not a vector), and the target is a vector it must
2674 // be updating one element.
2675 unsigned InIdx = getAccessedFieldNo(0, Elts);
2676 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2677 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2678 }
2679
2681 Dst.isVolatileQualified());
2682}
2683
2684/// Store of global named registers are always calls to intrinsics.
2686 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2687 "Bad type for register variable");
2688 llvm::MDNode *RegName = cast<llvm::MDNode>(
2689 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2690 assert(RegName && "Register LValue is not metadata");
2691
2692 // We accept integer and pointer types only
2693 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2694 llvm::Type *Ty = OrigTy;
2695 if (OrigTy->isPointerTy())
2696 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2697 llvm::Type *Types[] = { Ty };
2698
2699 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2700 llvm::Value *Value = Src.getScalarVal();
2701 if (OrigTy->isPointerTy())
2702 Value = Builder.CreatePtrToInt(Value, Ty);
2703 Builder.CreateCall(
2704 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2705}
2706
2707// setObjCGCLValueClass - sets class of the lvalue for the purpose of
2708// generating write-barries API. It is currently a global, ivar,
2709// or neither.
2710static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2711 LValue &LV,
2712 bool IsMemberAccess=false) {
2713 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2714 return;
2715
2716 if (isa<ObjCIvarRefExpr>(E)) {
2717 QualType ExpTy = E->getType();
2718 if (IsMemberAccess && ExpTy->isPointerType()) {
2719 // If ivar is a structure pointer, assigning to field of
2720 // this struct follows gcc's behavior and makes it a non-ivar
2721 // writer-barrier conservatively.
2722 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2723 if (ExpTy->isRecordType()) {
2724 LV.setObjCIvar(false);
2725 return;
2726 }
2727 }
2728 LV.setObjCIvar(true);
2729 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2730 LV.setBaseIvarExp(Exp->getBase());
2732 return;
2733 }
2734
2735 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2736 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2737 if (VD->hasGlobalStorage()) {
2738 LV.setGlobalObjCRef(true);
2739 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2740 }
2741 }
2743 return;
2744 }
2745
2746 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2747 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2748 return;
2749 }
2750
2751 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2752 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2753 if (LV.isObjCIvar()) {
2754 // If cast is to a structure pointer, follow gcc's behavior and make it
2755 // a non-ivar write-barrier.
2756 QualType ExpTy = E->getType();
2757 if (ExpTy->isPointerType())
2758 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2759 if (ExpTy->isRecordType())
2760 LV.setObjCIvar(false);
2761 }
2762 return;
2763 }
2764
2765 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2766 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2767 return;
2768 }
2769
2770 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2771 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2772 return;
2773 }
2774
2775 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2776 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2777 return;
2778 }
2779
2780 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2781 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2782 return;
2783 }
2784
2785 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2786 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2787 if (LV.isObjCIvar() && !LV.isObjCArray())
2788 // Using array syntax to assigning to what an ivar points to is not
2789 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2790 LV.setObjCIvar(false);
2791 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2792 // Using array syntax to assigning to what global points to is not
2793 // same as assigning to the global itself. {id *G;} G[i] = 0;
2794 LV.setGlobalObjCRef(false);
2795 return;
2796 }
2797
2798 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2799 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2800 // We don't know if member is an 'ivar', but this flag is looked at
2801 // only in the context of LV.isObjCIvar().
2803 return;
2804 }
2805}
2806
2808 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2809 llvm::Type *RealVarTy, SourceLocation Loc) {
2810 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2812 CGF, VD, Addr, Loc);
2813 else
2814 Addr =
2815 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2816
2817 Addr = Addr.withElementType(RealVarTy);
2818 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2819}
2820
2822 const VarDecl *VD, QualType T) {
2823 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2824 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2825 // Return an invalid address if variable is MT_To (or MT_Enter starting with
2826 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2827 // and MT_To (or MT_Enter) with unified memory, return a valid address.
2828 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2829 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2831 return Address::invalid();
2832 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2833 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2834 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2836 "Expected link clause OR to clause with unified memory enabled.");
2837 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2839 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2840}
2841
2842Address
2844 LValueBaseInfo *PointeeBaseInfo,
2845 TBAAAccessInfo *PointeeTBAAInfo) {
2846 llvm::LoadInst *Load =
2847 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
2849 return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2850 CharUnits(), /*ForPointeeType=*/true,
2851 PointeeBaseInfo, PointeeTBAAInfo);
2852}
2853
2855 LValueBaseInfo PointeeBaseInfo;
2856 TBAAAccessInfo PointeeTBAAInfo;
2857 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2858 &PointeeTBAAInfo);
2859 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2860 PointeeBaseInfo, PointeeTBAAInfo);
2861}
2862
2864 const PointerType *PtrTy,
2865 LValueBaseInfo *BaseInfo,
2866 TBAAAccessInfo *TBAAInfo) {
2867 llvm::Value *Addr = Builder.CreateLoad(Ptr);
2868 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2869 CharUnits(), /*ForPointeeType=*/true,
2870 BaseInfo, TBAAInfo);
2871}
2872
2874 const PointerType *PtrTy) {
2875 LValueBaseInfo BaseInfo;
2876 TBAAAccessInfo TBAAInfo;
2877 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2878 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2879}
2880
2882 const Expr *E, const VarDecl *VD) {
2883 QualType T = E->getType();
2884
2885 // If it's thread_local, emit a call to its wrapper function instead.
2886 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2888 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2889 // Check if the variable is marked as declare target with link clause in
2890 // device codegen.
2891 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2892 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2893 if (Addr.isValid())
2894 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2895 }
2896
2897 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2898
2899 if (VD->getTLSKind() != VarDecl::TLS_None)
2900 V = CGF.Builder.CreateThreadLocalAddress(V);
2901
2902 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2903 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2904 Address Addr(V, RealVarTy, Alignment);
2905 // Emit reference to the private copy of the variable if it is an OpenMP
2906 // threadprivate variable.
2907 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2908 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2909 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2910 E->getExprLoc());
2911 }
2912 LValue LV = VD->getType()->isReferenceType() ?
2913 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2916 setObjCGCLValueClass(CGF.getContext(), E, LV);
2917 return LV;
2918}
2919
2921 llvm::Type *Ty) {
2922 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2923 if (FD->hasAttr<WeakRefAttr>()) {
2925 return aliasee.getPointer();
2926 }
2927
2928 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
2929 return V;
2930}
2931
2933 GlobalDecl GD) {
2934 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2935 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
2936 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2937 return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2939}
2940
2942 llvm::Value *ThisValue) {
2943
2944 return CGF.EmitLValueForLambdaField(FD, ThisValue);
2945}
2946
2947/// Named Registers are named metadata pointing to the register name
2948/// which will be read from/written to as an argument to the intrinsic
2949/// @llvm.read/write_register.
2950/// So far, only the name is being passed down, but other options such as
2951/// register type, allocation type or even optimization options could be
2952/// passed down via the metadata node.
2954 SmallString<64> Name("llvm.named.register.");
2955 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2956 assert(Asm->getLabel().size() < 64-Name.size() &&
2957 "Register name too big");
2958 Name.append(Asm->getLabel());
2959 llvm::NamedMDNode *M =
2960 CGM.getModule().getOrInsertNamedMetadata(Name);
2961 if (M->getNumOperands() == 0) {
2962 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2963 Asm->getLabel());
2964 llvm::Metadata *Ops[] = {Str};
2965 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2966 }
2967
2968 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2969
2970 llvm::Value *Ptr =
2971 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2972 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2973}
2974
2975/// Determine whether we can emit a reference to \p VD from the current
2976/// context, despite not necessarily having seen an odr-use of the variable in
2977/// this context.
2979 const DeclRefExpr *E,
2980 const VarDecl *VD) {
2981 // For a variable declared in an enclosing scope, do not emit a spurious
2982 // reference even if we have a capture, as that will emit an unwarranted
2983 // reference to our capture state, and will likely generate worse code than
2984 // emitting a local copy.
2985 if (E->refersToEnclosingVariableOrCapture())
2986 return false;
2987
2988 // For a local declaration declared in this function, we can always reference
2989 // it even if we don't have an odr-use.
2990 if (VD->hasLocalStorage()) {
2991 return VD->getDeclContext() ==
2992 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2993 }
2994
2995 // For a global declaration, we can emit a reference to it if we know
2996 // for sure that we are able to emit a definition of it.
2997 VD = VD->getDefinition(CGF.getContext());
2998 if (!VD)
2999 return false;
3000
3001 // Don't emit a spurious reference if it might be to a variable that only
3002 // exists on a different device / target.
3003 // FIXME: This is unnecessarily broad. Check whether this would actually be a
3004 // cross-target reference.
3005 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3006 CGF.getLangOpts().OpenCL) {
3007 return false;
3008 }
3009
3010 // We can emit a spurious reference only if the linkage implies that we'll
3011 // be emitting a non-interposable symbol that will be retained until link
3012 // time.
3013 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3014 case llvm::GlobalValue::ExternalLinkage:
3015 case llvm::GlobalValue::LinkOnceODRLinkage:
3016 case llvm::GlobalValue::WeakODRLinkage:
3017 case llvm::GlobalValue::InternalLinkage:
3018 case llvm::GlobalValue::PrivateLinkage:
3019 return true;
3020 default:
3021 return false;
3022 }
3023}
3024
3026 const NamedDecl *ND = E->getDecl();
3027 QualType T = E->getType();
3028
3029 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3030 "should not emit an unevaluated operand");
3031
3032 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3033 // Global Named registers access via intrinsics only
3034 if (VD->getStorageClass() == SC_Register &&
3035 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3036 return EmitGlobalNamedRegister(VD, CGM);
3037
3038 // If this DeclRefExpr does not constitute an odr-use of the variable,
3039 // we're not permitted to emit a reference to it in general, and it might
3040 // not be captured if capture would be necessary for a use. Emit the
3041 // constant value directly instead.
3042 if (E->isNonOdrUse() == NOUR_Constant &&
3043 (VD->getType()->isReferenceType() ||
3044 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3045 VD->getAnyInitializer(VD);
3046 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3047 E->getLocation(), *VD->evaluateValue(), VD->getType());
3048 assert(Val && "failed to emit constant expression");
3049
3050 Address Addr = Address::invalid();
3051 if (!VD->getType()->isReferenceType()) {
3052 // Spill the constant value to a global.
3053 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3054 getContext().getDeclAlign(VD));
3055 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3056 auto *PTy = llvm::PointerType::get(
3057 VarTy, getTypes().getTargetAddressSpace(VD->getType()));
3058 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3059 } else {
3060 // Should we be using the alignment of the constant pointer we emitted?
3061 CharUnits Alignment =
3063 /* BaseInfo= */ nullptr,
3064 /* TBAAInfo= */ nullptr,
3065 /* forPointeeType= */ true);
3066 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3067 }
3068 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3069 }
3070
3071 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3072
3073 // Check for captured variables.
3074 if (E->refersToEnclosingVariableOrCapture()) {
3075 VD = VD->getCanonicalDecl();
3076 if (auto *FD = LambdaCaptureFields.lookup(VD))
3077 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3078 if (CapturedStmtInfo) {
3079 auto I = LocalDeclMap.find(VD);
3080 if (I != LocalDeclMap.end()) {
3081 LValue CapLVal;
3082 if (VD->getType()->isReferenceType())
3083 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3085 else
3086 CapLVal = MakeAddrLValue(I->second, T);
3087 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3088 // in simd context.
3089 if (getLangOpts().OpenMP &&
3091 CapLVal.setNontemporal(/*Value=*/true);
3092 return CapLVal;
3093 }
3094 LValue CapLVal =
3097 Address LValueAddress = CapLVal.getAddress();
3098 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3099 LValueAddress.getElementType(),
3100 getContext().getDeclAlign(VD)),
3101 CapLVal.getType(),
3103 CapLVal.getTBAAInfo());
3104 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3105 // in simd context.
3106 if (getLangOpts().OpenMP &&
3108 CapLVal.setNontemporal(/*Value=*/true);
3109 return CapLVal;
3110 }
3111
3112 assert(isa<BlockDecl>(CurCodeDecl));
3113 Address addr = GetAddrOfBlockDecl(VD);
3114 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3115 }
3116 }
3117
3118 // FIXME: We should be able to assert this for FunctionDecls as well!
3119 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3120 // those with a valid source location.
3121 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3122 !E->getLocation().isValid()) &&
3123 "Should not use decl without marking it used!");
3124
3125 if (ND->hasAttr<WeakRefAttr>()) {
3126 const auto *VD = cast<ValueDecl>(ND);
3128 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3129 }
3130
3131 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3132 // Check if this is a global variable.
3133 if (VD->hasLinkage() || VD->isStaticDataMember())
3134 return EmitGlobalVarDeclLValue(*this, E, VD);
3135
3136 Address addr = Address::invalid();
3137
3138 // The variable should generally be present in the local decl map.
3139 auto iter = LocalDeclMap.find(VD);
3140 if (iter != LocalDeclMap.end()) {
3141 addr = iter->second;
3142
3143 // Otherwise, it might be static local we haven't emitted yet for
3144 // some reason; most likely, because it's in an outer function.
3145 } else if (VD->isStaticLocal()) {
3146 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3148 addr = Address(
3149 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3150
3151 // No other cases for now.
3152 } else {
3153 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3154 }
3155
3156 // Handle threadlocal function locals.
3157 if (VD->getTLSKind() != VarDecl::TLS_None)
3158 addr = addr.withPointer(
3159 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3161
3162 // Check for OpenMP threadprivate variables.
3163 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3164 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3166 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3167 E->getExprLoc());
3168 }
3169
3170 // Drill into block byref variables.
3171 bool isBlockByref = VD->isEscapingByref();
3172 if (isBlockByref) {
3173 addr = emitBlockByrefAddress(addr, VD);
3174 }
3175
3176 // Drill into reference types.
3177 LValue LV = VD->getType()->isReferenceType() ?
3178 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3180
3181 bool isLocalStorage = VD->hasLocalStorage();
3182
3183 bool NonGCable = isLocalStorage &&
3184 !VD->getType()->isReferenceType() &&
3185 !isBlockByref;
3186 if (NonGCable) {
3188 LV.setNonGC(true);
3189 }
3190
3191 bool isImpreciseLifetime =
3192 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3193 if (isImpreciseLifetime)
3196 return LV;
3197 }
3198
3199 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3200 return EmitFunctionDeclLValue(*this, E, FD);
3201
3202 // FIXME: While we're emitting a binding from an enclosing scope, all other
3203 // DeclRefExprs we see should be implicitly treated as if they also refer to
3204 // an enclosing scope.
3205 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3206 if (E->refersToEnclosingVariableOrCapture()) {
3207 auto *FD = LambdaCaptureFields.lookup(BD);
3208 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3209 }
3210 return EmitLValue(BD->getBinding());
3211 }
3212
3213 // We can form DeclRefExprs naming GUID declarations when reconstituting
3214 // non-type template parameters into expressions.
3215 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3218
3219 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3220 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3221 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3222
3223 if (AS != T.getAddressSpace()) {
3224 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3225 auto PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3227 CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3228 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3229 }
3230
3231 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3232 }
3233
3234 llvm_unreachable("Unhandled DeclRefExpr");
3235}
3236
3238 // __extension__ doesn't affect lvalue-ness.
3239 if (E->getOpcode() == UO_Extension)
3240 return EmitLValue(E->getSubExpr());
3241
3242 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3243 switch (E->getOpcode()) {
3244 default: llvm_unreachable("Unknown unary operator lvalue!");
3245 case UO_Deref: {
3246 QualType T = E->getSubExpr()->getType()->getPointeeType();
3247 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3248
3249 LValueBaseInfo BaseInfo;
3250 TBAAAccessInfo TBAAInfo;
3251 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3252 &TBAAInfo);
3253 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3255
3256 // We should not generate __weak write barrier on indirect reference
3257 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3258 // But, we continue to generate __strong write barrier on indirect write
3259 // into a pointer to object.
3260 if (getLangOpts().ObjC &&
3261 getLangOpts().getGC() != LangOptions::NonGC &&
3262 LV.isObjCWeak())
3264 return LV;
3265 }
3266 case UO_Real:
3267 case UO_Imag: {
3268 LValue LV = EmitLValue(E->getSubExpr());
3269 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3270
3271 // __real is valid on scalars. This is a faster way of testing that.
3272 // __imag can only produce an rvalue on scalars.
3273 if (E->getOpcode() == UO_Real &&
3274 !LV.getAddress().getElementType()->isStructTy()) {
3275 assert(E->getSubExpr()->getType()->isArithmeticType());
3276 return LV;
3277 }
3278
3279 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3280
3281 Address Component =
3282 (E->getOpcode() == UO_Real
3284 : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3285 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3287 ElemLV.getQuals().addQualifiers(LV.getQuals());
3288 return ElemLV;
3289 }
3290 case UO_PreInc:
3291 case UO_PreDec: {
3292 LValue LV = EmitLValue(E->getSubExpr());
3293 bool isInc = E->getOpcode() == UO_PreInc;
3294
3295 if (E->getType()->isAnyComplexType())
3296 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3297 else
3298 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3299 return LV;
3300 }
3301 }
3302}
3303
3307}
3308
3312}
3313
3315 auto SL = E->getFunctionName();
3316 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3317 StringRef FnName = CurFn->getName();
3318 if (FnName.starts_with("\01"))
3319 FnName = FnName.substr(1);
3320 StringRef NameItems[] = {
3321 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3322 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3323 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3324 std::string Name = std::string(SL->getString());
3325 if (!Name.empty()) {
3326 unsigned Discriminator =
3328 if (Discriminator)
3329 Name += "_" + Twine(Discriminator + 1).str();
3330 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3332 } else {
3333 auto C =
3334 CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3336 }
3337 }
3338 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3340}
3341
3342/// Emit a type description suitable for use by a runtime sanitizer library. The
3343/// format of a type descriptor is
3344///
3345/// \code
3346/// { i16 TypeKind, i16 TypeInfo }
3347/// \endcode
3348///
3349/// followed by an array of i8 containing the type name with extra information
3350/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3351/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3352/// anything else.
3354 // Only emit each type's descriptor once.
3355 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3356 return C;
3357
3358 uint16_t TypeKind = TK_Unknown;
3359 uint16_t TypeInfo = 0;
3360 bool IsBitInt = false;
3361
3362 if (T->isIntegerType()) {
3363 TypeKind = TK_Integer;
3364 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3365 (T->isSignedIntegerType() ? 1 : 0);
3366 // Follow suggestion from discussion of issue 64100.
3367 // So we can write the exact amount of bits in TypeName after '\0'
3368 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3369 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3370 // Do a sanity checks as we are using 32-bit type to store bit length.
3371 assert(getContext().getTypeSize(T) > 0 &&
3372 " non positive amount of bits in __BitInt type");
3373 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3374 " too many bits in __BitInt type");
3375
3376 // Redefine TypeKind with the actual __BitInt type if we have signed
3377 // BitInt.
3378 TypeKind = TK_BitInt;
3379 IsBitInt = true;
3380 }
3381 } else if (T->isFloatingType()) {
3382 TypeKind = TK_Float;
3384 }
3385
3386 // Format the type name as if for a diagnostic, including quotes and
3387 // optionally an 'aka'.
3388 SmallString<32> Buffer;
3390 (intptr_t)T.getAsOpaquePtr(), StringRef(),
3391 StringRef(), {}, Buffer, {});
3392
3393 if (IsBitInt) {
3394 // The Structure is: 0 to end the string, 32 bit unsigned integer in target
3395 // endianness, zero.
3396 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3397 const auto *EIT = T->castAs<BitIntType>();
3398 uint32_t Bits = EIT->getNumBits();
3399 llvm::support::endian::write32(S + 1, Bits,
3400 getTarget().isBigEndian()
3401 ? llvm::endianness::big
3402 : llvm::endianness::little);
3403 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3404 Buffer.append(Str);
3405 }
3406
3407 llvm::Constant *Components[] = {
3408 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3409 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3410 };
3411 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3412
3413 auto *GV = new llvm::GlobalVariable(
3414 CGM.getModule(), Descriptor->getType(),
3415 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3416 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3418
3419 // Remember the descriptor for this type.
3421
3422 return GV;
3423}
3424
3425llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3426 llvm::Type *TargetTy = IntPtrTy;
3427
3428 if (V->getType() == TargetTy)
3429 return V;
3430
3431 // Floating-point types which fit into intptr_t are bitcast to integers
3432 // and then passed directly (after zero-extension, if necessary).
3433 if (V->getType()->isFloatingPointTy()) {
3434 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3435 if (Bits <= TargetTy->getIntegerBitWidth())
3436 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3437 Bits));
3438 }
3439
3440 // Integers which fit in intptr_t are zero-extended and passed directly.
3441 if (V->getType()->isIntegerTy() &&
3442 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3443 return Builder.CreateZExt(V, TargetTy);
3444
3445 // Pointers are passed directly, everything else is passed by address.
3446 if (!V->getType()->isPointerTy()) {
3447 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3448 Builder.CreateStore(V, Ptr);
3449 V = Ptr.getPointer();
3450 }
3451 return Builder.CreatePtrToInt(V, TargetTy);
3452}
3453
3454/// Emit a representation of a SourceLocation for passing to a handler
3455/// in a sanitizer runtime library. The format for this data is:
3456/// \code
3457/// struct SourceLocation {
3458/// const char *Filename;
3459/// int32_t Line, Column;
3460/// };
3461/// \endcode
3462/// For an invalid SourceLocation, the Filename pointer is null.
3464 llvm::Constant *Filename;
3465 int Line, Column;
3466
3468 if (PLoc.isValid()) {
3469 StringRef FilenameString = PLoc.getFilename();
3470
3471 int PathComponentsToStrip =
3472 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3473 if (PathComponentsToStrip < 0) {
3474 assert(PathComponentsToStrip != INT_MIN);
3475 int PathComponentsToKeep = -PathComponentsToStrip;
3476 auto I = llvm::sys::path::rbegin(FilenameString);
3477 auto E = llvm::sys::path::rend(FilenameString);
3478 while (I != E && --PathComponentsToKeep)
3479 ++I;
3480
3481 FilenameString = FilenameString.substr(I - E);
3482 } else if (PathComponentsToStrip > 0) {
3483 auto I = llvm::sys::path::begin(FilenameString);
3484 auto E = llvm::sys::path::end(FilenameString);
3485 while (I != E && PathComponentsToStrip--)
3486 ++I;
3487
3488 if (I != E)
3489 FilenameString =
3490 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3491 else
3492 FilenameString = llvm::sys::path::filename(FilenameString);
3493 }
3494
3495 auto FilenameGV =
3496 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3498 cast<llvm::GlobalVariable>(
3499 FilenameGV.getPointer()->stripPointerCasts()));
3500 Filename = FilenameGV.getPointer();
3501 Line = PLoc.getLine();
3502 Column = PLoc.getColumn();
3503 } else {
3504 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3505 Line = Column = 0;
3506 }
3507
3508 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3509 Builder.getInt32(Column)};
3510
3511 return llvm::ConstantStruct::getAnon(Data);
3512}
3513
3514namespace {
3515/// Specify under what conditions this check can be recovered
3516enum class CheckRecoverableKind {
3517 /// Always terminate program execution if this check fails.
3519 /// Check supports recovering, runtime has both fatal (noreturn) and
3520 /// non-fatal handlers for this check.
3521 Recoverable,
3522 /// Runtime conditionally aborts, always need to support recovery.
3524};
3525}
3526
3527static CheckRecoverableKind
3529 if (Ordinal == SanitizerKind::SO_Vptr)
3530 return CheckRecoverableKind::AlwaysRecoverable;
3531 else if (Ordinal == SanitizerKind::SO_Return ||
3532 Ordinal == SanitizerKind::SO_Unreachable)
3533 return CheckRecoverableKind::Unrecoverable;
3534 else
3535 return CheckRecoverableKind::Recoverable;
3536}
3537
3538namespace {
3539struct SanitizerHandlerInfo {
3540 char const *const Name;
3541 unsigned Version;
3542};
3543}
3544
3545const SanitizerHandlerInfo SanitizerHandlers[] = {
3546#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3548#undef SANITIZER_CHECK
3549};
3550
3552 llvm::FunctionType *FnType,
3554 SanitizerHandler CheckHandler,
3555 CheckRecoverableKind RecoverKind, bool IsFatal,
3556 llvm::BasicBlock *ContBB, bool NoMerge) {
3557 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3558 std::optional<ApplyDebugLocation> DL;
3559 if (!CGF.Builder.getCurrentDebugLocation()) {
3560 // Ensure that the call has at least an artificial debug location.
3561 DL.emplace(CGF, SourceLocation());
3562 }
3563 bool NeedsAbortSuffix =
3564 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3565 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3566 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3567 const StringRef CheckName = CheckInfo.Name;
3568 std::string FnName = "__ubsan_handle_" + CheckName.str();
3569 if (CheckInfo.Version && !MinimalRuntime)
3570 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3571 if (MinimalRuntime)
3572 FnName += "_minimal";
3573 if (NeedsAbortSuffix)
3574 FnName += "_abort";
3575 bool MayReturn =
3576 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3577
3578 llvm::AttrBuilder B(CGF.getLLVMContext());
3579 if (!MayReturn) {
3580 B.addAttribute(llvm::Attribute::NoReturn)
3581 .addAttribute(llvm::Attribute::NoUnwind);
3582 }
3583 B.addUWTableAttr(llvm::UWTableKind::Default);
3584
3585 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3586 FnType, FnName,
3587 llvm::AttributeList::get(CGF.getLLVMContext(),
3588 llvm::AttributeList::FunctionIndex, B),
3589 /*Local=*/true);
3590 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3591 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
3592 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3593 if (NoMerge)
3594 HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
3595 if (!MayReturn) {
3596 HandlerCall->setDoesNotReturn();
3597 CGF.Builder.CreateUnreachable();
3598 } else {
3599 CGF.Builder.CreateBr(ContBB);
3600 }
3601}
3602
3604 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
3605 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3606 ArrayRef<llvm::Value *> DynamicArgs) {
3607 assert(IsSanitizerScope);
3608 assert(Checked.size() > 0);
3609 assert(CheckHandler >= 0 &&
3610 size_t(CheckHandler) < std::size(SanitizerHandlers));
3611 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3612
3613 llvm::Value *FatalCond = nullptr;
3614 llvm::Value *RecoverableCond = nullptr;
3615 llvm::Value *TrapCond = nullptr;
3616 bool NoMerge = false;
3617 // Expand checks into:
3618 // (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
3619 // We need separate allow_ubsan_check intrinsics because they have separately
3620 // specified cutoffs.
3621 // This expression looks expensive but will be simplified after
3622 // LowerAllowCheckPass.
3623 for (auto &[Check, Ord] : Checked) {
3624 llvm::Value *GuardedCheck = Check;
3627 llvm::Value *Allow = Builder.CreateCall(
3628 CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3629 llvm::ConstantInt::get(CGM.Int8Ty, Ord));
3630 GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
3631 }
3632
3633 // -fsanitize-trap= overrides -fsanitize-recover=.
3634 llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
3636 ? RecoverableCond
3637 : FatalCond;
3638 Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;
3639
3641 NoMerge = true;
3642 }
3643
3644 if (TrapCond)
3645 EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
3646 if (!FatalCond && !RecoverableCond)
3647 return;
3648
3649 llvm::Value *JointCond;
3650 if (FatalCond && RecoverableCond)
3651 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3652 else
3653 JointCond = FatalCond ? FatalCond : RecoverableCond;
3654 assert(JointCond);
3655
3656 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3657 assert(SanOpts.has(Checked[0].second));
3658#ifndef NDEBUG
3659 for (int i = 1, n = Checked.size(); i < n; ++i) {
3660 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3661 "All recoverable kinds in a single check must be same!");
3662 assert(SanOpts.has(Checked[i].second));
3663 }
3664#endif
3665
3666 llvm::BasicBlock *Cont = createBasicBlock("cont");
3667 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3668 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3669 // Give hint that we very much don't expect to execute the handler
3670 llvm::MDBuilder MDHelper(getLLVMContext());
3671 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3672 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3673 EmitBlock(Handlers);
3674
3675 // Handler functions take an i8* pointing to the (handler-specific) static
3676 // information block, followed by a sequence of intptr_t arguments
3677 // representing operand values.
3680 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3681 Args.reserve(DynamicArgs.size() + 1);
3682 ArgTypes.reserve(DynamicArgs.size() + 1);
3683
3684 // Emit handler arguments and create handler function type.
3685 if (!StaticArgs.empty()) {
3686 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3687 auto *InfoPtr = new llvm::GlobalVariable(
3688 CGM.getModule(), Info->getType(), false,
3689 llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3690 llvm::GlobalVariable::NotThreadLocal,
3691 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3692 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3694 Args.push_back(InfoPtr);
3695 ArgTypes.push_back(Args.back()->getType());
3696 }
3697
3698 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3699 Args.push_back(EmitCheckValue(DynamicArgs[i]));
3700 ArgTypes.push_back(IntPtrTy);
3701 }
3702 }
3703
3704 llvm::FunctionType *FnType =
3705 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3706
3707 if (!FatalCond || !RecoverableCond) {
3708 // Simple case: we need to generate a single handler call, either
3709 // fatal, or non-fatal.
3710 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3711 (FatalCond != nullptr), Cont, NoMerge);
3712 } else {
3713 // Emit two handler calls: first one for set of unrecoverable checks,
3714 // another one for recoverable.
3715 llvm::BasicBlock *NonFatalHandlerBB =
3716 createBasicBlock("non_fatal." + CheckName);
3717 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3718 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3719 EmitBlock(FatalHandlerBB);
3720 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3721 NonFatalHandlerBB, NoMerge);
3722 EmitBlock(NonFatalHandlerBB);
3723 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3724 Cont, NoMerge);
3725 }
3726
3727 EmitBlock(Cont);
3728}
3729
3731 SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
3732 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
3733 ArrayRef<llvm::Constant *> StaticArgs) {
3734 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3735
3736 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3737 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3738
3739 llvm::MDBuilder MDHelper(getLLVMContext());
3740 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3741 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3742
3743 EmitBlock(CheckBB);
3744
3745 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
3746
3747 llvm::CallInst *CheckCall;
3748 llvm::FunctionCallee SlowPathFn;
3749 if (WithDiag) {
3750 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3751 auto *InfoPtr =
3752 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3753 llvm::GlobalVariable::PrivateLinkage, Info);
3754 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3756
3757 SlowPathFn = CGM.getModule().getOrInsertFunction(
3758 "__cfi_slowpath_diag",
3759 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3760 false));
3761 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3762 } else {
3763 SlowPathFn = CGM.getModule().getOrInsertFunction(
3764 "__cfi_slowpath",
3765 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3766 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3767 }
3768
3770 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3771 CheckCall->setDoesNotThrow();
3772
3773 EmitBlock(Cont);
3774}
3775
3776// Emit a stub for __cfi_check function so that the linker knows about this
3777// symbol in LTO mode.
3779 llvm::Module *M = &CGM.getModule();
3780 ASTContext &C = getContext();
3781 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3782
3784 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3785 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3786 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3788 FnArgs.push_back(&ArgCallsiteTypeId);
3789 FnArgs.push_back(&ArgAddr);
3790 FnArgs.push_back(&ArgCFICheckFailData);
3791 const CGFunctionInfo &FI =
3793
3794 llvm::Function *F = llvm::Function::Create(
3795 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3796 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3797 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3799 F->setAlignment(llvm::Align(4096));
3800 CGM.setDSOLocal(F);
3801
3802 llvm::LLVMContext &Ctx = M->getContext();
3803 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3804 // CrossDSOCFI pass is not executed if there is no executable code.
3805 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3806 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3807 llvm::ReturnInst::Create(Ctx, nullptr, BB);
3808}
3809
3810// This function is basically a switch over the CFI failure kind, which is
3811// extracted from CFICheckFailData (1st function argument). Each case is either
3812// llvm.trap or a call to one of the two runtime handlers, based on
3813// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3814// failure kind) traps, but this should really never happen. CFICheckFailData
3815// can be nullptr if the calling module has -fsanitize-trap behavior for this
3816// check kind; in this case __cfi_check_fail traps as well.
3818 SanitizerScope SanScope(this);
3819 FunctionArgList Args;
3824 Args.push_back(&ArgData);
3825 Args.push_back(&ArgAddr);
3826
3827 const CGFunctionInfo &FI =
3829
3830 llvm::Function *F = llvm::Function::Create(
3831 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3832 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3833
3834 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3836 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3837
3838 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3839 SourceLocation());
3840
3841 // This function is not affected by NoSanitizeList. This function does
3842 // not have a source location, but "src:*" would still apply. Revert any
3843 // changes to SanOpts made in StartFunction.
3845
3846 llvm::Value *Data =
3847 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3848 CGM.getContext().VoidPtrTy, ArgData.getLocation());
3849 llvm::Value *Addr =
3850 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3851 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3852
3853 // Data == nullptr means the calling module has trap behaviour for this check.
3854 llvm::Value *DataIsNotNullPtr =
3855 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3856 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3857
3858 llvm::StructType *SourceLocationTy =
3859 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3860 llvm::StructType *CfiCheckFailDataTy =
3861 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3862
3863 llvm::Value *V = Builder.CreateConstGEP2_32(
3864 CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
3865
3866 Address CheckKindAddr(V, Int8Ty, getIntAlign());
3867 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3868
3869 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3871 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3872 llvm::Value *ValidVtable = Builder.CreateZExt(
3873 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3874 {Addr, AllVtables}),
3875 IntPtrTy);
3876
3877 const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
3878 {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
3879 {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
3880 {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
3881 {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
3882 {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
3883
3885 Checks;
3886 for (auto CheckKindOrdinalPair : CheckKinds) {
3887 int Kind = CheckKindOrdinalPair.first;
3888 SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
3889 llvm::Value *Cond =
3890 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3891 if (CGM.getLangOpts().Sanitize.has(Ordinal))
3892 EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
3893 {}, {Data, Addr, ValidVtable});
3894 else
3895 EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3896 }
3897
3899 // The only reference to this function will be created during LTO link.
3900 // Make sure it survives until then.
3901 CGM.addUsedGlobal(F);
3902}
3903
3905 if (SanOpts.has(SanitizerKind::Unreachable)) {
3906 SanitizerScope SanScope(this);
3907 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3908 SanitizerKind::SO_Unreachable),
3909 SanitizerHandler::BuiltinUnreachable,
3911 }
3912 Builder.CreateUnreachable();
3913}
3914
3915void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3916 SanitizerHandler CheckHandlerID,
3917 bool NoMerge) {
3918 llvm::BasicBlock *Cont = createBasicBlock("cont");
3919
3920 // If we're optimizing, collapse all calls to trap down to just one per
3921 // check-type per function to save on code size.
3922 if ((int)TrapBBs.size() <= CheckHandlerID)
3923 TrapBBs.resize(CheckHandlerID + 1);
3924
3925 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3926
3927 NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
3928 (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3929
3930 if (TrapBB && !NoMerge) {
3931 auto Call = TrapBB->begin();
3932 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3933
3934 Call->applyMergedLocation(Call->getDebugLoc(),
3935 Builder.getCurrentDebugLocation());
3936 Builder.CreateCondBr(Checked, Cont, TrapBB);
3937 } else {
3938 TrapBB = createBasicBlock("trap");
3939 Builder.CreateCondBr(Checked, Cont, TrapBB);
3940 EmitBlock(TrapBB);
3941
3942 llvm::CallInst *TrapCall =
3943 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3944 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
3945
3946 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3947 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3949 TrapCall->addFnAttr(A);
3950 }
3951 if (NoMerge)
3952 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
3953 TrapCall->setDoesNotReturn();
3954 TrapCall->setDoesNotThrow();
3955 Builder.CreateUnreachable();
3956 }
3957
3958 EmitBlock(Cont);
3959}
3960
3961llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3962 llvm::CallInst *TrapCall =
3963 Builder.CreateCall(CGM.getIntrinsic(IntrID));
3964
3965 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3966 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3968 TrapCall->addFnAttr(A);
3969 }
3970
3972 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
3973 return TrapCall;
3974}
3975
3977 LValueBaseInfo *BaseInfo,
3978 TBAAAccessInfo *TBAAInfo) {
3979 assert(E->getType()->isArrayType() &&
3980 "Array to pointer decay must have array source type!");
3981
3982 // Expressions of array type can't be bitfields or vector elements.
3983 LValue LV = EmitLValue(E);
3984 Address Addr = LV.getAddress();
3985
3986 // If the array type was an incomplete type, we need to make sure
3987 // the decay ends up being the right type.
3988 llvm::Type *NewTy = ConvertType(E->getType());
3989 Addr = Addr.withElementType(NewTy);
3990
3991 // Note that VLA pointers are always decayed, so we don't need to do
3992 // anything here.
3993 if (!E->getType()->isVariableArrayType()) {
3994 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3995 "Expected pointer to array");
3996 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3997 }
3998
3999 // The result of this decay conversion points to an array element within the
4000 // base lvalue. However, since TBAA currently does not support representing
4001 // accesses to elements of member arrays, we conservatively represent accesses
4002 // to the pointee object as if it had no any base lvalue specified.
4003 // TODO: Support TBAA for member arrays.
4005 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4006 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
4007
4008 return Addr.withElementType(ConvertTypeForMem(EltType));
4009}
4010
4011/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4012/// array to pointer, return the array subexpression.
4013static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4014 // If this isn't just an array->pointer decay, bail out.
4015 const auto *CE = dyn_cast<CastExpr>(E);
4016 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4017 return nullptr;
4018
4019 // If this is a decay from variable width array, bail out.
4020 const Expr *SubExpr = CE->getSubExpr();
4021 if (SubExpr->getType()->isVariableArrayType())
4022 return nullptr;
4023
4024 return SubExpr;
4025}
4026
4028 llvm::Type *elemType,
4029 llvm::Value *ptr,
4030 ArrayRef<llvm::Value*> indices,
4031 bool inbounds,
4032 bool signedIndices,
4033 SourceLocation loc,
4034 const llvm::Twine &name = "arrayidx") {
4035 if (inbounds) {
4036 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4038 name);
4039 } else {
4040 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4041 }
4042}
4043
4046 llvm::Type *elementType, bool inbounds,
4047 bool signedIndices, SourceLocation loc,
4048 CharUnits align,
4049 const llvm::Twine &name = "arrayidx") {
4050 if (inbounds) {
4051 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4053 align, name);
4054 } else {
4055 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4056 }
4057}
4058
4060 llvm::Value *idx,
4061 CharUnits eltSize) {
4062 // If we have a constant index, we can use the exact offset of the
4063 // element we're accessing.
4064 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
4065 CharUnits offset = constantIdx->getZExtValue() * eltSize;
4066 return arrayAlign.alignmentAtOffset(offset);
4067
4068 // Otherwise, use the worst-case alignment for any element.
4069 } else {
4070 return arrayAlign.alignmentOfArrayElement(eltSize);
4071 }
4072}
4073
4075 const VariableArrayType *vla) {
4076 QualType eltType;
4077 do {
4078 eltType = vla->getElementType();
4079 } while ((vla = ctx.getAsVariableArrayType(eltType)));
4080 return eltType;
4081}
4082
4084 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4085}
4086
4087static bool hasBPFPreserveStaticOffset(const Expr *E) {
4088 if (!E)
4089 return false;
4090 QualType PointeeType = E->getType()->getPointeeType();
4091 if (PointeeType.isNull())
4092 return false;
4093 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4094 return hasBPFPreserveStaticOffset(BaseDecl);
4095 return false;
4096}
4097
4098// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4100 Address &Addr) {
4101 if (!CGF.getTarget().getTriple().isBPF())
4102 return Addr;
4103
4104 llvm::Function *Fn =
4105 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4106 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4107 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4108}
4109
4110/// Given an array base, check whether its member access belongs to a record
4111/// with preserve_access_index attribute or not.
4112static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4113 if (!ArrayBase || !CGF.getDebugInfo())
4114 return false;
4115
4116 // Only support base as either a MemberExpr or DeclRefExpr.
4117 // DeclRefExpr to cover cases like:
4118 // struct s { int a; int b[10]; };
4119 // struct s *p;
4120 // p[1].a
4121 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4122 // p->b[5] is a MemberExpr example.
4123 const Expr *E = ArrayBase->IgnoreImpCasts();
4124 if (const auto *ME = dyn_cast<MemberExpr>(E))
4125 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4126
4127 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4128 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4129 if (!VarDef)
4130 return false;
4131
4132 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4133 if (!PtrT)
4134 return false;
4135
4136 const auto *PointeeT = PtrT->getPointeeType()
4138 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4139 return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4140 return false;
4141 }
4142
4143 return false;
4144}
4145
4148 QualType eltType, bool inbounds,
4149 bool signedIndices, SourceLocation loc,
4150 QualType *arrayType = nullptr,
4151 const Expr *Base = nullptr,
4152 const llvm::Twine &name = "arrayidx") {
4153 // All the indices except that last must be zero.
4154#ifndef NDEBUG
4155 for (auto *idx : indices.drop_back())
4156 assert(isa<llvm::ConstantInt>(idx) &&
4157 cast<llvm::ConstantInt>(idx)->isZero());
4158#endif
4159
4160 // Determine the element size of the statically-sized base. This is
4161 // the thing that the indices are expressed in terms of.
4162 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4163 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4164 }
4165
4166 // We can use that to compute the best alignment of the element.
4167 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4168 CharUnits eltAlign =
4169 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4170
4172 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4173
4174 llvm::Value *eltPtr;
4175 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4176 if (!LastIndex ||
4178 addr = emitArraySubscriptGEP(CGF, addr, indices,
4179 CGF.ConvertTypeForMem(eltType), inbounds,
4180 signedIndices, loc, eltAlign, name);
4181 return addr;
4182 } else {
4183 // Remember the original array subscript for bpf target
4184 unsigned idx = LastIndex->getZExtValue();
4185 llvm::DIType *DbgInfo = nullptr;
4186 if (arrayType)
4187 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4188 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4189 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4190 idx, DbgInfo);
4191 }
4192
4193 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4194}
4195
4196/// The offset of a field from the beginning of the record.
4198 const FieldDecl *Field, int64_t &Offset) {
4199 ASTContext &Ctx = CGF.getContext();
4200 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4201 unsigned FieldNo = 0;
4202
4203 for (const FieldDecl *FD : RD->fields()) {
4204 if (FD == Field) {
4205 Offset += Layout.getFieldOffset(FieldNo);
4206 return true;
4207 }
4208
4209 QualType Ty = FD->getType();
4210 if (Ty->isRecordType())
4211 if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4212 Offset += Layout.getFieldOffset(FieldNo);
4213 return true;
4214 }
4215
4216 if (!RD->isUnion())
4217 ++FieldNo;
4218 }
4219
4220 return false;
4221}
4222
4223/// Returns the relative offset difference between \p FD1 and \p FD2.
4224/// \code
4225/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4226/// \endcode
4227/// Both fields must be within the same struct.
4228static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4229 const FieldDecl *FD1,
4230 const FieldDecl *FD2) {
4231 const RecordDecl *FD1OuterRec =
4233 const RecordDecl *FD2OuterRec =
4235
4236 if (FD1OuterRec != FD2OuterRec)
4237 // Fields must be within the same RecordDecl.
4238 return std::optional<int64_t>();
4239
4240 int64_t FD1Offset = 0;
4241 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4242 return std::optional<int64_t>();
4243
4244 int64_t FD2Offset = 0;
4245 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4246 return std::optional<int64_t>();
4247
4248 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4249}
4250
4252 bool Accessed) {
4253 // The index must always be an integer, which is not an aggregate. Emit it
4254 // in lexical order (this complexity is, sadly, required by C++17).
4255 llvm::Value *IdxPre =
4256 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4257 bool SignedIndices = false;
4258 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4259 auto *Idx = IdxPre;
4260 if (E->getLHS() != E->getIdx()) {
4261 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4262 Idx = EmitScalarExpr(E->getIdx());
4263 }
4264
4265 QualType IdxTy = E->getIdx()->getType();
4266 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4267 SignedIndices |= IdxSigned;
4268
4269 if (SanOpts.has(SanitizerKind::ArrayBounds))
4270 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4271
4272 // Extend or truncate the index type to 32 or 64-bits.
4273 if (Promote && Idx->getType() != IntPtrTy)
4274 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4275
4276 return Idx;
4277 };
4278 IdxPre = nullptr;
4279
4280 // If the base is a vector type, then we are forming a vector element lvalue
4281 // with this subscript.
4282 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4283 !isa<ExtVectorElementExpr>(E->getBase())) {
4284 // Emit the vector as an lvalue to get its address.
4285 LValue LHS = EmitLValue(E->getBase());
4286 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4287 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4288 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4289 LHS.getBaseInfo(), TBAAAccessInfo());
4290 }
4291
4292 // All the other cases basically behave like simple offsetting.
4293
4294 // Handle the extvector case we ignored above.
4295 if (isa<ExtVectorElementExpr>(E->getBase())) {
4296 LValue LV = EmitLValue(E->getBase());
4297 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4299
4300 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4301 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4302 SignedIndices, E->getExprLoc());
4303 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4304 CGM.getTBAAInfoForSubobject(LV, EltType));
4305 }
4306
4307 LValueBaseInfo EltBaseInfo;
4308 TBAAAccessInfo EltTBAAInfo;
4309 Address Addr = Address::invalid();
4310 if (const VariableArrayType *vla =
4311 getContext().getAsVariableArrayType(E->getType())) {
4312 // The base must be a pointer, which is not an aggregate. Emit
4313 // it. It needs to be emitted first in case it's what captures
4314 // the VLA bounds.
4315 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4316 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4317
4318 // The element count here is the total number of non-VLA elements.
4319 llvm::Value *numElements = getVLASize(vla).NumElts;
4320
4321 // Effectively, the multiply by the VLA size is part of the GEP.
4322 // GEP indexes are signed, and scaling an index isn't permitted to
4323 // signed-overflow, so we use the same semantics for our explicit
4324 // multiply. We suppress this if overflow is not undefined behavior.
4325 if (getLangOpts().PointerOverflowDefined) {
4326 Idx = Builder.CreateMul(Idx, numElements);
4327 } else {
4328 Idx = Builder.CreateNSWMul(Idx, numElements);
4329 }
4330
4331 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4332 !getLangOpts().PointerOverflowDefined,
4333 SignedIndices, E->getExprLoc());
4334
4335 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4336 // Indexing over an interface, as in "NSString *P; P[4];"
4337
4338 // Emit the base pointer.
4339 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4340 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4341
4342 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4343 llvm::Value *InterfaceSizeVal =
4344 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4345
4346 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4347
4348 // We don't necessarily build correct LLVM struct types for ObjC
4349 // interfaces, so we can't rely on GEP to do this scaling
4350 // correctly, so we need to cast to i8*. FIXME: is this actually
4351 // true? A lot of other things in the fragile ABI would break...
4352 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4353
4354 // Do the GEP.
4355 CharUnits EltAlign =
4356 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4357 llvm::Value *EltPtr =
4358 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4359 ScaledIdx, false, SignedIndices, E->getExprLoc());
4360 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4361 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4362 // If this is A[i] where A is an array, the frontend will have decayed the
4363 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4364 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4365 // "gep x, i" here. Emit one "gep A, 0, i".
4366 assert(Array->getType()->isArrayType() &&
4367 "Array to pointer decay must have array source type!");
4368 LValue ArrayLV;
4369 // For simple multidimensional array indexing, set the 'accessed' flag for
4370 // better bounds-checking of the base expression.
4371 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4372 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4373 else
4374 ArrayLV = EmitLValue(Array);
4375 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4376
4377 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4378 // If the array being accessed has a "counted_by" attribute, generate
4379 // bounds checking code. The "count" field is at the top level of the
4380 // struct or in an anonymous struct, that's also at the top level. Future
4381 // expansions may allow the "count" to reside at any place in the struct,
4382 // but the value of "counted_by" will be a "simple" path to the count,
4383 // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4384 // similar to emit the correct GEP.
4385 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4386 getLangOpts().getStrictFlexArraysLevel();
4387
4388 if (const auto *ME = dyn_cast<MemberExpr>(Array);
4389 ME &&
4390 ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
4392 const FieldDecl *FAMDecl = cast<FieldDecl>(ME->getMemberDecl());
4393 if (const FieldDecl *CountFD = FAMDecl->findCountedByField()) {
4394 if (std::optional<int64_t> Diff =
4395 getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
4396 CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
4397
4398 // Create a GEP with a byte offset between the FAM and count and
4399 // use that to load the count value.
4401 ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
4402
4403 llvm::Type *CountTy = ConvertType(CountFD->getType());
4404 llvm::Value *Res = Builder.CreateInBoundsGEP(
4405 Int8Ty, Addr.emitRawPointer(*this),
4406 Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
4407 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4408 ".counted_by.load");
4409
4410 // Now emit the bounds checking.
4411 EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
4412 Array->getType(), Accessed);
4413 }
4414 }
4415 }
4416 }
4417
4418 // Propagate the alignment from the array itself to the result.
4419 QualType arrayType = Array->getType();
4420 Addr = emitArraySubscriptGEP(
4421 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4422 E->getType(), !getLangOpts().PointerOverflowDefined, SignedIndices,
4423 E->getExprLoc(), &arrayType, E->getBase());
4424 EltBaseInfo = ArrayLV.getBaseInfo();
4425 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4426 } else {
4427 // The base must be a pointer; emit it with an estimate of its alignment.
4428 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4429 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4430 QualType ptrType = E->getBase()->getType();
4431 Addr = emitArraySubscriptGEP(
4432 *this, Addr, Idx, E->getType(), !getLangOpts().PointerOverflowDefined,
4433 SignedIndices, E->getExprLoc(), &ptrType, E->getBase());
4434 }
4435
4436 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4437
4438 if (getLangOpts().ObjC &&
4439 getLangOpts().getGC() != LangOptions::NonGC) {
4442 }
4443 return LV;
4444}
4445
4446llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
4447 llvm::Value *Idx = EmitScalarExpr(E);
4448 if (Idx->getType() == IntPtrTy)
4449 return Idx;
4450 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
4451 return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
4452}
4453
4455 assert(
4456 !E->isIncomplete() &&
4457 "incomplete matrix subscript expressions should be rejected during Sema");
4458 LValue Base = EmitLValue(E->getBase());
4459
4460 // Extend or truncate the index type to 32 or 64-bits if needed.
4461 llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
4462 llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
4463
4464 llvm::Value *NumRows = Builder.getIntN(
4465 RowIdx->getType()->getScalarSizeInBits(),
4466 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4467 llvm::Value *FinalIdx =
4468 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4469 return LValue::MakeMatrixElt(
4470 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4471 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4472}
4473
4475 LValueBaseInfo &BaseInfo,
4476 TBAAAccessInfo &TBAAInfo,
4477 QualType BaseTy, QualType ElTy,
4478 bool IsLowerBound) {
4479 LValue BaseLVal;
4480 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4481 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4482 if (BaseTy->isArrayType()) {
4483 Address Addr = BaseLVal.getAddress();
4484 BaseInfo = BaseLVal.getBaseInfo();
4485
4486 // If the array type was an incomplete type, we need to make sure
4487 // the decay ends up being the right type.
4488 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4489 Addr = Addr.withElementType(NewTy);
4490
4491 // Note that VLA pointers are always decayed, so we don't need to do
4492 // anything here.
4493 if (!BaseTy->isVariableArrayType()) {
4494 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4495 "Expected pointer to array");
4496 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4497 }
4498
4499 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4500 }
4501 LValueBaseInfo TypeBaseInfo;
4502 TBAAAccessInfo TypeTBAAInfo;
4503 CharUnits Align =
4504 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4505 BaseInfo.mergeForCast(TypeBaseInfo);
4506 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4507 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4508 CGF.ConvertTypeForMem(ElTy), Align);
4509 }
4510 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4511}
4512
4514 bool IsLowerBound) {
4515
4516 assert(!E->isOpenACCArraySection() &&
4517 "OpenACC Array section codegen not implemented");
4518
4520 QualType ResultExprTy;
4521 if (auto *AT = getContext().getAsArrayType(BaseTy))
4522 ResultExprTy = AT->getElementType();
4523 else
4524 ResultExprTy = BaseTy->getPointeeType();
4525 llvm::Value *Idx = nullptr;
4526 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4527 // Requesting lower bound or upper bound, but without provided length and
4528 // without ':' symbol for the default length -> length = 1.
4529 // Idx = LowerBound ?: 0;
4530 if (auto *LowerBound = E->getLowerBound()) {
4531 Idx = Builder.CreateIntCast(
4532 EmitScalarExpr(LowerBound), IntPtrTy,
4533 LowerBound->getType()->hasSignedIntegerRepresentation());
4534 } else
4535 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4536 } else {
4537 // Try to emit length or lower bound as constant. If this is possible, 1
4538 // is subtracted from constant length or lower bound. Otherwise, emit LLVM
4539 // IR (LB + Len) - 1.
4540 auto &C = CGM.getContext();
4541 auto *Length = E->getLength();
4542 llvm::APSInt ConstLength;
4543 if (Length) {
4544 // Idx = LowerBound + Length - 1;
4545 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4546 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4547 Length = nullptr;
4548 }
4549 auto *LowerBound = E->getLowerBound();
4550 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4551 if (LowerBound) {
4552 if (std::optional<llvm::APSInt> LB =
4553 LowerBound->getIntegerConstantExpr(C)) {
4554 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4555 LowerBound = nullptr;
4556 }
4557 }
4558 if (!Length)
4559 --ConstLength;
4560 else if (!LowerBound)
4561 --ConstLowerBound;
4562
4563 if (Length || LowerBound) {
4564 auto *LowerBoundVal =
4565 LowerBound
4566 ? Builder.CreateIntCast(
4567 EmitScalarExpr(LowerBound), IntPtrTy,
4568 LowerBound->getType()->hasSignedIntegerRepresentation())
4569 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4570 auto *LengthVal =
4571 Length
4572 ? Builder.CreateIntCast(
4573 EmitScalarExpr(Length), IntPtrTy,
4574 Length->getType()->hasSignedIntegerRepresentation())
4575 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4576 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4577 /*HasNUW=*/false,
4578 !getLangOpts().PointerOverflowDefined);
4579 if (Length && LowerBound) {
4580 Idx = Builder.CreateSub(
4581 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4582 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
4583 }
4584 } else
4585 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4586 } else {
4587 // Idx = ArraySize - 1;
4588 QualType ArrayTy = BaseTy->isPointerType()
4589 ? E->getBase()->IgnoreParenImpCasts()->getType()
4590 : BaseTy;
4591 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4592 Length = VAT->getSizeExpr();
4593 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4594 ConstLength = *L;
4595 Length = nullptr;
4596 }
4597 } else {
4598 auto *CAT = C.getAsConstantArrayType(ArrayTy);
4599 assert(CAT && "unexpected type for array initializer");
4600 ConstLength = CAT->getSize();
4601 }
4602 if (Length) {
4603 auto *LengthVal = Builder.CreateIntCast(
4604 EmitScalarExpr(Length), IntPtrTy,
4605 Length->getType()->hasSignedIntegerRepresentation());
4606 Idx = Builder.CreateSub(
4607 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4608 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
4609 } else {
4610 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4611 --ConstLength;
4612 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4613 }
4614 }
4615 }
4616 assert(Idx);
4617
4618 Address EltPtr = Address::invalid();
4619 LValueBaseInfo BaseInfo;
4620 TBAAAccessInfo TBAAInfo;
4621 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4622 // The base must be a pointer, which is not an aggregate. Emit
4623 // it. It needs to be emitted first in case it's what captures
4624 // the VLA bounds.
4625 Address Base =
4626 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4627 BaseTy, VLA->getElementType(), IsLowerBound);
4628 // The element count here is the total number of non-VLA elements.
4629 llvm::Value *NumElements = getVLASize(VLA).NumElts;
4630
4631 // Effectively, the multiply by the VLA size is part of the GEP.
4632 // GEP indexes are signed, and scaling an index isn't permitted to
4633 // signed-overflow, so we use the same semantics for our explicit
4634 // multiply. We suppress this if overflow is not undefined behavior.
4635 if (getLangOpts().PointerOverflowDefined)
4636 Idx = Builder.CreateMul(Idx, NumElements);
4637 else
4638 Idx = Builder.CreateNSWMul(Idx, NumElements);
4639 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4640 !getLangOpts().PointerOverflowDefined,
4641 /*signedIndices=*/false, E->getExprLoc());
4642 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4643 // If this is A[i] where A is an array, the frontend will have decayed the
4644 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4645 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4646 // "gep x, i" here. Emit one "gep A, 0, i".
4647 assert(Array->getType()->isArrayType() &&
4648 "Array to pointer decay must have array source type!");
4649 LValue ArrayLV;
4650 // For simple multidimensional array indexing, set the 'accessed' flag for
4651 // better bounds-checking of the base expression.
4652 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4653 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4654 else
4655 ArrayLV = EmitLValue(Array);
4656
4657 // Propagate the alignment from the array itself to the result.
4658 EltPtr = emitArraySubscriptGEP(
4659 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4660 ResultExprTy, !getLangOpts().PointerOverflowDefined,
4661 /*signedIndices=*/false, E->getExprLoc());
4662 BaseInfo = ArrayLV.getBaseInfo();
4663 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4664 } else {
4665 Address Base =
4666 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4667 ResultExprTy, IsLowerBound);
4668 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4669 !getLangOpts().PointerOverflowDefined,
4670 /*signedIndices=*/false, E->getExprLoc());
4671 }
4672
4673 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4674}
4675
4678 // Emit the base vector as an l-value.
4679 LValue Base;
4680
4681 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4682 if (E->isArrow()) {
4683 // If it is a pointer to a vector, emit the address and form an lvalue with
4684 // it.
4685 LValueBaseInfo BaseInfo;
4686 TBAAAccessInfo TBAAInfo;
4687 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4688 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4689 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4690 Base.getQuals().removeObjCGCAttr();
4691 } else if (E->getBase()->isGLValue()) {
4692 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
4693 // emit the base as an lvalue.
4694 assert(E->getBase()->getType()->isVectorType());
4695 Base = EmitLValue(E->getBase());
4696 } else {
4697 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
4698 assert(E->getBase()->getType()->isVectorType() &&
4699 "Result must be a vector");
4700 llvm::Value *Vec = EmitScalarExpr(E->getBase());
4701
4702 // Store the vector to memory (because LValue wants an address).
4703 Address VecMem = CreateMemTemp(E->getBase()->getType());
4704 Builder.CreateStore(Vec, VecMem);
4705 Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4707 }
4708
4709 QualType type =
4710 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4711
4712 // Encode the element access list into a vector of unsigned indices.
4714 E->getEncodedElementAccess(Indices);
4715
4716 if (Base.isSimple()) {
4717 llvm::Constant *CV =
4718 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4719 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
4720 Base.getBaseInfo(), TBAAAccessInfo());
4721 }
4722 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4723
4724 llvm::Constant *BaseElts = Base.getExtVectorElts();
4726
4727 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4728 CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4729 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4730 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4731 Base.getBaseInfo(), TBAAAccessInfo());
4732}
4733
4736 EmitIgnoredExpr(E->getBase());
4737 return EmitDeclRefLValue(DRE);
4738 }
4739
4740 Expr *BaseExpr = E->getBase();
4741 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
4742 LValue BaseLV;
4743 if (E->isArrow()) {
4744 LValueBaseInfo BaseInfo;
4745 TBAAAccessInfo TBAAInfo;
4746 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4747 QualType PtrTy = BaseExpr->getType()->getPointeeType();
4748 SanitizerSet SkippedChecks;
4749 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4750 if (IsBaseCXXThis)
4751 SkippedChecks.set(SanitizerKind::Alignment, true);
4752 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4753 SkippedChecks.set(SanitizerKind::Null, true);
4754 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
4755 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4756 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4757 } else
4758 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4759
4760 NamedDecl *ND = E->getMemberDecl();
4761 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4762 LValue LV = EmitLValueForField(BaseLV, Field);
4764 if (getLangOpts().OpenMP) {
4765 // If the member was explicitly marked as nontemporal, mark it as
4766 // nontemporal. If the base lvalue is marked as nontemporal, mark access
4767 // to children as nontemporal too.
4768 if ((IsWrappedCXXThis(BaseExpr) &&
4770 BaseLV.isNontemporal())
4771 LV.setNontemporal(/*Value=*/true);
4772 }
4773 return LV;
4774 }
4775
4776 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4777 return EmitFunctionDeclLValue(*this, E, FD);
4778
4779 llvm_unreachable("Unhandled member declaration!");
4780}
4781
4782/// Given that we are currently emitting a lambda, emit an l-value for
4783/// one of its members.
4784///
4786 llvm::Value *ThisValue) {
4787 bool HasExplicitObjectParameter = false;
4788 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
4789 if (MD) {
4790 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
4791 assert(MD->getParent()->isLambda());
4792 assert(MD->getParent() == Field->getParent());
4793 }
4794 LValue LambdaLV;
4795 if (HasExplicitObjectParameter) {
4796 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
4797 auto It = LocalDeclMap.find(D);
4798 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
4799 Address AddrOfExplicitObject = It->getSecond();
4800 if (D->getType()->isReferenceType())
4801 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
4803 else
4804 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
4805 D->getType().getNonReferenceType());
4806
4807 // Make sure we have an lvalue to the lambda itself and not a derived class.
4808 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
4809 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
4810 if (ThisTy != LambdaTy) {
4811 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
4813 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
4814 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
4815 LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
4816 }
4817 } else {
4818 QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
4819 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
4820 }
4821 return EmitLValueForField(LambdaLV, Field);
4822}
4823
4825 return EmitLValueForLambdaField(Field, CXXABIThisValue);
4826}
4827
4828/// Get the field index in the debug info. The debug info structure/union
4829/// will ignore the unnamed bitfields.
4831 unsigned FieldIndex) {
4832 unsigned I = 0, Skipped = 0;
4833
4834 for (auto *F : Rec->getDefinition()->fields()) {
4835 if (I == FieldIndex)
4836 break;
4837 if (F->isUnnamedBitField())
4838 Skipped++;
4839 I++;
4840 }
4841
4842 return FieldIndex - Skipped;
4843}
4844
4845/// Get the address of a zero-sized field within a record. The resulting
4846/// address doesn't necessarily have the right type.
4848 const FieldDecl *Field) {
4850 CGF.getContext().getFieldOffset(Field));
4851 if (Offset.isZero())
4852 return Base;
4853 Base = Base.withElementType(CGF.Int8Ty);
4854 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4855}
4856
4857/// Drill down to the storage of a field without walking into
4858/// reference types.
4859///
4860/// The resulting address doesn't necessarily have the right type.
4862 const FieldDecl *field) {
4863 if (isEmptyFieldForLayout(CGF.getContext(), field))
4864 return emitAddrOfZeroSizeField(CGF, base, field);
4865
4866 const RecordDecl *rec = field->getParent();
4867
4868 unsigned idx =
4869 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4870
4871 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4872}
4873
4875 Address addr, const FieldDecl *field) {
4876 const RecordDecl *rec = field->getParent();
4877 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4878 base.getType(), rec->getLocation());
4879
4880 unsigned idx =
4881 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4882
4884 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4885}
4886
4887static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4888 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4889 if (!RD)
4890 return false;
4891
4892 if (RD->isDynamicClass())
4893 return true;
4894
4895 for (const auto &Base : RD->bases())
4896 if (hasAnyVptr(Base.getType(), Context))
4897 return true;
4898
4899 for (const FieldDecl *Field : RD->fields())
4900 if (hasAnyVptr(Field->getType(), Context))
4901 return true;
4902
4903 return false;
4904}
4905
4907 const FieldDecl *field) {
4908 LValueBaseInfo BaseInfo = base.getBaseInfo();
4909
4910 if (field->isBitField()) {
4911 const CGRecordLayout &RL =
4913 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4914 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4915 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4916 Info.VolatileStorageSize != 0 &&
4917 field->getType()
4920 Address Addr = base.getAddress();
4921 unsigned Idx = RL.getLLVMFieldNo(field);
4922 const RecordDecl *rec = field->getParent();
4924 Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
4925 if (!UseVolatile) {
4926 if (!IsInPreservedAIRegion &&
4927 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4928 if (Idx != 0)
4929 // For structs, we GEP to the field that the record layout suggests.
4930 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4931 } else {
4932 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4933 getContext().getRecordType(rec), rec->getLocation());
4935 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4936 DbgInfo);
4937 }
4938 }
4939 const unsigned SS =
4940 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4941 // Get the access type.
4942 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4943 Addr = Addr.withElementType(FieldIntTy);
4944 if (UseVolatile) {
4945 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4946 if (VolatileOffset)
4947 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4948 }
4949
4950 QualType fieldType =
4951 field->getType().withCVRQualifiers(base.getVRQualifiers());
4952 // TODO: Support TBAA for bit fields.
4953 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4954 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4955 TBAAAccessInfo());
4956 }
4957
4958 // Fields of may-alias structures are may-alias themselves.
4959 // FIXME: this should get propagated down through anonymous structs
4960 // and unions.
4961 QualType FieldType = field->getType();
4962 const RecordDecl *rec = field->getParent();
4963 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4964 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4965 TBAAAccessInfo FieldTBAAInfo;
4966 if (base.getTBAAInfo().isMayAlias() ||
4967 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4968 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4969 } else if (rec->isUnion()) {
4970 // TODO: Support TBAA for unions.
4971 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4972 } else {
4973 // If no base type been assigned for the base access, then try to generate
4974 // one for this base lvalue.
4975 FieldTBAAInfo = base.getTBAAInfo();
4976 if (!FieldTBAAInfo.BaseType) {
4977 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4978 assert(!FieldTBAAInfo.Offset &&
4979 "Nonzero offset for an access with no base type!");
4980 }
4981
4982 // Adjust offset to be relative to the base type.
4983 const ASTRecordLayout &Layout =
4985 unsigned CharWidth = getContext().getCharWidth();
4986 if (FieldTBAAInfo.BaseType)
4987 FieldTBAAInfo.Offset +=
4988 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4989
4990 // Update the final access type and size.
4991 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4992 FieldTBAAInfo.Size =
4994 }
4995
4996 Address addr = base.getAddress();
4998 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
4999 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
5000 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5001 ClassDef->isDynamicClass()) {
5002 // Getting to any field of dynamic object requires stripping dynamic
5003 // information provided by invariant.group. This is because accessing
5004 // fields may leak the real address of dynamic object, which could result
5005 // in miscompilation when leaked pointer would be compared.
5006 auto *stripped =
5008 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5009 }
5010 }
5011
5012 unsigned RecordCVR = base.getVRQualifiers();
5013 if (rec->isUnion()) {
5014 // For unions, there is no pointer adjustment.
5015 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5016 hasAnyVptr(FieldType, getContext()))
5017 // Because unions can easily skip invariant.barriers, we need to add
5018 // a barrier every time CXXRecord field with vptr is referenced.
5020
5022 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5023 // Remember the original union field index
5024 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
5025 rec->getLocation());
5026 addr =
5028 addr.emitRawPointer(*this),
5029 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5030 addr.getElementType(), addr.getAlignment());
5031 }
5032
5033 if (FieldType->isReferenceType())
5034 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5035 } else {
5036 if (!IsInPreservedAIRegion &&
5037 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5038 // For structs, we GEP to the field that the record layout suggests.
5039 addr = emitAddrOfFieldStorage(*this, addr, field);
5040 else
5041 // Remember the original struct field index
5042 addr = emitPreserveStructAccess(*this, base, addr, field);
5043 }
5044
5045 // If this is a reference field, load the reference right now.
5046 if (FieldType->isReferenceType()) {
5047 LValue RefLVal =
5048 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5049 if (RecordCVR & Qualifiers::Volatile)
5050 RefLVal.getQuals().addVolatile();
5051 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5052
5053 // Qualifiers on the struct don't apply to the referencee.
5054 RecordCVR = 0;
5055 FieldType = FieldType->getPointeeType();
5056 }
5057
5058 // Make sure that the address is pointing to the right type. This is critical
5059 // for both unions and structs.
5060 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5061
5062 if (field->hasAttr<AnnotateAttr>())
5063 addr = EmitFieldAnnotations(field, addr);
5064
5065 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5066 LV.getQuals().addCVRQualifiers(RecordCVR);
5067
5068 // __weak attribute on a field is ignored.
5071
5072 return LV;
5073}
5074
5075LValue
5077 const FieldDecl *Field) {
5078 QualType FieldType = Field->getType();
5079
5080 if (!FieldType->isReferenceType())
5081 return EmitLValueForField(Base, Field);
5082
5083 Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
5084
5085 // Make sure that the address is pointing to the right type.
5086 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5087 V = V.withElementType(llvmType);
5088
5089 // TODO: Generate TBAA information that describes this access as a structure
5090 // member access and not just an access to an object of the field's type. This
5091 // should be similar to what we do in EmitLValueForField().
5092 LValueBaseInfo BaseInfo = Base.getBaseInfo();
5093 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5094 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5095 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5096 CGM.getTBAAInfoForSubobject(Base, FieldType));
5097}
5098
5100 if (E->isFileScope()) {
5102 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5103 }
5105 // make sure to emit the VLA size.
5107
5108 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5109 const Expr *InitExpr = E->getInitializer();
5111
5112 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5113 /*Init*/ true);
5114
5115 // Block-scope compound literals are destroyed at the end of the enclosing
5116 // scope in C.
5117 if (!getLangOpts().CPlusPlus)
5120 E->getType(), getDestroyer(DtorKind),
5121 DtorKind & EHCleanup);
5122
5123 return Result;
5124}
5125
5127 if (!E->isGLValue())
5128 // Initializing an aggregate temporary in C++11: T{...}.
5129 return EmitAggExprToLValue(E);
5130
5131 // An lvalue initializer list must be initializing a reference.
5132 assert(E->isTransparent() && "non-transparent glvalue init list");
5133 return EmitLValue(E->getInit(0));
5134}
5135
5136/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5137/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5138/// LValue is returned and the current block has been terminated.
5139static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5140 const Expr *Operand) {
5141 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5142 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5143 return std::nullopt;
5144 }
5145
5146 return CGF.EmitLValue(Operand);
5147}
5148
5149namespace {
5150// Handle the case where the condition is a constant evaluatable simple integer,
5151// which means we don't have to separately handle the true/false blocks.
5152std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5154 const Expr *condExpr = E->getCond();
5155 bool CondExprBool;
5156 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5157 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5158 if (!CondExprBool)
5159 std::swap(Live, Dead);
5160
5161 if (!CGF.ContainsLabel(Dead)) {
5162 // If the true case is live, we need to track its region.
5163 if (CondExprBool)
5165 CGF.markStmtMaybeUsed(Dead);
5166 // If a throw expression we emit it and return an undefined lvalue
5167 // because it can't be used.
5168 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5169 CGF.EmitCXXThrowExpr(ThrowExpr);
5170 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5171 llvm::Type *Ty = CGF.UnqualPtrTy;
5172 return CGF.MakeAddrLValue(
5173 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5174 Dead->getType());
5175 }
5176 return CGF.EmitLValue(Live);
5177 }
5178 }
5179 return std::nullopt;
5180}
5181struct ConditionalInfo {
5182 llvm::BasicBlock *lhsBlock, *rhsBlock;
5183 std::optional<LValue> LHS, RHS;
5184};
5185
5186// Create and generate the 3 blocks for a conditional operator.
5187// Leaves the 'current block' in the continuation basic block.
5188template<typename FuncTy>
5189ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5191 const FuncTy &BranchGenFunc) {
5192 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5193 CGF.createBasicBlock("cond.false"), std::nullopt,
5194 std::nullopt};
5195 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5196
5197 CodeGenFunction::ConditionalEvaluation eval(CGF);
5198 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5199 CGF.getProfileCount(E));
5200
5201 // Any temporaries created here are conditional.
5202 CGF.EmitBlock(Info.lhsBlock);
5204 eval.begin(CGF);
5205 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5206 eval.end(CGF);
5207 Info.lhsBlock = CGF.Builder.GetInsertBlock();
5208
5209 if (Info.LHS)
5210 CGF.Builder.CreateBr(endBlock);
5211
5212 // Any temporaries created here are conditional.
5213 CGF.EmitBlock(Info.rhsBlock);
5214 eval.begin(CGF);
5215 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5216 eval.end(CGF);
5217 Info.rhsBlock = CGF.Builder.GetInsertBlock();
5218 CGF.EmitBlock(endBlock);
5219
5220 return Info;
5221}
5222} // namespace
5223
5226 if (!E->isGLValue()) {
5227 // ?: here should be an aggregate.
5229 "Unexpected conditional operator!");
5230 return (void)EmitAggExprToLValue(E);
5231 }
5232
5233 OpaqueValueMapping binding(*this, E);
5234 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5235 return;
5236
5237 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5238 CGF.EmitIgnoredExpr(E);
5239 return LValue{};
5240 });
5241}
5244 if (!expr->isGLValue()) {
5245 // ?: here should be an aggregate.
5246 assert(hasAggregateEvaluationKind(expr->getType()) &&
5247 "Unexpected conditional operator!");
5248 return EmitAggExprToLValue(expr);
5249 }
5250
5251 OpaqueValueMapping binding(*this, expr);
5252 if (std::optional<LValue> Res =
5253 HandleConditionalOperatorLValueSimpleCase(*this, expr))
5254 return *Res;
5255
5256 ConditionalInfo Info = EmitConditionalBlocks(
5257 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5258 return EmitLValueOrThrowExpression(CGF, E);
5259 });
5260
5261 if ((Info.LHS && !Info.LHS->isSimple()) ||
5262 (Info.RHS && !Info.RHS->isSimple()))
5263 return EmitUnsupportedLValue(expr, "conditional operator");
5264
5265 if (Info.LHS && Info.RHS) {
5266 Address lhsAddr = Info.LHS->getAddress();
5267 Address rhsAddr = Info.RHS->getAddress();
5269 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5270 Builder.GetInsertBlock(), expr->getType());
5271 AlignmentSource alignSource =
5272 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5273 Info.RHS->getBaseInfo().getAlignmentSource());
5275 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5276 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5277 TBAAInfo);
5278 } else {
5279 assert((Info.LHS || Info.RHS) &&
5280 "both operands of glvalue conditional are throw-expressions?");
5281 return Info.LHS ? *Info.LHS : *Info.RHS;
5282 }
5283}
5284
5285/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5286/// type. If the cast is to a reference, we can have the usual lvalue result,
5287/// otherwise if a cast is needed by the code generator in an lvalue context,
5288/// then it must mean that we need the address of an aggregate in order to
5289/// access one of its members. This can happen for all the reasons that casts
5290/// are permitted with aggregate result, including noop aggregate casts, and
5291/// cast from scalar to union.
5293 switch (E->getCastKind()) {
5294 case CK_ToVoid:
5295 case CK_BitCast:
5296 case CK_LValueToRValueBitCast:
5297 case CK_ArrayToPointerDecay:
5298 case CK_FunctionToPointerDecay:
5299 case CK_NullToMemberPointer:
5300 case CK_NullToPointer:
5301 case CK_IntegralToPointer:
5302 case CK_PointerToIntegral:
5303 case CK_PointerToBoolean:
5304 case CK_IntegralCast:
5305 case CK_BooleanToSignedIntegral:
5306 case CK_IntegralToBoolean:
5307 case CK_IntegralToFloating:
5308 case CK_FloatingToIntegral:
5309 case CK_FloatingToBoolean:
5310 case CK_FloatingCast:
5311 case CK_FloatingRealToComplex:
5312 case CK_FloatingComplexToReal:
5313 case CK_FloatingComplexToBoolean:
5314 case CK_FloatingComplexCast:
5315 case CK_FloatingComplexToIntegralComplex:
5316 case CK_IntegralRealToComplex:
5317 case CK_IntegralComplexToReal:
5318 case CK_IntegralComplexToBoolean:
5319 case CK_IntegralComplexCast:
5320 case CK_IntegralComplexToFloatingComplex:
5321 case CK_DerivedToBaseMemberPointer:
5322 case CK_BaseToDerivedMemberPointer:
5323 case CK_MemberPointerToBoolean:
5324 case CK_ReinterpretMemberPointer:
5325 case CK_AnyPointerToBlockPointerCast:
5326 case CK_ARCProduceObject:
5327 case CK_ARCConsumeObject:
5328 case CK_ARCReclaimReturnedObject:
5329 case CK_ARCExtendBlockObject:
5330 case CK_CopyAndAutoreleaseBlockObject:
5331 case CK_IntToOCLSampler:
5332 case CK_FloatingToFixedPoint:
5333 case CK_FixedPointToFloating:
5334 case CK_FixedPointCast:
5335 case CK_FixedPointToBoolean:
5336 case CK_FixedPointToIntegral:
5337 case CK_IntegralToFixedPoint:
5338 case CK_MatrixCast:
5339 case CK_HLSLVectorTruncation:
5340 case CK_HLSLArrayRValue:
5341 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5342
5343 case CK_Dependent:
5344 llvm_unreachable("dependent cast kind in IR gen!");
5345
5346 case CK_BuiltinFnToFnPtr:
5347 llvm_unreachable("builtin functions are handled elsewhere");
5348
5349 // These are never l-values; just use the aggregate emission code.
5350 case CK_NonAtomicToAtomic:
5351 case CK_AtomicToNonAtomic:
5352 return EmitAggExprToLValue(E);
5353
5354 case CK_Dynamic: {
5355 LValue LV = EmitLValue(E->getSubExpr());
5356 Address V = LV.getAddress();
5357 const auto *DCE = cast<CXXDynamicCastExpr>(E);
5359 }
5360
5361 case CK_ConstructorConversion:
5362 case CK_UserDefinedConversion:
5363 case CK_CPointerToObjCPointerCast:
5364 case CK_BlockPointerToObjCPointerCast:
5365 case CK_LValueToRValue:
5366 return EmitLValue(E->getSubExpr());
5367
5368 case CK_NoOp: {
5369 // CK_NoOp can model a qualification conversion, which can remove an array
5370 // bound and change the IR type.
5371 // FIXME: Once pointee types are removed from IR, remove this.
5372 LValue LV = EmitLValue(E->getSubExpr());
5373 // Propagate the volatile qualifer to LValue, if exist in E.
5374 if (E->changesVolatileQualification())
5375 LV.getQuals() = E->getType().getQualifiers();
5376 if (LV.isSimple()) {
5377 Address V = LV.getAddress();
5378 if (V.isValid()) {
5379 llvm::Type *T = ConvertTypeForMem(E->getType());
5380 if (V.getElementType() != T)
5381 LV.setAddress(V.withElementType(T));
5382 }
5383 }
5384 return LV;
5385 }
5386
5387 case CK_UncheckedDerivedToBase:
5388 case CK_DerivedToBase: {
5389 const auto *DerivedClassTy =
5390 E->getSubExpr()->getType()->castAs<RecordType>();
5391 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5392
5393 LValue LV = EmitLValue(E->getSubExpr());
5394 Address This = LV.getAddress();
5395
5396 // Perform the derived-to-base conversion
5398 This, DerivedClassDecl, E->path_begin(), E->path_end(),
5399 /*NullCheckValue=*/false, E->getExprLoc());
5400
5401 // TODO: Support accesses to members of base classes in TBAA. For now, we
5402 // conservatively pretend that the complete object is of the base class
5403 // type.
5404 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5406 }
5407 case CK_ToUnion:
5408 return EmitAggExprToLValue(E);
5409 case CK_BaseToDerived: {
5410 const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
5411 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5412
5413 LValue LV = EmitLValue(E->getSubExpr());
5414
5415 // Perform the base-to-derived conversion
5417 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
5418 /*NullCheckValue=*/false);
5419
5420 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5421 // performed and the object is not of the derived type.
5424 E->getType());
5425
5426 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5428 /*MayBeNull=*/false, CFITCK_DerivedCast,
5429 E->getBeginLoc());
5430
5431 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5433 }
5434 case CK_LValueBitCast: {
5435 // This must be a reinterpret_cast (or c-style equivalent).
5436 const auto *CE = cast<ExplicitCastExpr>(E);
5437
5438 CGM.EmitExplicitCastExprType(CE, this);
5439 LValue LV = EmitLValue(E->getSubExpr());
5441 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5442
5443 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5445 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5446 E->getBeginLoc());
5447
5448 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5450 }
5451 case CK_AddressSpaceConversion: {
5452 LValue LV = EmitLValue(E->getSubExpr());
5454 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5455 *this, LV.getPointer(*this),
5456 E->getSubExpr()->getType().getAddressSpace(),
5457 E->getType().getAddressSpace(), ConvertType(DestTy));
5459 LV.getAddress().getAlignment()),
5460 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5461 }
5462 case CK_ObjCObjectLValueCast: {
5463 LValue LV = EmitLValue(E->getSubExpr());
5465 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5467 }
5468 case CK_ZeroToOCLOpaqueType:
5469 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5470
5471 case CK_VectorSplat: {
5472 // LValue results of vector splats are only supported in HLSL.
5473 if (!getLangOpts().HLSL)
5474 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5475 return EmitLValue(E->getSubExpr());
5476 }
5477 }
5478
5479 llvm_unreachable("Unhandled lvalue cast kind?");
5480}
5481
5485}
5486
5487std::pair<LValue, LValue>
5489 // Emitting the casted temporary through an opaque value.
5490 LValue BaseLV = EmitLValue(E->getArgLValue());
5491 OpaqueValueMappingData::bind(*this, E->getOpaqueArgLValue(), BaseLV);
5492
5493 QualType ExprTy = E->getType();
5494 Address OutTemp = CreateIRTemp(ExprTy);
5495 LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
5496
5497 if (E->isInOut())
5498 EmitInitializationToLValue(E->getCastedTemporary()->getSourceExpr(),
5499 TempLV);
5500
5501 OpaqueValueMappingData::bind(*this, E->getCastedTemporary(), TempLV);
5502 return std::make_pair(BaseLV, TempLV);
5503}
5504
5506 CallArgList &Args, QualType Ty) {
5507
5508 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
5509
5510 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
5511 llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
5512
5513 llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);
5514
5515 llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);
5516
5517 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
5518 Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
5519 LifetimeSize);
5520 Args.add(RValue::get(TmpAddr, *this), Ty);
5521 return TempLV;
5522}
5523
5524LValue
5527
5528 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5529 it = OpaqueLValues.find(e);
5530
5531 if (it != OpaqueLValues.end())
5532 return it->second;
5533
5534 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5535 return EmitLValue(e->getSourceExpr());
5536}
5537
5538RValue
5541
5542 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5543 it = OpaqueRValues.find(e);
5544
5545 if (it != OpaqueRValues.end())
5546 return it->second;
5547
5548 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5549 return EmitAnyExpr(e->getSourceExpr());
5550}
5551
5553 const FieldDecl *FD,
5555 QualType FT = FD->getType();
5556 LValue FieldLV = EmitLValueForField(LV, FD);
5557 switch (getEvaluationKind(FT)) {
5558 case TEK_Complex:
5559 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5560 case TEK_Aggregate:
5561 return FieldLV.asAggregateRValue();
5562 case TEK_Scalar:
5563 // This routine is used to load fields one-by-one to perform a copy, so
5564 // don't load reference fields.
5565 if (FD->getType()->isReferenceType())
5566 return RValue::get(FieldLV.getPointer(*this));
5567 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
5568 // primitive load.
5569 if (FieldLV.isBitField())
5570 return EmitLoadOfLValue(FieldLV, Loc);
5571 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5572 }
5573 llvm_unreachable("bad evaluation kind");
5574}
5575
5576//===--------------------------------------------------------------------===//
5577// Expression Emission
5578//===--------------------------------------------------------------------===//
5579
5581 ReturnValueSlot ReturnValue,
5582 llvm::CallBase **CallOrInvoke) {
5583 llvm::CallBase *CallOrInvokeStorage;
5584 if (!CallOrInvoke) {
5585 CallOrInvoke = &CallOrInvokeStorage;
5586 }
5587
5588 auto AddCoroElideSafeOnExit = llvm::make_scope_exit([&] {
5589 if (E->isCoroElideSafe()) {
5590 auto *I = *CallOrInvoke;
5591 if (I)
5592 I->addFnAttr(llvm::Attribute::CoroElideSafe);
5593 }
5594 });
5595
5596 // Builtins never have block type.
5597 if (E->getCallee()->getType()->isBlockPointerType())
5598 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
5599
5600 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5601 return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
5602
5603 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5604 return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
5605
5606 // A CXXOperatorCallExpr is created even for explicit object methods, but
5607 // these should be treated like static function call.
5608 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5609 if (const auto *MD =
5610 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5611 MD && MD->isImplicitObjectMemberFunction())
5612 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
5613
5614 CGCallee callee = EmitCallee(E->getCallee());
5615
5616 if (callee.isBuiltin()) {
5617 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5618 E, ReturnValue);
5619 }
5620
5621 if (callee.isPseudoDestructor()) {
5623 }
5624
5625 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
5626 /*Chain=*/nullptr, CallOrInvoke);
5627}
5628
5629/// Emit a CallExpr without considering whether it might be a subclass.
5631 ReturnValueSlot ReturnValue,
5632 llvm::CallBase **CallOrInvoke) {
5633 CGCallee Callee = EmitCallee(E->getCallee());
5634 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
5635 /*Chain=*/nullptr, CallOrInvoke);
5636}
5637
5638// Detect the unusual situation where an inline version is shadowed by a
5639// non-inline version. In that case we should pick the external one
5640// everywhere. That's GCC behavior too.
5642 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5643 if (!PD->isInlineBuiltinDeclaration())
5644 return false;
5645 return true;
5646}
5647
5649 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5650
5651 if (auto builtinID = FD->getBuiltinID()) {
5652 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5653 std::string NoBuiltins = "no-builtins";
5654
5655 StringRef Ident = CGF.CGM.getMangledName(GD);
5656 std::string FDInlineName = (Ident + ".inline").str();
5657
5658 bool IsPredefinedLibFunction =
5660 bool HasAttributeNoBuiltin =
5661 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5662 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5663
5664 // When directing calling an inline builtin, call it through it's mangled
5665 // name to make it clear it's not the actual builtin.
5666 if (CGF.CurFn->getName() != FDInlineName &&
5668 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5669 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5670 llvm::Module *M = Fn->getParent();
5671 llvm::Function *Clone = M->getFunction(FDInlineName);
5672 if (!Clone) {
5673 Clone = llvm::Function::Create(Fn->getFunctionType(),
5674 llvm::GlobalValue::InternalLinkage,
5675 Fn->getAddressSpace(), FDInlineName, M);
5676 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5677 }
5678 return CGCallee::forDirect(Clone, GD);
5679 }
5680
5681 // Replaceable builtins provide their own implementation of a builtin. If we
5682 // are in an inline builtin implementation, avoid trivial infinite
5683 // recursion. Honor __attribute__((no_builtin("foo"))) or
5684 // __attribute__((no_builtin)) on the current function unless foo is
5685 // not a predefined library function which means we must generate the
5686 // builtin no matter what.
5687 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5688 return CGCallee::forBuiltin(builtinID, FD);
5689 }
5690
5691 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5692 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5693 FD->hasAttr<CUDAGlobalAttr>())
5694 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5695 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5696
5697 return CGCallee::forDirect(CalleePtr, GD);
5698}
5699
5701 E = E->IgnoreParens();
5702
5703 // Look through function-to-pointer decay.
5704 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5705 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5706 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5707 return EmitCallee(ICE->getSubExpr());
5708 }
5709
5710 // Resolve direct calls.
5711 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5712 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5713 return EmitDirectCallee(*this, FD);
5714 }
5715 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5716 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
5717 EmitIgnoredExpr(ME->getBase());
5718 return EmitDirectCallee(*this, FD);
5719 }
5720
5721 // Look through template substitutions.
5722 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5723 return EmitCallee(NTTP->getReplacement());
5724
5725 // Treat pseudo-destructor calls differently.
5726 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
5728 }
5729
5730 // Otherwise, we have an indirect reference.
5731 llvm::Value *calleePtr;
5733 if (auto ptrType = E->getType()->getAs<PointerType>()) {
5734 calleePtr = EmitScalarExpr(E);
5735 functionType = ptrType->getPointeeType();
5736 } else {
5737 functionType = E->getType();
5738 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
5739 }
5740 assert(functionType->isFunctionType());
5741
5742 GlobalDecl GD;
5743 if (const auto *VD =
5744 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
5745 GD = GlobalDecl(VD);
5746
5747 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
5749 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
5750 return callee;
5751}
5752
5754 // Comma expressions just emit their LHS then their RHS as an l-value.
5755 if (E->getOpcode() == BO_Comma) {
5756 EmitIgnoredExpr(E->getLHS());
5758 return EmitLValue(E->getRHS());
5759 }
5760
5761 if (E->getOpcode() == BO_PtrMemD ||
5762 E->getOpcode() == BO_PtrMemI)
5764
5765 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
5766
5767 // Note that in all of these cases, __block variables need the RHS
5768 // evaluated first just in case the variable gets moved by the RHS.
5769
5770 switch (getEvaluationKind(E->getType())) {
5771 case TEK_Scalar: {
5772 switch (E->getLHS()->getType().getObjCLifetime()) {
5774 return EmitARCStoreStrong(E, /*ignored*/ false).first;
5775
5777 return EmitARCStoreAutoreleasing(E).first;
5778
5779 // No reason to do any of these differently.
5783 break;
5784 }
5785
5786 // TODO: Can we de-duplicate this code with the corresponding code in
5787 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
5788 RValue RV;
5789 llvm::Value *Previous = nullptr;
5790 QualType SrcType = E->getRHS()->getType();
5791 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
5792 // we want to extract that value and potentially (if the bitfield sanitizer
5793 // is enabled) use it to check for an implicit conversion.
5794 if (E->getLHS()->refersToBitField()) {
5795 llvm::Value *RHS =
5797 RV = RValue::get(RHS);
5798 } else
5799 RV = EmitAnyExpr(E->getRHS());
5800
5801 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
5802
5803 if (RV.isScalar())
5805
5806 if (LV.isBitField()) {
5807 llvm::Value *Result = nullptr;
5808 // If bitfield sanitizers are enabled we want to use the result
5809 // to check whether a truncation or sign change has occurred.
5810 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
5812 else
5814
5815 // If the expression contained an implicit conversion, make sure
5816 // to use the value before the scalar conversion.
5817 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
5818 QualType DstType = E->getLHS()->getType();
5819 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
5820 LV.getBitFieldInfo(), E->getExprLoc());
5821 } else
5822 EmitStoreThroughLValue(RV, LV);
5823
5824 if (getLangOpts().OpenMP)
5826 E->getLHS());
5827 return LV;
5828 }
5829
5830 case TEK_Complex:
5832
5833 case TEK_Aggregate:
5834 // If the lang opt is HLSL and the LHS is a constant array
5835 // then we are performing a copy assignment and call a special
5836 // function because EmitAggExprToLValue emits to a temporary LValue
5837 if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType())
5839
5840 return EmitAggExprToLValue(E);
5841 }
5842 llvm_unreachable("bad evaluation kind");
5843}
5844
5845// This function implements trivial copy assignment for HLSL's
5846// assignable constant arrays.
5848 // Don't emit an LValue for the RHS because it might not be an LValue
5849 LValue LHS = EmitLValue(E->getLHS());
5850 // In C the RHS of an assignment operator is an RValue.
5851 // EmitAggregateAssign takes anan LValue for the RHS. Instead we can call
5852 // EmitInitializationToLValue to emit an RValue into an LValue.
5853 EmitInitializationToLValue(E->getRHS(), LHS);
5854 return LHS;
5855}
5856
5858 llvm::CallBase **CallOrInvoke) {
5859 RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
5860
5861 if (!RV.isScalar())
5864
5865 assert(E->getCallReturnType(getContext())->isReferenceType() &&
5866 "Can't have a scalar return unless the return type is a "
5867 "reference type!");
5868
5870}
5871
5873 // FIXME: This shouldn't require another copy.
5874 return EmitAggExprToLValue(E);
5875}
5876
5879 && "binding l-value to type which needs a temporary");
5881 EmitCXXConstructExpr(E, Slot);
5883}
5884
5885LValue
5888}
5889
5891 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
5893}
5894
5898}
5899
5900LValue
5902 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5904 EmitAggExpr(E->getSubExpr(), Slot);
5905 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5907}
5908
5911
5912 if (!RV.isScalar())
5915
5916 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5917 "Can't have a scalar return unless the return type is a "
5918 "reference type!");
5919
5921}
5922
5924 Address V =
5925 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
5927}
5928
5930 const ObjCIvarDecl *Ivar) {
5931 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
5932}
5933
5934llvm::Value *
5936 const ObjCIvarDecl *Ivar) {
5937 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
5938 QualType PointerDiffType = getContext().getPointerDiffType();
5939 return Builder.CreateZExtOrTrunc(OffsetValue,
5940 getTypes().ConvertType(PointerDiffType));
5941}
5942
5944 llvm::Value *BaseValue,
5945 const ObjCIvarDecl *Ivar,
5946 unsigned CVRQualifiers) {
5947 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
5948 Ivar, CVRQualifiers);
5949}
5950
5952 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
5953 llvm::Value *BaseValue = nullptr;
5954 const Expr *BaseExpr = E->getBase();
5955 Qualifiers BaseQuals;
5956 QualType ObjectTy;
5957 if (E->isArrow()) {
5958 BaseValue = EmitScalarExpr(BaseExpr);
5959 ObjectTy = BaseExpr->getType()->getPointeeType();
5960 BaseQuals = ObjectTy.getQualifiers();
5961 } else {
5962 LValue BaseLV = EmitLValue(BaseExpr);
5963 BaseValue = BaseLV.getPointer(*this);
5964 ObjectTy = BaseExpr->getType();
5965 BaseQuals = ObjectTy.getQualifiers();
5966 }
5967
5968 LValue LV =
5969 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5970 BaseQuals.getCVRQualifiers());
5972 return LV;
5973}
5974
5976 // Can only get l-value for message expression returning aggregate type
5980}
5981
5983 const CGCallee &OrigCallee, const CallExpr *E,
5984 ReturnValueSlot ReturnValue,
5985 llvm::Value *Chain,
5986 llvm::CallBase **CallOrInvoke,
5987 CGFunctionInfo const **ResolvedFnInfo) {
5988 // Get the actual function type. The callee type will always be a pointer to
5989 // function type or a block pointer type.
5990 assert(CalleeType->isFunctionPointerType() &&
5991 "Call must have function pointer type!");
5992
5993 const Decl *TargetDecl =
5994 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5995
5996 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
5997 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
5998 "trying to emit a call to an immediate function");
5999
6000 CalleeType = getContext().getCanonicalType(CalleeType);
6001
6002 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
6003
6004 CGCallee Callee = OrigCallee;
6005
6006 if (SanOpts.has(SanitizerKind::Function) &&
6007 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
6008 !isa<FunctionNoProtoType>(PointeeType)) {
6009 if (llvm::Constant *PrefixSig =
6011 SanitizerScope SanScope(this);
6012 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6013
6014 llvm::Type *PrefixSigType = PrefixSig->getType();
6015 llvm::StructType *PrefixStructTy = llvm::StructType::get(
6016 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
6017
6018 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6020 // Use raw pointer since we are using the callee pointer as data here.
6021 Address Addr =
6022 Address(CalleePtr, CalleePtr->getType(),
6024 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6025 Callee.getPointerAuthInfo(), nullptr);
6026 CalleePtr = Addr.emitRawPointer(*this);
6027 }
6028
6029 // On 32-bit Arm, the low bit of a function pointer indicates whether
6030 // it's using the Arm or Thumb instruction set. The actual first
6031 // instruction lives at the same address either way, so we must clear
6032 // that low bit before using the function address to find the prefix
6033 // structure.
6034 //
6035 // This applies to both Arm and Thumb target triples, because
6036 // either one could be used in an interworking context where it
6037 // might be passed function pointers of both types.
6038 llvm::Value *AlignedCalleePtr;
6039 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6040 llvm::Value *CalleeAddress =
6041 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
6042 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
6043 llvm::Value *AlignedCalleeAddress =
6044 Builder.CreateAnd(CalleeAddress, Mask);
6045 AlignedCalleePtr =
6046 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
6047 } else {
6048 AlignedCalleePtr = CalleePtr;
6049 }
6050
6051 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6052 llvm::Value *CalleeSigPtr =
6053 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6054 llvm::Value *CalleeSig =
6055 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6056 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6057
6058 llvm::BasicBlock *Cont = createBasicBlock("cont");
6059 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6060 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6061
6062 EmitBlock(TypeCheck);
6063 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6064 Int32Ty,
6065 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6066 getPointerAlign());
6067 llvm::Value *CalleeTypeHashMatch =
6068 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6069 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6070 EmitCheckTypeDescriptor(CalleeType)};
6071 EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::SO_Function),
6072 SanitizerHandler::FunctionTypeMismatch, StaticData,
6073 {CalleePtr});
6074
6075 Builder.CreateBr(Cont);
6076 EmitBlock(Cont);
6077 }
6078 }
6079
6080 const auto *FnType = cast<FunctionType>(PointeeType);
6081
6082 // If we are checking indirect calls and this call is indirect, check that the
6083 // function pointer is a member of the bit set for the function type.
6084 if (SanOpts.has(SanitizerKind::CFIICall) &&
6085 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6086 SanitizerScope SanScope(this);
6087 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
6088
6089 llvm::Metadata *MD;
6090 if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
6092 else
6094
6095 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
6096
6097 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6098 llvm::Value *TypeTest = Builder.CreateCall(
6099 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
6100
6101 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
6102 llvm::Constant *StaticData[] = {
6103 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
6106 };
6107 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
6108 EmitCfiSlowPathCheck(SanitizerKind::SO_CFIICall, TypeTest, CrossDsoTypeId,
6109 CalleePtr, StaticData);
6110 } else {
6111 EmitCheck(std::make_pair(TypeTest, SanitizerKind::SO_CFIICall),
6112 SanitizerHandler::CFICheckFail, StaticData,
6113 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
6114 }
6115 }
6116
6117 CallArgList Args;
6118 if (Chain)
6119 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
6120
6121 // C++17 requires that we evaluate arguments to a call using assignment syntax
6122 // right-to-left, and that we evaluate arguments to certain other operators
6123 // left-to-right. Note that we allow this to override the order dictated by
6124 // the calling convention on the MS ABI, which means that parameter
6125 // destruction order is not necessarily reverse construction order.
6126 // FIXME: Revisit this based on C++ committee response to unimplementability.
6128 bool StaticOperator = false;
6129 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
6130 if (OCE->isAssignmentOp())
6132 else {
6133 switch (OCE->getOperator()) {
6134 case OO_LessLess:
6135 case OO_GreaterGreater:
6136 case OO_AmpAmp:
6137 case OO_PipePipe:
6138 case OO_Comma:
6139 case OO_ArrowStar:
6141 break;
6142 default:
6143 break;
6144 }
6145 }
6146
6147 if (const auto *MD =
6148 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
6149 MD && MD->isStatic())
6150 StaticOperator = true;
6151 }
6152
6153 auto Arguments = E->arguments();
6154 if (StaticOperator) {
6155 // If we're calling a static operator, we need to emit the object argument
6156 // and ignore it.
6157 EmitIgnoredExpr(E->getArg(0));
6158 Arguments = drop_begin(Arguments, 1);
6159 }
6160 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
6161 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
6162
6164 Args, FnType, /*ChainCall=*/Chain);
6165
6166 if (ResolvedFnInfo)
6167 *ResolvedFnInfo = &FnInfo;
6168
6169 // HIP function pointer contains kernel handle when it is used in triple
6170 // chevron. The kernel stub needs to be loaded from kernel handle and used
6171 // as callee.
6172 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
6173 isa<CUDAKernelCallExpr>(E) &&
6174 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6175 llvm::Value *Handle = Callee.getFunctionPointer();
6176 auto *Stub = Builder.CreateLoad(
6177 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
6178 Callee.setFunctionPointer(Stub);
6179 }
6180 llvm::CallBase *LocalCallOrInvoke = nullptr;
6181 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
6182 E == MustTailCall, E->getExprLoc());
6183
6184 // Generate function declaration DISuprogram in order to be used
6185 // in debug info about call sites.
6186 if (CGDebugInfo *DI = getDebugInfo()) {
6187 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6188 FunctionArgList Args;
6189 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6190 DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
6191 DI->getFunctionType(CalleeDecl, ResTy, Args),
6192 CalleeDecl);
6193 }
6194 }
6195 if (CallOrInvoke)
6196 *CallOrInvoke = LocalCallOrInvoke;
6197
6198 return Call;
6199}
6200
6203 Address BaseAddr = Address::invalid();
6204 if (E->getOpcode() == BO_PtrMemI) {
6205 BaseAddr = EmitPointerWithAlignment(E->getLHS());
6206 } else {
6207 BaseAddr = EmitLValue(E->getLHS()).getAddress();
6208 }
6209
6210 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6211 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6212
6213 LValueBaseInfo BaseInfo;
6214 TBAAAccessInfo TBAAInfo;
6215 Address MemberAddr =
6216 EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
6217 &TBAAInfo);
6218
6219 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6220}
6221
6222/// Given the address of a temporary variable, produce an r-value of
6223/// its type.
6225 QualType type,
6226 SourceLocation loc) {
6228 switch (getEvaluationKind(type)) {
6229 case TEK_Complex:
6230 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6231 case TEK_Aggregate:
6232 return lvalue.asAggregateRValue();
6233 case TEK_Scalar:
6234 return RValue::get(EmitLoadOfScalar(lvalue, loc));
6235 }
6236 llvm_unreachable("bad evaluation kind");
6237}
6238
6239void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6240 assert(Val->getType()->isFPOrFPVectorTy());
6241 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6242 return;
6243
6244 llvm::MDBuilder MDHelper(getLLVMContext());
6245 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6246
6247 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6248}
6249
6250void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6251 llvm::Type *EltTy = Val->getType()->getScalarType();
6252 if (!EltTy->isFloatTy())
6253 return;
6254
6255 if ((getLangOpts().OpenCL &&
6256 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6257 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6258 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6259 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
6260 //
6261 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6262 // build option allows an application to specify that single precision
6263 // floating-point divide (x/y and 1/x) and sqrt used in the program
6264 // source are correctly rounded.
6265 //
6266 // TODO: CUDA has a prec-sqrt flag
6267 SetFPAccuracy(Val, 3.0f);
6268 }
6269}
6270
6271void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6272 llvm::Type *EltTy = Val->getType()->getScalarType();
6273 if (!EltTy->isFloatTy())
6274 return;
6275
6276 if ((getLangOpts().OpenCL &&
6277 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6278 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6279 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6280 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6281 //
6282 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6283 // build option allows an application to specify that single precision
6284 // floating-point divide (x/y and 1/x) and sqrt used in the program
6285 // source are correctly rounded.
6286 //
6287 // TODO: CUDA has a prec-div flag
6288 SetFPAccuracy(Val, 2.5f);
6289 }
6290}
6291
6292namespace {
6293 struct LValueOrRValue {
6294 LValue LV;
6295 RValue RV;
6296 };
6297}
6298
6299static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6300 const PseudoObjectExpr *E,
6301 bool forLValue,
6302 AggValueSlot slot) {
6304
6305 // Find the result expression, if any.
6306 const Expr *resultExpr = E->getResultExpr();
6307 LValueOrRValue result;
6308
6310 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6311 const Expr *semantic = *i;
6312
6313 // If this semantic expression is an opaque value, bind it
6314 // to the result of its source expression.
6315 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6316 // Skip unique OVEs.
6317 if (ov->isUnique()) {
6318 assert(ov != resultExpr &&
6319 "A unique OVE cannot be used as the result expression");
6320 continue;
6321 }
6322
6323 // If this is the result expression, we may need to evaluate
6324 // directly into the slot.
6325 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6326 OVMA opaqueData;
6327 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6329 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6330 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6332 opaqueData = OVMA::bind(CGF, ov, LV);
6333 result.RV = slot.asRValue();
6334
6335 // Otherwise, emit as normal.
6336 } else {
6337 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6338
6339 // If this is the result, also evaluate the result now.
6340 if (ov == resultExpr) {
6341 if (forLValue)
6342 result.LV = CGF.EmitLValue(ov);
6343 else
6344 result.RV = CGF.EmitAnyExpr(ov, slot);
6345 }
6346 }
6347
6348 opaques.push_back(opaqueData);
6349
6350 // Otherwise, if the expression is the result, evaluate it
6351 // and remember the result.
6352 } else if (semantic == resultExpr) {
6353 if (forLValue)
6354 result.LV = CGF.EmitLValue(semantic);
6355 else
6356 result.RV = CGF.EmitAnyExpr(semantic, slot);
6357
6358 // Otherwise, evaluate the expression in an ignored context.
6359 } else {
6360 CGF.EmitIgnoredExpr(semantic);
6361 }
6362 }
6363
6364 // Unbind all the opaques now.
6365 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
6366 opaques[i].unbind(CGF);
6367
6368 return result;
6369}
6370
6372 AggValueSlot slot) {
6373 return emitPseudoObjectExpr(*this, E, false, slot).RV;
6374}
6375
6377 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6378}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3460
This file provides some common utility functions for processing Lambda related AST Constructs.
DynTypedNode Node
Defines enum values for all the target-independent builtin functions.
CodeGenFunction::ComplexPairTy ComplexPairTy
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition: CGExpr.cpp:2710
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition: CGExpr.cpp:2953
static llvm::Value * emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, llvm::Value *Ptr)
Definition: CGExpr.cpp:692
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition: CGExpr.cpp:4013
static bool hasBooleanRepresentation(QualType Ty)
Definition: CGExpr.cpp:1882
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition: CGExpr.cpp:4197
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition: CGExpr.cpp:4083
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field)
Drill down to the storage of a field without walking into reference types.
Definition: CGExpr.cpp:4861
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type? This is different from pr...
Definition: CGExpr.cpp:1737
@ CEK_AsReferenceOnly
Definition: CGExpr.cpp:1739
@ CEK_AsValueOnly
Definition: CGExpr.cpp:1741
@ CEK_None
Definition: CGExpr.cpp:1738
@ CEK_AsValueOrReference
Definition: CGExpr.cpp:1740
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition: CGExpr.cpp:1712
static QualType getFixedSizeElementType(const ASTContext &ctx, const VariableArrayType *vla)
Definition: CGExpr.cpp:4074
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition: CGExpr.cpp:2941
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition: CGExpr.cpp:5139
static CheckRecoverableKind getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal)
Definition: CGExpr.cpp:3528
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition: CGExpr.cpp:4027
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition: CGExpr.cpp:2932
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition: CGExpr.cpp:2104
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition: CGExpr.cpp:6299
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition: CGExpr.cpp:4099
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, const MemberExpr *ME)
Definition: CGExpr.cpp:1846
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition: CGExpr.cpp:964
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2201
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition: CGExpr.cpp:1743
static QualType getConstantExprReferredType(const FullExpr *E, const ASTContext &Ctx)
Definition: CGExpr.cpp:1544
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool)
Definition: CGExpr.cpp:1895
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition: CGExpr.cpp:4228
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition: CGExpr.cpp:5648
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition: CGExpr.cpp:2807
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, RecIndicesTy &Indices)
Definition: CGExpr.cpp:1114
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition: CGExpr.cpp:5641
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition: CGExpr.cpp:2881
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition: CGExpr.cpp:4887
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition: CGExpr.cpp:4112
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition: CGExpr.cpp:2821
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, const DeclRefExpr *E, const VarDecl *VD)
Determine whether we can emit a reference to VD from the current context, despite not necessarily hav...
Definition: CGExpr.cpp:2978
VariableTypeDescriptorKind
Definition: CGExpr.cpp:70
@ TK_Float
A floating-point type.
Definition: CGExpr.cpp:74
@ TK_Unknown
Any other type. The value representation is unspecified.
Definition: CGExpr.cpp:78
@ TK_Integer
An integer type.
Definition: CGExpr.cpp:72
@ TK_BitInt
An _BitInt(N) type.
Definition: CGExpr.cpp:76
static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx, CharUnits eltSize)
Definition: CGExpr.cpp:4059
static RawAddress createReferenceTemporary(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *Inner, RawAddress *Alloca=nullptr)
Definition: CGExpr.cpp:438
static bool isAAPCS(const TargetInfo &TargetInfo)
Helper method to check if the underlying ABI is AAPCS.
Definition: CGExpr.cpp:486
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2129
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition: CGExpr.cpp:1283
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition: CGExpr.cpp:4874
const SanitizerHandlerInfo SanitizerHandlers[]
Definition: CGExpr.cpp:3545
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, const FieldDecl *Field)
Get the address of a zero-sized field within a record.
Definition: CGExpr.cpp:4847
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB, bool NoMerge)
Definition: CGExpr.cpp:3551
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition: CGExpr.cpp:4474
static void pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *E, Address ReferenceTemporary)
Definition: CGExpr.cpp:322
const Decl * D
Expr * E
StringRef Filename
Definition: Format.cpp:3056
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
SourceLocation Loc
Definition: SemaObjC.cpp:759
Defines the SourceManager interface.
static QualType getPointeeType(const MemRegion *R)
StateNode * Previous
const LValueBase getLValueBase() const
Definition: APValue.cpp:984
bool isLValue() const
Definition: APValue.h:472
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
SourceManager & getSourceManager()
Definition: ASTContext.h:741
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2723
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1187
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:682
const LangOptions & getLangOpts() const
Definition: ASTContext.h:834
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
Definition: ASTContext.cpp:854
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
Definition: ASTContext.h:1161
const NoSanitizeList & getNoSanitizeList() const
Definition: ASTContext.h:844
llvm::DenseMap< const CXXMethodDecl *, CXXCastPath > LambdaCastPaths
For capturing lambdas with an explicit object parameter whose type is derived from the lambda type,...
Definition: ASTContext.h:1256
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2489
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
Definition: ASTContext.h:1160
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2925
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2493
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4224
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:6986
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition: Expr.cpp:5186
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2718
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3578
QualType getElementType() const
Definition: Type.h:3590
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3909
A fixed int type of a specified bitwidth.
Definition: Type.h:7820
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition: Builtins.h:161
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1491
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2856
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1378
bool isDynamicClass() const
Definition: DeclCXX.h:586
bool hasDefinition() const
Definition: DeclCXX.h:572
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1066
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3547
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt, Beware llvm::MaybeAlign as...
Definition: CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
SanitizerSet SanitizeMergeHandlers
Set of sanitizer checks that can merge handlers (smaller code size at the expense of debuggability).
PointerAuthOptions PointerAuth
Configuration for pointer-signing.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
SanitizerSet SanitizeRecover
Set of sanitizer checks that are non-fatal (i.e.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
SanitizerMaskCutoffs SanitizeSkipHotCutoffs
Set of thresholds in a range [0.0, 1.0]: the top hottest code responsible for the given fraction of P...
virtual llvm::FixedVectorType * getOptimalVectorMemoryType(llvm::FixedVectorType *T, const LangOptions &Opt) const
Returns the optimal vector memory type based on the given vector type.
Definition: ABIInfo.cpp:240
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:193
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:259
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:231
Address setKnownNonNull()
Definition: Address.h:236
void setAlignment(CharUnits Value)
Definition: Address.h:191
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:181
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
Address getAddress() const
Definition: CGValue.h:644
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:613
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:602
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
RValue asRValue() const
Definition: CGValue.h:666
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:858
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:305
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:292
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:203
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition: CGBuilder.h:331
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition: CGBuilder.h:241
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:413
Address CreateLaunderInvariantGroup(Address Addr)
Definition: CGBuilder.h:437
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:128
Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:429
Address CreateStripInvariantGroup(Address Addr)
Definition: CGBuilder.h:443
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:189
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:261
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:346
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:113
Abstract information about a function or function prototype.
Definition: CGCall.h:41
const GlobalDecl getCalleeDecl() const
Definition: CGCall.h:59
All available information about a concrete callee.
Definition: CGCall.h:63
CGCalleeInfo getAbstractInfo() const
Definition: CGCall.h:180
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition: CGCall.h:172
bool isPseudoDestructor() const
Definition: CGCall.h:169
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition: CGCall.h:123
unsigned getBuiltinID() const
Definition: CGCall.h:164
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:137
bool isBuiltin() const
Definition: CGCall.h:157
const FunctionDecl * getBuiltinDecl() const
Definition: CGCall.h:160
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition: CGCall.h:131
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, llvm::Value *ivarOffset)=0
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)=0
virtual llvm::Value * EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)=0
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, Address AddrWeakObj)=0
virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel)=0
Get the address of a selector for the specified name and type values.
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, bool threadlocal=false)=0
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
bool isNontemporalDecl(const ValueDecl *VD) const
Checks if the VD variable is marked as nontemporal declaration in current context.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
bool containsFieldDecl(const FieldDecl *FD) const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
void add(RValue rvalue, QualType type)
Definition: CGCall.h:305
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr, llvm::Value *lifetimeSz=nullptr)
Definition: CGCall.h:326
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitARCLoadWeakRetained(Address addr)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
LValue EmitInitListLValue(const InitListExpr *E)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Address EmitExtVectorElementLValue(LValue V)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
void markStmtMaybeUsed(const Stmt *S)
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ Default
! No language constraints on evaluation order.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen without...
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
LValue EmitCoyieldLValue(const CoyieldExpr *E)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
const TargetCodeGenInfo & getTargetHooks() const
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond if false.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
LValue EmitMemberExpr(const MemberExpr *E)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
CleanupKind getCleanupKind(QualType::DestructionKind kind)
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitARCInitWeak(Address addr, llvm::Value *value)
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
LValue EmitStringLiteralLValue(const StringLiteral *E)
static Destroyer destroyARCStrongPrecise
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
static Destroyer destroyARCStrongImprecise
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
This class organizes the cross-function state that is used while generating LLVM code.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD)
Get the address of a GUID.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1269
void setDSOLocal(llvm::GlobalValue *GV) const
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
CGDebugInfo * getModuleDebugInfo()
ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E)
Returns a pointer to a constant global variable for the given file-scope compound literal expression.
llvm::ConstantInt * CreateCrossDsoCfiTypeId(llvm::Metadata *MD)
Generate a cross-DSO type identifier for MD.
void setTypeDescriptorInMap(QualType Ty, llvm::Constant *C)
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition: CGExpr.cpp:2920
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition: CGCXX.cpp:218
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1109
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
DiagnosticsEngine & getDiags() const
void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref< void()> Fn)
Run some code with "sufficient" stack space.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetInfo & getTarget() const
llvm::Metadata * CreateMetadataIdentifierForType(QualType T)
Create a metadata identifier for the given type.
llvm::Constant * getTypeDescriptorFromMap(QualType Ty)
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
llvm::MDNode * getTBAABaseTypeInfo(QualType QTy)
getTBAABaseTypeInfo - Get metadata that describes the given base access type.
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGPointerAuthInfo getFunctionPointerAuthInfo(QualType T)
Return the abstract pointer authentication schema for a pointer to the given function type.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Metadata * CreateMetadataIdentifierGeneralized(QualType T)
Create a metadata identifier for the generalization of the given type.
const llvm::Triple & getTriple() const
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:247
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType)
getTBAAInfoForSubobject - Get TBAA information for an access with a given base lvalue.
llvm::Constant * CreateRuntimeVariable(llvm::Type *Ty, StringRef Name)
Create a new runtime global variable with the specified type and name.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ConstantAddress GetAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name=".str")
Return a pointer to a constant array for the given string literal.
ASTContext & getContext() const
ConstantAddress GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO)
Get the address of a template parameter object.
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
llvm::MDNode * getTBAATypeInfo(QualType QTy)
getTBAATypeInfo - Get metadata used to describe accesses to objects of the given type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, TBAAAccessInfo InfoB)
mergeTBAAInfoForConditionalOperator - Get merged TBAA information for the purposes of conditional ope...
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info, llvm::Function *F, bool IsThunk)
Set the LLVM function attributes (sext, zext, etc).
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F)
Set the LLVM function attributes which only apply to a function definition.
ConstantAddress GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *)
Return a pointer to a constant array for the given ObjCEncodeExpr node.
ConstantAddress GetAddrOfConstantCString(const std::string &Str, const char *GlobalName=nullptr)
Returns a pointer to a character array containing the literal and a terminating '\0' character.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:679
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const llvm::DataLayout & getDataLayout() const
Definition: CodeGenTypes.h:99
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
Definition: CGCall.cpp:638
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:294
ConstantAddress withElementType(llvm::Type *ElemTy) const
Definition: Address.h:310
llvm::Constant * getPointer() const
Definition: Address.h:306
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:382
void mergeForCast(const LValueBaseInfo &Info)
Definition: CGValue.h:174
AlignmentSource getAlignmentSource() const
Definition: CGValue.h:171
LValue - This represents an lvalue references.
Definition: CGValue.h:182
bool isBitField() const
Definition: CGValue.h:280
bool isMatrixElt() const
Definition: CGValue.h:283
Expr * getBaseIvarExp() const
Definition: CGValue.h:332
llvm::Constant * getExtVectorElts() const
Definition: CGValue.h:409
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition: CGValue.h:478
void setObjCIvar(bool Value)
Definition: CGValue.h:298
bool isObjCArray() const
Definition: CGValue.h:300
bool isObjCStrong() const
Definition: CGValue.h:324
bool isGlobalObjCRef() const
Definition: CGValue.h:306
bool isVectorElt() const
Definition: CGValue.h:279
void setObjCArray(bool Value)
Definition: CGValue.h:301
bool isSimple() const
Definition: CGValue.h:278
bool isVolatileQualified() const
Definition: CGValue.h:285
RValue asAggregateRValue() const
Definition: CGValue.h:498
CharUnits getAlignment() const
Definition: CGValue.h:343
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition: CGValue.h:395
llvm::Value * getGlobalReg() const
Definition: CGValue.h:430
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:432
bool isVolatile() const
Definition: CGValue.h:328
const Qualifiers & getQuals() const
Definition: CGValue.h:338
bool isGlobalReg() const
Definition: CGValue.h:282
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:452
bool isObjCWeak() const
Definition: CGValue.h:321
Address getAddress() const
Definition: CGValue.h:361
unsigned getVRQualifiers() const
Definition: CGValue.h:287
void setThreadLocalRef(bool Value)
Definition: CGValue.h:310
LValue setKnownNonNull()
Definition: CGValue.h:350
bool isNonGC() const
Definition: CGValue.h:303
void setGlobalObjCRef(bool Value)
Definition: CGValue.h:307
bool isExtVectorElt() const
Definition: CGValue.h:281
llvm::Value * getVectorIdx() const
Definition: CGValue.h:382
void setNontemporal(bool Value)
Definition: CGValue.h:319
LValueBaseInfo getBaseInfo() const
Definition: CGValue.h:346
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition: CGValue.h:315
QualType getType() const
Definition: CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:424
bool isThreadLocalRef() const
Definition: CGValue.h:309
KnownNonNull_t isKnownNonNull() const
Definition: CGValue.h:349
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:335
void setNonGC(bool Value)
Definition: CGValue.h:304
Address getVectorAddress() const
Definition: CGValue.h:370
bool isNontemporal() const
Definition: CGValue.h:318
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition: CGValue.h:468
bool isObjCIvar() const
Definition: CGValue.h:297
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:442
void setAddress(Address address)
Definition: CGValue.h:363
void setBaseIvarExp(Expr *V)
Definition: CGValue.h:333
Address getExtVectorAddress() const
Definition: CGValue.h:401
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:488
Address getMatrixAddress() const
Definition: CGValue.h:387
Address getBitFieldAddress() const
Definition: CGValue.h:415
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
bool isScalar() const
Definition: CGValue.h:64
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:125
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:108
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
An abstract representation of an aligned address.
Definition: Address.h:42
RawAddress withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:77
llvm::Value * getPointer() const
Definition: Address.h:66
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:386
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:237
Complex values, per C99 6.2.5p11.
Definition: Type.h:3146
QualType getElementType() const
Definition: Type.h:3156
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3477
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:196
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1077
Represents a concrete matrix type with constant number of rows and columns.
Definition: Type.h:4233
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: Type.h:4251
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
Definition: DeclBase.cpp:2038
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition: Expr.h:1463
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition: Expr.cpp:487
ValueDecl * getDecl()
Definition: Expr.h:1333
SourceLocation getLocation() const
Definition: Expr.h:1341
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
T * getAttr() const
Definition: DeclBase.h:576
SourceLocation getLocation() const
Definition: DeclBase.h:442
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:557
DeclContext * getDeclContext()
Definition: DeclBase.h:451
bool hasAttr() const
Definition: DeclBase.h:580
void ConvertArgToString(ArgumentKind Kind, intptr_t Val, StringRef Modifier, StringRef Argument, ArrayRef< ArgumentValue > PrevArgs, SmallVectorImpl< char > &Output, ArrayRef< intptr_t > QualTypeVals) const
Converts a diagnostic argument (as an intptr_t) into the string that represents it.
Definition: Diagnostic.h:907
Represents an enum.
Definition: Decl.h:3861
bool isFixed() const
Returns true if this is an Objective-C, C++11, or Microsoft-style enumeration with a fixed underlying...
Definition: Decl.h:4075
void getValueRange(llvm::APInt &Max, llvm::APInt &Min) const
Calculates the [Min,Max) values the enum can store based on the NumPositiveBits and NumNegativeBits.
Definition: Decl.cpp:5013
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:6104
EnumDecl * getDecl() const
Definition: Type.h:6111
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3799
This represents one expression.
Definition: Expr.h:110
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition: Expr.cpp:82
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3123
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition: Expr.h:437
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition: Expr.cpp:3096
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3084
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3092
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition: Expr.h:278
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition: Expr.h:277
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition: Expr.cpp:1549
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3593
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3076
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:276
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:469
bool isFlexibleArrayMemberLike(ASTContext &Context, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution=false) const
Check whether this array fits the idiom of a flexible array member, depending on the value of -fstric...
Definition: Expr.cpp:205
QualType getType() const
Definition: Expr.h:142
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/ write barrier.
Definition: Expr.cpp:3007
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6354
Represents a member of a struct/union/class.
Definition: Decl.h:3033
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3136
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.h:3118
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3264
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition: Decl.cpp:4722
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:1044
const Expr * getSubExpr() const
Definition: Expr.h:1057
Represents a function declaration or definition.
Definition: Decl.h:1935
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3649
Represents a prototype with parameter type info, e.g.
Definition: Type.h:5108
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition: Expr.h:7152
Describes an C or C++ initializer list.
Definition: Expr.h:5088
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:508
virtual void mangleCXXRTTI(QualType T, raw_ostream &)=0
unsigned getBlockId(const BlockDecl *BD, bool Local)
Definition: Mangle.h:84
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4732
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition: ExprCXX.h:4757
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition: ExprCXX.h:4749
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition: ExprCXX.h:4782
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2796
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3236
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition: Expr.h:3319
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why? This is only meaningful if the named memb...
Definition: Expr.h:3460
Expr * getBase() const
Definition: Expr.h:3313
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:3431
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3520
bool isObjCBOOLType(QualType T) const
Returns true if.
Definition: NSAPI.cpp:481
This represents a decl that may have a name.
Definition: Decl.h:253
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:280
A C++ nested-name-specifier augmented with source location information.
bool containsType(SanitizerMask Mask, StringRef MangledTypeName, StringRef Category=StringRef()) const
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1951
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:941
Represents a class type in Objective C.
Definition: Type.h:7332
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1173
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1223
bool isUnique() const
Definition: Expr.h:1231
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2170
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3199
QualType getPointeeType() const
Definition: Type.h:3209
[C99 6.4.2.2] - A predefined identifier such as func.
Definition: Expr.h:1991
StringRef getIdentKindName() const
Definition: Expr.h:2048
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
bool isValid() const
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6546
const Expr *const * const_semantics_iterator
Definition: Expr.h:6611
A (possibly-)qualified type.
Definition: Type.h:929
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:8021
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:996
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:8063
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:7977
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1433
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:8031
QualType withCVRQualifiers(unsigned CVR) const
Definition: Type.h:1174
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1531
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: Type.h:1028
The collection of all-type qualifiers we support.
Definition: Type.h:324
unsigned getCVRQualifiers() const
Definition: Type.h:481
GC getObjCGCAttr() const
Definition: Type.h:512
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:354
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:347
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:343
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:357
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:360
bool hasConst() const
Definition: Type.h:450
void addCVRQualifiers(unsigned mask)
Definition: Type.h:495
void removeObjCGCAttr()
Definition: Type.h:516
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition: Type.h:643
void setAddressSpace(LangAS space)
Definition: Type.h:584
bool hasVolatile() const
Definition: Type.h:460
ObjCLifetime getObjCLifetime() const
Definition: Type.h:538
void addVolatile()
Definition: Type.h:463
Represents a struct/union/class.
Definition: Decl.h:4162
field_range fields() const
Definition: Decl.h:4376
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition: Decl.h:4361
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6078
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:203
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4466
Stmt - This represents one statement.
Definition: Stmt.h:84
StmtClass getStmtClass() const
Definition: Stmt.h:1380
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:346
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
bool isUnion() const
Definition: Decl.h:3784
Exposes information about the current target.
Definition: TargetInfo.h:220
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1263
virtual StringRef getABI() const
Get the ABI currently in use.
Definition: TargetInfo.h:1331
const Type * getTypeForDecl() const
Definition: Decl.h:3409
The type-property cache.
Definition: Type.cpp:4499
The base class of the type hierarchy.
Definition: Type.h:1828
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1916
bool isBlockPointerType() const
Definition: Type.h:8206
bool isVoidType() const
Definition: Type.h:8516
bool isBooleanType() const
Definition: Type.h:8648
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2201
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition: Type.cpp:1933
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2180
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition: Type.h:8819
bool isConstantArrayType() const
Definition: Type.h:8268
bool isArrayType() const
Definition: Type.h:8264
bool isFunctionPointerType() const
Definition: Type.h:8232
bool isCountAttributedType() const
Definition: Type.cpp:727
bool isArithmeticType() const
Definition: Type.cpp:2315
bool isConstantMatrixType() const
Definition: Type.h:8326
bool isPointerType() const
Definition: Type.h:8192
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:8560
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8810
bool isReferenceType() const
Definition: Type.h:8210
bool isVariableArrayType() const
Definition: Type.h:8276
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:738
bool isExtVectorBoolType() const
Definition: Type.h:8312
bool isBitIntType() const
Definition: Type.h:8430
bool isAnyComplexType() const
Definition: Type.h:8300
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition: Type.h:8691
bool isAtomicType() const
Definition: Type.h:8347
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2725
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2396
bool isFunctionType() const
Definition: Type.h:8188
bool isObjCObjectPointerType() const
Definition: Type.h:8334
bool isVectorType() const
Definition: Type.h:8304
bool isFloatingType() const
Definition: Type.cpp:2283
bool isSubscriptableVectorType() const
Definition: Type.h:8318
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8741
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition: Type.cpp:638
bool isRecordType() const
Definition: Type.h:8292
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.cpp:1920
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2232
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4750
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:671
QualType getType() const
Definition: Decl.h:682
QualType getType() const
Definition: Value.cpp:234
Represents a variable declaration or definition.
Definition: Decl.h:886
TLSKind getTLSKind() const
Definition: Decl.cpp:2157
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition: Decl.cpp:2355
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition: Decl.h:1139
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition: Decl.h:912
@ TLS_None
Not a TLS variable.
Definition: Decl.h:906
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3809
Represents a GCC generic vector type.
Definition: Type.h:4035
unsigned getNumElements() const
Definition: Type.h:4050
#define INT_MIN
Definition: limits.h:55
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
@ ARCImpreciseLifetime
Definition: CGValue.h:136
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition: CGValue.h:159
@ NotKnownNonNull
Definition: Address.h:33
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< FunctionType > functionType
Matches FunctionType nodes.
constexpr Variable var(Literal L)
Returns the variable of L.
Definition: CNFFormula.h:64
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
Definition: StoreRef.h:27
bool This(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2383
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2346
bool IsNonNull(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2371
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1689
bool Cast(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2122
The JSON file list parser is used to communicate input to InstallAPI.
@ OpenCL
Definition: LangStandard.h:65
@ CPlusPlus
Definition: LangStandard.h:55
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition: Specifiers.h:154
@ SC_Register
Definition: Specifiers.h:257
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition: Specifiers.h:327
@ SD_Thread
Thread storage duration.
Definition: Specifiers.h:330
@ SD_Static
Static storage duration.
Definition: Specifiers.h:331
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition: Specifiers.h:328
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:329
@ SD_Dynamic
Dynamic storage duration.
Definition: Specifiers.h:332
@ Result
The result type of a method or function.
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
llvm::cl::opt< bool > ClSanitizeGuardChecks
const FunctionProtoType * T
LangAS getLangASFromTargetAS(unsigned TargetAS)
Definition: AddressSpaces.h:87
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
bool isLambdaMethod(const DeclContext *DC)
Definition: ASTLambda.h:39
@ Other
Other implicit parameter.
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition: Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition: Specifiers.h:180
unsigned long uint64_t
unsigned int uint32_t
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
Definition: CodeGenTBAA.h:105
uint64_t Offset
Offset - The byte offset of the final access within the base one.
Definition: CodeGenTBAA.h:109
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
Definition: CodeGenTBAA.h:112
llvm::MDNode * BaseType
BaseType - The base/leading access type.
Definition: CodeGenTBAA.h:101
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition: Expr.h:609
PointerAuthSchema FunctionPointers
The ABI for C function pointers.
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:187
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:174
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition: Expr.h:66