//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

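// Maps a Clang calling convention onto the corresponding LLVM convention.
// For example, a function declared 'void f(void) __attribute__((stdcall))'
// on 32-bit x86 is lowered with LLVM's x86_stdcallcc, while an unannotated
// C function falls through to the default C convention.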
unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
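/// For example, a function declared as returning 'const int' is arranged
/// here as if it simply returned 'int'.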
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}
/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
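/// For example, 'void f(void *p __attribute__((pass_object_size(0))))' is
/// arranged with two parameters: the pointer itself, followed by an implicit
/// 'size_t' carrying the object size.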
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT,
                                 const FunctionDecl *FD) {
  // Fill out paramInfos.
  if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
    assert(paramInfos.size() <= prefix.size());
    auto protoParamInfos = FPT->getExtParameterInfos();
    paramInfos.reserve(prefix.size() + protoParamInfos.size());
    paramInfos.resize(prefix.size());
    paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
  }

  // Fast path: unknown target.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (If RD is null, we don't have any meaningful "this" argument type,
/// so we fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
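/// For example, for 'struct S { int f(int); }', the arranged type is
/// roughly that of a free function 'int f(S *this, int)'.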
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto,
    unsigned prefixArgs,
    unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  auto protoInfos = proto->getExtParameterInfos();
  paramInfos.append(protoInfos.begin(), protoInfos.end());

  // Add default infos for the variadic arguments.
  paramInfos.resize(totalArgs);
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
                                                ArgTypes.size());
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
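/// For example, a method '- (int)foo:(int)x' with an object receiver is
/// arranged roughly as 'int foo(id self, SEL _cmd, int x)', with the
/// receiver and selector as implicit leading arguments.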
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, {}, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
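/// A rough sketch of the arranged thunk, assuming a virtual method
/// 'int S::f(int)' (names here are illustrative):
///   void thunk(S *this, ...);  // body ends in a musttail call to 'f'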
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
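/// For example, a call through 'void (*fp)()' carries no prototype; some
/// targets (see isNoProtoCallVariadic above) nevertheless require such a
/// call to be lowered with the variadic convention.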
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
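/// For example, invoking '^int(int x) { ... }' is arranged roughly as
/// 'int invoke(<block literal pointer>, int x)': the block literal itself
/// is the implicit first argument.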
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod=*/false, /*chainCall=*/false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                              ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required) {
  unsigned numRequiredArgs =
      (proto->isVariadic() ? required.getNumRequiredArgs() : args.size());
  unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams();
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
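/// Results are uniqued in a FoldingSet: arranging the same signature twice
/// returns the same CGFunctionInfo, and the ABI details are computed only
/// on the first request.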
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (info.getCC() != CC_Swift) {
    getABIInfo().computeInfo(*FI);
  } else {
    swiftcall::computeABIInfo(CGM, *FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
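// For example, 'struct { int a; float b[2]; }' passed as ABIArgInfo::Expand
// is flattened into the scalar argument sequence (int, float, float).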
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can appear here only in degenerate cases: all the fields are
      // the same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

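// Returns the number of scalar values the expansion of Ty produces. For
// example, 'struct { _Complex float c; int a[3]; }' expands to 2 + 3 = 5.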
getExpansionSize(QualType Ty,const ASTContext & Context)889*67e74705SXin Li static int getExpansionSize(QualType Ty, const ASTContext &Context) {
890*67e74705SXin Li auto Exp = getTypeExpansion(Ty, Context);
891*67e74705SXin Li if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
892*67e74705SXin Li return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
893*67e74705SXin Li }
894*67e74705SXin Li if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
895*67e74705SXin Li int Res = 0;
896*67e74705SXin Li for (auto BS : RExp->Bases)
897*67e74705SXin Li Res += getExpansionSize(BS->getType(), Context);
898*67e74705SXin Li for (auto FD : RExp->Fields)
899*67e74705SXin Li Res += getExpansionSize(FD->getType(), Context);
900*67e74705SXin Li return Res;
901*67e74705SXin Li }
902*67e74705SXin Li if (isa<ComplexExpansion>(Exp.get()))
903*67e74705SXin Li return 2;
904*67e74705SXin Li assert(isa<NoExpansion>(Exp.get()));
905*67e74705SXin Li return 1;
906*67e74705SXin Li }
907*67e74705SXin Li
908*67e74705SXin Li void
getExpandedTypes(QualType Ty,SmallVectorImpl<llvm::Type * >::iterator & TI)909*67e74705SXin Li CodeGenTypes::getExpandedTypes(QualType Ty,
910*67e74705SXin Li SmallVectorImpl<llvm::Type *>::iterator &TI) {
911*67e74705SXin Li auto Exp = getTypeExpansion(Ty, Context);
912*67e74705SXin Li if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
913*67e74705SXin Li for (int i = 0, n = CAExp->NumElts; i < n; i++) {
914*67e74705SXin Li getExpandedTypes(CAExp->EltTy, TI);
915*67e74705SXin Li }
916*67e74705SXin Li } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
917*67e74705SXin Li for (auto BS : RExp->Bases)
918*67e74705SXin Li getExpandedTypes(BS->getType(), TI);
919*67e74705SXin Li for (auto FD : RExp->Fields)
920*67e74705SXin Li getExpandedTypes(FD->getType(), TI);
921*67e74705SXin Li } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
922*67e74705SXin Li llvm::Type *EltTy = ConvertType(CExp->EltTy);
923*67e74705SXin Li *TI++ = EltTy;
924*67e74705SXin Li *TI++ = EltTy;
925*67e74705SXin Li } else {
926*67e74705SXin Li assert(isa<NoExpansion>(Exp.get()));
927*67e74705SXin Li *TI++ = ConvertType(Ty);
928*67e74705SXin Li }
929*67e74705SXin Li }
930*67e74705SXin Li
forConstantArrayExpansion(CodeGenFunction & CGF,ConstantArrayExpansion * CAE,Address BaseAddr,llvm::function_ref<void (Address)> Fn)931*67e74705SXin Li static void forConstantArrayExpansion(CodeGenFunction &CGF,
932*67e74705SXin Li ConstantArrayExpansion *CAE,
933*67e74705SXin Li Address BaseAddr,
934*67e74705SXin Li llvm::function_ref<void(Address)> Fn) {
935*67e74705SXin Li CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
936*67e74705SXin Li CharUnits EltAlign =
937*67e74705SXin Li BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
938*67e74705SXin Li
939*67e74705SXin Li for (int i = 0, n = CAE->NumElts; i < n; i++) {
940*67e74705SXin Li llvm::Value *EltAddr =
941*67e74705SXin Li CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
942*67e74705SXin Li Fn(Address(EltAddr, EltAlign));
943*67e74705SXin Li }
944*67e74705SXin Li }
945*67e74705SXin Li
void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

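// Illustrative example: the inverse of the expansion above. A '_Complex
// double' argument is split by the ComplexExpansion case below into two
// 'double' IR call operands (real, then imaginary), and a flattened struct
// contributes one operand per base and field, recursively.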
void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

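// Illustrative note: if MinAlign is 4 but the data layout prefers 8-byte
// alignment for the coerced type (say, i64 on a typical 64-bit target), the
// helper below allocates the temporary with 8-byte alignment.
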
/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

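// Illustrative example: with SrcSTy = '{ { i32, i32 }, i8 }' and DstSize = 8,
// the helper below GEPs into the first element '{ i32, i32 }' (its store
// size, 8, covers the access) and then stops, since diving into the leading
// i32 would enter an element smaller than the 8 bytes being accessed.
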
/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to GEP into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

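// Illustrative example: coercing an i64 holding 0xAABBCCDD11223344 down to
// i32 yields 0x11223344 on a little-endian target (low bits preserved) but
// 0xAABBCCDD on a big-endian target (high bits shifted down first), matching
// what a store followed by a narrower load through memory would produce.
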
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specified Ty, where
/// both are either integers or pointers. This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

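// Illustrative example: loading a value of IR type '{ i32, i32 }' as an i64
// is handled below either by a direct bitcast of the source pointer (when the
// source is at least as large as the destination) or, failing that, by a
// memcpy into a correctly typed temporary that is then loaded.
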
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

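// Illustrative example: storing a first-class aggregate value of type
// '{ i64, double }' with the helper below emits two extractvalue + scalar
// store pairs at the element offsets from the struct layout, rather than a
// single aggregate store.
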
// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
      CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

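// Illustrative note: this mirrors CreateCoercedLoad above. When the source is
// larger than the destination and neither side is an integer or pointer, the
// value is first stored to a temporary alloca and then only DstSize bytes are
// memcpy'd into the destination, dropping the excess as documented below.
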
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

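// Illustrative example: for an ABIArgInfo with getDirectOffset() == 8, the
// helper below advances the address by 8 bytes (via an i8 element cast plus a
// byte GEP) and recasts it to the coerced element type before the coerced
// load or store takes place.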
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                             CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

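// Illustrative example of the mapping built below: for a call such as
// 'S f(int, double)' where S is returned indirectly, the IR argument list
// might be (sret S*, i32, double); the sret slot would occupy IR index 0 and
// Clang arguments 0 and 1 would map to IR indices 1 and 2 respectively.
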
namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// the number of such arguments.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

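// Illustrative example: for a C function 'double f(int, float)' with both
// arguments passed Direct and unflattened, the IR type computed below would
// simply be 'double (i32, float)'; sret, inalloca, padding, expanded, and
// flattened arguments each add or substitute IR parameter slots at the
// positions computed by ClangToLLVMArgMapping.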
llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

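// Illustrative example: a prototype such as 'void f() noexcept' (with a
// resolved exception specification) causes the helper below to add the
// 'nounwind' LLVM attribute.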
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow(Ctx))
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}

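// Illustrative example: a declaration marked __attribute__((const)) picks up
// 'readnone' and 'nounwind' below, while an indirect (sret) return or a
// byval/inalloca argument strips 'readnone'/'readonly' again later in this
// function, since such calls must write through the hidden pointer.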
void CodeGenModule::ConstructAttributeList(
    StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
    AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;
  bool HasOptnone = false;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // If we have information about the function prototype, we can learn
  // attributes from there.
  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
                                     CalleeInfo.getCalleeFunctionProtoType());

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();

  bool HasAnyX86InterruptAttr = false;
  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      AddAttributesFromFunctionProtoType(
          getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);

    HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
  }

  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls ||
        CodeGenOpts.isNoBuiltinFunc(Name.data()))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    bool DisableTailCalls =
        CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr ||
        (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
    FuncAttrs.addAttribute(
        "disable-tail-calls",
        llvm::toStringRef(DisableTailCalls));

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    FuncAttrs.addAttribute("no-signed-zeros-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoSignedZeros));

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");

    // Add target-cpu and target-features attributes to functions. If
    // we have a decl for the function and it has a target attribute then
    // parse that and add it to the feature set.
    StringRef TargetCPU = getTarget().getTargetOpts().CPU;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
    if (FD && FD->hasAttr<TargetAttr>()) {
      llvm::StringMap<bool> FeatureMap;
      getFunctionFeatureMap(FeatureMap, FD);

      // Produce the canonical string for this set of features.
      std::vector<std::string> Features;
      for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
                                                 ie = FeatureMap.end();
           it != ie; ++it)
        Features.push_back((it->second ? "+" : "-") + it->first().str());

      // Now add the target-cpu and target-features to the function.
      // While we populated the feature map above, we still need to
      // get and parse the target attribute so we can get the cpu for
      // the function.
      const auto *TD = FD->getAttr<TargetAttr>();
      TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
      if (ParsedAttr.second != "")
        TargetCPU = ParsedAttr.second;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    } else {
      // Otherwise just add the existing target cpu and target features to the
      // function.
      std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    }
  }

  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
    // Conservatively, mark all functions and calls in CUDA as convergent
    // (meaning, they may call an intrinsically convergent op, such as
    // __syncthreads(), and so can't have certain optimizations applied around
    // them). LLVM will remove this attribute where it safely can.
    FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    // Respect -fcuda-flush-denormals-to-zero.
    if (getLangOpts().CUDADeviceFlushDenormalsToZero)
      FuncAttrs.addAttribute("nvptx-f32ftz", "true");
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::CoerceAndExpand:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
    QualType PTy = RefTy->getPointeeType();
    if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
      RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                        .getQuantity());
    else if (getContext().getTargetAddressSpace(PTy) == 0)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
  }

  // Attach return attributes.
  if (RetAttrs.hasAttributes()) {
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
  }

  bool hasUsedSRet = false;

  // Attach attributes to sret.
  if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    hasUsedSRet = true;
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
  }

  // Attach attributes to inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
  }

  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
    QualType ParamType = I->type;
    const ABIArgInfo &AI = I->info;
    llvm::AttrBuilder Attrs;

    // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(
            getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
            llvm::Attribute::InReg));
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
        if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
          Attrs.addAttribute(llvm::Attribute::SExt);
        else
          Attrs.addAttribute(llvm::Attribute::ZExt);
      }
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      break;

    case ABIArgInfo::Indirect: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      CharUnits Align = AI.getIndirectAlign();

      // In a byval argument, it is important that the required
      // alignment of the type is honored, as LLVM might be creating a
      // *new* stack object, and needs to know what alignment to give
      // it. (Sometimes it can deduce a sensible alignment on its own,
      // but not if clang decides it must emit a packed struct, or the
      // user specifies increased alignment requirements.)
      //
      // This is different from indirect *not* byval, where the object
      // exists already, and the align attribute is purely
      // informative.
      assert(!Align.isZero());

      // For now, only add this when we have a byval argument.
      // TODO: be less lazy about updating test cases.
      if (AI.getIndirectByVal())
        Attrs.addAlignmentAttr(Align.getQuantity());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;
    }
    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
    case ABIArgInfo::CoerceAndExpand:
      break;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                       .getQuantity());
      else if (getContext().getTargetAddressSpace(PTy) == 0)
        Attrs.addAttribute(llvm::Attribute::NonNull);
    }

    switch (FI.getExtParameterInfo(ArgNo).getABI()) {
    case ParameterABI::Ordinary:
      break;

    case ParameterABI::SwiftIndirectResult: {
      // Add 'sret' if we haven't already used it for something, but
      // only if the result is void.
      if (!hasUsedSRet && RetTy->isVoidType()) {
        Attrs.addAttribute(llvm::Attribute::StructRet);
        hasUsedSRet = true;
      }

      // Add 'noalias' in either case.
      Attrs.addAttribute(llvm::Attribute::NoAlias);

      // Add 'dereferenceable' and 'alignment'.
      auto PTy = ParamType->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        auto info = getContext().getTypeInfoInChars(PTy);
        Attrs.addDereferenceableAttr(info.first.getQuantity());
        Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
                                                 info.second.getQuantity()));
      }
      break;
    }

    case ParameterABI::SwiftErrorResult:
      Attrs.addAttribute(llvm::Attribute::SwiftError);
      break;

    case ParameterABI::SwiftContext:
      Attrs.addAttribute(llvm::Attribute::SwiftSelf);
      break;
    }

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                              FirstIRArg + i + 1, Attrs));
    }
  }
  assert(ArgNo == FI.arg_size());

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                          llvm::AttributeSet::FunctionIndex,
                                          FuncAttrs));
}

2012*67e74705SXin Li /// An argument came in as a promoted argument; demote it back to its
2013*67e74705SXin Li /// declared type.
2014*67e74705SXin Li static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2015*67e74705SXin Li const VarDecl *var,
2016*67e74705SXin Li llvm::Value *value) {
2017*67e74705SXin Li llvm::Type *varType = CGF.ConvertType(var->getType());
2018*67e74705SXin Li
2019*67e74705SXin Li // This can happen with promotions that actually don't change the
2020*67e74705SXin Li // underlying type, like the enum promotions.
2021*67e74705SXin Li if (value->getType() == varType) return value;
2022*67e74705SXin Li
2023*67e74705SXin Li assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2024*67e74705SXin Li && "unexpected promotion type");
2025*67e74705SXin Li
2026*67e74705SXin Li if (isa<llvm::IntegerType>(varType))
2027*67e74705SXin Li return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2028*67e74705SXin Li
2029*67e74705SXin Li return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2030*67e74705SXin Li }
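
// For instance (illustrative only): a K&R-promoted 'short' arrives as an
// i32 and is truncated back,
//   %arg.unpromote = trunc i32 %value to i16
// while a promoted 'float' arrives as double and is narrowed with fptrunc.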
2031*67e74705SXin Li
2032*67e74705SXin Li /// Returns the attribute (either parameter attribute or function
2033*67e74705SXin Li /// attribute) that declares argument ArgNo to be non-null.
2034*67e74705SXin Li static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2035*67e74705SXin Li QualType ArgType, unsigned ArgNo) {
2036*67e74705SXin Li // FIXME: __attribute__((nonnull)) can also be applied to:
2037*67e74705SXin Li // - references to pointers, where the pointee is known to be
2038*67e74705SXin Li // nonnull (apparently a Clang extension)
2039*67e74705SXin Li // - transparent unions containing pointers
2040*67e74705SXin Li // In the former case, LLVM IR cannot represent the constraint. In
2041*67e74705SXin Li // the latter case, we have no guarantee that the transparent union
2042*67e74705SXin Li // is in fact passed as a pointer.
2043*67e74705SXin Li if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2044*67e74705SXin Li return nullptr;
2045*67e74705SXin Li // First, check attribute on parameter itself.
2046*67e74705SXin Li if (PVD) {
2047*67e74705SXin Li if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2048*67e74705SXin Li return ParmNNAttr;
2049*67e74705SXin Li }
2050*67e74705SXin Li // Check function attributes.
2051*67e74705SXin Li if (!FD)
2052*67e74705SXin Li return nullptr;
2053*67e74705SXin Li for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2054*67e74705SXin Li if (NNAttr->isNonNull(ArgNo))
2055*67e74705SXin Li return NNAttr;
2056*67e74705SXin Li }
2057*67e74705SXin Li return nullptr;
2058*67e74705SXin Li }
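
// Hypothetical declarations showing the two spellings checked above:
//   void f(int *p __attribute__((nonnull)));    // on the parameter itself
//   void g(int *p) __attribute__((nonnull(1))); // on the function
// In the second form the source-level index 1 names the first parameter.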
2059*67e74705SXin Li
2060*67e74705SXin Li namespace {
2061*67e74705SXin Li struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2062*67e74705SXin Li Address Temp;
2063*67e74705SXin Li Address Arg;
2064*67e74705SXin Li   CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2065*67e74705SXin Li   void Emit(CodeGenFunction &CGF, Flags flags) override {
2066*67e74705SXin Li llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2067*67e74705SXin Li CGF.Builder.CreateStore(errorValue, Arg);
2068*67e74705SXin Li }
2069*67e74705SXin Li };
2070*67e74705SXin Li }
2071*67e74705SXin Li
2072*67e74705SXin Li void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2073*67e74705SXin Li llvm::Function *Fn,
2074*67e74705SXin Li const FunctionArgList &Args) {
2075*67e74705SXin Li if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2076*67e74705SXin Li // Naked functions don't have prologues.
2077*67e74705SXin Li return;
2078*67e74705SXin Li
2079*67e74705SXin Li // If this is an implicit-return-zero function, go ahead and
2080*67e74705SXin Li // initialize the return value. TODO: it might be nice to have
2081*67e74705SXin Li // a more general mechanism for this that didn't require synthesized
2082*67e74705SXin Li // return statements.
2083*67e74705SXin Li if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2084*67e74705SXin Li if (FD->hasImplicitReturnZero()) {
2085*67e74705SXin Li QualType RetTy = FD->getReturnType().getUnqualifiedType();
2086*67e74705SXin Li llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2087*67e74705SXin Li llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2088*67e74705SXin Li Builder.CreateStore(Zero, ReturnValue);
2089*67e74705SXin Li }
2090*67e74705SXin Li }
2091*67e74705SXin Li
2092*67e74705SXin Li // FIXME: We no longer need the types from FunctionArgList; lift up and
2093*67e74705SXin Li // simplify.
2094*67e74705SXin Li
2095*67e74705SXin Li ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2096*67e74705SXin Li // Flattened function arguments.
2097*67e74705SXin Li SmallVector<llvm::Value *, 16> FnArgs;
2098*67e74705SXin Li FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2099*67e74705SXin Li for (auto &Arg : Fn->args()) {
2100*67e74705SXin Li FnArgs.push_back(&Arg);
2101*67e74705SXin Li }
2102*67e74705SXin Li assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2103*67e74705SXin Li
2104*67e74705SXin Li // If we're using inalloca, all the memory arguments are GEPs off of the last
2105*67e74705SXin Li // parameter, which is a pointer to the complete memory area.
2106*67e74705SXin Li Address ArgStruct = Address::invalid();
2107*67e74705SXin Li const llvm::StructLayout *ArgStructLayout = nullptr;
2108*67e74705SXin Li if (IRFunctionArgs.hasInallocaArg()) {
2109*67e74705SXin Li ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2110*67e74705SXin Li ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2111*67e74705SXin Li FI.getArgStructAlignment());
2112*67e74705SXin Li
2113*67e74705SXin Li assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2114*67e74705SXin Li }
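
  // Sketch (layout assumed): with inalloca, the callee receives something like
  //   define void @f(<{ %struct.A, i32 }>* inalloca)
  // and each memory argument below is recovered as a struct GEP into that
  // single allocation.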
2115*67e74705SXin Li
2116*67e74705SXin Li // Name the struct return parameter.
2117*67e74705SXin Li if (IRFunctionArgs.hasSRetArg()) {
2118*67e74705SXin Li auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2119*67e74705SXin Li AI->setName("agg.result");
2120*67e74705SXin Li AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
2121*67e74705SXin Li llvm::Attribute::NoAlias));
2122*67e74705SXin Li }
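
  // E.g. (illustrative): for 'struct Big get(void)' returned indirectly, the
  // prolog sees roughly
  //   define void @get(%struct.Big* noalias %agg.result)
  // with the sret attribute itself added earlier, when the function's
  // attribute list was constructed.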
2123*67e74705SXin Li
2124*67e74705SXin Li // Track if we received the parameter as a pointer (indirect, byval, or
2125*67e74705SXin Li // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2126*67e74705SXin Li // into a local alloca for us.
2127*67e74705SXin Li SmallVector<ParamValue, 16> ArgVals;
2128*67e74705SXin Li ArgVals.reserve(Args.size());
2129*67e74705SXin Li
2130*67e74705SXin Li // Create a pointer value for every parameter declaration. This usually
2131*67e74705SXin Li // entails copying one or more LLVM IR arguments into an alloca. Don't push
2132*67e74705SXin Li // any cleanups or do anything that might unwind. We do that separately, so
2133*67e74705SXin Li // we can push the cleanups in the correct order for the ABI.
2134*67e74705SXin Li assert(FI.arg_size() == Args.size() &&
2135*67e74705SXin Li "Mismatch between function signature & arguments.");
2136*67e74705SXin Li unsigned ArgNo = 0;
2137*67e74705SXin Li CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2138*67e74705SXin Li for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2139*67e74705SXin Li i != e; ++i, ++info_it, ++ArgNo) {
2140*67e74705SXin Li const VarDecl *Arg = *i;
2141*67e74705SXin Li QualType Ty = info_it->type;
2142*67e74705SXin Li const ABIArgInfo &ArgI = info_it->info;
2143*67e74705SXin Li
2144*67e74705SXin Li bool isPromoted =
2145*67e74705SXin Li isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2146*67e74705SXin Li
2147*67e74705SXin Li unsigned FirstIRArg, NumIRArgs;
2148*67e74705SXin Li std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2149*67e74705SXin Li
2150*67e74705SXin Li switch (ArgI.getKind()) {
2151*67e74705SXin Li case ABIArgInfo::InAlloca: {
2152*67e74705SXin Li assert(NumIRArgs == 0);
2153*67e74705SXin Li auto FieldIndex = ArgI.getInAllocaFieldIndex();
2154*67e74705SXin Li CharUnits FieldOffset =
2155*67e74705SXin Li CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2156*67e74705SXin Li Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2157*67e74705SXin Li Arg->getName());
2158*67e74705SXin Li ArgVals.push_back(ParamValue::forIndirect(V));
2159*67e74705SXin Li break;
2160*67e74705SXin Li }
2161*67e74705SXin Li
2162*67e74705SXin Li case ABIArgInfo::Indirect: {
2163*67e74705SXin Li assert(NumIRArgs == 1);
2164*67e74705SXin Li Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2165*67e74705SXin Li
2166*67e74705SXin Li if (!hasScalarEvaluationKind(Ty)) {
2167*67e74705SXin Li // Aggregates and complex variables are accessed by reference. All we
2168*67e74705SXin Li // need to do is realign the value, if requested.
2169*67e74705SXin Li Address V = ParamAddr;
2170*67e74705SXin Li if (ArgI.getIndirectRealign()) {
2171*67e74705SXin Li Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2172*67e74705SXin Li
2173*67e74705SXin Li // Copy from the incoming argument pointer to the temporary with the
2174*67e74705SXin Li // appropriate alignment.
2175*67e74705SXin Li //
2176*67e74705SXin Li // FIXME: We should have a common utility for generating an aggregate
2177*67e74705SXin Li // copy.
2178*67e74705SXin Li CharUnits Size = getContext().getTypeSizeInChars(Ty);
2179*67e74705SXin Li auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2180*67e74705SXin Li Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2181*67e74705SXin Li Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2182*67e74705SXin Li Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2183*67e74705SXin Li V = AlignedTemp;
2184*67e74705SXin Li }
2185*67e74705SXin Li ArgVals.push_back(ParamValue::forIndirect(V));
2186*67e74705SXin Li } else {
2187*67e74705SXin Li // Load scalar value from indirect argument.
2188*67e74705SXin Li llvm::Value *V =
2189*67e74705SXin Li EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2190*67e74705SXin Li
2191*67e74705SXin Li if (isPromoted)
2192*67e74705SXin Li V = emitArgumentDemotion(*this, Arg, V);
2193*67e74705SXin Li ArgVals.push_back(ParamValue::forDirect(V));
2194*67e74705SXin Li }
2195*67e74705SXin Li break;
2196*67e74705SXin Li }
2197*67e74705SXin Li
2198*67e74705SXin Li case ABIArgInfo::Extend:
2199*67e74705SXin Li case ABIArgInfo::Direct: {
2200*67e74705SXin Li
2201*67e74705SXin Li // If we have the trivial case, handle it with no muss and fuss.
2202*67e74705SXin Li if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2203*67e74705SXin Li ArgI.getCoerceToType() == ConvertType(Ty) &&
2204*67e74705SXin Li ArgI.getDirectOffset() == 0) {
2205*67e74705SXin Li assert(NumIRArgs == 1);
2206*67e74705SXin Li llvm::Value *V = FnArgs[FirstIRArg];
2207*67e74705SXin Li auto AI = cast<llvm::Argument>(V);
2208*67e74705SXin Li
2209*67e74705SXin Li if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2210*67e74705SXin Li if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2211*67e74705SXin Li PVD->getFunctionScopeIndex()))
2212*67e74705SXin Li AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2213*67e74705SXin Li AI->getArgNo() + 1,
2214*67e74705SXin Li llvm::Attribute::NonNull));
2215*67e74705SXin Li
2216*67e74705SXin Li QualType OTy = PVD->getOriginalType();
2217*67e74705SXin Li if (const auto *ArrTy =
2218*67e74705SXin Li getContext().getAsConstantArrayType(OTy)) {
2219*67e74705SXin Li // A C99 array parameter declaration with the static keyword also
2220*67e74705SXin Li // indicates dereferenceability, and if the size is constant we can
2221*67e74705SXin Li // use the dereferenceable attribute (which requires the size in
2222*67e74705SXin Li // bytes).
2223*67e74705SXin Li if (ArrTy->getSizeModifier() == ArrayType::Static) {
2224*67e74705SXin Li QualType ETy = ArrTy->getElementType();
2225*67e74705SXin Li uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2226*67e74705SXin Li if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2227*67e74705SXin Li ArrSize) {
2228*67e74705SXin Li llvm::AttrBuilder Attrs;
2229*67e74705SXin Li Attrs.addDereferenceableAttr(
2230*67e74705SXin Li getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2231*67e74705SXin Li AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2232*67e74705SXin Li AI->getArgNo() + 1, Attrs));
2233*67e74705SXin Li } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2234*67e74705SXin Li AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2235*67e74705SXin Li AI->getArgNo() + 1,
2236*67e74705SXin Li llvm::Attribute::NonNull));
2237*67e74705SXin Li }
2238*67e74705SXin Li }
2239*67e74705SXin Li } else if (const auto *ArrTy =
2240*67e74705SXin Li getContext().getAsVariableArrayType(OTy)) {
2241*67e74705SXin Li // For C99 VLAs with the static keyword, we don't know the size so
2242*67e74705SXin Li // we can't use the dereferenceable attribute, but in addrspace(0)
2243*67e74705SXin Li // we know that it must be nonnull.
2244*67e74705SXin Li if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2245*67e74705SXin Li !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2246*67e74705SXin Li AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2247*67e74705SXin Li AI->getArgNo() + 1,
2248*67e74705SXin Li llvm::Attribute::NonNull));
2249*67e74705SXin Li }
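
          // Worked examples (assuming a 4-byte int):
          //   void f(int a[static 4]);        // i32* dereferenceable(16) %a
          //   void g(int n, int a[static n]); // VLA: i32* nonnull %a (AS 0)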
2250*67e74705SXin Li
2251*67e74705SXin Li const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2252*67e74705SXin Li if (!AVAttr)
2253*67e74705SXin Li if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2254*67e74705SXin Li AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2255*67e74705SXin Li if (AVAttr) {
2256*67e74705SXin Li llvm::Value *AlignmentValue =
2257*67e74705SXin Li EmitScalarExpr(AVAttr->getAlignment());
2258*67e74705SXin Li llvm::ConstantInt *AlignmentCI =
2259*67e74705SXin Li cast<llvm::ConstantInt>(AlignmentValue);
2260*67e74705SXin Li unsigned Alignment =
2261*67e74705SXin Li std::min((unsigned) AlignmentCI->getZExtValue(),
2262*67e74705SXin Li +llvm::Value::MaximumAlignment);
2263*67e74705SXin Li
2264*67e74705SXin Li llvm::AttrBuilder Attrs;
2265*67e74705SXin Li Attrs.addAlignmentAttr(Alignment);
2266*67e74705SXin Li AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2267*67e74705SXin Li AI->getArgNo() + 1, Attrs));
2268*67e74705SXin Li }
2269*67e74705SXin Li }
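
      // Sketch of the source forms handled just above (assumed):
      //   void f(double *p __attribute__((align_value(64))));
      //   typedef double *aligned_ptr __attribute__((align_value(64)));
      // Either way the parameter would carry 'align 64', clamped to
      // llvm::Value::MaximumAlignment.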
2270*67e74705SXin Li
2271*67e74705SXin Li if (Arg->getType().isRestrictQualified())
2272*67e74705SXin Li AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2273*67e74705SXin Li AI->getArgNo() + 1,
2274*67e74705SXin Li llvm::Attribute::NoAlias));
2275*67e74705SXin Li
2276*67e74705SXin Li // LLVM expects swifterror parameters to be used in very restricted
2277*67e74705SXin Li // ways. Copy the value into a less-restricted temporary.
2278*67e74705SXin Li if (FI.getExtParameterInfo(ArgNo).getABI()
2279*67e74705SXin Li == ParameterABI::SwiftErrorResult) {
2280*67e74705SXin Li QualType pointeeTy = Ty->getPointeeType();
2281*67e74705SXin Li assert(pointeeTy->isPointerType());
2282*67e74705SXin Li Address temp =
2283*67e74705SXin Li CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2284*67e74705SXin Li Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2285*67e74705SXin Li llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2286*67e74705SXin Li Builder.CreateStore(incomingErrorValue, temp);
2287*67e74705SXin Li V = temp.getPointer();
2288*67e74705SXin Li
2289*67e74705SXin Li // Push a cleanup to copy the value back at the end of the function.
2290*67e74705SXin Li // The convention does not guarantee that the value will be written
2291*67e74705SXin Li // back if the function exits with an unwind exception.
2292*67e74705SXin Li EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2293*67e74705SXin Li }
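
      // Roughly, in IR (illustrative), the restricted swifterror argument
      // is shadowed by an ordinary temporary for the function body:
      //   %swifterror.temp = alloca %swift.error*
      //   %0 = load %swift.error*, %swift.error** %arg
      //   store %swift.error* %0, %swift.error** %swifterror.temp
      // and the CopyBackSwiftError cleanup copies it back on exit.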
2294*67e74705SXin Li
2295*67e74705SXin Li // Ensure the argument is the correct type.
2296*67e74705SXin Li if (V->getType() != ArgI.getCoerceToType())
2297*67e74705SXin Li V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2298*67e74705SXin Li
2299*67e74705SXin Li if (isPromoted)
2300*67e74705SXin Li V = emitArgumentDemotion(*this, Arg, V);
2301*67e74705SXin Li
2302*67e74705SXin Li if (const CXXMethodDecl *MD =
2303*67e74705SXin Li dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
2304*67e74705SXin Li if (MD->isVirtual() && Arg == CXXABIThisDecl)
2305*67e74705SXin Li V = CGM.getCXXABI().
2306*67e74705SXin Li adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
2307*67e74705SXin Li }
2308*67e74705SXin Li
2309*67e74705SXin Li // Because of merging of function types from multiple decls it is
2310*67e74705SXin Li // possible for the type of an argument to not match the corresponding
2311*67e74705SXin Li // type in the function type. Since we are codegening the callee
2312*67e74705SXin Li // in here, add a cast to the argument type.
2313*67e74705SXin Li llvm::Type *LTy = ConvertType(Arg->getType());
2314*67e74705SXin Li if (V->getType() != LTy)
2315*67e74705SXin Li V = Builder.CreateBitCast(V, LTy);
2316*67e74705SXin Li
2317*67e74705SXin Li ArgVals.push_back(ParamValue::forDirect(V));
2318*67e74705SXin Li break;
2319*67e74705SXin Li }
2320*67e74705SXin Li
2321*67e74705SXin Li Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2322*67e74705SXin Li Arg->getName());
2323*67e74705SXin Li
2324*67e74705SXin Li // Pointer to store into.
2325*67e74705SXin Li Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2326*67e74705SXin Li
2327*67e74705SXin Li // Fast-isel and the optimizer generally like scalar values better than
2328*67e74705SXin Li // FCAs, so we flatten them if this is safe to do for this argument.
2329*67e74705SXin Li llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2330*67e74705SXin Li if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2331*67e74705SXin Li STy->getNumElements() > 1) {
2332*67e74705SXin Li auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2333*67e74705SXin Li uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2334*67e74705SXin Li llvm::Type *DstTy = Ptr.getElementType();
2335*67e74705SXin Li uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2336*67e74705SXin Li
2337*67e74705SXin Li Address AddrToStoreInto = Address::invalid();
2338*67e74705SXin Li if (SrcSize <= DstSize) {
2339*67e74705SXin Li AddrToStoreInto =
2340*67e74705SXin Li Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
2341*67e74705SXin Li } else {
2342*67e74705SXin Li AddrToStoreInto =
2343*67e74705SXin Li CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2344*67e74705SXin Li }
2345*67e74705SXin Li
2346*67e74705SXin Li assert(STy->getNumElements() == NumIRArgs);
2347*67e74705SXin Li for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2348*67e74705SXin Li auto AI = FnArgs[FirstIRArg + i];
2349*67e74705SXin Li AI->setName(Arg->getName() + ".coerce" + Twine(i));
2350*67e74705SXin Li auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2351*67e74705SXin Li Address EltPtr =
2352*67e74705SXin Li Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2353*67e74705SXin Li Builder.CreateStore(AI, EltPtr);
2354*67e74705SXin Li }
2355*67e74705SXin Li
2356*67e74705SXin Li if (SrcSize > DstSize) {
2357*67e74705SXin Li Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2358*67e74705SXin Li }
2359*67e74705SXin Li
2360*67e74705SXin Li } else {
2361*67e74705SXin Li // Simple case, just do a coerced store of the argument into the alloca.
2362*67e74705SXin Li assert(NumIRArgs == 1);
2363*67e74705SXin Li auto AI = FnArgs[FirstIRArg];
2364*67e74705SXin Li AI->setName(Arg->getName() + ".coerce");
2365*67e74705SXin Li CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2366*67e74705SXin Li }
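
      // Illustration (coercion type assumed): a struct coerced to
      // { double, double } arrives as two scalar IR arguments,
      //   %x.coerce0 and %x.coerce1,
      // which are stored field-by-field into the alloca; the trailing memcpy
      // is only needed when the coerced type is wider than the declared type.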
2367*67e74705SXin Li
2368*67e74705SXin Li // Match to what EmitParmDecl is expecting for this type.
2369*67e74705SXin Li if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2370*67e74705SXin Li llvm::Value *V =
2371*67e74705SXin Li EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2372*67e74705SXin Li if (isPromoted)
2373*67e74705SXin Li V = emitArgumentDemotion(*this, Arg, V);
2374*67e74705SXin Li ArgVals.push_back(ParamValue::forDirect(V));
2375*67e74705SXin Li } else {
2376*67e74705SXin Li ArgVals.push_back(ParamValue::forIndirect(Alloca));
2377*67e74705SXin Li }
2378*67e74705SXin Li break;
2379*67e74705SXin Li }
2380*67e74705SXin Li
2381*67e74705SXin Li case ABIArgInfo::CoerceAndExpand: {
2382*67e74705SXin Li // Reconstruct into a temporary.
2383*67e74705SXin Li Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2384*67e74705SXin Li ArgVals.push_back(ParamValue::forIndirect(alloca));
2385*67e74705SXin Li
2386*67e74705SXin Li auto coercionType = ArgI.getCoerceAndExpandType();
2387*67e74705SXin Li alloca = Builder.CreateElementBitCast(alloca, coercionType);
2388*67e74705SXin Li auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2389*67e74705SXin Li
2390*67e74705SXin Li unsigned argIndex = FirstIRArg;
2391*67e74705SXin Li for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2392*67e74705SXin Li llvm::Type *eltType = coercionType->getElementType(i);
2393*67e74705SXin Li if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2394*67e74705SXin Li continue;
2395*67e74705SXin Li
2396*67e74705SXin Li auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2397*67e74705SXin Li auto elt = FnArgs[argIndex++];
2398*67e74705SXin Li Builder.CreateStore(elt, eltAddr);
2399*67e74705SXin Li }
2400*67e74705SXin Li assert(argIndex == FirstIRArg + NumIRArgs);
2401*67e74705SXin Li break;
2402*67e74705SXin Li }
2403*67e74705SXin Li
2404*67e74705SXin Li case ABIArgInfo::Expand: {
2405*67e74705SXin Li // If this structure was expanded into multiple arguments then
2406*67e74705SXin Li // we need to create a temporary and reconstruct it from the
2407*67e74705SXin Li // arguments.
2408*67e74705SXin Li Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2409*67e74705SXin Li LValue LV = MakeAddrLValue(Alloca, Ty);
2410*67e74705SXin Li ArgVals.push_back(ParamValue::forIndirect(Alloca));
2411*67e74705SXin Li
2412*67e74705SXin Li auto FnArgIter = FnArgs.begin() + FirstIRArg;
2413*67e74705SXin Li ExpandTypeFromArgs(Ty, LV, FnArgIter);
2414*67e74705SXin Li assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2415*67e74705SXin Li for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2416*67e74705SXin Li auto AI = FnArgs[FirstIRArg + i];
2417*67e74705SXin Li AI->setName(Arg->getName() + "." + Twine(i));
2418*67e74705SXin Li }
2419*67e74705SXin Li break;
2420*67e74705SXin Li }
2421*67e74705SXin Li
2422*67e74705SXin Li case ABIArgInfo::Ignore:
2423*67e74705SXin Li assert(NumIRArgs == 0);
2424*67e74705SXin Li // Initialize the local variable appropriately.
2425*67e74705SXin Li if (!hasScalarEvaluationKind(Ty)) {
2426*67e74705SXin Li ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2427*67e74705SXin Li } else {
2428*67e74705SXin Li llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2429*67e74705SXin Li ArgVals.push_back(ParamValue::forDirect(U));
2430*67e74705SXin Li }
2431*67e74705SXin Li break;
2432*67e74705SXin Li }
2433*67e74705SXin Li }
2434*67e74705SXin Li
2435*67e74705SXin Li if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2436*67e74705SXin Li for (int I = Args.size() - 1; I >= 0; --I)
2437*67e74705SXin Li EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2438*67e74705SXin Li } else {
2439*67e74705SXin Li for (unsigned I = 0, E = Args.size(); I != E; ++I)
2440*67e74705SXin Li EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2441*67e74705SXin Li }
2442*67e74705SXin Li }
2443*67e74705SXin Li
2444*67e74705SXin Li static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2445*67e74705SXin Li while (insn->use_empty()) {
2446*67e74705SXin Li llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2447*67e74705SXin Li if (!bitcast) return;
2448*67e74705SXin Li
2449*67e74705SXin Li // This is "safe" because we would have used a ConstantExpr otherwise.
2450*67e74705SXin Li insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2451*67e74705SXin Li bitcast->eraseFromParent();
2452*67e74705SXin Li }
2453*67e74705SXin Li }
2454*67e74705SXin Li
2455*67e74705SXin Li /// Try to emit a fused autorelease of a return result.
2456*67e74705SXin Li static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2457*67e74705SXin Li llvm::Value *result) {
2458*67e74705SXin Li   // We must be immediately followed by the cast.
2459*67e74705SXin Li llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2460*67e74705SXin Li if (BB->empty()) return nullptr;
2461*67e74705SXin Li if (&BB->back() != result) return nullptr;
2462*67e74705SXin Li
2463*67e74705SXin Li llvm::Type *resultType = result->getType();
2464*67e74705SXin Li
2465*67e74705SXin Li // result is in a BasicBlock and is therefore an Instruction.
2466*67e74705SXin Li llvm::Instruction *generator = cast<llvm::Instruction>(result);
2467*67e74705SXin Li
2468*67e74705SXin Li SmallVector<llvm::Instruction*,4> insnsToKill;
2469*67e74705SXin Li
2470*67e74705SXin Li // Look for:
2471*67e74705SXin Li // %generator = bitcast %type1* %generator2 to %type2*
2472*67e74705SXin Li while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2473*67e74705SXin Li // We would have emitted this as a constant if the operand weren't
2474*67e74705SXin Li // an Instruction.
2475*67e74705SXin Li generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2476*67e74705SXin Li
2477*67e74705SXin Li // Require the generator to be immediately followed by the cast.
2478*67e74705SXin Li if (generator->getNextNode() != bitcast)
2479*67e74705SXin Li return nullptr;
2480*67e74705SXin Li
2481*67e74705SXin Li insnsToKill.push_back(bitcast);
2482*67e74705SXin Li }
2483*67e74705SXin Li
2484*67e74705SXin Li // Look for:
2485*67e74705SXin Li // %generator = call i8* @objc_retain(i8* %originalResult)
2486*67e74705SXin Li // or
2487*67e74705SXin Li // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2488*67e74705SXin Li llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2489*67e74705SXin Li if (!call) return nullptr;
2490*67e74705SXin Li
2491*67e74705SXin Li bool doRetainAutorelease;
2492*67e74705SXin Li
2493*67e74705SXin Li if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2494*67e74705SXin Li doRetainAutorelease = true;
2495*67e74705SXin Li } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2496*67e74705SXin Li .objc_retainAutoreleasedReturnValue) {
2497*67e74705SXin Li doRetainAutorelease = false;
2498*67e74705SXin Li
2499*67e74705SXin Li // If we emitted an assembly marker for this call (and the
2500*67e74705SXin Li // ARCEntrypoints field should have been set if so), go looking
2501*67e74705SXin Li // for that call. If we can't find it, we can't do this
2502*67e74705SXin Li // optimization. But it should always be the immediately previous
2503*67e74705SXin Li // instruction, unless we needed bitcasts around the call.
2504*67e74705SXin Li if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2505*67e74705SXin Li llvm::Instruction *prev = call->getPrevNode();
2506*67e74705SXin Li assert(prev);
2507*67e74705SXin Li if (isa<llvm::BitCastInst>(prev)) {
2508*67e74705SXin Li prev = prev->getPrevNode();
2509*67e74705SXin Li assert(prev);
2510*67e74705SXin Li }
2511*67e74705SXin Li assert(isa<llvm::CallInst>(prev));
2512*67e74705SXin Li assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2513*67e74705SXin Li CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2514*67e74705SXin Li insnsToKill.push_back(prev);
2515*67e74705SXin Li }
2516*67e74705SXin Li } else {
2517*67e74705SXin Li return nullptr;
2518*67e74705SXin Li }
2519*67e74705SXin Li
2520*67e74705SXin Li result = call->getArgOperand(0);
2521*67e74705SXin Li insnsToKill.push_back(call);
2522*67e74705SXin Li
2523*67e74705SXin Li // Keep killing bitcasts, for sanity. Note that we no longer care
2524*67e74705SXin Li // about precise ordering as long as there's exactly one use.
2525*67e74705SXin Li while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2526*67e74705SXin Li if (!bitcast->hasOneUse()) break;
2527*67e74705SXin Li insnsToKill.push_back(bitcast);
2528*67e74705SXin Li result = bitcast->getOperand(0);
2529*67e74705SXin Li }
2530*67e74705SXin Li
2531*67e74705SXin Li // Delete all the unnecessary instructions, from latest to earliest.
2532*67e74705SXin Li for (SmallVectorImpl<llvm::Instruction*>::iterator
2533*67e74705SXin Li i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
2534*67e74705SXin Li (*i)->eraseFromParent();
2535*67e74705SXin Li
2536*67e74705SXin Li // Do the fused retain/autorelease if we were asked to.
2537*67e74705SXin Li if (doRetainAutorelease)
2538*67e74705SXin Li result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2539*67e74705SXin Li
2540*67e74705SXin Li // Cast back to the result type.
2541*67e74705SXin Li return CGF.Builder.CreateBitCast(result, resultType);
2542*67e74705SXin Li }
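
// A sketch of the fusion performed above (values illustrative): an
// autorelease of
//   %v = call i8* @objc_retain(i8* %x)
// becomes the single call
//   %v = call i8* @objc_retainAutoreleaseReturnValue(i8* %x)
// while an autorelease of @objc_retainAutoreleasedReturnValue(%x) cancels
// out entirely and %x is returned as-is.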
2543*67e74705SXin Li
2544*67e74705SXin Li /// If this is a +1 of the value of an immutable 'self', remove it.
2545*67e74705SXin Li static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2546*67e74705SXin Li llvm::Value *result) {
2547*67e74705SXin Li // This is only applicable to a method with an immutable 'self'.
2548*67e74705SXin Li const ObjCMethodDecl *method =
2549*67e74705SXin Li dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2550*67e74705SXin Li if (!method) return nullptr;
2551*67e74705SXin Li const VarDecl *self = method->getSelfDecl();
2552*67e74705SXin Li if (!self->getType().isConstQualified()) return nullptr;
2553*67e74705SXin Li
2554*67e74705SXin Li // Look for a retain call.
2555*67e74705SXin Li llvm::CallInst *retainCall =
2556*67e74705SXin Li dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2557*67e74705SXin Li if (!retainCall ||
2558*67e74705SXin Li retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2559*67e74705SXin Li return nullptr;
2560*67e74705SXin Li
2561*67e74705SXin Li // Look for an ordinary load of 'self'.
2562*67e74705SXin Li llvm::Value *retainedValue = retainCall->getArgOperand(0);
2563*67e74705SXin Li llvm::LoadInst *load =
2564*67e74705SXin Li dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2565*67e74705SXin Li if (!load || load->isAtomic() || load->isVolatile() ||
2566*67e74705SXin Li load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2567*67e74705SXin Li return nullptr;
2568*67e74705SXin Li
2569*67e74705SXin Li // Okay! Burn it all down. This relies for correctness on the
2570*67e74705SXin Li // assumption that the retain is emitted as part of the return and
2571*67e74705SXin Li // that thereafter everything is used "linearly".
2572*67e74705SXin Li llvm::Type *resultType = result->getType();
2573*67e74705SXin Li eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2574*67e74705SXin Li assert(retainCall->use_empty());
2575*67e74705SXin Li retainCall->eraseFromParent();
2576*67e74705SXin Li eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2577*67e74705SXin Li
2578*67e74705SXin Li return CGF.Builder.CreateBitCast(load, resultType);
2579*67e74705SXin Li }
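
// E.g. (illustrative): in '- (id)foo { return self; }' with an immutable
// 'self', the pattern
//   %self = load i8*, i8** %self.addr
//   %0 = call i8* @objc_retain(i8* %self)
// is deleted and the loaded 'self' is returned without the retain/autorelease
// pair.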
2580*67e74705SXin Li
2581*67e74705SXin Li /// Emit an ARC autorelease of the result of a function.
2582*67e74705SXin Li ///
2583*67e74705SXin Li /// \return the value to actually return from the function
2584*67e74705SXin Li static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2585*67e74705SXin Li llvm::Value *result) {
2586*67e74705SXin Li // If we're returning 'self', kill the initial retain. This is a
2587*67e74705SXin Li // heuristic attempt to "encourage correctness" in the really unfortunate
2588*67e74705SXin Li // case where we have a return of self during a dealloc and we desperately
2589*67e74705SXin Li // need to avoid the possible autorelease.
2590*67e74705SXin Li if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2591*67e74705SXin Li return self;
2592*67e74705SXin Li
2593*67e74705SXin Li // At -O0, try to emit a fused retain/autorelease.
2594*67e74705SXin Li if (CGF.shouldUseFusedARCCalls())
2595*67e74705SXin Li if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2596*67e74705SXin Li return fused;
2597*67e74705SXin Li
2598*67e74705SXin Li return CGF.EmitARCAutoreleaseReturnValue(result);
2599*67e74705SXin Li }
2600*67e74705SXin Li
2601*67e74705SXin Li /// Heuristically search for a dominating store to the return-value slot.
2602*67e74705SXin Li static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2603*67e74705SXin Li   // Check whether a User is a store whose pointer operand is the ReturnValue.
2604*67e74705SXin Li // We are looking for stores to the ReturnValue, not for stores of the
2605*67e74705SXin Li // ReturnValue to some other location.
2606*67e74705SXin Li auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2607*67e74705SXin Li auto *SI = dyn_cast<llvm::StoreInst>(U);
2608*67e74705SXin Li if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2609*67e74705SXin Li return nullptr;
2610*67e74705SXin Li // These aren't actually possible for non-coerced returns, and we
2611*67e74705SXin Li // only care about non-coerced returns on this code path.
2612*67e74705SXin Li assert(!SI->isAtomic() && !SI->isVolatile());
2613*67e74705SXin Li return SI;
2614*67e74705SXin Li };
2615*67e74705SXin Li // If there are multiple uses of the return-value slot, just check
2616*67e74705SXin Li // for something immediately preceding the IP. Sometimes this can
2617*67e74705SXin Li // happen with how we generate implicit-returns; it can also happen
2618*67e74705SXin Li // with noreturn cleanups.
2619*67e74705SXin Li if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2620*67e74705SXin Li llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2621*67e74705SXin Li if (IP->empty()) return nullptr;
2622*67e74705SXin Li llvm::Instruction *I = &IP->back();
2623*67e74705SXin Li
2624*67e74705SXin Li // Skip lifetime markers
2625*67e74705SXin Li for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2626*67e74705SXin Li IE = IP->rend();
2627*67e74705SXin Li II != IE; ++II) {
2628*67e74705SXin Li if (llvm::IntrinsicInst *Intrinsic =
2629*67e74705SXin Li dyn_cast<llvm::IntrinsicInst>(&*II)) {
2630*67e74705SXin Li if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2631*67e74705SXin Li const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2632*67e74705SXin Li ++II;
2633*67e74705SXin Li if (II == IE)
2634*67e74705SXin Li break;
2635*67e74705SXin Li if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2636*67e74705SXin Li continue;
2637*67e74705SXin Li }
2638*67e74705SXin Li }
2639*67e74705SXin Li I = &*II;
2640*67e74705SXin Li break;
2641*67e74705SXin Li }
2642*67e74705SXin Li
2643*67e74705SXin Li return GetStoreIfValid(I);
2644*67e74705SXin Li }
2645*67e74705SXin Li
2646*67e74705SXin Li llvm::StoreInst *store =
2647*67e74705SXin Li GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2648*67e74705SXin Li if (!store) return nullptr;
2649*67e74705SXin Li
2650*67e74705SXin Li   // Now do a quick-and-dirty dominance check: just walk up the
2651*67e74705SXin Li // single-predecessors chain from the current insertion point.
2652*67e74705SXin Li llvm::BasicBlock *StoreBB = store->getParent();
2653*67e74705SXin Li llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2654*67e74705SXin Li while (IP != StoreBB) {
2655*67e74705SXin Li if (!(IP = IP->getSinglePredecessor()))
2656*67e74705SXin Li return nullptr;
2657*67e74705SXin Li }
2658*67e74705SXin Li
2659*67e74705SXin Li // Okay, the store's basic block dominates the insertion point; we
2660*67e74705SXin Li // can do our thing.
2661*67e74705SXin Li return store;
2662*67e74705SXin Li }
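
// The common case this catches looks roughly like (illustrative):
//   %retval = alloca i32
//   store i32 %x, i32* %retval   ; sole user, dominating the epilogue
// which lets the epilogue return %x directly and delete both instructions.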
2663*67e74705SXin Li
2664*67e74705SXin Li void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2665*67e74705SXin Li bool EmitRetDbgLoc,
2666*67e74705SXin Li SourceLocation EndLoc) {
2667*67e74705SXin Li if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2668*67e74705SXin Li // Naked functions don't have epilogues.
2669*67e74705SXin Li Builder.CreateUnreachable();
2670*67e74705SXin Li return;
2671*67e74705SXin Li }
2672*67e74705SXin Li
2673*67e74705SXin Li // Functions with no result always return void.
2674*67e74705SXin Li if (!ReturnValue.isValid()) {
2675*67e74705SXin Li Builder.CreateRetVoid();
2676*67e74705SXin Li return;
2677*67e74705SXin Li }
2678*67e74705SXin Li
2679*67e74705SXin Li llvm::DebugLoc RetDbgLoc;
2680*67e74705SXin Li llvm::Value *RV = nullptr;
2681*67e74705SXin Li QualType RetTy = FI.getReturnType();
2682*67e74705SXin Li const ABIArgInfo &RetAI = FI.getReturnInfo();
2683*67e74705SXin Li
2684*67e74705SXin Li switch (RetAI.getKind()) {
2685*67e74705SXin Li case ABIArgInfo::InAlloca:
2686*67e74705SXin Li     // Aggregates get evaluated directly into the destination. Sometimes we
2687*67e74705SXin Li // need to return the sret value in a register, though.
2688*67e74705SXin Li assert(hasAggregateEvaluationKind(RetTy));
2689*67e74705SXin Li if (RetAI.getInAllocaSRet()) {
2690*67e74705SXin Li llvm::Function::arg_iterator EI = CurFn->arg_end();
2691*67e74705SXin Li --EI;
2692*67e74705SXin Li llvm::Value *ArgStruct = &*EI;
2693*67e74705SXin Li llvm::Value *SRet = Builder.CreateStructGEP(
2694*67e74705SXin Li nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2695*67e74705SXin Li RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2696*67e74705SXin Li }
2697*67e74705SXin Li break;
2698*67e74705SXin Li
2699*67e74705SXin Li case ABIArgInfo::Indirect: {
2700*67e74705SXin Li auto AI = CurFn->arg_begin();
2701*67e74705SXin Li if (RetAI.isSRetAfterThis())
2702*67e74705SXin Li ++AI;
2703*67e74705SXin Li switch (getEvaluationKind(RetTy)) {
2704*67e74705SXin Li case TEK_Complex: {
2705*67e74705SXin Li ComplexPairTy RT =
2706*67e74705SXin Li EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2707*67e74705SXin Li EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2708*67e74705SXin Li /*isInit*/ true);
2709*67e74705SXin Li break;
2710*67e74705SXin Li }
2711*67e74705SXin Li case TEK_Aggregate:
2712*67e74705SXin Li       // Do nothing; aggregates get evaluated directly into the destination.
2713*67e74705SXin Li break;
2714*67e74705SXin Li case TEK_Scalar:
2715*67e74705SXin Li EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2716*67e74705SXin Li MakeNaturalAlignAddrLValue(&*AI, RetTy),
2717*67e74705SXin Li /*isInit*/ true);
2718*67e74705SXin Li break;
2719*67e74705SXin Li }
2720*67e74705SXin Li break;
2721*67e74705SXin Li }
2722*67e74705SXin Li
2723*67e74705SXin Li case ABIArgInfo::Extend:
2724*67e74705SXin Li case ABIArgInfo::Direct:
2725*67e74705SXin Li if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2726*67e74705SXin Li RetAI.getDirectOffset() == 0) {
2727*67e74705SXin Li       // The internal return value temp will always have pointer-to-return-type
2728*67e74705SXin Li       // type; just do a load.
2729*67e74705SXin Li
2730*67e74705SXin Li // If there is a dominating store to ReturnValue, we can elide
2731*67e74705SXin Li // the load, zap the store, and usually zap the alloca.
2732*67e74705SXin Li if (llvm::StoreInst *SI =
2733*67e74705SXin Li findDominatingStoreToReturnValue(*this)) {
2734*67e74705SXin Li // Reuse the debug location from the store unless there is
2735*67e74705SXin Li // cleanup code to be emitted between the store and return
2736*67e74705SXin Li // instruction.
2737*67e74705SXin Li if (EmitRetDbgLoc && !AutoreleaseResult)
2738*67e74705SXin Li RetDbgLoc = SI->getDebugLoc();
2739*67e74705SXin Li // Get the stored value and nuke the now-dead store.
2740*67e74705SXin Li RV = SI->getValueOperand();
2741*67e74705SXin Li SI->eraseFromParent();
2742*67e74705SXin Li
2743*67e74705SXin Li // If that was the only use of the return value, nuke it as well now.
2744*67e74705SXin Li auto returnValueInst = ReturnValue.getPointer();
2745*67e74705SXin Li if (returnValueInst->use_empty()) {
2746*67e74705SXin Li if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2747*67e74705SXin Li alloca->eraseFromParent();
2748*67e74705SXin Li ReturnValue = Address::invalid();
2749*67e74705SXin Li }
2750*67e74705SXin Li }
2751*67e74705SXin Li
2752*67e74705SXin Li // Otherwise, we have to do a simple load.
2753*67e74705SXin Li } else {
2754*67e74705SXin Li RV = Builder.CreateLoad(ReturnValue);
2755*67e74705SXin Li }
2756*67e74705SXin Li } else {
2757*67e74705SXin Li // If the value is offset in memory, apply the offset now.
2758*67e74705SXin Li Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2759*67e74705SXin Li
2760*67e74705SXin Li RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2761*67e74705SXin Li }
2762*67e74705SXin Li
2763*67e74705SXin Li // In ARC, end functions that return a retainable type with a call
2764*67e74705SXin Li // to objc_autoreleaseReturnValue.
2765*67e74705SXin Li if (AutoreleaseResult) {
2766*67e74705SXin Li #ifndef NDEBUG
2767*67e74705SXin Li       // Type::isObjCRetainableType has to be called on a QualType that hasn't
2768*67e74705SXin Li       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2769*67e74705SXin Li       // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
2770*67e74705SXin Li       // from CurCodeDecl or BlockInfo.
2771*67e74705SXin Li QualType RT;
2772*67e74705SXin Li
2773*67e74705SXin Li if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2774*67e74705SXin Li RT = FD->getReturnType();
2775*67e74705SXin Li else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2776*67e74705SXin Li RT = MD->getReturnType();
2777*67e74705SXin Li else if (isa<BlockDecl>(CurCodeDecl))
2778*67e74705SXin Li RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2779*67e74705SXin Li else
2780*67e74705SXin Li llvm_unreachable("Unexpected function/method type");
2781*67e74705SXin Li
2782*67e74705SXin Li assert(getLangOpts().ObjCAutoRefCount &&
2783*67e74705SXin Li !FI.isReturnsRetained() &&
2784*67e74705SXin Li RT->isObjCRetainableType());
2785*67e74705SXin Li #endif
2786*67e74705SXin Li RV = emitAutoreleaseOfResult(*this, RV);
2787*67e74705SXin Li }
2788*67e74705SXin Li
2789*67e74705SXin Li break;
2790*67e74705SXin Li
2791*67e74705SXin Li case ABIArgInfo::Ignore:
2792*67e74705SXin Li break;
2793*67e74705SXin Li
2794*67e74705SXin Li case ABIArgInfo::CoerceAndExpand: {
2795*67e74705SXin Li auto coercionType = RetAI.getCoerceAndExpandType();
2796*67e74705SXin Li auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2797*67e74705SXin Li
2798*67e74705SXin Li // Load all of the coerced elements out into results.
2799*67e74705SXin Li llvm::SmallVector<llvm::Value*, 4> results;
2800*67e74705SXin Li Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2801*67e74705SXin Li for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2802*67e74705SXin Li auto coercedEltType = coercionType->getElementType(i);
2803*67e74705SXin Li if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2804*67e74705SXin Li continue;
2805*67e74705SXin Li
2806*67e74705SXin Li auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2807*67e74705SXin Li auto elt = Builder.CreateLoad(eltAddr);
2808*67e74705SXin Li results.push_back(elt);
2809*67e74705SXin Li }
2810*67e74705SXin Li
2811*67e74705SXin Li // If we have one result, it's the single direct result type.
2812*67e74705SXin Li if (results.size() == 1) {
2813*67e74705SXin Li RV = results[0];
2814*67e74705SXin Li
2815*67e74705SXin Li // Otherwise, we need to make a first-class aggregate.
2816*67e74705SXin Li } else {
2817*67e74705SXin Li // Construct a return type that lacks padding elements.
2818*67e74705SXin Li llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2819*67e74705SXin Li
2820*67e74705SXin Li RV = llvm::UndefValue::get(returnType);
2821*67e74705SXin Li for (unsigned i = 0, e = results.size(); i != e; ++i) {
2822*67e74705SXin Li RV = Builder.CreateInsertValue(RV, results[i], i);
2823*67e74705SXin Li }
2824*67e74705SXin Li }
2825*67e74705SXin Li break;
2826*67e74705SXin Li }
2827*67e74705SXin Li
2828*67e74705SXin Li case ABIArgInfo::Expand:
2829*67e74705SXin Li llvm_unreachable("Invalid ABI kind for return argument");
2830*67e74705SXin Li }
2831*67e74705SXin Li
2832*67e74705SXin Li llvm::Instruction *Ret;
2833*67e74705SXin Li if (RV) {
2834*67e74705SXin Li if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
2835*67e74705SXin Li if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
2836*67e74705SXin Li SanitizerScope SanScope(this);
2837*67e74705SXin Li llvm::Value *Cond = Builder.CreateICmpNE(
2838*67e74705SXin Li RV, llvm::Constant::getNullValue(RV->getType()));
2839*67e74705SXin Li llvm::Constant *StaticData[] = {
2840*67e74705SXin Li EmitCheckSourceLocation(EndLoc),
2841*67e74705SXin Li EmitCheckSourceLocation(RetNNAttr->getLocation()),
2842*67e74705SXin Li };
2843*67e74705SXin Li EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
2844*67e74705SXin Li "nonnull_return", StaticData, None);
2845*67e74705SXin Li }
2846*67e74705SXin Li }
2847*67e74705SXin Li Ret = Builder.CreateRet(RV);
2848*67e74705SXin Li } else {
2849*67e74705SXin Li Ret = Builder.CreateRetVoid();
2850*67e74705SXin Li }
2851*67e74705SXin Li
2852*67e74705SXin Li if (RetDbgLoc)
2853*67e74705SXin Li Ret->setDebugLoc(std::move(RetDbgLoc));
2854*67e74705SXin Li }
2855*67e74705SXin Li
2856*67e74705SXin Li static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2857*67e74705SXin Li const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2858*67e74705SXin Li return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2859*67e74705SXin Li }
2860*67e74705SXin Li
2861*67e74705SXin Li static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2862*67e74705SXin Li QualType Ty) {
2863*67e74705SXin Li // FIXME: Generate IR in one pass, rather than going back and fixing up these
2864*67e74705SXin Li // placeholders.
2865*67e74705SXin Li llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2866*67e74705SXin Li llvm::Value *Placeholder =
2867*67e74705SXin Li llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
2868*67e74705SXin Li Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
2869*67e74705SXin Li
2870*67e74705SXin Li // FIXME: When we generate this IR in one pass, we shouldn't need
2871*67e74705SXin Li // this win32-specific alignment hack.
2872*67e74705SXin Li CharUnits Align = CharUnits::fromQuantity(4);
2873*67e74705SXin Li
2874*67e74705SXin Li return AggValueSlot::forAddr(Address(Placeholder, Align),
2875*67e74705SXin Li Ty.getQualifiers(),
2876*67e74705SXin Li AggValueSlot::IsNotDestructed,
2877*67e74705SXin Li AggValueSlot::DoesNotNeedGCBarriers,
2878*67e74705SXin Li AggValueSlot::IsNotAliased);
2879*67e74705SXin Li }
2880*67e74705SXin Li
2881*67e74705SXin Li void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
2882*67e74705SXin Li const VarDecl *param,
2883*67e74705SXin Li SourceLocation loc) {
2884*67e74705SXin Li // StartFunction converted the ABI-lowered parameter(s) into a
2885*67e74705SXin Li // local alloca. We need to turn that into an r-value suitable
2886*67e74705SXin Li // for EmitCall.
2887*67e74705SXin Li Address local = GetAddrOfLocalVar(param);
2888*67e74705SXin Li
2889*67e74705SXin Li QualType type = param->getType();
2890*67e74705SXin Li
2891*67e74705SXin Li assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
2892*67e74705SXin Li "cannot emit delegate call arguments for inalloca arguments!");
2893*67e74705SXin Li
2894*67e74705SXin Li // For the most part, we just need to load the alloca, except that
2895*67e74705SXin Li // aggregate r-values are actually pointers to temporaries.
2896*67e74705SXin Li if (type->isReferenceType())
2897*67e74705SXin Li args.add(RValue::get(Builder.CreateLoad(local)), type);
2898*67e74705SXin Li else
2899*67e74705SXin Li args.add(convertTempToRValue(local, type, loc), type);
2900*67e74705SXin Li }
2901*67e74705SXin Li
2902*67e74705SXin Li static bool isProvablyNull(llvm::Value *addr) {
2903*67e74705SXin Li return isa<llvm::ConstantPointerNull>(addr);
2904*67e74705SXin Li }
2905*67e74705SXin Li
2906*67e74705SXin Li static bool isProvablyNonNull(llvm::Value *addr) {
2907*67e74705SXin Li return isa<llvm::AllocaInst>(addr);
2908*67e74705SXin Li }
2909*67e74705SXin Li
2910*67e74705SXin Li /// Emit the actual writing-back of a writeback.
2911*67e74705SXin Li static void emitWriteback(CodeGenFunction &CGF,
2912*67e74705SXin Li const CallArgList::Writeback &writeback) {
2913*67e74705SXin Li const LValue &srcLV = writeback.Source;
2914*67e74705SXin Li Address srcAddr = srcLV.getAddress();
2915*67e74705SXin Li assert(!isProvablyNull(srcAddr.getPointer()) &&
2916*67e74705SXin Li "shouldn't have writeback for provably null argument");
2917*67e74705SXin Li
2918*67e74705SXin Li llvm::BasicBlock *contBB = nullptr;
2919*67e74705SXin Li
2920*67e74705SXin Li // If the argument wasn't provably non-null, we need to null check
2921*67e74705SXin Li // before doing the store.
2922*67e74705SXin Li bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2923*67e74705SXin Li if (!provablyNonNull) {
2924*67e74705SXin Li llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2925*67e74705SXin Li contBB = CGF.createBasicBlock("icr.done");
2926*67e74705SXin Li
2927*67e74705SXin Li llvm::Value *isNull =
2928*67e74705SXin Li CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2929*67e74705SXin Li CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2930*67e74705SXin Li CGF.EmitBlock(writebackBB);
2931*67e74705SXin Li }
2932*67e74705SXin Li
2933*67e74705SXin Li // Load the value to writeback.
2934*67e74705SXin Li llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2935*67e74705SXin Li
2936*67e74705SXin Li // Cast it back, in case we're writing an id to a Foo* or something.
2937*67e74705SXin Li value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
2938*67e74705SXin Li "icr.writeback-cast");
2939*67e74705SXin Li
2940*67e74705SXin Li // Perform the writeback.
2941*67e74705SXin Li
2942*67e74705SXin Li // If we have a "to use" value, it's something we need to emit a use
2943*67e74705SXin Li // of. This has to be carefully threaded in: if it's done after the
2944*67e74705SXin Li // release it's potentially undefined behavior (and the optimizer
2945*67e74705SXin Li // will ignore it), and if it happens before the retain then the
2946*67e74705SXin Li // optimizer could move the release there.
2947*67e74705SXin Li if (writeback.ToUse) {
2948*67e74705SXin Li assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2949*67e74705SXin Li
2950*67e74705SXin Li // Retain the new value. No need to block-copy here: the block's
2951*67e74705SXin Li // being passed up the stack.
2952*67e74705SXin Li value = CGF.EmitARCRetainNonBlock(value);
2953*67e74705SXin Li
2954*67e74705SXin Li // Emit the intrinsic use here.
2955*67e74705SXin Li CGF.EmitARCIntrinsicUse(writeback.ToUse);
2956*67e74705SXin Li
2957*67e74705SXin Li // Load the old value (primitively).
2958*67e74705SXin Li llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2959*67e74705SXin Li
2960*67e74705SXin Li // Put the new value in place (primitively).
2961*67e74705SXin Li CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2962*67e74705SXin Li
2963*67e74705SXin Li // Release the old value.
2964*67e74705SXin Li CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2965*67e74705SXin Li
2966*67e74705SXin Li // Otherwise, we can just do a normal lvalue store.
2967*67e74705SXin Li } else {
2968*67e74705SXin Li CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2969*67e74705SXin Li }
2970*67e74705SXin Li
2971*67e74705SXin Li // Jump to the continuation block.
2972*67e74705SXin Li if (!provablyNonNull)
2973*67e74705SXin Li CGF.EmitBlock(contBB);
2974*67e74705SXin Li }
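
// Control flow for the not-provably-non-null case, roughly (using the block
// names created above):
//   entry:          br i1 %icr.isnull, label %icr.done, label %icr.writeback
//   icr.writeback:  load the temporary, store it back, br label %icr.done
//   icr.done:       ...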
2975*67e74705SXin Li
2976*67e74705SXin Li static void emitWritebacks(CodeGenFunction &CGF,
2977*67e74705SXin Li const CallArgList &args) {
2978*67e74705SXin Li for (const auto &I : args.writebacks())
2979*67e74705SXin Li emitWriteback(CGF, I);
2980*67e74705SXin Li }
2981*67e74705SXin Li
2982*67e74705SXin Li static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2983*67e74705SXin Li const CallArgList &CallArgs) {
2984*67e74705SXin Li assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
2985*67e74705SXin Li ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2986*67e74705SXin Li CallArgs.getCleanupsToDeactivate();
2987*67e74705SXin Li // Iterate in reverse to increase the likelihood of popping the cleanup.
2988*67e74705SXin Li for (const auto &I : llvm::reverse(Cleanups)) {
2989*67e74705SXin Li CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
2990*67e74705SXin Li I.IsActiveIP->eraseFromParent();
2991*67e74705SXin Li }
2992*67e74705SXin Li }
2993*67e74705SXin Li
2994*67e74705SXin Li static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2995*67e74705SXin Li if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2996*67e74705SXin Li if (uop->getOpcode() == UO_AddrOf)
2997*67e74705SXin Li return uop->getSubExpr();
2998*67e74705SXin Li return nullptr;
2999*67e74705SXin Li }
3000*67e74705SXin Li
3001*67e74705SXin Li /// Emit an argument that's being passed call-by-writeback. That is,
3002*67e74705SXin Li /// we are passing the address of an __autoreleased temporary; it
3003*67e74705SXin Li /// might be copy-initialized with the current value of the given
3004*67e74705SXin Li /// address, but it will definitely be copied out of after the call.
3005*67e74705SXin Li static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3006*67e74705SXin Li const ObjCIndirectCopyRestoreExpr *CRE) {
3007*67e74705SXin Li LValue srcLV;
3008*67e74705SXin Li
3009*67e74705SXin Li // Make an optimistic effort to emit the address as an l-value.
3010*67e74705SXin Li // This can fail if the argument expression is more complicated.
3011*67e74705SXin Li if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3012*67e74705SXin Li srcLV = CGF.EmitLValue(lvExpr);
3013*67e74705SXin Li
3014*67e74705SXin Li // Otherwise, just emit it as a scalar.
3015*67e74705SXin Li } else {
3016*67e74705SXin Li Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3017*67e74705SXin Li
3018*67e74705SXin Li QualType srcAddrType =
3019*67e74705SXin Li CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3020*67e74705SXin Li srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3021*67e74705SXin Li }
3022*67e74705SXin Li Address srcAddr = srcLV.getAddress();
3023*67e74705SXin Li
3024*67e74705SXin Li // The dest and src types don't necessarily match in LLVM terms
3025*67e74705SXin Li // because of the crazy ObjC compatibility rules.
3026*67e74705SXin Li
3027*67e74705SXin Li llvm::PointerType *destType =
3028*67e74705SXin Li cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3029*67e74705SXin Li
3030*67e74705SXin Li // If the address is a constant null, just pass the appropriate null.
3031*67e74705SXin Li if (isProvablyNull(srcAddr.getPointer())) {
3032*67e74705SXin Li args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3033*67e74705SXin Li CRE->getType());
3034*67e74705SXin Li return;
3035*67e74705SXin Li }
3036*67e74705SXin Li
3037*67e74705SXin Li // Create the temporary.
3038*67e74705SXin Li Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3039*67e74705SXin Li CGF.getPointerAlign(),
3040*67e74705SXin Li "icr.temp");
3041*67e74705SXin Li // Loading an l-value can introduce a cleanup if the l-value is __weak,
3042*67e74705SXin Li // and that cleanup will be conditional if we can't prove that the l-value
3043*67e74705SXin Li // isn't null, so we need to register a dominating point so that the cleanups
3044*67e74705SXin Li // system will make valid IR.
3045*67e74705SXin Li CodeGenFunction::ConditionalEvaluation condEval(CGF);
3046*67e74705SXin Li
3047*67e74705SXin Li // Zero-initialize it if we're not doing a copy-initialization.
3048*67e74705SXin Li bool shouldCopy = CRE->shouldCopy();
3049*67e74705SXin Li if (!shouldCopy) {
3050*67e74705SXin Li llvm::Value *null =
3051*67e74705SXin Li llvm::ConstantPointerNull::get(
3052*67e74705SXin Li cast<llvm::PointerType>(destType->getElementType()));
3053*67e74705SXin Li CGF.Builder.CreateStore(null, temp);
3054*67e74705SXin Li }
3055*67e74705SXin Li
3056*67e74705SXin Li llvm::BasicBlock *contBB = nullptr;
3057*67e74705SXin Li llvm::BasicBlock *originBB = nullptr;
3058*67e74705SXin Li
3059*67e74705SXin Li // If the address is *not* known to be non-null, we need to switch.
3060*67e74705SXin Li llvm::Value *finalArgument;
3061*67e74705SXin Li
3062*67e74705SXin Li bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
3063*67e74705SXin Li if (provablyNonNull) {
3064*67e74705SXin Li finalArgument = temp.getPointer();
3065*67e74705SXin Li } else {
3066*67e74705SXin Li llvm::Value *isNull =
3067*67e74705SXin Li CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3068*67e74705SXin Li
3069*67e74705SXin Li finalArgument = CGF.Builder.CreateSelect(isNull,
3070*67e74705SXin Li llvm::ConstantPointerNull::get(destType),
3071*67e74705SXin Li temp.getPointer(), "icr.argument");
3072*67e74705SXin Li
3073*67e74705SXin Li // If we need to copy, then the load has to be conditional, which
3074*67e74705SXin Li // means we need control flow.
3075*67e74705SXin Li if (shouldCopy) {
3076*67e74705SXin Li originBB = CGF.Builder.GetInsertBlock();
3077*67e74705SXin Li contBB = CGF.createBasicBlock("icr.cont");
3078*67e74705SXin Li llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3079*67e74705SXin Li CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3080*67e74705SXin Li CGF.EmitBlock(copyBB);
3081*67e74705SXin Li condEval.begin(CGF);
3082*67e74705SXin Li }
3083*67e74705SXin Li }
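// When the source address is not provably non-null, the IR emitted so far
// looks roughly like this (illustrative; names follow the labels created
// above):
//   %icr.isnull   = icmp eq i8** %src, null
//   %icr.argument = select i1 %icr.isnull, i8** null, i8** %icr.temp
//   br i1 %icr.isnull, label %icr.cont, label %icr.copy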
3084*67e74705SXin Li
3085*67e74705SXin Li llvm::Value *valueToUse = nullptr;
3086*67e74705SXin Li
3087*67e74705SXin Li // Perform a copy if necessary.
3088*67e74705SXin Li if (shouldCopy) {
3089*67e74705SXin Li RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3090*67e74705SXin Li assert(srcRV.isScalar());
3091*67e74705SXin Li
3092*67e74705SXin Li llvm::Value *src = srcRV.getScalarVal();
3093*67e74705SXin Li src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3094*67e74705SXin Li "icr.cast");
3095*67e74705SXin Li
3096*67e74705SXin Li // Use an ordinary store, not a store-to-lvalue.
3097*67e74705SXin Li CGF.Builder.CreateStore(src, temp);
3098*67e74705SXin Li
3099*67e74705SXin Li // If optimization is enabled, and the value was held in a
3100*67e74705SXin Li // __strong variable, we need to tell the optimizer that this
3101*67e74705SXin Li // value has to stay alive until we're doing the store back.
3102*67e74705SXin Li // This is because the temporary is effectively unretained,
3103*67e74705SXin Li // and so otherwise we can violate the high-level semantics.
3104*67e74705SXin Li if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3105*67e74705SXin Li srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3106*67e74705SXin Li valueToUse = src;
3107*67e74705SXin Li }
3108*67e74705SXin Li }
3109*67e74705SXin Li
3110*67e74705SXin Li // Finish the control flow if we needed it.
3111*67e74705SXin Li if (shouldCopy && !provablyNonNull) {
3112*67e74705SXin Li llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3113*67e74705SXin Li CGF.EmitBlock(contBB);
3114*67e74705SXin Li
3115*67e74705SXin Li // Make a phi for the value to intrinsically use.
3116*67e74705SXin Li if (valueToUse) {
3117*67e74705SXin Li llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3118*67e74705SXin Li "icr.to-use");
3119*67e74705SXin Li phiToUse->addIncoming(valueToUse, copyBB);
3120*67e74705SXin Li phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3121*67e74705SXin Li originBB);
3122*67e74705SXin Li valueToUse = phiToUse;
3123*67e74705SXin Li }
3124*67e74705SXin Li
3125*67e74705SXin Li condEval.end(CGF);
3126*67e74705SXin Li }
3127*67e74705SXin Li
3128*67e74705SXin Li args.addWriteback(srcLV, temp, valueToUse);
3129*67e74705SXin Li args.add(RValue::get(finalArgument), CRE->getType());
3130*67e74705SXin Li }
3131*67e74705SXin Li
3132*67e74705SXin Li void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3133*67e74705SXin Li assert(!StackBase && !StackCleanup.isValid());
3134*67e74705SXin Li
3135*67e74705SXin Li // Save the stack.
3136*67e74705SXin Li llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3137*67e74705SXin Li StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3138*67e74705SXin Li }
3139*67e74705SXin Li
3140*67e74705SXin Li void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3141*67e74705SXin Li if (StackBase) {
3142*67e74705SXin Li // Restore the stack after the call.
3143*67e74705SXin Li llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3144*67e74705SXin Li CGF.Builder.CreateCall(F, StackBase);
3145*67e74705SXin Li }
3146*67e74705SXin Li }
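// Together, allocateArgumentMemory and freeArgumentMemory bracket the
// packaged arguments with a stack save/restore pair; the surrounding IR is
// roughly (illustrative sketch):
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>, align 4
//   call void @f(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)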
3147*67e74705SXin Li
3148*67e74705SXin Li void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3149*67e74705SXin Li SourceLocation ArgLoc,
3150*67e74705SXin Li const FunctionDecl *FD,
3151*67e74705SXin Li unsigned ParmNum) {
3152*67e74705SXin Li if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
3153*67e74705SXin Li return;
3154*67e74705SXin Li auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
3155*67e74705SXin Li unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3156*67e74705SXin Li auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
3157*67e74705SXin Li if (!NNAttr)
3158*67e74705SXin Li return;
3159*67e74705SXin Li SanitizerScope SanScope(this);
3160*67e74705SXin Li assert(RV.isScalar());
3161*67e74705SXin Li llvm::Value *V = RV.getScalarVal();
3162*67e74705SXin Li llvm::Value *Cond =
3163*67e74705SXin Li Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3164*67e74705SXin Li llvm::Constant *StaticData[] = {
3165*67e74705SXin Li EmitCheckSourceLocation(ArgLoc),
3166*67e74705SXin Li EmitCheckSourceLocation(NNAttr->getLocation()),
3167*67e74705SXin Li llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3168*67e74705SXin Li };
3169*67e74705SXin Li EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
3170*67e74705SXin Li "nonnull_arg", StaticData, None);
3171*67e74705SXin Li }
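// For a parameter annotated with __attribute__((nonnull)), compiling with
// -fsanitize=nonnull-attribute makes the code above emit, roughly
// (illustrative):
//   %cond = icmp ne i32* %arg, null
//   br i1 %cond, label %cont, label %handler ; %handler reports nonnull_arg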
3172*67e74705SXin Li
3173*67e74705SXin Li void CodeGenFunction::EmitCallArgs(
3174*67e74705SXin Li CallArgList &Args, ArrayRef<QualType> ArgTypes,
3175*67e74705SXin Li llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3176*67e74705SXin Li const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
3177*67e74705SXin Li assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3178*67e74705SXin Li
3179*67e74705SXin Li auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
3180*67e74705SXin Li if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
3181*67e74705SXin Li return;
3182*67e74705SXin Li auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3183*67e74705SXin Li if (PS == nullptr)
3184*67e74705SXin Li return;
3185*67e74705SXin Li
3186*67e74705SXin Li const auto &Context = getContext();
3187*67e74705SXin Li auto SizeTy = Context.getSizeType();
3188*67e74705SXin Li auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3189*67e74705SXin Li llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
3190*67e74705SXin Li Args.add(RValue::get(V), SizeTy);
3191*67e74705SXin Li };
3192*67e74705SXin Li
3193*67e74705SXin Li // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3194*67e74705SXin Li // because arguments are destroyed left to right in the callee.
3195*67e74705SXin Li if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3196*67e74705SXin Li // Insert a stack save if we're going to need any inalloca args.
3197*67e74705SXin Li bool HasInAllocaArgs = false;
3198*67e74705SXin Li for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3199*67e74705SXin Li I != E && !HasInAllocaArgs; ++I)
3200*67e74705SXin Li HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3201*67e74705SXin Li if (HasInAllocaArgs) {
3202*67e74705SXin Li assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3203*67e74705SXin Li Args.allocateArgumentMemory(*this);
3204*67e74705SXin Li }
3205*67e74705SXin Li
3206*67e74705SXin Li // Evaluate each argument.
3207*67e74705SXin Li size_t CallArgsStart = Args.size();
3208*67e74705SXin Li for (int I = ArgTypes.size() - 1; I >= 0; --I) {
3209*67e74705SXin Li CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
3210*67e74705SXin Li MaybeEmitImplicitObjectSize(I, *Arg);
3211*67e74705SXin Li EmitCallArg(Args, *Arg, ArgTypes[I]);
3212*67e74705SXin Li EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
3213*67e74705SXin Li CalleeDecl, ParamsToSkip + I);
3214*67e74705SXin Li }
3215*67e74705SXin Li
3216*67e74705SXin Li // Un-reverse the arguments we just evaluated so they match up with the LLVM
3217*67e74705SXin Li // IR function.
3218*67e74705SXin Li std::reverse(Args.begin() + CallArgsStart, Args.end());
3219*67e74705SXin Li return;
3220*67e74705SXin Li }
3221*67e74705SXin Li
3222*67e74705SXin Li for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3223*67e74705SXin Li CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
3224*67e74705SXin Li assert(Arg != ArgRange.end());
3225*67e74705SXin Li EmitCallArg(Args, *Arg, ArgTypes[I]);
3226*67e74705SXin Li EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
3227*67e74705SXin Li CalleeDecl, ParamsToSkip + I);
3228*67e74705SXin Li MaybeEmitImplicitObjectSize(I, *Arg);
3229*67e74705SXin Li }
3230*67e74705SXin Li }
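// Example: for a call 'f(a(), b(), c())' under the Microsoft C++ ABI, the
// loop above emits c(), then b(), then a(), and the std::reverse puts the
// CallArgList back into left-to-right order to match the IR function type.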
3231*67e74705SXin Li
3232*67e74705SXin Li namespace {
3233*67e74705SXin Li
3234*67e74705SXin Li struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3235*67e74705SXin Li DestroyUnpassedArg(Address Addr, QualType Ty)
3236*67e74705SXin Li : Addr(Addr), Ty(Ty) {}
3237*67e74705SXin Li
3238*67e74705SXin Li Address Addr;
3239*67e74705SXin Li QualType Ty;
3240*67e74705SXin Li
3241*67e74705SXin Li void Emit(CodeGenFunction &CGF, Flags flags) override {
3242*67e74705SXin Li const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3243*67e74705SXin Li assert(!Dtor->isTrivial());
3244*67e74705SXin Li CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3245*67e74705SXin Li /*Delegating=*/false, Addr);
3246*67e74705SXin Li }
3247*67e74705SXin Li };
3248*67e74705SXin Li
3249*67e74705SXin Li struct DisableDebugLocationUpdates {
3250*67e74705SXin Li CodeGenFunction &CGF;
3251*67e74705SXin Li bool disabledDebugInfo;
3252*67e74705SXin Li DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3253*67e74705SXin Li if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3254*67e74705SXin Li CGF.disableDebugInfo();
3255*67e74705SXin Li }
3256*67e74705SXin Li ~DisableDebugLocationUpdates() {
3257*67e74705SXin Li if (disabledDebugInfo)
3258*67e74705SXin Li CGF.enableDebugInfo();
3259*67e74705SXin Li }
3260*67e74705SXin Li };
3261*67e74705SXin Li
3262*67e74705SXin Li } // end anonymous namespace
3263*67e74705SXin Li
3264*67e74705SXin Li void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3265*67e74705SXin Li QualType type) {
3266*67e74705SXin Li DisableDebugLocationUpdates Dis(*this, E);
3267*67e74705SXin Li if (const ObjCIndirectCopyRestoreExpr *CRE
3268*67e74705SXin Li = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3269*67e74705SXin Li assert(getLangOpts().ObjCAutoRefCount);
3270*67e74705SXin Li assert(getContext().hasSameType(E->getType(), type));
3271*67e74705SXin Li return emitWritebackArg(*this, args, CRE);
3272*67e74705SXin Li }
3273*67e74705SXin Li
3274*67e74705SXin Li assert(type->isReferenceType() == E->isGLValue() &&
3275*67e74705SXin Li "reference binding to unmaterialized r-value!");
3276*67e74705SXin Li
3277*67e74705SXin Li if (E->isGLValue()) {
3278*67e74705SXin Li assert(E->getObjectKind() == OK_Ordinary);
3279*67e74705SXin Li return args.add(EmitReferenceBindingToExpr(E), type);
3280*67e74705SXin Li }
3281*67e74705SXin Li
3282*67e74705SXin Li bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3283*67e74705SXin Li
3284*67e74705SXin Li // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3285*67e74705SXin Li // However, we still have to push an EH-only cleanup in case we unwind before
3286*67e74705SXin Li // we make it to the call.
3287*67e74705SXin Li if (HasAggregateEvalKind &&
3288*67e74705SXin Li CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3289*67e74705SXin Li // If we're using inalloca, use the argument memory. Otherwise, use a
3290*67e74705SXin Li // temporary.
3291*67e74705SXin Li AggValueSlot Slot;
3292*67e74705SXin Li if (args.isUsingInAlloca())
3293*67e74705SXin Li Slot = createPlaceholderSlot(*this, type);
3294*67e74705SXin Li else
3295*67e74705SXin Li Slot = CreateAggTemp(type, "agg.tmp");
3296*67e74705SXin Li
3297*67e74705SXin Li const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3298*67e74705SXin Li bool DestroyedInCallee =
3299*67e74705SXin Li RD && RD->hasNonTrivialDestructor() &&
3300*67e74705SXin Li CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
3301*67e74705SXin Li if (DestroyedInCallee)
3302*67e74705SXin Li Slot.setExternallyDestructed();
3303*67e74705SXin Li
3304*67e74705SXin Li EmitAggExpr(E, Slot);
3305*67e74705SXin Li RValue RV = Slot.asRValue();
3306*67e74705SXin Li args.add(RV, type);
3307*67e74705SXin Li
3308*67e74705SXin Li if (DestroyedInCallee) {
3309*67e74705SXin Li // Create a no-op GEP between the placeholder and the cleanup so we can
3310*67e74705SXin Li // RAUW it successfully. It also serves as a marker of the first
3311*67e74705SXin Li // instruction where the cleanup is active.
3312*67e74705SXin Li pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3313*67e74705SXin Li type);
3314*67e74705SXin Li // This unreachable is a temporary marker which will be removed later.
3315*67e74705SXin Li llvm::Instruction *IsActive = Builder.CreateUnreachable();
3316*67e74705SXin Li args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3317*67e74705SXin Li }
3318*67e74705SXin Li return;
3319*67e74705SXin Li }
3320*67e74705SXin Li
3321*67e74705SXin Li if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3322*67e74705SXin Li cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3323*67e74705SXin Li LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3324*67e74705SXin Li assert(L.isSimple());
3325*67e74705SXin Li if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
3326*67e74705SXin Li args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
3327*67e74705SXin Li } else {
3328*67e74705SXin Li // We can't represent a misaligned lvalue in the CallArgList, so copy
3329*67e74705SXin Li // to an aligned temporary now.
3330*67e74705SXin Li Address tmp = CreateMemTemp(type);
3331*67e74705SXin Li EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
3332*67e74705SXin Li args.add(RValue::getAggregate(tmp), type);
3333*67e74705SXin Li }
3334*67e74705SXin Li return;
3335*67e74705SXin Li }
3336*67e74705SXin Li
3337*67e74705SXin Li args.add(EmitAnyExprToTemp(E), type);
3338*67e74705SXin Li }
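// Illustrative example of the callee-cleanup path above: given a class 'S'
// with a non-trivial destructor, a by-value call 'f(S())' under the
// Microsoft C++ ABI constructs the temporary directly into the argument
// slot, pushes the EH-only DestroyUnpassedArg cleanup, and deactivates it
// just before the call is emitted, since the callee itself runs ~S().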
3339*67e74705SXin Li
3340*67e74705SXin Li QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3341*67e74705SXin Li // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3342*67e74705SXin Li // implicitly widens null pointer constants that are arguments to varargs
3343*67e74705SXin Li // functions to pointer-sized ints.
3344*67e74705SXin Li if (!getTarget().getTriple().isOSWindows())
3345*67e74705SXin Li return Arg->getType();
3346*67e74705SXin Li
3347*67e74705SXin Li if (Arg->getType()->isIntegerType() &&
3348*67e74705SXin Li getContext().getTypeSize(Arg->getType()) <
3349*67e74705SXin Li getContext().getTargetInfo().getPointerWidth(0) &&
3350*67e74705SXin Li Arg->isNullPointerConstant(getContext(),
3351*67e74705SXin Li Expr::NPC_ValueDependentIsNotNull)) {
3352*67e74705SXin Li return getContext().getIntPtrType();
3353*67e74705SXin Li }
3354*67e74705SXin Li
3355*67e74705SXin Li return Arg->getType();
3356*67e74705SXin Li }
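// Example: on Win64, 'printf("%p", NULL)' passes the int constant 0, which
// the code above widens to the pointer-sized intptr_t so that the full
// varargs slot is initialized.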
3357*67e74705SXin Li
3358*67e74705SXin Li // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3359*67e74705SXin Li // optimizer it can aggressively ignore unwind edges.
3360*67e74705SXin Li void
3361*67e74705SXin Li CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3362*67e74705SXin Li if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3363*67e74705SXin Li !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3364*67e74705SXin Li Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3365*67e74705SXin Li CGM.getNoObjCARCExceptionsMetadata());
3366*67e74705SXin Li }
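// The resulting instruction carries metadata along these lines
// (illustrative):
//   %call = call i8* @foo(), !clang.arc.no_objc_arc_exceptions !0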
3367*67e74705SXin Li
3368*67e74705SXin Li /// Emits a call to the given no-arguments nounwind runtime function.
3369*67e74705SXin Li llvm::CallInst *
3370*67e74705SXin Li CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3371*67e74705SXin Li const llvm::Twine &name) {
3372*67e74705SXin Li return EmitNounwindRuntimeCall(callee, None, name);
3373*67e74705SXin Li }
3374*67e74705SXin Li
3375*67e74705SXin Li /// Emits a call to the given nounwind runtime function.
3376*67e74705SXin Li llvm::CallInst *
3377*67e74705SXin Li CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3378*67e74705SXin Li ArrayRef<llvm::Value*> args,
3379*67e74705SXin Li const llvm::Twine &name) {
3380*67e74705SXin Li llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3381*67e74705SXin Li call->setDoesNotThrow();
3382*67e74705SXin Li return call;
3383*67e74705SXin Li }
3384*67e74705SXin Li
3385*67e74705SXin Li /// Emits a simple call (never an invoke) to the given no-arguments
3386*67e74705SXin Li /// runtime function.
3387*67e74705SXin Li llvm::CallInst *
3388*67e74705SXin Li CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3389*67e74705SXin Li const llvm::Twine &name) {
3390*67e74705SXin Li return EmitRuntimeCall(callee, None, name);
3391*67e74705SXin Li }
3392*67e74705SXin Li
3393*67e74705SXin Li // Calls which may throw must have operand bundles indicating which funclet
3394*67e74705SXin Li // they are nested within.
3395*67e74705SXin Li static void
3396*67e74705SXin Li getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
3397*67e74705SXin Li SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
3398*67e74705SXin Li // There is no need for a funclet operand bundle if we aren't inside a
3399*67e74705SXin Li // funclet.
3400*67e74705SXin Li if (!CurrentFuncletPad)
3401*67e74705SXin Li return;
3402*67e74705SXin Li
3403*67e74705SXin Li // Skip intrinsics which cannot throw.
3404*67e74705SXin Li auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3405*67e74705SXin Li if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3406*67e74705SXin Li return;
3407*67e74705SXin Li
3408*67e74705SXin Li BundleList.emplace_back("funclet", CurrentFuncletPad);
3409*67e74705SXin Li }
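// A call emitted inside a cleanup funclet then looks like (illustrative):
//   %pad = cleanuppad within none []
//   call void @g() [ "funclet"(token %pad) ]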
3410*67e74705SXin Li
3411*67e74705SXin Li /// Emits a simple call (never an invoke) to the given runtime function.
3412*67e74705SXin Li llvm::CallInst *
3413*67e74705SXin Li CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3414*67e74705SXin Li ArrayRef<llvm::Value*> args,
3415*67e74705SXin Li const llvm::Twine &name) {
3416*67e74705SXin Li SmallVector<llvm::OperandBundleDef, 1> BundleList;
3417*67e74705SXin Li getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3418*67e74705SXin Li
3419*67e74705SXin Li llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
3420*67e74705SXin Li call->setCallingConv(getRuntimeCC());
3421*67e74705SXin Li return call;
3422*67e74705SXin Li }
3423*67e74705SXin Li
3424*67e74705SXin Li /// Emits a call or invoke to the given noreturn runtime function.
3425*67e74705SXin Li void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3426*67e74705SXin Li ArrayRef<llvm::Value*> args) {
3427*67e74705SXin Li SmallVector<llvm::OperandBundleDef, 1> BundleList;
3428*67e74705SXin Li getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3429*67e74705SXin Li
3430*67e74705SXin Li if (getInvokeDest()) {
3431*67e74705SXin Li llvm::InvokeInst *invoke =
3432*67e74705SXin Li Builder.CreateInvoke(callee,
3433*67e74705SXin Li getUnreachableBlock(),
3434*67e74705SXin Li getInvokeDest(),
3435*67e74705SXin Li args,
3436*67e74705SXin Li BundleList);
3437*67e74705SXin Li invoke->setDoesNotReturn();
3438*67e74705SXin Li invoke->setCallingConv(getRuntimeCC());
3439*67e74705SXin Li } else {
3440*67e74705SXin Li llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3441*67e74705SXin Li call->setDoesNotReturn();
3442*67e74705SXin Li call->setCallingConv(getRuntimeCC());
3443*67e74705SXin Li Builder.CreateUnreachable();
3444*67e74705SXin Li }
3445*67e74705SXin Li }
3446*67e74705SXin Li
3447*67e74705SXin Li /// Emits a call or invoke instruction to the given nullary runtime function.
3448*67e74705SXin Li llvm::CallSite
3449*67e74705SXin Li CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3450*67e74705SXin Li const Twine &name) {
3451*67e74705SXin Li return EmitRuntimeCallOrInvoke(callee, None, name);
3452*67e74705SXin Li }
3453*67e74705SXin Li
3454*67e74705SXin Li /// Emits a call or invoke instruction to the given runtime function.
3455*67e74705SXin Li llvm::CallSite
3456*67e74705SXin Li CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3457*67e74705SXin Li ArrayRef<llvm::Value*> args,
3458*67e74705SXin Li const Twine &name) {
3459*67e74705SXin Li llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3460*67e74705SXin Li callSite.setCallingConv(getRuntimeCC());
3461*67e74705SXin Li return callSite;
3462*67e74705SXin Li }
3463*67e74705SXin Li
3464*67e74705SXin Li /// Emits a call or invoke instruction to the given function, depending
3465*67e74705SXin Li /// on the current state of the EH stack.
3466*67e74705SXin Li llvm::CallSite
3467*67e74705SXin Li CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3468*67e74705SXin Li ArrayRef<llvm::Value *> Args,
3469*67e74705SXin Li const Twine &Name) {
3470*67e74705SXin Li llvm::BasicBlock *InvokeDest = getInvokeDest();
3471*67e74705SXin Li SmallVector<llvm::OperandBundleDef, 1> BundleList;
3472*67e74705SXin Li getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3473*67e74705SXin Li
3474*67e74705SXin Li llvm::Instruction *Inst;
3475*67e74705SXin Li if (!InvokeDest)
3476*67e74705SXin Li Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3477*67e74705SXin Li else {
3478*67e74705SXin Li llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3479*67e74705SXin Li Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3480*67e74705SXin Li Name);
3481*67e74705SXin Li EmitBlock(ContBB);
3482*67e74705SXin Li }
3483*67e74705SXin Li
3484*67e74705SXin Li // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3485*67e74705SXin Li // optimizer it can aggressively ignore unwind edges.
3486*67e74705SXin Li if (CGM.getLangOpts().ObjCAutoRefCount)
3487*67e74705SXin Li AddObjCARCExceptionMetadata(Inst);
3488*67e74705SXin Li
3489*67e74705SXin Li return llvm::CallSite(Inst);
3490*67e74705SXin Li }
3491*67e74705SXin Li
3492*67e74705SXin Li /// \brief Store a non-aggregate value to an address to initialize it. For
3493*67e74705SXin Li /// initialization, a non-atomic store will be used.
3494*67e74705SXin Li static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3495*67e74705SXin Li LValue Dst) {
3496*67e74705SXin Li if (Src.isScalar())
3497*67e74705SXin Li CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3498*67e74705SXin Li else
3499*67e74705SXin Li CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3500*67e74705SXin Li }
3501*67e74705SXin Li
3502*67e74705SXin Li void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3503*67e74705SXin Li llvm::Value *New) {
3504*67e74705SXin Li DeferredReplacements.push_back(std::make_pair(Old, New));
3505*67e74705SXin Li }
3506*67e74705SXin Li
3507*67e74705SXin Li RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3508*67e74705SXin Li llvm::Value *Callee,
3509*67e74705SXin Li ReturnValueSlot ReturnValue,
3510*67e74705SXin Li const CallArgList &CallArgs,
3511*67e74705SXin Li CGCalleeInfo CalleeInfo,
3512*67e74705SXin Li llvm::Instruction **callOrInvoke) {
3513*67e74705SXin Li // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3514*67e74705SXin Li
3515*67e74705SXin Li // Handle struct-return functions by passing a pointer to the
3516*67e74705SXin Li // location that we would like to return into.
3517*67e74705SXin Li QualType RetTy = CallInfo.getReturnType();
3518*67e74705SXin Li const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3519*67e74705SXin Li
3520*67e74705SXin Li llvm::FunctionType *IRFuncTy =
3521*67e74705SXin Li cast<llvm::FunctionType>(
3522*67e74705SXin Li cast<llvm::PointerType>(Callee->getType())->getElementType());
3523*67e74705SXin Li
3524*67e74705SXin Li // If we're using inalloca, insert the allocation after the stack save.
3525*67e74705SXin Li // FIXME: Do this earlier rather than hacking it in here!
3526*67e74705SXin Li Address ArgMemory = Address::invalid();
3527*67e74705SXin Li const llvm::StructLayout *ArgMemoryLayout = nullptr;
3528*67e74705SXin Li if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3529*67e74705SXin Li ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
3530*67e74705SXin Li llvm::Instruction *IP = CallArgs.getStackBase();
3531*67e74705SXin Li llvm::AllocaInst *AI;
3532*67e74705SXin Li if (IP) {
3533*67e74705SXin Li IP = IP->getNextNode();
3534*67e74705SXin Li AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
3535*67e74705SXin Li } else {
3536*67e74705SXin Li AI = CreateTempAlloca(ArgStruct, "argmem");
3537*67e74705SXin Li }
3538*67e74705SXin Li auto Align = CallInfo.getArgStructAlignment();
3539*67e74705SXin Li AI->setAlignment(Align.getQuantity());
3540*67e74705SXin Li AI->setUsedWithInAlloca(true);
3541*67e74705SXin Li assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3542*67e74705SXin Li ArgMemory = Address(AI, Align);
3543*67e74705SXin Li }
3544*67e74705SXin Li
3545*67e74705SXin Li // Helper function to drill into the inalloca allocation.
3546*67e74705SXin Li auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3547*67e74705SXin Li auto FieldOffset =
3548*67e74705SXin Li CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3549*67e74705SXin Li return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3550*67e74705SXin Li };
3551*67e74705SXin Li
3552*67e74705SXin Li ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3553*67e74705SXin Li SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3554*67e74705SXin Li
3555*67e74705SXin Li // If the call returns a temporary with struct return, create a temporary
3556*67e74705SXin Li // alloca to hold the result, unless one is given to us.
3557*67e74705SXin Li Address SRetPtr = Address::invalid();
3558*67e74705SXin Li size_t UnusedReturnSize = 0;
3559*67e74705SXin Li if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3560*67e74705SXin Li if (!ReturnValue.isNull()) {
3561*67e74705SXin Li SRetPtr = ReturnValue.getValue();
3562*67e74705SXin Li } else {
3563*67e74705SXin Li SRetPtr = CreateMemTemp(RetTy);
3564*67e74705SXin Li if (HaveInsertPoint() && ReturnValue.isUnused()) {
3565*67e74705SXin Li uint64_t size =
3566*67e74705SXin Li CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3567*67e74705SXin Li if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3568*67e74705SXin Li UnusedReturnSize = size;
3569*67e74705SXin Li }
3570*67e74705SXin Li }
3571*67e74705SXin Li if (IRFunctionArgs.hasSRetArg()) {
3572*67e74705SXin Li IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3573*67e74705SXin Li } else if (RetAI.isInAlloca()) {
3574*67e74705SXin Li Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3575*67e74705SXin Li Builder.CreateStore(SRetPtr.getPointer(), Addr);
3576*67e74705SXin Li }
3577*67e74705SXin Li }
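// For an indirect return, the call built below will look roughly like
// (illustrative):
//   %tmp = alloca %struct.S
//   call void @f(%struct.S* sret %tmp)
// and the result is recovered from %tmp once the call has been emitted.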
3578*67e74705SXin Li
3579*67e74705SXin Li Address swiftErrorTemp = Address::invalid();
3580*67e74705SXin Li Address swiftErrorArg = Address::invalid();
3581*67e74705SXin Li
3582*67e74705SXin Li assert(CallInfo.arg_size() == CallArgs.size() &&
3583*67e74705SXin Li "Mismatch between function signature & arguments.");
3584*67e74705SXin Li unsigned ArgNo = 0;
3585*67e74705SXin Li CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3586*67e74705SXin Li for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3587*67e74705SXin Li I != E; ++I, ++info_it, ++ArgNo) {
3588*67e74705SXin Li const ABIArgInfo &ArgInfo = info_it->info;
3589*67e74705SXin Li RValue RV = I->RV;
3590*67e74705SXin Li
3591*67e74705SXin Li // Insert a padding argument to ensure proper alignment.
3592*67e74705SXin Li if (IRFunctionArgs.hasPaddingArg(ArgNo))
3593*67e74705SXin Li IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3594*67e74705SXin Li llvm::UndefValue::get(ArgInfo.getPaddingType());
3595*67e74705SXin Li
3596*67e74705SXin Li unsigned FirstIRArg, NumIRArgs;
3597*67e74705SXin Li std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3598*67e74705SXin Li
3599*67e74705SXin Li switch (ArgInfo.getKind()) {
3600*67e74705SXin Li case ABIArgInfo::InAlloca: {
3601*67e74705SXin Li assert(NumIRArgs == 0);
3602*67e74705SXin Li assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3603*67e74705SXin Li if (RV.isAggregate()) {
3604*67e74705SXin Li // Replace the placeholder with the appropriate argument slot GEP.
3605*67e74705SXin Li llvm::Instruction *Placeholder =
3606*67e74705SXin Li cast<llvm::Instruction>(RV.getAggregatePointer());
3607*67e74705SXin Li CGBuilderTy::InsertPoint IP = Builder.saveIP();
3608*67e74705SXin Li Builder.SetInsertPoint(Placeholder);
3609*67e74705SXin Li Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3610*67e74705SXin Li Builder.restoreIP(IP);
3611*67e74705SXin Li deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3612*67e74705SXin Li } else {
3613*67e74705SXin Li // Store the RValue into the argument struct.
3614*67e74705SXin Li Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3615*67e74705SXin Li unsigned AS = Addr.getType()->getPointerAddressSpace();
3616*67e74705SXin Li llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3617*67e74705SXin Li // There are some cases where a trivial bitcast is not avoidable. The
3618*67e74705SXin Li // definition of a type later in a translation unit may change its type
3619*67e74705SXin Li // from {}* to (%struct.foo*)*.
3620*67e74705SXin Li if (Addr.getType() != MemType)
3621*67e74705SXin Li Addr = Builder.CreateBitCast(Addr, MemType);
3622*67e74705SXin Li LValue argLV = MakeAddrLValue(Addr, I->Ty);
3623*67e74705SXin Li EmitInitStoreOfNonAggregate(*this, RV, argLV);
3624*67e74705SXin Li }
3625*67e74705SXin Li break;
3626*67e74705SXin Li }
3627*67e74705SXin Li
3628*67e74705SXin Li case ABIArgInfo::Indirect: {
3629*67e74705SXin Li assert(NumIRArgs == 1);
3630*67e74705SXin Li if (RV.isScalar() || RV.isComplex()) {
3631*67e74705SXin Li // Make a temporary alloca to pass the argument.
3632*67e74705SXin Li Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3633*67e74705SXin Li IRCallArgs[FirstIRArg] = Addr.getPointer();
3634*67e74705SXin Li
3635*67e74705SXin Li LValue argLV = MakeAddrLValue(Addr, I->Ty);
3636*67e74705SXin Li EmitInitStoreOfNonAggregate(*this, RV, argLV);
3637*67e74705SXin Li } else {
3638*67e74705SXin Li // We want to avoid creating an unnecessary temporary+copy here;
3639*67e74705SXin Li // however, we need one in three cases:
3640*67e74705SXin Li // 1. If the argument is not byval, and we are required to copy the
3641*67e74705SXin Li // source. (This case doesn't occur on any common architecture.)
3642*67e74705SXin Li // 2. If the argument is byval, RV is not sufficiently aligned, and
3643*67e74705SXin Li // we cannot force it to be sufficiently aligned.
3644*67e74705SXin Li // 3. If the argument is byval, but RV is located in an address space
3645*67e74705SXin Li // different than that of the argument (0).
3646*67e74705SXin Li Address Addr = RV.getAggregateAddress();
3647*67e74705SXin Li CharUnits Align = ArgInfo.getIndirectAlign();
3648*67e74705SXin Li const llvm::DataLayout *TD = &CGM.getDataLayout();
3649*67e74705SXin Li const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
3650*67e74705SXin Li const unsigned ArgAddrSpace =
3651*67e74705SXin Li (FirstIRArg < IRFuncTy->getNumParams()
3652*67e74705SXin Li ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
3653*67e74705SXin Li : 0);
3654*67e74705SXin Li if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3655*67e74705SXin Li (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
3656*67e74705SXin Li llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
3657*67e74705SXin Li Align.getQuantity(), *TD)
3658*67e74705SXin Li < Align.getQuantity()) ||
3659*67e74705SXin Li (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3660*67e74705SXin Li // Create an aligned temporary, and copy to it.
3661*67e74705SXin Li Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3662*67e74705SXin Li IRCallArgs[FirstIRArg] = AI.getPointer();
3663*67e74705SXin Li EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
3664*67e74705SXin Li } else {
3665*67e74705SXin Li // Skip the extra memcpy call.
3666*67e74705SXin Li IRCallArgs[FirstIRArg] = Addr.getPointer();
3667*67e74705SXin Li }
3668*67e74705SXin Li }
3669*67e74705SXin Li break;
3670*67e74705SXin Li }
3671*67e74705SXin Li
3672*67e74705SXin Li case ABIArgInfo::Ignore:
3673*67e74705SXin Li assert(NumIRArgs == 0);
3674*67e74705SXin Li break;
3675*67e74705SXin Li
3676*67e74705SXin Li case ABIArgInfo::Extend:
3677*67e74705SXin Li case ABIArgInfo::Direct: {
3678*67e74705SXin Li if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3679*67e74705SXin Li ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3680*67e74705SXin Li ArgInfo.getDirectOffset() == 0) {
3681*67e74705SXin Li assert(NumIRArgs == 1);
3682*67e74705SXin Li llvm::Value *V;
3683*67e74705SXin Li if (RV.isScalar())
3684*67e74705SXin Li V = RV.getScalarVal();
3685*67e74705SXin Li else
3686*67e74705SXin Li V = Builder.CreateLoad(RV.getAggregateAddress());
3687*67e74705SXin Li
3688*67e74705SXin Li // Implement swifterror by copying into a new swifterror argument.
3689*67e74705SXin Li // We'll write back in the normal path out of the call.
3690*67e74705SXin Li if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3691*67e74705SXin Li == ParameterABI::SwiftErrorResult) {
3692*67e74705SXin Li assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
3693*67e74705SXin Li
3694*67e74705SXin Li QualType pointeeTy = I->Ty->getPointeeType();
3695*67e74705SXin Li swiftErrorArg =
3696*67e74705SXin Li Address(V, getContext().getTypeAlignInChars(pointeeTy));
3697*67e74705SXin Li
3698*67e74705SXin Li swiftErrorTemp =
3699*67e74705SXin Li CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3700*67e74705SXin Li V = swiftErrorTemp.getPointer();
3701*67e74705SXin Li cast<llvm::AllocaInst>(V)->setSwiftError(true);
3702*67e74705SXin Li
3703*67e74705SXin Li llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
3704*67e74705SXin Li Builder.CreateStore(errorValue, swiftErrorTemp);
3705*67e74705SXin Li }
3706*67e74705SXin Li
3707*67e74705SXin Li // We might have to widen integers, but we should never truncate.
3708*67e74705SXin Li if (ArgInfo.getCoerceToType() != V->getType() &&
3709*67e74705SXin Li V->getType()->isIntegerTy())
3710*67e74705SXin Li V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3711*67e74705SXin Li
3712*67e74705SXin Li // If the argument doesn't match, perform a bitcast to coerce it. This
3713*67e74705SXin Li // can happen due to trivial type mismatches.
3714*67e74705SXin Li if (FirstIRArg < IRFuncTy->getNumParams() &&
3715*67e74705SXin Li V->getType() != IRFuncTy->getParamType(FirstIRArg))
3716*67e74705SXin Li V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3717*67e74705SXin Li
3718*67e74705SXin Li IRCallArgs[FirstIRArg] = V;
3719*67e74705SXin Li break;
3720*67e74705SXin Li }
3721*67e74705SXin Li
3722*67e74705SXin Li // FIXME: Avoid the conversion through memory if possible.
3723*67e74705SXin Li Address Src = Address::invalid();
3724*67e74705SXin Li if (RV.isScalar() || RV.isComplex()) {
3725*67e74705SXin Li Src = CreateMemTemp(I->Ty, "coerce");
3726*67e74705SXin Li LValue SrcLV = MakeAddrLValue(Src, I->Ty);
3727*67e74705SXin Li EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
3728*67e74705SXin Li } else {
3729*67e74705SXin Li Src = RV.getAggregateAddress();
3730*67e74705SXin Li }
3731*67e74705SXin Li
3732*67e74705SXin Li // If the value is offset in memory, apply the offset now.
3733*67e74705SXin Li Src = emitAddressAtOffset(*this, Src, ArgInfo);
3734*67e74705SXin Li
3735*67e74705SXin Li // Fast-isel and the optimizer generally like scalar values better than
3736*67e74705SXin Li // FCAs, so we flatten them if this is safe to do for this argument.
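// For example, an argument coerced to '{ i64, i64 }' is passed as two
// scalar i64 IR arguments rather than as one first-class aggregate value.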
3737*67e74705SXin Li llvm::StructType *STy =
3738*67e74705SXin Li dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3739*67e74705SXin Li if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3740*67e74705SXin Li llvm::Type *SrcTy = Src.getType()->getElementType();
3741*67e74705SXin Li uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3742*67e74705SXin Li uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3743*67e74705SXin Li
3744*67e74705SXin Li // If the source type is smaller than the destination type of the
3745*67e74705SXin Li // coerce-to logic, copy the source value into a temp alloca the size
3746*67e74705SXin Li // of the destination type to allow loading all of it. The bits past
3747*67e74705SXin Li // the source value are left undef.
3748*67e74705SXin Li if (SrcSize < DstSize) {
3749*67e74705SXin Li Address TempAlloca
3750*67e74705SXin Li = CreateTempAlloca(STy, Src.getAlignment(),
3751*67e74705SXin Li Src.getName() + ".coerce");
3752*67e74705SXin Li Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
3753*67e74705SXin Li Src = TempAlloca;
3754*67e74705SXin Li } else {
3755*67e74705SXin Li Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
3756*67e74705SXin Li }
3757*67e74705SXin Li
3758*67e74705SXin Li auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
3759*67e74705SXin Li assert(NumIRArgs == STy->getNumElements());
3760*67e74705SXin Li for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3761*67e74705SXin Li auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
3762*67e74705SXin Li Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
3763*67e74705SXin Li llvm::Value *LI = Builder.CreateLoad(EltPtr);
3764*67e74705SXin Li IRCallArgs[FirstIRArg + i] = LI;
3765*67e74705SXin Li }
3766*67e74705SXin Li } else {
3767*67e74705SXin Li // In the simple case, just pass the coerced loaded value.
3768*67e74705SXin Li assert(NumIRArgs == 1);
3769*67e74705SXin Li IRCallArgs[FirstIRArg] =
3770*67e74705SXin Li CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
3771*67e74705SXin Li }
3772*67e74705SXin Li
3773*67e74705SXin Li break;
3774*67e74705SXin Li }
3775*67e74705SXin Li
3776*67e74705SXin Li case ABIArgInfo::CoerceAndExpand: {
3777*67e74705SXin Li auto coercionType = ArgInfo.getCoerceAndExpandType();
3778*67e74705SXin Li auto layout = CGM.getDataLayout().getStructLayout(coercionType);
3779*67e74705SXin Li
3780*67e74705SXin Li llvm::Value *tempSize = nullptr;
3781*67e74705SXin Li Address addr = Address::invalid();
3782*67e74705SXin Li if (RV.isAggregate()) {
3783*67e74705SXin Li addr = RV.getAggregateAddress();
3784*67e74705SXin Li } else {
3785*67e74705SXin Li assert(RV.isScalar()); // complex should always just be direct
3786*67e74705SXin Li
3787*67e74705SXin Li llvm::Type *scalarType = RV.getScalarVal()->getType();
3788*67e74705SXin Li auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
3789*67e74705SXin Li auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
3790*67e74705SXin Li
3791*67e74705SXin Li tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);
3792*67e74705SXin Li
3793*67e74705SXin Li // Materialize to a temporary.
3794*67e74705SXin Li addr = CreateTempAlloca(RV.getScalarVal()->getType(),
3795*67e74705SXin Li CharUnits::fromQuantity(std::max(layout->getAlignment(),
3796*67e74705SXin Li scalarAlign)));
3797*67e74705SXin Li EmitLifetimeStart(scalarSize, addr.getPointer());
3798*67e74705SXin Li
3799*67e74705SXin Li Builder.CreateStore(RV.getScalarVal(), addr);
3800*67e74705SXin Li }
3801*67e74705SXin Li
3802*67e74705SXin Li addr = Builder.CreateElementBitCast(addr, coercionType);
3803*67e74705SXin Li
3804*67e74705SXin Li unsigned IRArgPos = FirstIRArg;
3805*67e74705SXin Li for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3806*67e74705SXin Li llvm::Type *eltType = coercionType->getElementType(i);
3807*67e74705SXin Li if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
3808*67e74705SXin Li Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
3809*67e74705SXin Li llvm::Value *elt = Builder.CreateLoad(eltAddr);
3810*67e74705SXin Li IRCallArgs[IRArgPos++] = elt;
3811*67e74705SXin Li }
3812*67e74705SXin Li assert(IRArgPos == FirstIRArg + NumIRArgs);
3813*67e74705SXin Li
3814*67e74705SXin Li if (tempSize) {
3815*67e74705SXin Li EmitLifetimeEnd(tempSize, addr.getPointer());
3816*67e74705SXin Li }
3817*67e74705SXin Li
3818*67e74705SXin Li break;
3819*67e74705SXin Li }
3820*67e74705SXin Li
3821*67e74705SXin Li case ABIArgInfo::Expand:
3822*67e74705SXin Li unsigned IRArgPos = FirstIRArg;
3823*67e74705SXin Li ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
3824*67e74705SXin Li assert(IRArgPos == FirstIRArg + NumIRArgs);
3825*67e74705SXin Li break;
3826*67e74705SXin Li }
3827*67e74705SXin Li }
3828*67e74705SXin Li
3829*67e74705SXin Li if (ArgMemory.isValid()) {
3830*67e74705SXin Li llvm::Value *Arg = ArgMemory.getPointer();
3831*67e74705SXin Li if (CallInfo.isVariadic()) {
3832*67e74705SXin Li // When passing non-POD arguments by value to variadic functions, we will
3833*67e74705SXin Li // end up with a variadic prototype and an inalloca call site. In such
3834*67e74705SXin Li // cases, we can't do any parameter mismatch checks. Give up and bitcast
3835*67e74705SXin Li // the callee.
3836*67e74705SXin Li unsigned CalleeAS =
3837*67e74705SXin Li cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
3838*67e74705SXin Li Callee = Builder.CreateBitCast(
3839*67e74705SXin Li Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
3840*67e74705SXin Li } else {
3841*67e74705SXin Li llvm::Type *LastParamTy =
3842*67e74705SXin Li IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
3843*67e74705SXin Li if (Arg->getType() != LastParamTy) {
3844*67e74705SXin Li #ifndef NDEBUG
3845*67e74705SXin Li // Assert that these structs have equivalent element types.
3846*67e74705SXin Li llvm::StructType *FullTy = CallInfo.getArgStruct();
3847*67e74705SXin Li llvm::StructType *DeclaredTy = cast<llvm::StructType>(
3848*67e74705SXin Li cast<llvm::PointerType>(LastParamTy)->getElementType());
3849*67e74705SXin Li assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
3850*67e74705SXin Li for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
3851*67e74705SXin Li DE = DeclaredTy->element_end(),
3852*67e74705SXin Li FI = FullTy->element_begin();
3853*67e74705SXin Li DI != DE; ++DI, ++FI)
3854*67e74705SXin Li assert(*DI == *FI);
3855*67e74705SXin Li #endif
3856*67e74705SXin Li Arg = Builder.CreateBitCast(Arg, LastParamTy);
3857*67e74705SXin Li }
3858*67e74705SXin Li }
3859*67e74705SXin Li assert(IRFunctionArgs.hasInallocaArg());
3860*67e74705SXin Li IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
3861*67e74705SXin Li }
3862*67e74705SXin Li
3863*67e74705SXin Li if (!CallArgs.getCleanupsToDeactivate().empty())
3864*67e74705SXin Li deactivateArgCleanupsBeforeCall(*this, CallArgs);
3865*67e74705SXin Li
3866*67e74705SXin Li // If the callee is a bitcast of a function to a varargs pointer to function
3867*67e74705SXin Li // type, check to see if we can remove the bitcast. This handles some cases
3868*67e74705SXin Li // with unprototyped functions.
3869*67e74705SXin Li if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
3870*67e74705SXin Li if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
3871*67e74705SXin Li llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
3872*67e74705SXin Li llvm::FunctionType *CurFT =
3873*67e74705SXin Li cast<llvm::FunctionType>(CurPT->getElementType());
3874*67e74705SXin Li llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
3875*67e74705SXin Li
3876*67e74705SXin Li if (CE->getOpcode() == llvm::Instruction::BitCast &&
3877*67e74705SXin Li ActualFT->getReturnType() == CurFT->getReturnType() &&
3878*67e74705SXin Li ActualFT->getNumParams() == CurFT->getNumParams() &&
3879*67e74705SXin Li ActualFT->getNumParams() == IRCallArgs.size() &&
3880*67e74705SXin Li (CurFT->isVarArg() || !ActualFT->isVarArg())) {
3881*67e74705SXin Li bool ArgsMatch = true;
3882*67e74705SXin Li for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
3883*67e74705SXin Li if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
3884*67e74705SXin Li ArgsMatch = false;
3885*67e74705SXin Li break;
3886*67e74705SXin Li }
3887*67e74705SXin Li
3888*67e74705SXin Li // Strip the cast if we can get away with it. This is a nice cleanup,
3889*67e74705SXin Li // but also allows us to inline the function at -O0 if it is marked
3890*67e74705SXin Li // always_inline.
3891*67e74705SXin Li if (ArgsMatch)
3892*67e74705SXin Li Callee = CalleeF;
3893*67e74705SXin Li }
3894*67e74705SXin Li }
3895*67e74705SXin Li
3896*67e74705SXin Li assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
3897*67e74705SXin Li for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
3898*67e74705SXin Li // The inalloca argument can have a different type.
3899*67e74705SXin Li if (IRFunctionArgs.hasInallocaArg() &&
3900*67e74705SXin Li i == IRFunctionArgs.getInallocaArgNo())
3901*67e74705SXin Li continue;
3902*67e74705SXin Li if (i < IRFuncTy->getNumParams())
3903*67e74705SXin Li assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
3904*67e74705SXin Li }
3905*67e74705SXin Li
3906*67e74705SXin Li unsigned CallingConv;
3907*67e74705SXin Li CodeGen::AttributeListType AttributeList;
3908*67e74705SXin Li CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
3909*67e74705SXin Li AttributeList, CallingConv,
3910*67e74705SXin Li /*AttrOnCallSite=*/true);
3911*67e74705SXin Li llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
3912*67e74705SXin Li AttributeList);
3913*67e74705SXin Li
3914*67e74705SXin Li bool CannotThrow;
3915*67e74705SXin Li if (currentFunctionUsesSEHTry()) {
3916*67e74705SXin Li // SEH cares about asynchronous exceptions, so everything can "throw."
3917*67e74705SXin Li CannotThrow = false;
3918*67e74705SXin Li } else if (isCleanupPadScope() &&
3919*67e74705SXin Li EHPersonality::get(*this).isMSVCXXPersonality()) {
3920*67e74705SXin Li // The MSVC++ personality will implicitly terminate the program if an
3921*67e74705SXin Li // exception is thrown. An unwind edge cannot be reached.
3922*67e74705SXin Li CannotThrow = true;
3923*67e74705SXin Li } else {
3924*67e74705SXin Li // Otherwise, nounwind call sites will never throw.
3925*67e74705SXin Li CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
3926*67e74705SXin Li llvm::Attribute::NoUnwind);
3927*67e74705SXin Li }
3928*67e74705SXin Li llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
3929*67e74705SXin Li
3930*67e74705SXin Li SmallVector<llvm::OperandBundleDef, 1> BundleList;
3931*67e74705SXin Li getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3932*67e74705SXin Li
3933*67e74705SXin Li llvm::CallSite CS;
3934*67e74705SXin Li if (!InvokeDest) {
3935*67e74705SXin Li CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
3936*67e74705SXin Li } else {
3937*67e74705SXin Li llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
3938*67e74705SXin Li CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
3939*67e74705SXin Li BundleList);
3940*67e74705SXin Li EmitBlock(Cont);
3941*67e74705SXin Li }
3942*67e74705SXin Li if (callOrInvoke)
3943*67e74705SXin Li *callOrInvoke = CS.getInstruction();
3944*67e74705SXin Li
3945*67e74705SXin Li if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
3946*67e74705SXin Li !CS.hasFnAttr(llvm::Attribute::NoInline))
3947*67e74705SXin Li Attrs =
3948*67e74705SXin Li Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3949*67e74705SXin Li llvm::Attribute::AlwaysInline);
3950*67e74705SXin Li
3951*67e74705SXin Li // Disable inlining inside SEH __try blocks.
3952*67e74705SXin Li if (isSEHTryScope())
3953*67e74705SXin Li Attrs =
3954*67e74705SXin Li Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3955*67e74705SXin Li llvm::Attribute::NoInline);
3956*67e74705SXin Li
3957*67e74705SXin Li CS.setAttributes(Attrs);
3958*67e74705SXin Li CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
3959*67e74705SXin Li
3960*67e74705SXin Li // Insert instrumentation or attach profile metadata at indirect call sites.
3961*67e74705SXin Li // For more details, see the comment before the definition of
3962*67e74705SXin Li // IPVK_IndirectCallTarget in InstrProfData.inc.
3963*67e74705SXin Li if (!CS.getCalledFunction())
3964*67e74705SXin Li PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
3965*67e74705SXin Li CS.getInstruction(), Callee);
3966*67e74705SXin Li
3967*67e74705SXin Li // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3968*67e74705SXin Li // optimizer it can aggressively ignore unwind edges.
3969*67e74705SXin Li if (CGM.getLangOpts().ObjCAutoRefCount)
3970*67e74705SXin Li AddObjCARCExceptionMetadata(CS.getInstruction());
3971*67e74705SXin Li
3972*67e74705SXin Li // If the call doesn't return, finish the basic block and clear the
3973*67e74705SXin Li // insertion point; this allows the rest of IRgen to discard
3974*67e74705SXin Li // unreachable code.
3975*67e74705SXin Li if (CS.doesNotReturn()) {
3976*67e74705SXin Li if (UnusedReturnSize)
3977*67e74705SXin Li EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3978*67e74705SXin Li SRetPtr.getPointer());
3979*67e74705SXin Li
3980*67e74705SXin Li Builder.CreateUnreachable();
3981*67e74705SXin Li Builder.ClearInsertionPoint();
3982*67e74705SXin Li
3983*67e74705SXin Li // FIXME: For now, emit a dummy basic block because expr emitters in
3984*67e74705SXin Li // general are not ready to handle emitting expressions at unreachable
3985*67e74705SXin Li // points.
3986*67e74705SXin Li EnsureInsertPoint();
3987*67e74705SXin Li
3988*67e74705SXin Li // Return a reasonable RValue.
3989*67e74705SXin Li return GetUndefRValue(RetTy);
3990*67e74705SXin Li }
3991*67e74705SXin Li
3992*67e74705SXin Li llvm::Instruction *CI = CS.getInstruction();
3993*67e74705SXin Li if (!CI->getType()->isVoidTy())
3994*67e74705SXin Li CI->setName("call");
3995*67e74705SXin Li
3996*67e74705SXin Li // Perform the swifterror writeback.
3997*67e74705SXin Li if (swiftErrorTemp.isValid()) {
3998*67e74705SXin Li llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
3999*67e74705SXin Li Builder.CreateStore(errorResult, swiftErrorArg);
4000*67e74705SXin Li }
4001*67e74705SXin Li
4002*67e74705SXin Li // Emit any writebacks immediately. Arguably this should happen
4003*67e74705SXin Li // after any return-value munging.
4004*67e74705SXin Li if (CallArgs.hasWritebacks())
4005*67e74705SXin Li emitWritebacks(*this, CallArgs);
4006*67e74705SXin Li
4007*67e74705SXin Li // The stack cleanup for inalloca arguments has to run out of the normal
4008*67e74705SXin Li // lexical order, so deactivate it and run it manually here.
4009*67e74705SXin Li CallArgs.freeArgumentMemory(*this);
4010*67e74705SXin Li
4011*67e74705SXin Li if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4012*67e74705SXin Li const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
4013*67e74705SXin Li if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4014*67e74705SXin Li Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4015*67e74705SXin Li }

  RValue Ret = [&] {
    switch (RetAI.getKind()) {
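    // CoerceAndExpand: the ABI returned the value as a sequence of register-
    // sized pieces (a sketch: a struct coerced to { i64, float }, possibly
    // with padding elements interleaved).  Store each non-padding piece into
    // the sret temporary at its offset, then fall through to the Indirect
    // path to load the result back with its semantic type.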
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      // FALLTHROUGH
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
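      // The callee has already written the result into SRetPtr (the hidden
      // sret/inalloca slot), so just reinterpret that memory as an RValue of
      // the semantic return type.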
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSize)
        EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                        SRetPtr.getPointer());
      return ret;
    }

    case ABIArgInfo::Ignore:
      // Even though the ABI ignores this result, our caller may still expect
      // a value, so construct an appropriate (undef) return value for it.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
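          // A complex value comes back as a two-element {real, imag}
          // aggregate; split it into its components.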
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
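          // An aggregate returned directly (e.g. a small struct in registers)
          // is stored into the caller-provided slot, or into a fresh
          // temporary when the caller did not supply one.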
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the value's type doesn't match, perform a bitcast to coerce
          // it.  This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

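      // Round-trip the coerced value through memory: store it with its
      // coerced type here, then reload it below as the semantic type.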
      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  } ();

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();

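  // Honor __attribute__((assume_aligned(N[, offset]))) on the callee: for
  // example, a declaration such as
  //   void *my_alloc(size_t) __attribute__((assume_aligned(64)));
  // (a hypothetical function) lets us emit an alignment assumption that the
  // returned pointer is 64-byte aligned.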
  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}

/* VarArg handling */

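// Emit a va_arg expression by delegating to the target's ABIInfo, which
// knows the va_list layout.  For example, lowering
//   int x = va_arg(ap, int);
// first evaluates `ap` (via EmitMSVAListRef when the expression uses the
// Microsoft ABI, otherwise via EmitVAListRef) and then asks the target
// ABIInfo for the address of the next argument, advancing the list in place.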
Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                   ? EmitMSVAListRef(VE->getSubExpr())
                   : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}