| author | Chris Lattner <sabre@nondot.org> | 2011-07-09 17:41:47 +0000 |
|---|---|---|
| committer | Chris Lattner <sabre@nondot.org> | 2011-07-09 17:41:47 +0000 |
| commit | 9cbe4f0ba01ec304e1e3d071c071f7bca33631c0 | (patch) |
| tree | f89ae9b252a8af0e0a2cbdfbddc230e3183ee8ee | |
| parent | 0850e8d1b093cfe1fc2fdf533a0e264ef9d5412e | (diff) |
clang side to match the LLVM IR type system rewrite patch.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@134831 91177308-0d34-0410-b5e6-96231b3b80d8
83 files changed, 935 insertions, 1050 deletions
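Most of the diff below is mechanical fallout from the LLVM IR type system rewrite: `const` is dropped from `llvm::Type*` (types are mutable now), and the old `PATypeHolder` / `OpaqueType` / `refineAbstractTypeTo` dance for named and recursive structs is replaced by creating a named struct up front and completing it with `setBody()`. The following is a minimal standalone sketch of that pattern, not code from the patch; the function and struct names are illustrative, and it uses the API spellings of this revision (`StructType::createNamed`, `setBody`).

```cpp
// Sketch only: the named/recursive struct pattern the hunks below adopt,
// written against the 2011-era LLVM headers used at this revision.
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include <vector>

static llvm::StructType *buildByRefLikeType(llvm::LLVMContext &Ctx) {
  // Before: OpaqueType::get() held in a PATypeHolder, later resolved with
  // refineAbstractTypeTo(). After: the named struct exists immediately and
  // can be referenced (even by itself) before its body is set.
  llvm::StructType *ByRef =
      llvm::StructType::createNamed(Ctx, "struct.__block_byref_example");

  std::vector<llvm::Type *> Fields;                       // note: no 'const'
  Fields.push_back(llvm::Type::getInt8PtrTy(Ctx));        // void *__isa
  Fields.push_back(llvm::PointerType::getUnqual(ByRef));  // self-referencing field
  Fields.push_back(llvm::Type::getInt32Ty(Ctx));          // int32_t __flags

  ByRef->setBody(Fields, /*isPacked=*/false);             // complete the type
  return ByRef;
}
```

Because the name now lives on the `StructType` itself, the separate `Module::addTypeName()` calls also disappear throughout the patch.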
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h index ce1039849b..138123816c 100644 --- a/lib/CodeGen/ABIInfo.h +++ b/lib/CodeGen/ABIInfo.h @@ -68,22 +68,22 @@ namespace clang { private: Kind TheKind; - llvm::PATypeHolder TypeData; + llvm::Type *TypeData; unsigned UIntData; bool BoolData0; bool BoolData1; - ABIArgInfo(Kind K, const llvm::Type *TD=0, + ABIArgInfo(Kind K, llvm::Type *TD=0, unsigned UI=0, bool B0 = false, bool B1 = false) : TheKind(K), TypeData(TD), UIntData(UI), BoolData0(B0), BoolData1(B1) {} public: ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {} - static ABIArgInfo getDirect(const llvm::Type *T = 0, unsigned Offset = 0) { + static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0) { return ABIArgInfo(Direct, T, Offset); } - static ABIArgInfo getExtend(const llvm::Type *T = 0) { + static ABIArgInfo getExtend(llvm::Type *T = 0) { return ABIArgInfo(Extend, T, 0); } static ABIArgInfo getIgnore() { @@ -113,12 +113,12 @@ namespace clang { assert((isDirect() || isExtend()) && "Not a direct or extend kind"); return UIntData; } - const llvm::Type *getCoerceToType() const { + llvm::Type *getCoerceToType() const { assert(canHaveCoerceToType() && "Invalid kind!"); return TypeData; } - void setCoerceToType(const llvm::Type *T) { + void setCoerceToType(llvm::Type *T) { assert(canHaveCoerceToType() && "Invalid kind!"); TypeData = T; } diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp index a0a8d667df..020cbdf60e 100644 --- a/lib/CodeGen/CGBlocks.cpp +++ b/lib/CodeGen/CGBlocks.cpp @@ -654,11 +654,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) { } -const llvm::Type *CodeGenModule::getBlockDescriptorType() { +llvm::Type *CodeGenModule::getBlockDescriptorType() { if (BlockDescriptorType) return BlockDescriptorType; - const llvm::Type *UnsignedLongTy = + llvm::Type *UnsignedLongTy = getTypes().ConvertType(getContext().UnsignedLongTy); // struct __block_descriptor { @@ -676,21 +676,19 @@ const llvm::Type *CodeGenModule::getBlockDescriptorType() { // const char *layout; // reserved // }; BlockDescriptorType = - llvm::StructType::get(UnsignedLongTy, UnsignedLongTy, NULL); - - getModule().addTypeName("struct.__block_descriptor", - BlockDescriptorType); + llvm::StructType::createNamed("struct.__block_descriptor", + UnsignedLongTy, UnsignedLongTy, NULL); // Now form a pointer to that. 
BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType); return BlockDescriptorType; } -const llvm::Type *CodeGenModule::getGenericBlockLiteralType() { +llvm::Type *CodeGenModule::getGenericBlockLiteralType() { if (GenericBlockLiteralType) return GenericBlockLiteralType; - const llvm::Type *BlockDescPtrTy = getBlockDescriptorType(); + llvm::Type *BlockDescPtrTy = getBlockDescriptorType(); // struct __block_literal_generic { // void *__isa; @@ -699,15 +697,14 @@ const llvm::Type *CodeGenModule::getGenericBlockLiteralType() { // void (*__invoke)(void *); // struct __block_descriptor *__descriptor; // }; - GenericBlockLiteralType = llvm::StructType::get(VoidPtrTy, - IntTy, - IntTy, - VoidPtrTy, - BlockDescPtrTy, - NULL); - - getModule().addTypeName("struct.__block_literal_generic", - GenericBlockLiteralType); + GenericBlockLiteralType = + llvm::StructType::createNamed("struct.__block_literal_generic", + VoidPtrTy, + IntTy, + IntTy, + VoidPtrTy, + BlockDescPtrTy, + NULL); return GenericBlockLiteralType; } @@ -1663,15 +1660,17 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) { QualType Ty = D->getType(); - llvm::SmallVector<const llvm::Type *, 8> types; + llvm::SmallVector<llvm::Type *, 8> types; - llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(getLLVMContext()); + llvm::StructType *ByRefType = + llvm::StructType::createNamed(getLLVMContext(), + "struct.__block_byref_" + D->getNameAsString()); // void *__isa; types.push_back(Int8PtrTy); // void *__forwarding; - types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder)); + types.push_back(llvm::PointerType::getUnqual(ByRefType)); // int32_t __flags; types.push_back(Int32Ty); @@ -1706,7 +1705,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) { unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes; if (NumPaddingBytes > 0) { - const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext()); + llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext()); // FIXME: We need a sema error for alignment larger than the minimum of // the maximal stack alignment and the alignment of malloc on the system. 
if (NumPaddingBytes > 1) @@ -1722,13 +1721,9 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) { // T x; types.push_back(ConvertTypeForMem(Ty)); - const llvm::Type *T = llvm::StructType::get(getLLVMContext(), types, Packed); - - cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T); - CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(), - ByRefTypeHolder.get()); + ByRefType->setBody(types, Packed); - Info.first = ByRefTypeHolder.get(); + Info.first = ByRefType; Info.second = types.size() - 1; diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp index 8f29663613..3246cfc34e 100644 --- a/lib/CodeGen/CGBuiltin.cpp +++ b/lib/CodeGen/CGBuiltin.cpp @@ -95,12 +95,12 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF, unsigned AddrSpace = cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace(); - const llvm::IntegerType *IntType = + llvm::IntegerType *IntType = llvm::IntegerType::get(CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); - const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); + llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); - const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType }; + llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType }; llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2); llvm::Value *Args[2]; @@ -130,12 +130,12 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, unsigned AddrSpace = cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace(); - const llvm::IntegerType *IntType = + llvm::IntegerType *IntType = llvm::IntegerType::get(CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); - const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); + llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); - const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType }; + llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType }; llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2); llvm::Value *Args[2]; @@ -165,7 +165,8 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) { } // The prototype is something that takes and returns whatever V's type is. - llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(), + llvm::Type *ArgTys[] = { V->getType() }; + llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), ArgTys, false); llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName); @@ -233,7 +234,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__builtin_ctzll: { Value *ArgValue = EmitScalarExpr(E->getArg(0)); - const llvm::Type *ArgType = ArgValue->getType(); + llvm::Type *ArgType = ArgValue->getType(); Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1); const llvm::Type *ResultType = ConvertType(E->getType()); @@ -248,7 +249,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__builtin_clzll: { Value *ArgValue = EmitScalarExpr(E->getArg(0)); - const llvm::Type *ArgType = ArgValue->getType(); + llvm::Type *ArgType = ArgValue->getType(); Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1); const llvm::Type *ResultType = ConvertType(E->getType()); @@ -264,7 +265,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, // ffs(x) -> x ? 
cttz(x) + 1 : 0 Value *ArgValue = EmitScalarExpr(E->getArg(0)); - const llvm::Type *ArgType = ArgValue->getType(); + llvm::Type *ArgType = ArgValue->getType(); Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1); const llvm::Type *ResultType = ConvertType(E->getType()); @@ -284,7 +285,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, // parity(x) -> ctpop(x) & 1 Value *ArgValue = EmitScalarExpr(E->getArg(0)); - const llvm::Type *ArgType = ArgValue->getType(); + llvm::Type *ArgType = ArgValue->getType(); Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1); const llvm::Type *ResultType = ConvertType(E->getType()); @@ -301,7 +302,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__builtin_popcountll: { Value *ArgValue = EmitScalarExpr(E->getArg(0)); - const llvm::Type *ArgType = ArgValue->getType(); + llvm::Type *ArgType = ArgValue->getType(); Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1); const llvm::Type *ResultType = ConvertType(E->getType()); @@ -313,7 +314,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, } case Builtin::BI__builtin_expect: { Value *ArgValue = EmitScalarExpr(E->getArg(0)); - const llvm::Type *ArgType = ArgValue->getType(); + llvm::Type *ArgType = ArgValue->getType(); Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, &ArgType, 1); Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); @@ -321,19 +322,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue, "expval"); return RValue::get(Result); - } case Builtin::BI__builtin_bswap32: case Builtin::BI__builtin_bswap64: { Value *ArgValue = EmitScalarExpr(E->getArg(0)); - const llvm::Type *ArgType = ArgValue->getType(); + llvm::Type *ArgType = ArgValue->getType(); Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1); return RValue::get(Builder.CreateCall(F, ArgValue, "tmp")); } case Builtin::BI__builtin_object_size: { // We pass this builtin onto the optimizer so that it can // figure out the object size in more complex cases. 
- const llvm::Type *ResType[] = { + llvm::Type *ResType[] = { ConvertType(E->getType()) }; @@ -382,7 +382,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__builtin_powil: { Value *Base = EmitScalarExpr(E->getArg(0)); Value *Exponent = EmitScalarExpr(E->getArg(1)); - const llvm::Type *ArgType = Base->getType(); + llvm::Type *ArgType = Base->getType(); Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1); return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp")); } @@ -867,11 +867,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, unsigned AddrSpace = cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace(); - const llvm::IntegerType *IntType = + llvm::IntegerType *IntType = llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(T)); - const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); - const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType }; + llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); + llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType }; Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, IntrinsicTypes, 2); @@ -897,11 +897,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, unsigned AddrSpace = cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace(); - const llvm::IntegerType *IntType = + llvm::IntegerType *IntType = llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(T)); - const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); - const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType }; + llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); + llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType }; Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, IntrinsicTypes, 2); @@ -984,7 +984,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, break; Value *Base = EmitScalarExpr(E->getArg(0)); Value *Exponent = EmitScalarExpr(E->getArg(1)); - const llvm::Type *ArgType = Base->getType(); + llvm::Type *ArgType = Base->getType(); Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1); return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp")); } @@ -997,7 +997,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__builtin_fmal: { // Rewrite fma to intrinsic. Value *FirstArg = EmitScalarExpr(E->getArg(0)); - const llvm::Type *ArgType = FirstArg->getType(); + llvm::Type *ArgType = FirstArg->getType(); Value *F = CGM.getIntrinsic(Intrinsic::fma, &ArgType, 1); return RValue::get(Builder.CreateCall3(F, FirstArg, EmitScalarExpr(E->getArg(1)), @@ -1122,8 +1122,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, } } -static const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, - bool q) { +static llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) { switch (type) { default: break; case 0: @@ -1254,7 +1253,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f || BuiltinID == ARM::BI__builtin_arm_vcvtr_d) { // Determine the overloaded type of this builtin. - const llvm::Type *Ty; + llvm::Type *Ty; if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f) Ty = llvm::Type::getFloatTy(getLLVMContext()); else @@ -1277,8 +1276,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, (void)poly; // Only used in assert()s. 
bool rightShift = false; - const llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad); - const llvm::Type *Ty = VTy; + llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad); + llvm::Type *Ty = VTy; if (!Ty) return 0; @@ -1362,7 +1361,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, } case ARM::BI__builtin_neon_vcvt_n_f32_v: case ARM::BI__builtin_neon_vcvtq_n_f32_v: { - const llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty }; + llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty }; Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp; Function *F = CGM.getIntrinsic(Int, Tys, 2); return EmitNeonCall(F, Ops, "vcvt_n"); @@ -1371,7 +1370,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, case ARM::BI__builtin_neon_vcvt_n_u32_v: case ARM::BI__builtin_neon_vcvtq_n_s32_v: case ARM::BI__builtin_neon_vcvtq_n_u32_v: { - const llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) }; + llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) }; Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs; Function *F = CGM.getIntrinsic(Int, Tys, 2); return EmitNeonCall(F, Ops, "vcvt_n"); @@ -1589,9 +1588,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); const llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); - const llvm::Type *NarrowTy = + llvm::Type *NarrowTy = llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); - const llvm::Type *Tys[2] = { Ty, NarrowTy }; + llvm::Type *Tys[2] = { Ty, NarrowTy }; return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpadal"); } case ARM::BI__builtin_neon_vpadd_v: @@ -1603,9 +1602,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, // The source operand type has twice as many elements of half the size. unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); const llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); - const llvm::Type *NarrowTy = + llvm::Type *NarrowTy = llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); - const llvm::Type *Tys[2] = { Ty, NarrowTy }; + llvm::Type *Tys[2] = { Ty, NarrowTy }; return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpaddl"); } case ARM::BI__builtin_neon_vpmax_v: diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp index 92f1c63a38..dcc28b45cf 100644 --- a/lib/CodeGen/CGCXXABI.cpp +++ b/lib/CodeGen/CGCXXABI.cpp @@ -34,7 +34,7 @@ static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM, return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T)); } -const llvm::Type * +llvm::Type * CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()); } diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h index de4df3dcbe..29f299a43e 100644 --- a/lib/CodeGen/CGCXXABI.h +++ b/lib/CodeGen/CGCXXABI.h @@ -82,7 +82,7 @@ public: /// Find the LLVM type used to represent the given member pointer /// type. 
- virtual const llvm::Type * + virtual llvm::Type * ConvertMemberPointerType(const MemberPointerType *MPT); /// Load a member function from an object and a member function diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp index ead2162421..a6f242f2b8 100644 --- a/lib/CodeGen/CGCall.cpp +++ b/lib/CodeGen/CGCall.cpp @@ -1,4 +1,4 @@ -//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===// +//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -67,31 +67,28 @@ static CanQualType GetReturnType(QualType RetTy) { } const CGFunctionInfo & -CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP, - bool IsRecursive) { +CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) { return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(), llvm::SmallVector<CanQualType, 16>(), - FTNP->getExtInfo(), IsRecursive); + FTNP->getExtInfo()); } /// \param Args - contains any initial parameters besides those /// in the formal type static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT, llvm::SmallVectorImpl<CanQualType> &ArgTys, - CanQual<FunctionProtoType> FTP, - bool IsRecursive = false) { + CanQual<FunctionProtoType> FTP) { // FIXME: Kill copy. for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i) ArgTys.push_back(FTP->getArgType(i)); CanQualType ResTy = FTP->getResultType().getUnqualifiedType(); - return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive); + return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo()); } const CGFunctionInfo & -CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP, - bool IsRecursive) { +CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) { llvm::SmallVector<CanQualType, 16> ArgTys; - return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive); + return ::getFunctionInfo(*this, ArgTys, FTP); } static CallingConv getCallingConventionForDecl(const Decl *D) { @@ -244,8 +241,7 @@ const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() { const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy, const llvm::SmallVectorImpl<CanQualType> &ArgTys, - const FunctionType::ExtInfo &Info, - bool IsRecursive) { + const FunctionType::ExtInfo &Info) { #ifndef NDEBUG for (llvm::SmallVectorImpl<CanQualType>::const_iterator I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) @@ -278,18 +274,12 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy, // default now. ABIArgInfo &RetInfo = FI->getReturnInfo(); if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0) - RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType())); + RetInfo.setCoerceToType(ConvertType(FI->getReturnType())); for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end(); I != E; ++I) if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0) - I->info.setCoerceToType(ConvertTypeRecursive(I->type)); - - // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer - // types, resolve them now. These pointers may point to this function, which - // we *just* filled in the FunctionInfo for. 
- if (!IsRecursive && !PointersToResolve.empty()) - HandleLateResolvedPointers(); + I->info.setCoerceToType(ConvertType(I->type)); return *FI; } @@ -317,8 +307,7 @@ CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention, /***/ void CodeGenTypes::GetExpandedTypes(QualType type, - llvm::SmallVectorImpl<const llvm::Type*> &expandedTypes, - bool isRecursive) { + llvm::SmallVectorImpl<llvm::Type*> &expandedTypes) { const RecordType *RT = type->getAsStructureType(); assert(RT && "Can only expand structure types."); const RecordDecl *RD = RT->getDecl(); @@ -333,9 +322,9 @@ void CodeGenTypes::GetExpandedTypes(QualType type, QualType fieldType = FD->getType(); if (fieldType->isRecordType()) - GetExpandedTypes(fieldType, expandedTypes, isRecursive); + GetExpandedTypes(fieldType, expandedTypes); else - expandedTypes.push_back(ConvertType(fieldType, isRecursive)); + expandedTypes.push_back(ConvertType(fieldType)); } } @@ -629,7 +618,7 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { return false; } -const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { +llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { const CGFunctionInfo &FI = getFunctionInfo(GD); // For definition purposes, don't consider a K&R function variadic. @@ -638,13 +627,12 @@ const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>()) Variadic = FPT->isVariadic(); - return GetFunctionType(FI, Variadic, false); + return GetFunctionType(FI, Variadic); } -const llvm::FunctionType * -CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic, - bool isRecursive) { - llvm::SmallVector<const llvm::Type*, 8> argTypes; +llvm::FunctionType * +CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) { + llvm::SmallVector<llvm::Type*, 8> argTypes; const llvm::Type *resultType = 0; const ABIArgInfo &retAI = FI.getReturnInfo(); @@ -662,7 +650,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic, resultType = llvm::Type::getVoidTy(getLLVMContext()); QualType ret = FI.getReturnType(); - const llvm::Type *ty = ConvertType(ret, isRecursive); + const llvm::Type *ty = ConvertType(ret); unsigned addressSpace = Context.getTargetAddressSpace(ret); argTypes.push_back(llvm::PointerType::get(ty, addressSpace)); break; @@ -683,7 +671,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic, case ABIArgInfo::Indirect: { // indirect arguments are always on the stack, which is addr space #0. - const llvm::Type *LTy = ConvertTypeForMem(it->type, isRecursive); + const llvm::Type *LTy = ConvertTypeForMem(it->type); argTypes.push_back(LTy->getPointerTo()); break; } @@ -693,7 +681,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic, // If the coerce-to type is a first class aggregate, flatten it. Either // way is semantically identical, but fast-isel and the optimizer // generally likes scalar values better than FCAs. 
- const llvm::Type *argType = argAI.getCoerceToType(); + llvm::Type *argType = argAI.getCoerceToType(); if (const llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) { for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) argTypes.push_back(st->getElementType(i)); @@ -704,7 +692,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic, } case ABIArgInfo::Expand: - GetExpandedTypes(it->type, argTypes, isRecursive); + GetExpandedTypes(it->type, argTypes); break; } } @@ -722,10 +710,10 @@ const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType()); else Info = &getFunctionInfo(MD); - return GetFunctionType(*Info, FPT->isVariadic(), false); + return GetFunctionType(*Info, FPT->isVariadic()); } - return llvm::OpaqueType::get(getLLVMContext()); + return llvm::StructType::get(getLLVMContext()); } void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, @@ -852,11 +840,11 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, continue; case ABIArgInfo::Expand: { - llvm::SmallVector<const llvm::Type*, 8> types; + llvm::SmallVector<llvm::Type*, 8> types; // FIXME: This is rather inefficient. Do we ever actually need to do // anything here? The result should be just reconstructed on the other // side, so extension should be a non-issue. - getTypes().GetExpandedTypes(ParamType, types, false); + getTypes().GetExpandedTypes(ParamType, types); Index += types.size(); continue; } diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp index 42a07df5e4..9c400e75a6 100644 --- a/lib/CodeGen/CGDeclCXX.cpp +++ b/lib/CodeGen/CGDeclCXX.cpp @@ -117,12 +117,13 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn, } // Get the destructor function type - const llvm::Type *DtorFnTy = + llvm::Type *ArgTys[] = { Int8PtrTy }; + llvm::Type *DtorFnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), - Int8PtrTy, false); + ArgTys, false); DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy); - const llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy }; + llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy }; // Get the __cxa_atexit function type // extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d ); diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp index af54f39686..30d4072af2 100644 --- a/lib/CodeGen/CGException.cpp +++ b/lib/CodeGen/CGException.cpp @@ -29,10 +29,11 @@ using namespace CodeGen; static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) { // void *__cxa_allocate_exception(size_t thrown_size); - const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType()); + llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType()); + llvm::Type *ArgTys[] = { SizeTy }; const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getInt8PtrTy(CGF.getLLVMContext()), - SizeTy, /*IsVarArgs=*/false); + ArgTys, /*IsVarArgs=*/false); return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception"); } @@ -40,10 +41,11 @@ static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) { static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) { // void __cxa_free_exception(void *thrown_exception); - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *ArgTys[] = { Int8PtrTy }; const llvm::FunctionType *FTy = 
llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), - Int8PtrTy, /*IsVarArgs=*/false); + ArgTys, /*IsVarArgs=*/false); return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception"); } @@ -52,8 +54,8 @@ static llvm::Constant *getThrowFn(CodeGenFunction &CGF) { // void __cxa_throw(void *thrown_exception, std::type_info *tinfo, // void (*dest) (void *)); - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); - const llvm::Type *Args[3] = { Int8PtrTy, Int8PtrTy, Int8PtrTy }; + llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *Args[3] = { Int8PtrTy, Int8PtrTy, Int8PtrTy }; const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), Args, /*IsVarArgs=*/false); @@ -74,9 +76,10 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) { static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) { // void *__cxa_get_exception_ptr(void*); - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *ArgTys[] = { Int8PtrTy }; const llvm::FunctionType *FTy = - llvm::FunctionType::get(Int8PtrTy, Int8PtrTy, /*IsVarArgs=*/false); + llvm::FunctionType::get(Int8PtrTy, ArgTys, /*IsVarArgs=*/false); return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr"); } @@ -84,9 +87,10 @@ static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) { static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) { // void *__cxa_begin_catch(void*); - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *ArgTys[] = { Int8PtrTy }; const llvm::FunctionType *FTy = - llvm::FunctionType::get(Int8PtrTy, Int8PtrTy, /*IsVarArgs=*/false); + llvm::FunctionType::get(Int8PtrTy, ArgTys, /*IsVarArgs=*/false); return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch"); } @@ -104,17 +108,19 @@ static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) { static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) { // void __cxa_call_unexepcted(void *thrown_exception); - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *ArgTys[] = { Int8PtrTy }; const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), - Int8PtrTy, /*IsVarArgs=*/false); + ArgTys, /*IsVarArgs=*/false); return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected"); } llvm::Constant *CodeGenFunction::getUnwindResumeFn() { + llvm::Type *ArgTys[] = { Int8PtrTy }; const llvm::FunctionType *FTy = - llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false); + llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false); if (CGM.getLangOptions().SjLjExceptions) return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume"); @@ -122,8 +128,9 @@ llvm::Constant *CodeGenFunction::getUnwindResumeFn() { } llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() { + llvm::Type *ArgTys[] = { Int8PtrTy }; const llvm::FunctionType *FTy = - llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false); + llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false); if (CGM.getLangOptions().SjLjExceptions) return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow"); @@ -152,10 +159,11 @@ static llvm::Constant *getTerminateFn(CodeGenFunction 
&CGF) { static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF, llvm::StringRef Name) { - const llvm::Type *Int8PtrTy = + llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext()); - const llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, Int8PtrTy, + llvm::Type *ArgTys[] = { Int8PtrTy }; + const llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false); return CGF.CGM.CreateRuntimeFunction(FTy, Name); diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp index 289eaa08fb..5d72162938 100644 --- a/lib/CodeGen/CGExpr.cpp +++ b/lib/CodeGen/CGExpr.cpp @@ -505,7 +505,7 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) { // This needs to be to the standard address space. Address = Builder.CreateBitCast(Address, Int8PtrTy); - const llvm::Type *IntPtrT = IntPtrTy; + llvm::Type *IntPtrT = IntPtrTy; llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1); // In time, people may want to control this and use a 1 here. diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp index 2e68f8f960..525f75af53 100644 --- a/lib/CodeGen/CGExprCXX.cpp +++ b/lib/CodeGen/CGExprCXX.cpp @@ -619,7 +619,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, // can be ignored because the result shouldn't be used if // allocation fails. if (typeSizeMultiplier != 1) { - const llvm::Type *intrinsicTypes[] = { CGF.SizeTy }; + llvm::Type *intrinsicTypes[] = { CGF.SizeTy }; llvm::Value *umul_with_overflow = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, intrinsicTypes, 1); @@ -661,7 +661,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, if (cookieSize != 0) { sizeWithoutCookie = size; - const llvm::Type *intrinsicTypes[] = { CGF.SizeTy }; + llvm::Type *intrinsicTypes[] = { CGF.SizeTy }; llvm::Value *uadd_with_overflow = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, intrinsicTypes, 1); @@ -1569,11 +1569,11 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) { // const abi::__class_type_info *dst, // std::ptrdiff_t src2dst_offset); - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); - const llvm::Type *PtrDiffTy = + llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + llvm::Type *PtrDiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); - const llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy }; + llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy }; const llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false); diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp index 9bbc93945c..05b8194af4 100644 --- a/lib/CodeGen/CGExprScalar.cpp +++ b/lib/CodeGen/CGExprScalar.cpp @@ -1772,7 +1772,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { OpID <<= 1; OpID |= 1; - const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); + llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1); @@ -1803,8 +1803,8 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { Builder.SetInsertPoint(overflowBB); // Get the overflow handler. 
- const llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext); - const llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty }; + llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext); + llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty }; llvm::FunctionType *handlerTy = llvm::FunctionType::get(CGF.Int64Ty, argTypes, true); llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName); diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp index be38d36f6c..2f0d827fc3 100644 --- a/lib/CodeGen/CGObjC.cpp +++ b/lib/CodeGen/CGObjC.cpp @@ -1328,7 +1328,7 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF, if (isa<llvm::ConstantPointerNull>(value)) return value; if (!fn) { - std::vector<const llvm::Type*> args(1, CGF.Int8PtrTy); + std::vector<llvm::Type*> args(1, CGF.Int8PtrTy); const llvm::FunctionType *fnType = llvm::FunctionType::get(CGF.Int8PtrTy, args, false); fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); @@ -1353,7 +1353,7 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, llvm::Constant *&fn, llvm::StringRef fnName) { if (!fn) { - std::vector<const llvm::Type*> args(1, CGF.Int8PtrPtrTy); + std::vector<llvm::Type*> args(1, CGF.Int8PtrPtrTy); const llvm::FunctionType *fnType = llvm::FunctionType::get(CGF.Int8PtrTy, args, false); fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); @@ -1388,7 +1388,7 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, == value->getType()); if (!fn) { - std::vector<const llvm::Type*> argTypes(2); + std::vector<llvm::Type*> argTypes(2); argTypes[0] = CGF.Int8PtrPtrTy; argTypes[1] = CGF.Int8PtrTy; @@ -1420,7 +1420,7 @@ static void emitARCCopyOperation(CodeGenFunction &CGF, assert(dst->getType() == src->getType()); if (!fn) { - std::vector<const llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy); + std::vector<llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy); const llvm::FunctionType *fnType = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false); fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); @@ -1518,7 +1518,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) { llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release; if (!fn) { - std::vector<const llvm::Type*> args(1, Int8PtrTy); + std::vector<llvm::Type*> args(1, Int8PtrTy); const llvm::FunctionType *fnType = llvm::FunctionType::get(Builder.getVoidTy(), args, false); fn = createARCRuntimeFunction(CGM, fnType, "objc_release"); @@ -1548,7 +1548,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr, llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong; if (!fn) { - const llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy }; + llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy }; const llvm::FunctionType *fnType = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false); fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong"); @@ -1702,7 +1702,7 @@ void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) { void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) { llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak; if (!fn) { - std::vector<const llvm::Type*> args(1, Int8PtrPtrTy); + std::vector<llvm::Type*> args(1, Int8PtrPtrTy); const llvm::FunctionType *fnType = llvm::FunctionType::get(Builder.getVoidTy(), args, false); fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak"); @@ -1756,7 +1756,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) { llvm::Constant 
*&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop; if (!fn) { - std::vector<const llvm::Type*> args(1, Int8PtrTy); + std::vector<llvm::Type*> args(1, Int8PtrTy); const llvm::FunctionType *fnType = llvm::FunctionType::get(Builder.getVoidTy(), args, false); @@ -2406,7 +2406,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolStmt( /// make sure it survives garbage collection until this point. void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) { // We just use an inline assembly. - const llvm::Type *paramTypes[] = { VoidPtrTy }; + llvm::Type *paramTypes[] = { VoidPtrTy }; llvm::FunctionType *extenderType = llvm::FunctionType::get(VoidTy, paramTypes, /*variadic*/ false); llvm::Value *extender diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp index dc822628f5..f4571eeee1 100644 --- a/lib/CodeGen/CGObjCGNU.cpp +++ b/lib/CodeGen/CGObjCGNU.cpp @@ -50,7 +50,7 @@ namespace { /// avoids constructing the type more than once if it's used more than once. class LazyRuntimeFunction { CodeGenModule *CGM; - std::vector<const llvm::Type*> ArgTys; + std::vector<llvm::Type*> ArgTys; const char *FunctionName; llvm::Constant *Function; public: @@ -63,14 +63,14 @@ class LazyRuntimeFunction { /// of the arguments. END_WITH_NULL void init(CodeGenModule *Mod, const char *name, - const llvm::Type *RetTy, ...) { + llvm::Type *RetTy, ...) { CGM =Mod; FunctionName = name; Function = 0; ArgTys.clear(); va_list Args; va_start(Args, RetTy); - while (const llvm::Type *ArgTy = va_arg(Args, const llvm::Type*)) + while (llvm::Type *ArgTy = va_arg(Args, llvm::Type*)) ArgTys.push_back(ArgTy); va_end(Args); // Push the return type on at the end so we can pop it off easily @@ -118,24 +118,24 @@ protected: /// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring /// SEL is included in a header somewhere, in which case it will be whatever /// type is declared in that header, most likely {i8*, i8*}. - const llvm::PointerType *SelectorTy; + llvm::PointerType *SelectorTy; /// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the /// places where it's used const llvm::IntegerType *Int8Ty; /// Pointer to i8 - LLVM type of char*, for all of the places where the /// runtime needs to deal with C strings. - const llvm::PointerType *PtrToInt8Ty; + llvm::PointerType *PtrToInt8Ty; /// Instance Method Pointer type. This is a pointer to a function that takes, /// at a minimum, an object and a selector, and is the generic type for /// Objective-C methods. Due to differences between variadic / non-variadic /// calling conventions, it must always be cast to the correct type before /// actually being used. - const llvm::PointerType *IMPTy; + llvm::PointerType *IMPTy; /// Type of an untyped Objective-C object. Clang treats id as a built-in type /// when compiling Objective-C code, so this may be an opaque pointer (i8*), /// but if the runtime header declaring it is included then it may be a /// pointer to a structure. - const llvm::PointerType *IdTy; + llvm::PointerType *IdTy; /// Pointer to a pointer to an Objective-C object. Used in the new ABI /// message lookup function and some GC-related functions. const llvm::PointerType *PtrToIdTy; @@ -143,11 +143,11 @@ protected: /// call Objective-C methods. CanQualType ASTIdTy; /// LLVM type for C int type. - const llvm::IntegerType *IntTy; + llvm::IntegerType *IntTy; /// LLVM type for an opaque pointer. 
This is identical to PtrToInt8Ty, but is /// used in the code to document the difference between i8* meaning a pointer /// to a C string and i8* meaning a pointer to some opaque type. - const llvm::PointerType *PtrTy; + llvm::PointerType *PtrTy; /// LLVM type for C long type. The runtime uses this in a lot of places where /// it should be using intptr_t, but we can't fix this without breaking /// compatibility with GCC... @@ -619,7 +619,7 @@ class CGObjCGNUstep : public CGObjCGNU { PtrToObjCSuperTy, SelectorTy, NULL); // If we're in ObjC++ mode, then we want to make if (CGM.getLangOptions().CPlusPlus) { - const llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext); + llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext); // void *__cxa_begin_catch(void *e) EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, NULL); // void __cxa_end_catch(void) @@ -712,7 +712,7 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion, ObjCSuperTy = llvm::StructType::get(IdTy, IdTy, NULL); PtrToObjCSuperTy = llvm::PointerType::getUnqual(ObjCSuperTy); - const llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext); + llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext); // void objc_exception_throw(id); ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, NULL); @@ -740,7 +740,7 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion, PtrDiffTy, BoolTy, BoolTy, NULL); // IMP type - const llvm::Type *IMPArgs[] = { IdTy, SelectorTy }; + llvm::Type *IMPArgs[] = { IdTy, SelectorTy }; IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs, true)); @@ -794,8 +794,9 @@ llvm::Value *CGObjCGNU::GetClassNamed(CGBuilderTy &Builder, EmitClassRef(Name); ClassName = Builder.CreateStructGEP(ClassName, 0); + llvm::Type *ArgTys[] = { PtrToInt8Ty }; llvm::Constant *ClassLookupFn = - CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true), + CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, ArgTys, true), "objc_lookup_class"); return Builder.CreateCall(ClassLookupFn, ClassName); } @@ -999,11 +1000,13 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF, if (isCategoryImpl) { llvm::Constant *classLookupFunction = 0; if (IsClassMessage) { + llvm::Type *ArgTys[] = { PtrTy }; classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get( - IdTy, PtrTy, true), "objc_get_meta_class"); + IdTy, ArgTys, true), "objc_get_meta_class"); } else { + llvm::Type *ArgTys[] = { PtrTy }; classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get( - IdTy, PtrTy, true), "objc_get_class"); + IdTy, ArgTys, true), "objc_get_class"); } ReceiverClass = Builder.CreateCall(classLookupFunction, MakeConstantString(Class->getNameAsString())); @@ -1237,18 +1240,14 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName, Methods); // Structure containing list pointer, array and array count - llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields; - llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get(VMContext); - llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy); - llvm::StructType *ObjCMethodListTy = llvm::StructType::get( + llvm::StructType *ObjCMethodListTy = + llvm::StructType::createNamed(VMContext, ""); + llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy); + ObjCMethodListTy->setBody( NextPtrTy, IntTy, ObjCMethodArrayTy, NULL); - // Refine next pointer type to concrete type - llvm::cast<llvm::OpaqueType>( - 
OpaqueNextTy.get())->refineAbstractTypeTo(ObjCMethodListTy); - ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get()); Methods.clear(); Methods.push_back(llvm::ConstantPointerNull::get( @@ -2043,11 +2042,6 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy); } - // Name the ObjC types to make the IR a bit easier to read - TheModule.addTypeName(".objc_selector", SelStructPtrTy); - TheModule.addTypeName(".objc_id", IdTy); - TheModule.addTypeName(".objc_imp", IMPTy); - std::vector<llvm::Constant*> Elements; llvm::Constant *Statics = NULLPtr; // Generate statics list: @@ -2212,9 +2206,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { CGBuilderTy Builder(VMContext); Builder.SetInsertPoint(EntryBB); + llvm::Type *ArgTys[] = { llvm::PointerType::getUnqual(ModuleTy) }; llvm::FunctionType *FT = - llvm::FunctionType::get(Builder.getVoidTy(), - llvm::PointerType::getUnqual(ModuleTy), true); + llvm::FunctionType::get(Builder.getVoidTy(), ArgTys, true); llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class"); Builder.CreateCall(Register, Module); Builder.CreateRetVoid(); diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp index 98cd57155d..19ba868a3e 100644 --- a/lib/CodeGen/CGObjCMac.cpp +++ b/lib/CodeGen/CGObjCMac.cpp @@ -65,7 +65,7 @@ private: llvm::Constant *getMessageSendFn() const { // Add the non-lazy-bind attribute, since objc_msgSend is likely to // be called a lot. - const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; + llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSend", @@ -78,7 +78,7 @@ private: /// by indirect reference in the first argument, and therefore the /// self and selector parameters are shifted over by one. llvm::Constant *getMessageSendStretFn() const { - const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; + llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy, params, true), "objc_msgSend_stret"); @@ -91,7 +91,7 @@ private: /// floating-point stack; without a special entrypoint, the nil case /// would be unbalanced. llvm::Constant *getMessageSendFpretFn() const { - const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; + llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get( llvm::Type::getDoubleTy(VMContext), params, true), @@ -105,7 +105,7 @@ private: /// semantics. The class passed is the superclass of the current /// class. llvm::Constant *getMessageSendSuperFn() const { - const llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy }; + llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSendSuper"); @@ -116,7 +116,7 @@ private: /// A slightly different messenger used for super calls. The class /// passed is the current class. llvm::Constant *getMessageSendSuperFn2() const { - const llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy }; + llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSendSuper2"); @@ -127,7 +127,7 @@ private: /// /// The messenger used for super calls which return an aggregate indirectly. 
llvm::Constant *getMessageSendSuperStretFn() const { - const llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy }; + llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.VoidTy, params, true), "objc_msgSendSuper_stret"); @@ -138,7 +138,7 @@ private: /// /// objc_msgSendSuper_stret with the super2 semantics. llvm::Constant *getMessageSendSuperStretFn2() const { - const llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy }; + llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.VoidTy, params, true), "objc_msgSendSuper2_stret"); @@ -158,20 +158,20 @@ protected: CodeGen::CodeGenModule &CGM; public: - const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy; - const llvm::Type *Int8PtrTy; + llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy; + llvm::Type *Int8PtrTy; /// ObjectPtrTy - LLVM type for object handles (typeof(id)) - const llvm::Type *ObjectPtrTy; + llvm::Type *ObjectPtrTy; /// PtrObjectPtrTy - LLVM type for id * - const llvm::Type *PtrObjectPtrTy; + llvm::Type *PtrObjectPtrTy; /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL)) - const llvm::Type *SelectorPtrTy; + llvm::Type *SelectorPtrTy; /// ProtocolPtrTy - LLVM type for external protocol handles /// (typeof(Protocol)) - const llvm::Type *ExternalProtocolPtrTy; + llvm::Type *ExternalProtocolPtrTy; // SuperCTy - clang type for struct objc_super. QualType SuperCTy; @@ -179,27 +179,27 @@ public: QualType SuperPtrCTy; /// SuperTy - LLVM type for struct objc_super. - const llvm::StructType *SuperTy; + llvm::StructType *SuperTy; /// SuperPtrTy - LLVM type for struct objc_super *. - const llvm::Type *SuperPtrTy; + llvm::Type *SuperPtrTy; /// PropertyTy - LLVM type for struct objc_property (struct _prop_t /// in GCC parlance). - const llvm::StructType *PropertyTy; + llvm::StructType *PropertyTy; /// PropertyListTy - LLVM type for struct objc_property_list /// (_prop_list_t in GCC parlance). - const llvm::StructType *PropertyListTy; + llvm::StructType *PropertyListTy; /// PropertyListPtrTy - LLVM type for struct objc_property_list*. - const llvm::Type *PropertyListPtrTy; + llvm::Type *PropertyListPtrTy; // MethodTy - LLVM type for struct objc_method. - const llvm::StructType *MethodTy; + llvm::StructType *MethodTy; /// CacheTy - LLVM type for struct objc_cache. - const llvm::Type *CacheTy; + llvm::Type *CacheTy; /// CachePtrTy - LLVM type for struct objc_cache *. - const llvm::Type *CachePtrTy; + llvm::Type *CachePtrTy; llvm::Constant *getGetPropertyFn() { CodeGen::CodeGenTypes &Types = CGM.getTypes(); @@ -273,7 +273,7 @@ public: /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function. llvm::Constant *getGcReadWeakFn() { // id objc_read_weak (id *) - const llvm::Type *args[] = { ObjectPtrTy->getPointerTo() }; + llvm::Type *args[] = { ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_read_weak"); @@ -282,7 +282,7 @@ public: /// GcAssignWeakFn -- LLVM objc_assign_weak function. 
llvm::Constant *getGcAssignWeakFn() { // id objc_assign_weak (id, id *) - const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; + llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak"); @@ -291,7 +291,7 @@ public: /// GcAssignGlobalFn -- LLVM objc_assign_global function. llvm::Constant *getGcAssignGlobalFn() { // id objc_assign_global(id, id *) - const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; + llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_global"); @@ -300,7 +300,7 @@ public: /// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function. llvm::Constant *getGcAssignThreadLocalFn() { // id objc_assign_threadlocal(id src, id * dest) - const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; + llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal"); @@ -309,8 +309,8 @@ public: /// GcAssignIvarFn -- LLVM objc_assign_ivar function. llvm::Constant *getGcAssignIvarFn() { // id objc_assign_ivar(id, id *, ptrdiff_t) - const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(), - CGM.PtrDiffTy }; + llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(), + CGM.PtrDiffTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar"); @@ -319,7 +319,7 @@ public: /// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function. llvm::Constant *GcMemmoveCollectableFn() { // void *objc_memmove_collectable(void *dst, const void *src, size_t size) - const llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy }; + llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable"); } @@ -327,7 +327,7 @@ public: /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function. llvm::Constant *getGcAssignStrongCastFn() { // id objc_assign_strongCast(id, id *) - const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; + llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast"); @@ -336,7 +336,7 @@ public: /// ExceptionThrowFn - LLVM objc_exception_throw function. llvm::Constant *getExceptionThrowFn() { // void objc_exception_throw(id) - const llvm::Type *args[] = { ObjectPtrTy }; + llvm::Type *args[] = { ObjectPtrTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw"); @@ -352,7 +352,7 @@ public: /// SyncEnterFn - LLVM object_sync_enter function. llvm::Constant *getSyncEnterFn() { // void objc_sync_enter (id) - const llvm::Type *args[] = { ObjectPtrTy }; + llvm::Type *args[] = { ObjectPtrTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter"); @@ -361,7 +361,7 @@ public: /// SyncExitFn - LLVM object_sync_exit function. 
llvm::Constant *getSyncExitFn() { // void objc_sync_exit (id) - const llvm::Type *args[] = { ObjectPtrTy }; + llvm::Type *args[] = { ObjectPtrTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit"); @@ -400,62 +400,62 @@ public: class ObjCTypesHelper : public ObjCCommonTypesHelper { public: /// SymtabTy - LLVM type for struct objc_symtab. - const llvm::StructType *SymtabTy; + llvm::StructType *SymtabTy; /// SymtabPtrTy - LLVM type for struct objc_symtab *. - const llvm::Type *SymtabPtrTy; + llvm::Type *SymtabPtrTy; /// ModuleTy - LLVM type for struct objc_module. - const llvm::StructType *ModuleTy; + llvm::StructType *ModuleTy; /// ProtocolTy - LLVM type for struct objc_protocol. - const llvm::StructType *ProtocolTy; + llvm::StructType *ProtocolTy; /// ProtocolPtrTy - LLVM type for struct objc_protocol *. - const llvm::Type *ProtocolPtrTy; + llvm::Type *ProtocolPtrTy; /// ProtocolExtensionTy - LLVM type for struct /// objc_protocol_extension. - const llvm::StructType *ProtocolExtensionTy; + llvm::StructType *ProtocolExtensionTy; /// ProtocolExtensionTy - LLVM type for struct /// objc_protocol_extension *. - const llvm::Type *ProtocolExtensionPtrTy; + llvm::Type *ProtocolExtensionPtrTy; /// MethodDescriptionTy - LLVM type for struct /// objc_method_description. - const llvm::StructType *MethodDescriptionTy; + llvm::StructType *MethodDescriptionTy; /// MethodDescriptionListTy - LLVM type for struct /// objc_method_description_list. - const llvm::StructType *MethodDescriptionListTy; + llvm::StructType *MethodDescriptionListTy; /// MethodDescriptionListPtrTy - LLVM type for struct /// objc_method_description_list *. - const llvm::Type *MethodDescriptionListPtrTy; + llvm::Type *MethodDescriptionListPtrTy; /// ProtocolListTy - LLVM type for struct objc_property_list. - const llvm::Type *ProtocolListTy; + llvm::StructType *ProtocolListTy; /// ProtocolListPtrTy - LLVM type for struct objc_property_list*. - const llvm::Type *ProtocolListPtrTy; + llvm::Type *ProtocolListPtrTy; /// CategoryTy - LLVM type for struct objc_category. - const llvm::StructType *CategoryTy; + llvm::StructType *CategoryTy; /// ClassTy - LLVM type for struct objc_class. - const llvm::StructType *ClassTy; + llvm::StructType *ClassTy; /// ClassPtrTy - LLVM type for struct objc_class *. - const llvm::Type *ClassPtrTy; + llvm::Type *ClassPtrTy; /// ClassExtensionTy - LLVM type for struct objc_class_ext. - const llvm::StructType *ClassExtensionTy; + llvm::StructType *ClassExtensionTy; /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *. - const llvm::Type *ClassExtensionPtrTy; + llvm::Type *ClassExtensionPtrTy; // IvarTy - LLVM type for struct objc_ivar. - const llvm::StructType *IvarTy; + llvm::StructType *IvarTy; /// IvarListTy - LLVM type for struct objc_ivar_list. - const llvm::Type *IvarListTy; + llvm::Type *IvarListTy; /// IvarListPtrTy - LLVM type for struct objc_ivar_list *. - const llvm::Type *IvarListPtrTy; + llvm::Type *IvarListPtrTy; /// MethodListTy - LLVM type for struct objc_method_list. - const llvm::Type *MethodListTy; + llvm::Type *MethodListTy; /// MethodListPtrTy - LLVM type for struct objc_method_list *. - const llvm::Type *MethodListPtrTy; + llvm::Type *MethodListPtrTy; /// ExceptionDataTy - LLVM type for struct _objc_exception_data. - const llvm::Type *ExceptionDataTy; + llvm::Type *ExceptionDataTy; /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function. 
llvm::Constant *getExceptionTryEnterFn() { - const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() }; + llvm::Type *params[] = { ExceptionDataTy->getPointerTo() }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.VoidTy, params, false), "objc_exception_try_enter"); @@ -463,7 +463,7 @@ public: /// ExceptionTryExitFn - LLVM objc_exception_try_exit function. llvm::Constant *getExceptionTryExitFn() { - const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() }; + llvm::Type *params[] = { ExceptionDataTy->getPointerTo() }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.VoidTy, params, false), "objc_exception_try_exit"); @@ -471,7 +471,7 @@ public: /// ExceptionExtractFn - LLVM objc_exception_extract function. llvm::Constant *getExceptionExtractFn() { - const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() }; + llvm::Type *params[] = { ExceptionDataTy->getPointerTo() }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, false), "objc_exception_extract"); @@ -479,7 +479,7 @@ public: /// ExceptionMatchFn - LLVM objc_exception_match function. llvm::Constant *getExceptionMatchFn() { - const llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy }; + llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.Int32Ty, params, false), "objc_exception_match"); @@ -489,7 +489,7 @@ public: /// SetJmpFn - LLVM _setjmp function. llvm::Constant *getSetJmpFn() { // This is specifically the prototype for x86. - const llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() }; + llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, params, false), "_setjmp"); @@ -506,46 +506,46 @@ class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper { public: // MethodListnfABITy - LLVM for struct _method_list_t - const llvm::StructType *MethodListnfABITy; + llvm::StructType *MethodListnfABITy; // MethodListnfABIPtrTy - LLVM for struct _method_list_t* - const llvm::Type *MethodListnfABIPtrTy; + llvm::Type *MethodListnfABIPtrTy; // ProtocolnfABITy = LLVM for struct _protocol_t - const llvm::StructType *ProtocolnfABITy; + llvm::StructType *ProtocolnfABITy; // ProtocolnfABIPtrTy = LLVM for struct _protocol_t* - const llvm::Type *ProtocolnfABIPtrTy; + llvm::Type *ProtocolnfABIPtrTy; // ProtocolListnfABITy - LLVM for struct _objc_protocol_list - const llvm::StructType *ProtocolListnfABITy; + llvm::StructType *ProtocolListnfABITy; // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list* - const llvm::Type *ProtocolListnfABIPtrTy; + llvm::Type *ProtocolListnfABIPtrTy; // ClassnfABITy - LLVM for struct _class_t - const llvm::StructType *ClassnfABITy; + llvm::StructType *ClassnfABITy; // ClassnfABIPtrTy - LLVM for struct _class_t* - const llvm::Type *ClassnfABIPtrTy; + llvm::Type *ClassnfABIPtrTy; // IvarnfABITy - LLVM for struct _ivar_t - const llvm::StructType *IvarnfABITy; + llvm::StructType *IvarnfABITy; // IvarListnfABITy - LLVM for struct _ivar_list_t - const llvm::StructType *IvarListnfABITy; + llvm::StructType *IvarListnfABITy; // IvarListnfABIPtrTy = LLVM for struct _ivar_list_t* - const llvm::Type *IvarListnfABIPtrTy; + llvm::Type *IvarListnfABIPtrTy; // ClassRonfABITy - LLVM for struct _class_ro_t - const llvm::StructType *ClassRonfABITy; + llvm::StructType *ClassRonfABITy; // ImpnfABITy - LLVM for id (*)(id, SEL, ...) 
- const llvm::Type *ImpnfABITy; + llvm::Type *ImpnfABITy; // CategorynfABITy - LLVM for struct _category_t - const llvm::StructType *CategorynfABITy; + llvm::StructType *CategorynfABITy; // New types for nonfragile abi messaging. @@ -554,31 +554,31 @@ public: // IMP messenger; // SEL name; // }; - const llvm::StructType *MessageRefTy; + llvm::StructType *MessageRefTy; // MessageRefCTy - clang type for struct _message_ref_t QualType MessageRefCTy; // MessageRefPtrTy - LLVM for struct _message_ref_t* - const llvm::Type *MessageRefPtrTy; + llvm::Type *MessageRefPtrTy; // MessageRefCPtrTy - clang type for struct _message_ref_t* QualType MessageRefCPtrTy; // MessengerTy - Type of the messenger (shown as IMP above) - const llvm::FunctionType *MessengerTy; + llvm::FunctionType *MessengerTy; // SuperMessageRefTy - LLVM for: // struct _super_message_ref_t { // SUPER_IMP messenger; // SEL name; // }; - const llvm::StructType *SuperMessageRefTy; + llvm::StructType *SuperMessageRefTy; // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t* - const llvm::Type *SuperMessageRefPtrTy; + llvm::Type *SuperMessageRefPtrTy; llvm::Constant *getMessageSendFixupFn() { // id objc_msgSend_fixup(id, struct message_ref_t*, ...) - const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy }; + llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSend_fixup"); @@ -586,7 +586,7 @@ public: llvm::Constant *getMessageSendFpretFixupFn() { // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...) - const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy }; + llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSend_fpret_fixup"); @@ -594,7 +594,7 @@ public: llvm::Constant *getMessageSendStretFixupFn() { // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...) - const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy }; + llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSend_stret_fixup"); @@ -603,7 +603,7 @@ public: llvm::Constant *getMessageSendSuper2FixupFn() { // id objc_msgSendSuper2_fixup (struct objc_super *, // struct _super_message_ref_t*, ...) - const llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy }; + llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSendSuper2_fixup"); @@ -612,7 +612,7 @@ public: llvm::Constant *getMessageSendSuper2StretFixupFn() { // id objc_msgSendSuper2_stret_fixup(struct objc_super *, // struct _super_message_ref_t*, ...) 
- const llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy }; + llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSendSuper2_stret_fixup"); @@ -625,14 +625,14 @@ public: } llvm::Constant *getObjCBeginCatchFn() { - const llvm::Type *params[] = { Int8PtrTy }; + llvm::Type *params[] = { Int8PtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy, params, false), "objc_begin_catch"); } - const llvm::StructType *EHTypeTy; - const llvm::Type *EHTypePtrTy; + llvm::StructType *EHTypeTy; + llvm::Type *EHTypePtrTy; ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm); ~ObjCNonFragileABITypesHelper(){} @@ -2768,7 +2768,7 @@ void FragileHazards::collectLocals() { } llvm::FunctionType *FragileHazards::GetAsmFnType() { - llvm::SmallVector<const llvm::Type *, 16> tys(Locals.size()); + llvm::SmallVector<llvm::Type *, 16> tys(Locals.size()); for (unsigned i = 0, e = Locals.size(); i != e; ++i) tys[i] = Locals[i]->getType(); return llvm::FunctionType::get(CGF.VoidTy, tys, false); @@ -4162,20 +4162,19 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm) // char *name; // char *attributes; // } - PropertyTy = llvm::StructType::get(Int8PtrTy, Int8PtrTy, NULL); - CGM.getModule().addTypeName("struct._prop_t", - PropertyTy); + PropertyTy = llvm::StructType::createNamed("struct._prop_t", + Int8PtrTy, Int8PtrTy, NULL); // struct _prop_list_t { // uint32_t entsize; // sizeof(struct _prop_t) // uint32_t count_of_properties; // struct _prop_t prop_list[count_of_properties]; // } - PropertyListTy = llvm::StructType::get(IntTy, IntTy, - llvm::ArrayType::get(PropertyTy, 0), - NULL); - CGM.getModule().addTypeName("struct._prop_list_t", - PropertyListTy); + PropertyListTy = + llvm::StructType::createNamed("struct._prop_list_t", + IntTy, IntTy, + llvm::ArrayType::get(PropertyTy, 0), + NULL); // struct _prop_list_t * PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy); @@ -4184,12 +4183,12 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm) // char *method_type; // char *_imp; // } - MethodTy = llvm::StructType::get(SelectorPtrTy, Int8PtrTy, Int8PtrTy, NULL); - CGM.getModule().addTypeName("struct._objc_method", MethodTy); + MethodTy = llvm::StructType::createNamed("struct._objc_method", + SelectorPtrTy, Int8PtrTy, Int8PtrTy, + NULL); // struct _objc_cache * - CacheTy = llvm::OpaqueType::get(VMContext); - CGM.getModule().addTypeName("struct._objc_cache", CacheTy); + CacheTy = llvm::StructType::createNamed(VMContext, "struct._objc_cache"); CachePtrTy = llvm::PointerType::getUnqual(CacheTy); } @@ -4201,19 +4200,18 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) // char *types; // } MethodDescriptionTy = - llvm::StructType::get(SelectorPtrTy, Int8PtrTy, NULL); - CGM.getModule().addTypeName("struct._objc_method_description", - MethodDescriptionTy); + llvm::StructType::createNamed("struct._objc_method_description", + SelectorPtrTy, Int8PtrTy, NULL); // struct _objc_method_description_list { // int count; // struct _objc_method_description[1]; // } MethodDescriptionListTy = - llvm::StructType::get(IntTy, llvm::ArrayType::get(MethodDescriptionTy, 0), - NULL); - CGM.getModule().addTypeName("struct._objc_method_description_list", - MethodDescriptionListTy); + llvm::StructType::createNamed("struct._objc_method_description_list", + IntTy, + llvm::ArrayType::get(MethodDescriptionTy, 0), + NULL); // struct 
_objc_method_description_list * MethodDescriptionListPtrTy = @@ -4228,28 +4226,27 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) // struct _objc_property_list *instance_properties; // } ProtocolExtensionTy = - llvm::StructType::get(IntTy, - MethodDescriptionListPtrTy, - MethodDescriptionListPtrTy, - PropertyListPtrTy, - NULL); - CGM.getModule().addTypeName("struct._objc_protocol_extension", - ProtocolExtensionTy); + llvm::StructType::createNamed("struct._objc_protocol_extension", + IntTy, + MethodDescriptionListPtrTy, + MethodDescriptionListPtrTy, + PropertyListPtrTy, + NULL); // struct _objc_protocol_extension * ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy); // Handle recursive construction of Protocol and ProtocolList types - llvm::PATypeHolder ProtocolTyHolder = llvm::OpaqueType::get(VMContext); - llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext); + ProtocolTy = + llvm::StructType::createNamed(VMContext, "struct._objc_protocol"); - const llvm::Type *T = - llvm::StructType::get(llvm::PointerType::getUnqual(ProtocolListTyHolder), + ProtocolListTy = + llvm::StructType::createNamed(VMContext, "struct._objc_protocol_list"); + ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy), LongTy, - llvm::ArrayType::get(ProtocolTyHolder, 0), + llvm::ArrayType::get(ProtocolTy, 0), NULL); - cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(T); // struct _objc_protocol { // struct _objc_protocol_extension *isa; @@ -4258,21 +4255,15 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) // struct _objc_method_description_list *instance_methods; // struct _objc_method_description_list *class_methods; // } - T = llvm::StructType::get(ProtocolExtensionPtrTy, Int8PtrTy, - llvm::PointerType::getUnqual(ProtocolListTyHolder), - MethodDescriptionListPtrTy, - MethodDescriptionListPtrTy, - NULL); - cast<llvm::OpaqueType>(ProtocolTyHolder.get())->refineAbstractTypeTo(T); - - ProtocolListTy = cast<llvm::StructType>(ProtocolListTyHolder.get()); - CGM.getModule().addTypeName("struct._objc_protocol_list", - ProtocolListTy); + ProtocolTy->setBody(ProtocolExtensionPtrTy, Int8PtrTy, + llvm::PointerType::getUnqual(ProtocolListTy), + MethodDescriptionListPtrTy, + MethodDescriptionListPtrTy, + NULL); + // struct _objc_protocol_list * ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy); - ProtocolTy = cast<llvm::StructType>(ProtocolTyHolder.get()); - CGM.getModule().addTypeName("struct._objc_protocol", ProtocolTy); ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy); // Class description structures @@ -4282,26 +4273,26 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) // char *ivar_type; // int ivar_offset; // } - IvarTy = llvm::StructType::get(Int8PtrTy, Int8PtrTy, IntTy, NULL); - CGM.getModule().addTypeName("struct._objc_ivar", IvarTy); + IvarTy = llvm::StructType::createNamed("struct._objc_ivar", + Int8PtrTy, Int8PtrTy, IntTy, NULL); // struct _objc_ivar_list * - IvarListTy = llvm::OpaqueType::get(VMContext); - CGM.getModule().addTypeName("struct._objc_ivar_list", IvarListTy); + IvarListTy = + llvm::StructType::createNamed(VMContext, "struct._objc_ivar_list"); IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy); // struct _objc_method_list * - MethodListTy = llvm::OpaqueType::get(VMContext); - CGM.getModule().addTypeName("struct._objc_method_list", MethodListTy); + MethodListTy = + llvm::StructType::createNamed(VMContext, "struct._objc_method_list"); MethodListPtrTy = 
llvm::PointerType::getUnqual(MethodListTy); // struct _objc_class_extension * ClassExtensionTy = - llvm::StructType::get(IntTy, Int8PtrTy, PropertyListPtrTy, NULL); - CGM.getModule().addTypeName("struct._objc_class_extension", ClassExtensionTy); + llvm::StructType::createNamed("struct._objc_class_extension", + IntTy, Int8PtrTy, PropertyListPtrTy, NULL); ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy); - llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext); + ClassTy = llvm::StructType::createNamed(VMContext, "struct._objc_class"); // struct _objc_class { // Class isa; @@ -4317,23 +4308,20 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) // char *ivar_layout; // struct _objc_class_ext *ext; // }; - T = llvm::StructType::get(llvm::PointerType::getUnqual(ClassTyHolder), - llvm::PointerType::getUnqual(ClassTyHolder), - Int8PtrTy, - LongTy, - LongTy, - LongTy, - IvarListPtrTy, - MethodListPtrTy, - CachePtrTy, - ProtocolListPtrTy, - Int8PtrTy, - ClassExtensionPtrTy, - NULL); - cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(T); - - ClassTy = cast<llvm::StructType>(ClassTyHolder.get()); - CGM.getModule().addTypeName("struct._objc_class", ClassTy); + ClassTy->setBody(llvm::PointerType::getUnqual(ClassTy), + llvm::PointerType::getUnqual(ClassTy), + Int8PtrTy, + LongTy, + LongTy, + LongTy, + IvarListPtrTy, + MethodListPtrTy, + CachePtrTy, + ProtocolListPtrTy, + Int8PtrTy, + ClassExtensionPtrTy, + NULL); + ClassPtrTy = llvm::PointerType::getUnqual(ClassTy); // struct _objc_category { @@ -4344,10 +4332,11 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) // uint32_t size; // sizeof(struct _objc_category) // struct _objc_property_list *instance_properties;// category's @property // } - CategoryTy = llvm::StructType::get(Int8PtrTy, Int8PtrTy, MethodListPtrTy, - MethodListPtrTy, ProtocolListPtrTy, - IntTy, PropertyListPtrTy, NULL); - CGM.getModule().addTypeName("struct._objc_category", CategoryTy); + CategoryTy = + llvm::StructType::createNamed("struct._objc_category", + Int8PtrTy, Int8PtrTy, MethodListPtrTy, + MethodListPtrTy, ProtocolListPtrTy, + IntTy, PropertyListPtrTy, NULL); // Global metadata structures @@ -4358,9 +4347,10 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) // short cat_def_cnt; // char *defs[cls_def_cnt + cat_def_cnt]; // } - SymtabTy = llvm::StructType::get(LongTy, SelectorPtrTy, ShortTy, ShortTy, - llvm::ArrayType::get(Int8PtrTy, 0), NULL); - CGM.getModule().addTypeName("struct._objc_symtab", SymtabTy); + SymtabTy = + llvm::StructType::createNamed("struct._objc_symtab", + LongTy, SelectorPtrTy, ShortTy, ShortTy, + llvm::ArrayType::get(Int8PtrTy, 0), NULL); SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy); // struct _objc_module { @@ -4370,8 +4360,8 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) // struct _objc_symtab* symtab; // } ModuleTy = - llvm::StructType::get(LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL); - CGM.getModule().addTypeName("struct._objc_module", ModuleTy); + llvm::StructType::createNamed("struct._objc_module", + LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL); // FIXME: This is the size of the setjmp buffer and should be target @@ -4379,16 +4369,14 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) uint64_t SetJmpBufferSize = 18; // Exceptions - const llvm::Type *StackPtrTy = llvm::ArrayType::get( + llvm::Type *StackPtrTy = llvm::ArrayType::get( llvm::Type::getInt8PtrTy(VMContext), 4); ExceptionDataTy = - llvm::StructType::get( - 
llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), - SetJmpBufferSize), - StackPtrTy, NULL); - CGM.getModule().addTypeName("struct._objc_exception_data", - ExceptionDataTy); + llvm::StructType::createNamed("struct._objc_exception_data", + llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), + SetJmpBufferSize), + StackPtrTy, NULL); } @@ -4399,11 +4387,11 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // uint32_t method_count; // struct _objc_method method_list[method_count]; // } - MethodListnfABITy = llvm::StructType::get(IntTy, IntTy, - llvm::ArrayType::get(MethodTy, 0), - NULL); - CGM.getModule().addTypeName("struct.__method_list_t", - MethodListnfABITy); + MethodListnfABITy = + llvm::StructType::createNamed("struct.__method_list_t", + IntTy, IntTy, + llvm::ArrayType::get(MethodTy, 0), + NULL); // struct method_list_t * MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy); @@ -4421,20 +4409,21 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // } // Holder for struct _protocol_list_t * - llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext); - - ProtocolnfABITy = llvm::StructType::get(ObjectPtrTy, Int8PtrTy, - ProtocolListTyHolder->getPointerTo(), - MethodListnfABIPtrTy, - MethodListnfABIPtrTy, - MethodListnfABIPtrTy, - MethodListnfABIPtrTy, - PropertyListPtrTy, - IntTy, - IntTy, - NULL); - CGM.getModule().addTypeName("struct._protocol_t", - ProtocolnfABITy); + ProtocolListnfABITy = + llvm::StructType::createNamed(VMContext, "struct._objc_protocol_list"); + + ProtocolnfABITy = + llvm::StructType::createNamed("struct._protocol_t", + ObjectPtrTy, Int8PtrTy, + llvm::PointerType::getUnqual(ProtocolListnfABITy), + MethodListnfABIPtrTy, + MethodListnfABIPtrTy, + MethodListnfABIPtrTy, + MethodListnfABIPtrTy, + PropertyListPtrTy, + IntTy, + IntTy, + NULL); // struct _protocol_t* ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy); @@ -4443,14 +4432,9 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // long protocol_count; // Note, this is 32/64 bit // struct _protocol_t *[protocol_count]; // } - ProtocolListnfABITy = llvm::StructType::get(LongTy, - llvm::ArrayType::get( - ProtocolnfABIPtrTy, 0), - NULL); - CGM.getModule().addTypeName("struct._objc_protocol_list", - ProtocolListnfABITy); - cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo( - ProtocolListnfABITy); + ProtocolListnfABITy->setBody(LongTy, + llvm::ArrayType::get(ProtocolnfABIPtrTy, 0), + NULL); // struct _objc_protocol_list* ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy); @@ -4462,24 +4446,25 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // uint32_t alignment; // uint32_t size; // } - IvarnfABITy = llvm::StructType::get(llvm::PointerType::getUnqual(LongTy), - Int8PtrTy, - Int8PtrTy, - IntTy, - IntTy, - NULL); - CGM.getModule().addTypeName("struct._ivar_t", IvarnfABITy); + IvarnfABITy = + llvm::StructType::createNamed("struct._ivar_t", + llvm::PointerType::getUnqual(LongTy), + Int8PtrTy, + Int8PtrTy, + IntTy, + IntTy, + NULL); // struct _ivar_list_t { // uint32 entsize; // sizeof(struct _ivar_t) // uint32 count; // struct _iver_t list[count]; // } - IvarListnfABITy = llvm::StructType::get(IntTy, IntTy, - llvm::ArrayType::get( - IvarnfABITy, 0), - NULL); - CGM.getModule().addTypeName("struct._ivar_list_t", IvarListnfABITy); + IvarListnfABITy = + 
llvm::StructType::createNamed("struct._ivar_list_t", + IntTy, IntTy, + llvm::ArrayType::get(IvarnfABITy, 0), + NULL); IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy); @@ -4498,22 +4483,21 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // } // FIXME. Add 'reserved' field in 64bit abi mode! - ClassRonfABITy = llvm::StructType::get(IntTy, - IntTy, - IntTy, - Int8PtrTy, - Int8PtrTy, - MethodListnfABIPtrTy, - ProtocolListnfABIPtrTy, - IvarListnfABIPtrTy, - Int8PtrTy, - PropertyListPtrTy, - NULL); - CGM.getModule().addTypeName("struct._class_ro_t", - ClassRonfABITy); + ClassRonfABITy = llvm::StructType::createNamed("struct._class_ro_t", + IntTy, + IntTy, + IntTy, + Int8PtrTy, + Int8PtrTy, + MethodListnfABIPtrTy, + ProtocolListnfABIPtrTy, + IvarListnfABIPtrTy, + Int8PtrTy, + PropertyListPtrTy, + NULL); // ImpnfABITy - LLVM for id (*)(id, SEL, ...) - const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; + llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; ImpnfABITy = llvm::FunctionType::get(ObjectPtrTy, params, false) ->getPointerTo(); @@ -4525,18 +4509,13 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // struct class_ro_t *ro; // } - llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext); - ClassnfABITy = - llvm::StructType::get(llvm::PointerType::getUnqual(ClassTyHolder), - llvm::PointerType::getUnqual(ClassTyHolder), - CachePtrTy, - llvm::PointerType::getUnqual(ImpnfABITy), - llvm::PointerType::getUnqual(ClassRonfABITy), - NULL); - CGM.getModule().addTypeName("struct._class_t", ClassnfABITy); - - cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo( - ClassnfABITy); + ClassnfABITy = llvm::StructType::createNamed(VMContext, "struct._class_t"); + ClassnfABITy->setBody(llvm::PointerType::getUnqual(ClassnfABITy), + llvm::PointerType::getUnqual(ClassnfABITy), + CachePtrTy, + llvm::PointerType::getUnqual(ImpnfABITy), + llvm::PointerType::getUnqual(ClassRonfABITy), + NULL); // LLVM for struct _class_t * ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy); @@ -4549,14 +4528,14 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // const struct _protocol_list_t * const protocols; // const struct _prop_list_t * const properties; // } - CategorynfABITy = llvm::StructType::get(Int8PtrTy, - ClassnfABIPtrTy, - MethodListnfABIPtrTy, - MethodListnfABIPtrTy, - ProtocolListnfABIPtrTy, - PropertyListPtrTy, - NULL); - CGM.getModule().addTypeName("struct._category_t", CategorynfABITy); + CategorynfABITy = llvm::StructType::createNamed("struct._category_t", + Int8PtrTy, + ClassnfABIPtrTy, + MethodListnfABIPtrTy, + MethodListnfABIPtrTy, + ProtocolListnfABIPtrTy, + PropertyListPtrTy, + NULL); // New types for nonfragile abi messaging. 
CodeGen::CodeGenTypes &Types = CGM.getTypes(); @@ -4591,8 +4570,9 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // SUPER_IMP messenger; // SEL name; // }; - SuperMessageRefTy = llvm::StructType::get(ImpnfABITy, SelectorPtrTy, NULL); - CGM.getModule().addTypeName("struct._super_message_ref_t", SuperMessageRefTy); + SuperMessageRefTy = + llvm::StructType::createNamed("struct._super_message_ref_t", + ImpnfABITy, SelectorPtrTy, NULL); // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t* SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy); @@ -4603,11 +4583,12 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // const char* name; // c++ typeinfo string // Class cls; // }; - EHTypeTy = llvm::StructType::get(llvm::PointerType::getUnqual(Int8PtrTy), - Int8PtrTy, - ClassnfABIPtrTy, - NULL); - CGM.getModule().addTypeName("struct._objc_typeinfo", EHTypeTy); + EHTypeTy = + llvm::StructType::createNamed("struct._objc_typeinfo", + llvm::PointerType::getUnqual(Int8PtrTy), + Int8PtrTy, + ClassnfABIPtrTy, + NULL); EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy); } diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h index 6d9fc0589e..8a450298f7 100644 --- a/lib/CodeGen/CGRecordLayout.h +++ b/lib/CodeGen/CGRecordLayout.h @@ -176,11 +176,11 @@ class CGRecordLayout { private: /// The LLVM type corresponding to this record layout; used when /// laying it out as a complete object. - llvm::PATypeHolder CompleteObjectType; + llvm::StructType *CompleteObjectType; /// The LLVM type for the non-virtual part of this record layout; /// used when laying it out as a base subobject. - llvm::PATypeHolder BaseSubobjectType; + llvm::StructType *BaseSubobjectType; /// Map from (non-bit-field) struct field to the corresponding llvm struct /// type field no. This info is populated by record builder. @@ -208,8 +208,8 @@ private: bool IsZeroInitializableAsBase : 1; public: - CGRecordLayout(const llvm::StructType *CompleteObjectType, - const llvm::StructType *BaseSubobjectType, + CGRecordLayout(llvm::StructType *CompleteObjectType, + llvm::StructType *BaseSubobjectType, bool IsZeroInitializable, bool IsZeroInitializableAsBase) : CompleteObjectType(CompleteObjectType), @@ -219,14 +219,14 @@ public: /// \brief Return the "complete object" LLVM type associated with /// this record. - const llvm::StructType *getLLVMType() const { - return cast<llvm::StructType>(CompleteObjectType.get()); + llvm::StructType *getLLVMType() const { + return CompleteObjectType; } /// \brief Return the "base subobject" LLVM type associated with /// this record. - const llvm::StructType *getBaseSubobjectLLVMType() const { - return cast<llvm::StructType>(BaseSubobjectType.get()); + llvm::StructType *getBaseSubobjectLLVMType() const { + return BaseSubobjectType; } /// \brief Check whether this struct can be C++ zero-initialized diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp index 6daf2ea371..2b07bafa00 100644 --- a/lib/CodeGen/CGRecordLayoutBuilder.cpp +++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp @@ -35,7 +35,7 @@ class CGRecordLayoutBuilder { public: /// FieldTypes - Holds the LLVM types that the struct is created from. /// - llvm::SmallVector<const llvm::Type *, 16> FieldTypes; + llvm::SmallVector<llvm::Type *, 16> FieldTypes; /// BaseSubobjectType - Holds the LLVM type for the non-virtual part /// of the struct. 
For example, consider: @@ -52,7 +52,7 @@ public: /// /// This only gets initialized if the base subobject type is /// different from the complete-object type. - const llvm::StructType *BaseSubobjectType; + llvm::StructType *BaseSubobjectType; /// FieldInfo - Holds a field and its corresponding LLVM field number. llvm::DenseMap<const FieldDecl *, unsigned> Fields; @@ -109,8 +109,8 @@ private: /// LayoutUnionField - Will layout a field in an union and return the type /// that the field will have. - const llvm::Type *LayoutUnionField(const FieldDecl *Field, - const ASTRecordLayout &Layout); + llvm::Type *LayoutUnionField(const FieldDecl *Field, + const ASTRecordLayout &Layout); /// LayoutUnion - Will layout a union RecordDecl. void LayoutUnion(const RecordDecl *D); @@ -151,7 +151,7 @@ private: void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset); /// AppendField - Appends a field with the given offset and type. - void AppendField(CharUnits fieldOffset, const llvm::Type *FieldTy); + void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy); /// AppendPadding - Appends enough padding bytes so that the total /// struct size is a multiple of the field alignment. @@ -165,7 +165,7 @@ private: /// getByteArrayType - Returns a byte array type with the given number of /// elements. - const llvm::Type *getByteArrayType(CharUnits NumBytes); + llvm::Type *getByteArrayType(CharUnits NumBytes); /// AppendBytes - Append a given number of bytes to the record. void AppendBytes(CharUnits numBytes); @@ -230,7 +230,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types, uint64_t FieldSize, uint64_t ContainingTypeSizeInBits, unsigned ContainingTypeAlign) { - const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType()); + const llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType()); CharUnits TypeSizeInBytes = CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty)); uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes); @@ -440,7 +440,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D, CharUnits fieldOffsetInBytes = Types.getContext().toCharUnitsFromBits(fieldOffset); - const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType()); + llvm::Type *Ty = Types.ConvertTypeForMem(D->getType()); CharUnits typeAlignment = getTypeAlignment(Ty); // If the type alignment is larger then the struct alignment, we must use @@ -488,7 +488,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D, return true; } -const llvm::Type * +llvm::Type * CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field, const ASTRecordLayout &Layout) { if (Field->isBitField()) { @@ -499,7 +499,7 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field, if (FieldSize == 0) return 0; - const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext()); + llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext()); CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits( llvm::RoundUpToAlignment(FieldSize, Types.getContext().Target.getCharAlign())); @@ -515,7 +515,7 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field, // This is a regular union field. 
Fields[Field] = 0; - return Types.ConvertTypeForMemRecursive(Field->getType()); + return Types.ConvertTypeForMem(Field->getType()); } void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) { @@ -523,7 +523,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) { const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D); - const llvm::Type *unionType = 0; + llvm::Type *unionType = 0; CharUnits unionSize = CharUnits::Zero(); CharUnits unionAlign = CharUnits::Zero(); @@ -534,7 +534,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) { fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) { assert(layout.getFieldOffset(fieldNo) == 0 && "Union field offset did not start at the beginning of record!"); - const llvm::Type *fieldType = LayoutUnionField(*field, layout); + llvm::Type *fieldType = LayoutUnionField(*field, layout); if (!fieldType) continue; @@ -599,10 +599,8 @@ void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base, // approximation, which is to use the base subobject type if it // has the same LLVM storage size as the nvsize. - const llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType(); + llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType(); AppendField(baseOffset, subobjectType); - - Types.addBaseSubobjectTypeName(base, baseLayout); } void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base, @@ -736,13 +734,14 @@ CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) { FieldTypes.push_back(getByteArrayType(NumBytes)); } - BaseSubobjectType = llvm::StructType::get(Types.getLLVMContext(), - FieldTypes, Packed); + + BaseSubobjectType = llvm::StructType::createNamed(Types.getLLVMContext(), "", + FieldTypes, Packed); + Types.addRecordTypeName(RD, BaseSubobjectType, ".base"); - if (needsPadding) { - // Pull the padding back off. + // Pull the padding back off. + if (needsPadding) FieldTypes.pop_back(); - } return true; } @@ -819,7 +818,7 @@ void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) { } void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset, - const llvm::Type *fieldType) { + llvm::Type *fieldType) { CharUnits fieldSize = CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType)); @@ -865,10 +864,10 @@ bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) { return true; } -const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) { +llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) { assert(!numBytes.isZero() && "Empty byte arrays aren't allowed."); - const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext()); + llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext()); if (numBytes > CharUnits::One()) Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity()); @@ -924,17 +923,16 @@ void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) { } } -CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) { +CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, + llvm::StructType *Ty) { CGRecordLayoutBuilder Builder(*this); Builder.Layout(D); - const llvm::StructType *Ty = llvm::StructType::get(getLLVMContext(), - Builder.FieldTypes, - Builder.Packed); + Ty->setBody(Builder.FieldTypes, Builder.Packed); // If we're in C++, compute the base subobject type. 
- const llvm::StructType *BaseTy = 0; + llvm::StructType *BaseTy = 0; if (isa<CXXRecordDecl>(D)) { BaseTy = Builder.BaseSubobjectType; if (!BaseTy) BaseTy = Ty; diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp index 5d5ab2f97a..2097947537 100644 --- a/lib/CodeGen/CGStmt.cpp +++ b/lib/CodeGen/CGStmt.cpp @@ -1410,13 +1410,13 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { std::vector<QualType> ResultRegQualTys; std::vector<const llvm::Type *> ResultRegTypes; std::vector<const llvm::Type *> ResultTruncRegTypes; - std::vector<const llvm::Type*> ArgTypes; + std::vector<llvm::Type*> ArgTypes; std::vector<llvm::Value*> Args; // Keep track of inout constraints. std::string InOutConstraints; std::vector<llvm::Value*> InOutArgs; - std::vector<const llvm::Type*> InOutArgTypes; + std::vector<llvm::Type*> InOutArgTypes; for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp index 596358bee5..3d5d0858db 100644 --- a/lib/CodeGen/CodeGenFunction.cpp +++ b/lib/CodeGen/CodeGenFunction.cpp @@ -45,11 +45,11 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm) } -const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) { +llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) { return CGM.getTypes().ConvertTypeForMem(T); } -const llvm::Type *CodeGenFunction::ConvertType(QualType T) { +llvm::Type *CodeGenFunction::ConvertType(QualType T) { return CGM.getTypes().ConvertType(T); } @@ -213,8 +213,8 @@ bool CodeGenFunction::ShouldInstrumentFunction() { /// function instrumentation is enabled. void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) { // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site); - const llvm::PointerType *PointerTy = Int8PtrTy; - const llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy }; + llvm::PointerType *PointerTy = Int8PtrTy; + llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy }; const llvm::FunctionType *FunctionTy = llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), ProfileFuncArgs, false); diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h index fa12f56374..7f2d8b9ed0 100644 --- a/lib/CodeGen/CodeGenFunction.h +++ b/lib/CodeGen/CodeGenFunction.h @@ -1346,9 +1346,9 @@ public: /// a terminate scope encloses a try. llvm::BasicBlock *getTerminateHandler(); - const llvm::Type *ConvertTypeForMem(QualType T); - const llvm::Type *ConvertType(QualType T); - const llvm::Type *ConvertType(const TypeDecl *T) { + llvm::Type *ConvertTypeForMem(QualType T); + llvm::Type *ConvertType(QualType T); + llvm::Type *ConvertType(const TypeDecl *T) { return ConvertType(getContext().getTypeDeclType(T)); } diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp index ddef39726f..95d7be5392 100644 --- a/lib/CodeGen/CodeGenModule.cpp +++ b/lib/CodeGen/CodeGenModule.cpp @@ -857,8 +857,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName, return Entry; // Make sure the result is of the correct type. 
- const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty); - return llvm::ConstantExpr::getBitCast(Entry, PTy); + return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo()); } // This function doesn't have a complete type (for example, the return @@ -928,7 +927,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName, return F; } - const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty); + llvm::Type *PTy = llvm::PointerType::getUnqual(Ty); return llvm::ConstantExpr::getBitCast(F, PTy); } @@ -1442,7 +1441,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) { bool variadic = false; if (const FunctionProtoType *fpt = D->getType()->getAs<FunctionProtoType>()) variadic = fpt->isVariadic(); - const llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic, false); + const llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic); // Get or create the prototype for the function. llvm::Constant *Entry = GetAddrOfFunction(GD, Ty); @@ -1613,10 +1612,12 @@ llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false); } -llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys, +llvm::Function *CodeGenModule::getIntrinsic(unsigned IID, llvm::Type **Tys, unsigned NumTys) { return llvm::Intrinsic::getDeclaration(&getModule(), - (llvm::Intrinsic::ID)IID, Tys, NumTys); + (llvm::Intrinsic::ID)IID, + const_cast<const llvm::Type **>(Tys), + NumTys); } static llvm::StringMapEntry<llvm::Constant*> & diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h index 3cca76a171..f59322dc26 100644 --- a/lib/CodeGen/CodeGenModule.h +++ b/lib/CodeGen/CodeGenModule.h @@ -99,31 +99,31 @@ namespace CodeGen { struct CodeGenTypeCache { /// void - const llvm::Type *VoidTy; + llvm::Type *VoidTy; /// i8, i32, and i64 - const llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty; + llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty; /// int - const llvm::IntegerType *IntTy; + llvm::IntegerType *IntTy; /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size. union { - const llvm::IntegerType *IntPtrTy; - const llvm::IntegerType *SizeTy; - const llvm::IntegerType *PtrDiffTy; + llvm::IntegerType *IntPtrTy; + llvm::IntegerType *SizeTy; + llvm::IntegerType *PtrDiffTy; }; /// void* in address space 0 union { - const llvm::PointerType *VoidPtrTy; - const llvm::PointerType *Int8PtrTy; + llvm::PointerType *VoidPtrTy; + llvm::PointerType *Int8PtrTy; }; /// void** in address space 0 union { - const llvm::PointerType *VoidPtrPtrTy; - const llvm::PointerType *Int8PtrPtrTy; + llvm::PointerType *VoidPtrPtrTy; + llvm::PointerType *Int8PtrPtrTy; }; /// The width of a pointer into the generic address space. @@ -312,8 +312,8 @@ class CodeGenModule : public CodeGenTypeCache { llvm::Constant *BlockObjectAssign; llvm::Constant *BlockObjectDispose; - const llvm::Type *BlockDescriptorType; - const llvm::Type *GenericBlockLiteralType; + llvm::Type *BlockDescriptorType; + llvm::Type *GenericBlockLiteralType; struct { int GlobalUniqueCount; @@ -503,10 +503,10 @@ public: /// getBlockDescriptorType - Fetches the type of a generic block /// descriptor. - const llvm::Type *getBlockDescriptorType(); + llvm::Type *getBlockDescriptorType(); /// getGenericBlockLiteralType - The type of a generic block literal. - const llvm::Type *getGenericBlockLiteralType(); + llvm::Type *getGenericBlockLiteralType(); /// GetAddrOfGlobalBlock - Gets the address of a block which /// requires no captures. 
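
The recurring pattern in this patch: every recursive or named struct that used to be built as an llvm::OpaqueType, held through a llvm::PATypeHolder, and completed via refineAbstractTypeTo is now forward-declared with llvm::StructType::createNamed and completed in place with setBody, with the name attached to the StructType itself instead of via Module::addTypeName. A minimal standalone sketch of the new idiom follows; it is not part of the patch, and the header paths, function name, and "struct.example_node" type are illustrative assumptions only.

// Sketch only: the struct name and fields below are hypothetical.
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"

static llvm::StructType *buildNodeType(llvm::LLVMContext &Ctx) {
  // Forward-declare a named, opaque struct; this replaces
  // llvm::OpaqueType::get(Ctx) followed by Module::addTypeName(...).
  llvm::StructType *NodeTy =
      llvm::StructType::createNamed(Ctx, "struct.example_node");

  // Pointers to the still-opaque struct are legal immediately, so
  // self-referential types no longer need a PATypeHolder.
  llvm::Type *NodePtrTy = llvm::PointerType::getUnqual(NodeTy);

  // Fill in the body in place; this replaces refineAbstractTypeTo(T).
  NodeTy->setBody(llvm::Type::getInt32Ty(Ctx), NodePtrTy, NULL);
  return NodeTy;
}

Because a pointer to the opaque struct can be formed before the body exists, the ObjC protocol, class, and block byref structures above can refer to themselves directly, which is why the holder/refine machinery disappears throughout the patch.
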
@@ -573,7 +573,7 @@ public: llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID); - llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0, + llvm::Function *getIntrinsic(unsigned IID, llvm::Type **Tys = 0, unsigned NumTys = 0); /// EmitTopLevelDecl - Emit code for a single top level declaration. diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp index 7826e6e780..e0baf800fb 100644 --- a/lib/CodeGen/CodeGenTypes.cpp +++ b/lib/CodeGen/CodeGenTypes.cpp @@ -44,28 +44,8 @@ CodeGenTypes::~CodeGenTypes() { delete &*I++; } -/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles -/// pointers that are referenced but have not been converted yet. This is used -/// to handle cyclic structures properly. -void CodeGenTypes::HandleLateResolvedPointers() { - assert(!PointersToResolve.empty() && "No pointers to resolve!"); - - // Any pointers that were converted deferred evaluation of their pointee type, - // creating an opaque type instead. This is in order to avoid problems with - // circular types. Loop through all these defered pointees, if any, and - // resolve them now. - while (!PointersToResolve.empty()) { - std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val(); - - // We can handle bare pointers here because we know that the only pointers - // to the Opaque type are P.second and from other types. Refining the - // opqaue type away will invalidate P.second, but we don't mind :). - const llvm::Type *NT = ConvertTypeForMemRecursive(P.first); - P.second->refineAbstractTypeTo(NT); - } -} - -void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty, +void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, + llvm::StructType *Ty, llvm::StringRef suffix) { llvm::SmallString<256> TypeName; llvm::raw_svector_ostream OS(TypeName); @@ -93,47 +73,15 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty, if (!suffix.empty()) OS << suffix; - TheModule.addTypeName(OS.str(), Ty); -} - -/// ConvertType - Convert the specified type to its LLVM form. -const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) { - const llvm::Type *Result = ConvertTypeRecursive(T); - - // If this is a top-level call to ConvertType and sub-conversions caused - // pointers to get lazily built as opaque types, resolve the pointers, which - // might cause Result to be merged away. - if (!IsRecursive && !PointersToResolve.empty()) { - llvm::PATypeHolder ResultHandle = Result; - HandleLateResolvedPointers(); - Result = ResultHandle; - } - return Result; -} - -const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) { - T = Context.getCanonicalType(T); - - // See if type is already cached. - llvm::DenseMap<const Type *, llvm::PATypeHolder>::iterator - I = TypeCache.find(T.getTypePtr()); - // If type is found in map and this is not a definition for a opaque - // place holder type then use it. Otherwise, convert type T. - if (I != TypeCache.end()) - return I->second.get(); - - const llvm::Type *ResultType = ConvertNewType(T); - TypeCache.insert(std::make_pair(T.getTypePtr(), - llvm::PATypeHolder(ResultType))); - return ResultType; + Ty->setName(OS.str()); } /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from /// ConvertType in that it is used to convert to the memory representation for /// a type. For example, the scalar representation for _Bool is i1, but the /// memory representation is usually i8 or i32, depending on the target. 
-const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){ - const llvm::Type *R = ConvertType(T, IsRecursive); +llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T){ + llvm::Type *R = ConvertType(T); // If this is a non-bool type, don't map it. if (!R->isIntegerTy(1)) @@ -153,8 +101,8 @@ const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) { if (!TT->getDecl()->isDefinition()) return TT; if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T)) - for (unsigned i = 0; i < FPT->getNumArgs(); i++) - if (const TagType* TT = FPT->getArgType(i)->getAs<TagType>()) + for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++) + if (const TagType *TT = FPT->getArgType(i)->getAs<TagType>()) if (!TT->getDecl()->isDefinition()) return TT; return 0; @@ -163,48 +111,21 @@ const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) { /// UpdateCompletedType - When we find the full definition for a TagDecl, /// replace the 'opaque' type we previously made for it if applicable. void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) { - const Type *Key = Context.getTagDeclType(TD).getTypePtr(); - llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI = - TagDeclTypes.find(Key); - if (TDTI == TagDeclTypes.end()) return; - - // Remember the opaque LLVM type for this tagdecl. - llvm::PATypeHolder OpaqueHolder = TDTI->second; - assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) && - "Updating compilation of an already non-opaque type?"); - - // Remove it from TagDeclTypes so that it will be regenerated. - TagDeclTypes.erase(TDTI); - - // Generate the new type. - const llvm::Type *NT = ConvertTagDeclType(TD); - - // Refine the old opaque type to its new definition. - cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT); - - // Since we just completed a tag type, check to see if any function types - // were completed along with the tag type. - // FIXME: This is very inefficient; if we track which function types depend - // on which tag types, though, it should be reasonably efficient. - llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i; - for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) { - if (const TagType* TT = VerifyFuncTypeComplete(i->first)) { - // This function type still depends on an incomplete tag type; make sure - // that tag type has an associated opaque type. - ConvertTagDeclType(TT->getDecl()); - } else { - // This function no longer depends on an incomplete tag type; create the - // function type, and refine the opaque type to the new function type. - llvm::PATypeHolder OpaqueHolder = i->second; - const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0)); - cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT); - FunctionTypes.erase(i); - } + // If this is an enum being completed, then we flush all non-struct types from + // the cache. This allows function types and other things that may be derived + // from the enum to be recomputed. 
+ if (isa<EnumDecl>(TD)) { + TypeCache.clear(); + return; } + + const RecordDecl *RD = cast<RecordDecl>(TD); + if (!RD->isDependentType()) + ConvertRecordDeclType(RD); } -static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext, - const llvm::fltSemantics &format) { +static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext, + const llvm::fltSemantics &format) { if (&format == &llvm::APFloat::IEEEsingle) return llvm::Type::getFloatTy(VMContext); if (&format == &llvm::APFloat::IEEEdouble) @@ -219,10 +140,26 @@ static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext, return 0; } -const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { - const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr(); +/// ConvertType - Convert the specified type to its LLVM form. +llvm::Type *CodeGenTypes::ConvertType(QualType T) { + T = Context.getCanonicalType(T); + + const Type *Ty = T.getTypePtr(); - switch (Ty.getTypeClass()) { + // RecordTypes are cached and processed specially. + if (const RecordType *RT = dyn_cast<RecordType>(Ty)) + return ConvertRecordDeclType(RT->getDecl()); + + // See if type is already cached. + llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty); + // If type is found in map then use it. Otherwise, convert type T. + if (TCI != TypeCache.end()) + return TCI->second; + + // If we don't have it in the cache, convert it now. + llvm::Type *ResultType = 0; + switch (Ty->getTypeClass()) { + case Type::Record: // Handled above. #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: @@ -233,18 +170,20 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { break; case Type::Builtin: { - switch (cast<BuiltinType>(Ty).getKind()) { + switch (cast<BuiltinType>(Ty)->getKind()) { case BuiltinType::Void: case BuiltinType::ObjCId: case BuiltinType::ObjCClass: case BuiltinType::ObjCSel: // LLVM void type can only be used as the result of a function call. Just // map to the same as char. - return llvm::Type::getInt8Ty(getLLVMContext()); + ResultType = llvm::Type::getInt8Ty(getLLVMContext()); + break; case BuiltinType::Bool: // Note that we always return bool as i1 for use as a scalar type. 
- return llvm::Type::getInt1Ty(getLLVMContext()); + ResultType = llvm::Type::getInt1Ty(getLLVMContext()); + break; case BuiltinType::Char_S: case BuiltinType::Char_U: @@ -262,24 +201,26 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { case BuiltinType::WChar_U: case BuiltinType::Char16: case BuiltinType::Char32: - return llvm::IntegerType::get(getLLVMContext(), - static_cast<unsigned>(Context.getTypeSize(T))); + ResultType = llvm::IntegerType::get(getLLVMContext(), + static_cast<unsigned>(Context.getTypeSize(T))); + break; case BuiltinType::Float: case BuiltinType::Double: case BuiltinType::LongDouble: - return getTypeForFormat(getLLVMContext(), - Context.getFloatTypeSemantics(T)); + ResultType = getTypeForFormat(getLLVMContext(), + Context.getFloatTypeSemantics(T)); + break; - case BuiltinType::NullPtr: { + case BuiltinType::NullPtr: // Model std::nullptr_t as i8* - const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext()); - return llvm::PointerType::getUnqual(Ty); - } + ResultType = llvm::Type::getInt8PtrTy(getLLVMContext()); + break; case BuiltinType::UInt128: case BuiltinType::Int128: - return llvm::IntegerType::get(getLLVMContext(), 128); + ResultType = llvm::IntegerType::get(getLLVMContext(), 128); + break; case BuiltinType::Overload: case BuiltinType::Dependent: @@ -288,211 +229,210 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { llvm_unreachable("Unexpected placeholder builtin type!"); break; } - llvm_unreachable("Unknown builtin type!"); break; } case Type::Complex: { const llvm::Type *EltTy = - ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType()); - return llvm::StructType::get(EltTy, EltTy, NULL); + ConvertType(cast<ComplexType>(Ty)->getElementType()); + ResultType = llvm::StructType::get(EltTy, EltTy, NULL); + break; } case Type::LValueReference: case Type::RValueReference: { - const ReferenceType &RTy = cast<ReferenceType>(Ty); - QualType ETy = RTy.getPointeeType(); - llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext()); - PointersToResolve.push_back(std::make_pair(ETy, PointeeType)); + RecursionStatePointerRAII X(RecursionState); + const ReferenceType *RTy = cast<ReferenceType>(Ty); + QualType ETy = RTy->getPointeeType(); + llvm::Type *PointeeType = ConvertTypeForMem(ETy); unsigned AS = Context.getTargetAddressSpace(ETy); - return llvm::PointerType::get(PointeeType, AS); + ResultType = llvm::PointerType::get(PointeeType, AS); + break; } case Type::Pointer: { - const PointerType &PTy = cast<PointerType>(Ty); - QualType ETy = PTy.getPointeeType(); - llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext()); - PointersToResolve.push_back(std::make_pair(ETy, PointeeType)); + RecursionStatePointerRAII X(RecursionState); + const PointerType *PTy = cast<PointerType>(Ty); + QualType ETy = PTy->getPointeeType(); + llvm::Type *PointeeType = ConvertTypeForMem(ETy); + if (PointeeType->isVoidTy()) + PointeeType = llvm::Type::getInt8Ty(getLLVMContext()); unsigned AS = Context.getTargetAddressSpace(ETy); - return llvm::PointerType::get(PointeeType, AS); + ResultType = llvm::PointerType::get(PointeeType, AS); + break; } case Type::VariableArray: { - const VariableArrayType &A = cast<VariableArrayType>(Ty); - assert(A.getIndexTypeCVRQualifiers() == 0 && + const VariableArrayType *A = cast<VariableArrayType>(Ty); + assert(A->getIndexTypeCVRQualifiers() == 0 && "FIXME: We only handle trivial array types so far!"); // VLAs resolve to the innermost element type; this matches // the return of alloca, and there isn't any 
obviously better choice. - return ConvertTypeForMemRecursive(A.getElementType()); + ResultType = ConvertTypeForMem(A->getElementType()); + break; } case Type::IncompleteArray: { - const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty); - assert(A.getIndexTypeCVRQualifiers() == 0 && + const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty); + assert(A->getIndexTypeCVRQualifiers() == 0 && "FIXME: We only handle trivial array types so far!"); // int X[] -> [0 x int] - return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), - 0); + ResultType = llvm::ArrayType::get(ConvertTypeForMem(A->getElementType()),0); + break; } case Type::ConstantArray: { - const ConstantArrayType &A = cast<ConstantArrayType>(Ty); - const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType()); - return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue()); + const ConstantArrayType *A = cast<ConstantArrayType>(Ty); + const llvm::Type *EltTy = ConvertTypeForMem(A->getElementType()); + ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue()); + break; } case Type::ExtVector: case Type::Vector: { - const VectorType &VT = cast<VectorType>(Ty); - return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()), - VT.getNumElements()); + const VectorType *VT = cast<VectorType>(Ty); + ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()), + VT->getNumElements()); + break; } case Type::FunctionNoProto: case Type::FunctionProto: { // First, check whether we can build the full function type. If the // function type depends on an incomplete type (e.g. a struct or enum), we - // cannot lower the function type. Instead, turn it into an Opaque pointer - // and have UpdateCompletedType revisit the function type when/if the opaque - // argument type is defined. - if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) { - // This function's type depends on an incomplete tag type; make sure - // we have an opaque type corresponding to the tag type. - ConvertTagDeclType(TT->getDecl()); - // Create an opaque type for this function type, save it, and return it. - llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext()); - FunctionTypes.insert(std::make_pair(&Ty, ResultType)); - return ResultType; + // cannot lower the function type. + if (VerifyFuncTypeComplete(Ty)) { + // This function's type depends on an incomplete tag type. + // Return a placeholder type. + ResultType = llvm::StructType::get(getLLVMContext()); + break; } - + // The function type can be built; call the appropriate routines to // build it. 
const CGFunctionInfo *FI; bool isVariadic; - if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) { + if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(Ty)) { FI = &getFunctionInfo( - CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)), - true /*Recursive*/); + CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0))); isVariadic = FPT->isVariadic(); } else { - const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty); + const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(Ty); FI = &getFunctionInfo( - CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)), - true /*Recursive*/); + CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0))); isVariadic = true; } - return GetFunctionType(*FI, isVariadic, true); + ResultType = GetFunctionType(*FI, isVariadic); + break; } case Type::ObjCObject: - return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType()); + ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType()); + break; case Type::ObjCInterface: { // Objective-C interfaces are always opaque (outside of the // runtime, which can do whatever it likes); we never refine // these. - const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)]; + llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)]; if (!T) - T = llvm::OpaqueType::get(getLLVMContext()); - return T; + T = llvm::StructType::createNamed(getLLVMContext(), ""); + ResultType = T; + break; } case Type::ObjCObjectPointer: { + RecursionStatePointerRAII X(RecursionState); // Protocol qualifications do not influence the LLVM type, we just return a // pointer to the underlying interface type. We don't need to worry about // recursive conversion. const llvm::Type *T = - ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType()); - return llvm::PointerType::getUnqual(T); + ConvertType(cast<ObjCObjectPointerType>(Ty)->getPointeeType()); + ResultType = T->getPointerTo(); + break; } - case Type::Record: - case Type::Enum: { - const TagDecl *TD = cast<TagType>(Ty).getDecl(); - const llvm::Type *Res = ConvertTagDeclType(TD); - - if (const RecordDecl *RD = dyn_cast<RecordDecl>(TD)) - addRecordTypeName(RD, Res, llvm::StringRef()); - return Res; + case Type::Enum: { + const EnumDecl *ED = cast<EnumType>(Ty)->getDecl(); + if (ED->isDefinition() || ED->isFixed()) + return ConvertType(ED->getIntegerType()); + // Return a placeholder '{}' type. + ResultType = llvm::StructType::get(getLLVMContext()); + break; } case Type::BlockPointer: { - const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType(); - llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext()); - PointersToResolve.push_back(std::make_pair(FTy, PointeeType)); + RecursionStatePointerRAII X(RecursionState); + const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType(); + llvm::Type *PointeeType = ConvertTypeForMem(FTy); unsigned AS = Context.getTargetAddressSpace(FTy); - return llvm::PointerType::get(PointeeType, AS); + ResultType = llvm::PointerType::get(PointeeType, AS); + break; } case Type::MemberPointer: { - return getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(&Ty)); + ResultType = + getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty)); + break; } } - - // FIXME: implement. - return llvm::OpaqueType::get(getLLVMContext()); + + assert(ResultType && "Didn't convert a type?"); + + TypeCache[Ty] = ResultType; + return ResultType; } -/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or -/// enum. 
-const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) { +/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union. +llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) { // TagDecl's are not necessarily unique, instead use the (clang) // type connected to the decl. - const Type *Key = - Context.getTagDeclType(TD).getTypePtr(); - llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI = - TagDeclTypes.find(Key); - - // If we've already compiled this tag type, use the previous definition. - if (TDTI != TagDeclTypes.end()) - return TDTI->second; - - const EnumDecl *ED = dyn_cast<EnumDecl>(TD); - - // If this is still a forward declaration, just define an opaque - // type to use for this tagged decl. - // C++0x: If this is a enumeration type with fixed underlying type, - // consider it complete. - if (!TD->isDefinition() && !(ED && ED->isFixed())) { - llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext()); - TagDeclTypes.insert(std::make_pair(Key, ResultType)); - return ResultType; - } - - // Okay, this is a definition of a type. Compile the implementation now. + const Type *Key = Context.getTagDeclType(RD).getTypePtr(); - if (ED) // Don't bother storing enums in TagDeclTypes. - return ConvertTypeRecursive(ED->getIntegerType()); + llvm::StructType *&Entry = RecordDeclTypes[Key]; - // This decl could well be recursive. In this case, insert an opaque - // definition of this type, which the recursive uses will get. We will then - // refine this opaque version later. + // If we don't have a StructType at all yet, create the forward declaration. + if (Entry == 0) + Entry = llvm::StructType::createNamed(getLLVMContext(), + std::string(RD->getKindName()) + "." + + RD->getQualifiedNameAsString()); + llvm::StructType *Ty = Entry; - // Create new OpaqueType now for later use in case this is a recursive - // type. This will later be refined to the actual type. - llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext()); - TagDeclTypes.insert(std::make_pair(Key, ResultHolder)); + // If this is still a forward declaration, or the LLVM type is already + // complete, there's nothing more to do. + if (!RD->isDefinition() || !Ty->isOpaque()) + return Ty; + + // If we're recursively nested inside the conversion of a pointer inside the + // struct, defer conversion. + if (RecursionState == RS_StructPointer) { + DeferredRecords.push_back(RD); + return Ty; + } - const RecordDecl *RD = cast<const RecordDecl>(TD); + // Okay, this is a definition of a type. Compile the implementation now. + RecursionStateTy SavedRecursionState = RecursionState; + RecursionState = RS_Struct; // Force conversion of non-virtual base classes recursively. - if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) { - for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), - e = RD->bases_end(); i != e; ++i) { + if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { + for (CXXRecordDecl::base_class_const_iterator i = CRD->bases_begin(), + e = CRD->bases_end(); i != e; ++i) { if (!i->isVirtual()) { const CXXRecordDecl *Base = cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); - ConvertTagDeclType(Base); + ConvertRecordDeclType(Base); } } } // Layout fields. - CGRecordLayout *Layout = ComputeRecordLayout(RD); - + CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty); CGRecordLayouts[Key] = Layout; - const llvm::Type *ResultType = Layout->getLLVMType(); - - // Refine our Opaque type to ResultType. 
This can invalidate ResultType, so - // make sure to read the result out of the holder. - cast<llvm::OpaqueType>(ResultHolder.get()) - ->refineAbstractTypeTo(ResultType); - return ResultHolder.get(); + + // Restore our recursion state. If we're done converting the outer-most + // record, then convert any deferred structs as well. + RecursionState = SavedRecursionState; + if (RecursionState == RS_Normal) + while (!DeferredRecords.empty()) + ConvertRecordDeclType(DeferredRecords.pop_back_val()); + + return Ty; } /// getCGRecordLayout - Return record layout info for the given record decl. @@ -503,7 +443,7 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) { const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key); if (!Layout) { // Compute the type information. - ConvertTagDeclType(RD); + ConvertRecordDeclType(RD); // Now try again. Layout = CGRecordLayouts.lookup(Key); @@ -513,15 +453,6 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) { return *Layout; } -void CodeGenTypes::addBaseSubobjectTypeName(const CXXRecordDecl *RD, - const CGRecordLayout &layout) { - llvm::StringRef suffix; - if (layout.getBaseSubobjectLLVMType() != layout.getLLVMType()) - suffix = ".base"; - - addRecordTypeName(RD, layout.getBaseSubobjectLLVMType(), suffix); -} - bool CodeGenTypes::isZeroInitializable(QualType T) { // No need to check for member pointers when not compiling C++. if (!Context.getLangOptions().CPlusPlus) diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h index 4a84dff620..755f87bdb4 100644 --- a/lib/CodeGen/CodeGenTypes.h +++ b/lib/CodeGen/CodeGenTypes.h @@ -23,11 +23,10 @@ namespace llvm { class FunctionType; class Module; - class OpaqueType; - class PATypeHolder; class TargetData; class Type; class LLVMContext; + class StructType; } namespace clang { @@ -59,54 +58,55 @@ namespace CodeGen { class CodeGenTypes { ASTContext &Context; const TargetInfo &Target; - llvm::Module& TheModule; - const llvm::TargetData& TheTargetData; - const ABIInfo& TheABIInfo; + llvm::Module &TheModule; + const llvm::TargetData &TheTargetData; + const ABIInfo &TheABIInfo; CGCXXABI &TheCXXABI; const CodeGenOptions &CodeGenOpts; - llvm::SmallVector<std::pair<QualType, - llvm::OpaqueType *>, 8> PointersToResolve; - - llvm::DenseMap<const Type*, llvm::PATypeHolder> TagDeclTypes; - - llvm::DenseMap<const Type*, llvm::PATypeHolder> FunctionTypes; - /// The opaque type map for Objective-C interfaces. All direct /// manipulation is done by the runtime interfaces, which are /// responsible for coercing to the appropriate type; these opaque /// types are never refined. - llvm::DenseMap<const ObjCInterfaceType*, const llvm::Type *> InterfaceTypes; + llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes; /// CGRecordLayouts - This maps llvm struct type with corresponding /// record layout info. llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts; + /// RecordDeclTypes - This contains the LLVM IR type for any converted + /// RecordDecl. + llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes; + /// FunctionInfos - Hold memoized CGFunctionInfo results. llvm::FoldingSet<CGFunctionInfo> FunctionInfos; + enum RecursionStateTy { + RS_Normal, // Normal type conversion. + RS_Struct, // Recursively inside a struct conversion. + RS_StructPointer // Recursively inside a pointer in a struct. 
+ } RecursionState; + + llvm::SmallVector<const RecordDecl *, 8> DeferredRecords; + + struct RecursionStatePointerRAII { + RecursionStateTy &Val; + RecursionStateTy Saved; + + RecursionStatePointerRAII(RecursionStateTy &V) : Val(V), Saved(V) { + if (Val == RS_Struct) + Val = RS_StructPointer; + } + + ~RecursionStatePointerRAII() { + Val = Saved; + } + }; + private: - /// TypeCache - This map keeps cache of llvm::Types (through PATypeHolder) - /// and maps llvm::Types to corresponding clang::Type. llvm::PATypeHolder is - /// used instead of llvm::Type because it allows us to bypass potential - /// dangling type pointers due to type refinement on llvm side. - llvm::DenseMap<const Type *, llvm::PATypeHolder> TypeCache; - - /// ConvertNewType - Convert type T into a llvm::Type. Do not use this - /// method directly because it does not do any type caching. This method - /// is available only for ConvertType(). CovertType() is preferred - /// interface to convert type T into a llvm::Type. - const llvm::Type *ConvertNewType(QualType T); - - /// HandleLateResolvedPointers - For top-level ConvertType calls, this handles - /// pointers that are referenced but have not been converted yet. This is - /// used to handle cyclic structures properly. - void HandleLateResolvedPointers(); - - /// addRecordTypeName - Compute a name from the given record decl with an - /// optional suffix and name the given LLVM type using it. - void addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty, - llvm::StringRef suffix); + /// TypeCache - This map keeps cache of llvm::Types + /// and maps llvm::Types to corresponding clang::Type. + llvm::DenseMap<const Type *, llvm::Type *> TypeCache; public: CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD, @@ -123,24 +123,19 @@ public: llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); } /// ConvertType - Convert type T into a llvm::Type. - const llvm::Type *ConvertType(QualType T, bool IsRecursive = false); - const llvm::Type *ConvertTypeRecursive(QualType T); + llvm::Type *ConvertType(QualType T); /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from /// ConvertType in that it is used to convert to the memory representation for /// a type. For example, the scalar representation for _Bool is i1, but the /// memory representation is usually i8 or i32, depending on the target. - const llvm::Type *ConvertTypeForMem(QualType T, bool IsRecursive = false); - const llvm::Type *ConvertTypeForMemRecursive(QualType T) { - return ConvertTypeForMem(T, true); - } + llvm::Type *ConvertTypeForMem(QualType T); /// GetFunctionType - Get the LLVM function type for \arg Info. - const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info, - bool IsVariadic, - bool IsRecursive = false); + llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info, + bool IsVariadic); - const llvm::FunctionType *GetFunctionType(GlobalDecl GD); + llvm::FunctionType *GetFunctionType(GlobalDecl GD); /// VerifyFuncTypeComplete - Utility to check whether a function type can /// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag @@ -154,11 +149,6 @@ public: const CGRecordLayout &getCGRecordLayout(const RecordDecl*); - /// addBaseSubobjectTypeName - Add a type name for the base subobject of the - /// given record layout. 
- void addBaseSubobjectTypeName(const CXXRecordDecl *RD, - const CGRecordLayout &layout); - /// UpdateCompletedType - When we find the full definition for a TagDecl, /// replace the 'opaque' type we previously made for it if applicable. void UpdateCompletedType(const TagDecl *TD); @@ -184,10 +174,8 @@ public: Ty->getExtInfo()); } - const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty, - bool IsRecursive = false); - const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty, - bool IsRecursive = false); + const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty); + const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty); /// getFunctionInfo - Get the function info for a member function of /// the given type. This is used for calls through member function @@ -210,23 +198,27 @@ public: /// \param ArgTys - must all actually be canonical as params const CGFunctionInfo &getFunctionInfo(CanQualType RetTy, const llvm::SmallVectorImpl<CanQualType> &ArgTys, - const FunctionType::ExtInfo &Info, - bool IsRecursive = false); + const FunctionType::ExtInfo &Info); /// \brief Compute a new LLVM record layout object for the given record. - CGRecordLayout *ComputeRecordLayout(const RecordDecl *D); + CGRecordLayout *ComputeRecordLayout(const RecordDecl *D, + llvm::StructType *Ty); + + /// addRecordTypeName - Compute a name from the given record decl with an + /// optional suffix and name the given LLVM type using it. + void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty, + llvm::StringRef suffix); + public: // These are internal details of CGT that shouldn't be used externally. - /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or - /// enum. - const llvm::Type *ConvertTagDeclType(const TagDecl *TD); + /// ConvertRecordDeclType - Lay out a tagged decl type like struct or union. + llvm::StructType *ConvertRecordDeclType(const RecordDecl *TD); /// GetExpandedTypes - Expand the type \arg Ty into the LLVM /// argument types it would be passed as on the provided vector \arg /// ArgTys. See ABIArgInfo::Expand. void GetExpandedTypes(QualType type, - llvm::SmallVectorImpl<const llvm::Type*> &expanded, - bool isRecursive); + llvm::SmallVectorImpl<llvm::Type*> &expanded); /// IsZeroInitializable - Return whether a type can be /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer. diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp index 9edd793873..e6720c4ed8 100644 --- a/lib/CodeGen/ItaniumCXXABI.cpp +++ b/lib/CodeGen/ItaniumCXXABI.cpp @@ -34,15 +34,15 @@ using namespace CodeGen; namespace { class ItaniumCXXABI : public CodeGen::CGCXXABI { private: - const llvm::IntegerType *PtrDiffTy; + llvm::IntegerType *PtrDiffTy; protected: bool IsARM; // It's a little silly for us to cache this. 
- const llvm::IntegerType *getPtrDiffTy() { + llvm::IntegerType *getPtrDiffTy() { if (!PtrDiffTy) { QualType T = getContext().getPointerDiffType(); - const llvm::Type *Ty = CGM.getTypes().ConvertTypeRecursive(T); + llvm::Type *Ty = CGM.getTypes().ConvertType(T); PtrDiffTy = cast<llvm::IntegerType>(Ty); } return PtrDiffTy; @@ -58,7 +58,7 @@ public: bool isZeroInitializable(const MemberPointerType *MPT); - const llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT); + llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT); llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, llvm::Value *&This, @@ -176,7 +176,7 @@ CodeGen::CGCXXABI *CodeGen::CreateARMCXXABI(CodeGenModule &CGM) { return new ARMCXXABI(CGM); } -const llvm::Type * +llvm::Type * ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { if (MPT->isMemberDataPointer()) return getPtrDiffTy(); @@ -1034,31 +1034,34 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF, /*********************** Static local initialization **************************/ static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM, - const llvm::PointerType *GuardPtrTy) { + llvm::PointerType *GuardPtrTy) { // int __cxa_guard_acquire(__guard *guard_object); + llvm::Type *ArgTys[] = { GuardPtrTy }; const llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy), - GuardPtrTy, /*isVarArg=*/false); + ArgTys, /*isVarArg=*/false); return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire"); } static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM, - const llvm::PointerType *GuardPtrTy) { + llvm::PointerType *GuardPtrTy) { // void __cxa_guard_release(__guard *guard_object); + llvm::Type *ArgTys[] = { GuardPtrTy }; const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), - GuardPtrTy, /*isVarArg=*/false); + ArgTys, /*isVarArg=*/false); return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release"); } static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM, - const llvm::PointerType *GuardPtrTy) { + llvm::PointerType *GuardPtrTy) { // void __cxa_guard_abort(__guard *guard_object); + llvm::Type *ArgTys[] = { GuardPtrTy }; const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), - GuardPtrTy, /*isVarArg=*/false); + ArgTys, /*isVarArg=*/false); return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort"); } @@ -1098,7 +1101,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF, // Guard variables are 64 bits in the generic ABI and 32 bits on ARM. GuardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty); } - const llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo(); + llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo(); // Create the guard variable. llvm::SmallString<256> GuardVName; diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp index 28afda1d66..ae7de5d19c 100644 --- a/lib/CodeGen/TargetInfo.cpp +++ b/lib/CodeGen/TargetInfo.cpp @@ -564,7 +564,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const { } else if (SeltTy->isPointerType()) { // FIXME: It would be really nice if this could come out as the proper // pointer type. 
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext()); + llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext()); return ABIArgInfo::getDirect(PtrTy); } else if (SeltTy->isVectorType()) { // 64- and 128-bit vectors are never returned in a @@ -701,7 +701,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const { Size)); } - const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty); + llvm::Type *IRType = CGT.ConvertType(Ty); if (UseX86_MMXType(IRType)) { if (IsMMXDisabled) return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), @@ -848,13 +848,13 @@ class X86_64ABIInfo : public ABIInfo { /// also be ComplexX87. void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const; - const llvm::Type *Get16ByteVectorType(QualType Ty) const; - const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType, - unsigned IROffset, QualType SourceTy, - unsigned SourceOffset) const; - const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType, - unsigned IROffset, QualType SourceTy, - unsigned SourceOffset) const; + llvm::Type *Get16ByteVectorType(QualType Ty) const; + llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, + unsigned IROffset, QualType SourceTy, + unsigned SourceOffset) const; + llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, + unsigned IROffset, QualType SourceTy, + unsigned SourceOffset) const; /// getIndirectResult - Give a source type \arg Ty, return a suitable result /// such that the argument will be returned in memory. @@ -1329,20 +1329,20 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const { /// Get16ByteVectorType - The ABI specifies that a value should be passed in an /// full vector XMM register. Pick an LLVM IR type that will be passed as a /// vector register. -const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const { - const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty); +llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const { + llvm::Type *IRType = CGT.ConvertType(Ty); // Wrapper structs that just contain vectors are passed just like vectors, // strip them off if present. - const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType); + llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType); while (STy && STy->getNumElements() == 1) { IRType = STy->getElementType(0); STy = dyn_cast<llvm::StructType>(IRType); } // If the preferred type is a 16-byte vector, prefer to pass it. - if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ - const llvm::Type *EltTy = VT->getElementType(); + if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ + llvm::Type *EltTy = VT->getElementType(); if (VT->getBitWidth() == 128 && (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || @@ -1471,8 +1471,8 @@ static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset, /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the /// low 8 bytes of an XMM register, corresponding to the SSE class. -const llvm::Type *X86_64ABIInfo:: -GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset, +llvm::Type *X86_64ABIInfo:: +GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, QualType SourceTy, unsigned SourceOffset) const { // The only three choices we have are either double, <2 x float>, or float. We // pass as float if the last 4 bytes is just padding. 
This happens for @@ -1506,8 +1506,8 @@ GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset, /// SourceTy is the source level type for the entire argument. SourceOffset is /// an offset into this that we're processing (which is always either 0 or 8). /// -const llvm::Type *X86_64ABIInfo:: -GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset, +llvm::Type *X86_64ABIInfo:: +GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, QualType SourceTy, unsigned SourceOffset) const { // If we're dealing with an un-offset LLVM IR type, then it means that we're // returning an 8-byte unit starting with it. See if we can safely use it. @@ -1545,7 +1545,7 @@ GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset, } if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { - const llvm::Type *EltTy = ATy->getElementType(); + llvm::Type *EltTy = ATy->getElementType(); unsigned EltSize = getTargetData().getTypeAllocSize(EltTy); unsigned EltOffset = IROffset/EltSize*EltSize; return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, @@ -1571,7 +1571,7 @@ GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset, /// first class aggregate to represent them. For example, if the low part of /// a by-value argument should be passed as i32* and the high part as float, /// return {i32*, float}. -static const llvm::Type * +static llvm::Type * GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi, const llvm::TargetData &TD) { // In order to correctly satisfy the ABI, we need to the high part to start @@ -1599,7 +1599,7 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi, } } - const llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL); + llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL); // Verify that the second element is at an 8-byte offset. @@ -1619,7 +1619,7 @@ classifyReturnType(QualType RetTy) const { assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); - const llvm::Type *ResType = 0; + llvm::Type *ResType = 0; switch (Lo) { case NoClass: if (Hi == NoClass) @@ -1642,8 +1642,7 @@ classifyReturnType(QualType RetTy) const { // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next // available register of the sequence %rax, %rdx is used. case Integer: - ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, - RetTy, 0); + ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); // If we have a sign or zero extended integer, make sure to return Extend // so that the parameter gets the right LLVM IR attributes. @@ -1661,7 +1660,7 @@ classifyReturnType(QualType RetTy) const { // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next // available SSE register of the sequence %xmm0, %xmm1 is used. case SSE: - ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0); + ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); break; // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is @@ -1681,7 +1680,7 @@ classifyReturnType(QualType RetTy) const { break; } - const llvm::Type *HighPart = 0; + llvm::Type *HighPart = 0; switch (Hi) { // Memory was handled previously and X87 should // never occur as a hi class. 
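[Editorial aside, not part of the commit.] The hunks above in CodeGenTypes.cpp, ItaniumCXXABI.cpp and TargetInfo.cpp all follow one pattern: the OpaqueType/PATypeHolder/refineAbstractTypeTo machinery and the parallel "*Recursive" entry points are dropped in favor of named, initially opaque StructTypes that get their body filled in once the layout is known, with genuinely recursive cases handled by the new RecursionState/DeferredRecords logic instead of PointersToResolve. Below is a minimal sketch of that pattern, hedged as illustration only: the LLVM calls (StructType::createNamed, setBody, isOpaque, PointerType::getUnqual) are the ones this patch itself uses, but the helper function, its name, and the field layout are hypothetical and not taken from the commit.

    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/ADT/SmallVector.h"

    // Sketch only: how a self-referential record could be lowered with the
    // rewritten type system this patch targets.
    static llvm::StructType *lowerNode(llvm::LLVMContext &Ctx) {
      // Forward-declare the struct by name; it starts out opaque, which is
      // what ConvertRecordDeclType now caches instead of an OpaqueType.
      llvm::StructType *Node = llvm::StructType::createNamed(Ctx, "struct.node");

      // Pointers to the still-opaque struct are legal, so recursive fields
      // no longer need late resolution or type refinement.
      llvm::SmallVector<llvm::Type *, 2> Fields;
      Fields.push_back(llvm::Type::getInt32Ty(Ctx));         // int value;
      Fields.push_back(llvm::PointerType::getUnqual(Node));  // struct node *next;

      // Once the layout is known, the body is set in place; no
      // refineAbstractTypeTo, and the cached pointer stays valid.
      if (Node->isOpaque())
        Node->setBody(Fields, /*isPacked=*/false);
      return Node;
    }

The __cxa_guard_* hunks earlier show the same shift in a smaller form: parameter types are now collected into an array (ArgTys) and handed to FunctionType::get, rather than being threaded through the old signatures.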
@@ -1694,13 +1693,12 @@ classifyReturnType(QualType RetTy) const { break; case Integer: - HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), - 8, RetTy, 8); + HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); if (Lo == NoClass) // Return HighPart at offset 8 in memory. return ABIArgInfo::getDirect(HighPart, 8); break; case SSE: - HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8); + HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); if (Lo == NoClass) // Return HighPart at offset 8 in memory. return ABIArgInfo::getDirect(HighPart, 8); break; @@ -1722,8 +1720,7 @@ classifyReturnType(QualType RetTy) const { // preceded by X87. In such situations we follow gcc and pass the // extra bits in an SSE reg. if (Lo != X87) { - HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), - 8, RetTy, 8); + HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); if (Lo == NoClass) // Return HighPart at offset 8 in memory. return ABIArgInfo::getDirect(HighPart, 8); } @@ -1751,7 +1748,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt, neededInt = 0; neededSSE = 0; - const llvm::Type *ResType = 0; + llvm::Type *ResType = 0; switch (Lo) { case NoClass: if (Hi == NoClass) @@ -1785,7 +1782,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt, ++neededInt; // Pick an 8-byte type based on the preferred type. - ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0); + ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); // If we have a sign or zero extended integer, make sure to return Extend // so that the parameter gets the right LLVM IR attributes. @@ -1805,14 +1802,14 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt, // available SSE register is used, the registers are taken in the // order from %xmm0 to %xmm7. case SSE: { - const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty); + llvm::Type *IRType = CGT.ConvertType(Ty); ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); ++neededSSE; break; } } - const llvm::Type *HighPart = 0; + llvm::Type *HighPart = 0; switch (Hi) { // Memory was handled previously, ComplexX87 and X87 should // never occur as hi classes, and X87Up must be preceded by X87, @@ -1828,7 +1825,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt, case Integer: ++neededInt; // Pick an 8-byte type based on the preferred type. - HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8); + HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); if (Lo == NoClass) // Pass HighPart at offset 8 in memory. return ABIArgInfo::getDirect(HighPart, 8); @@ -1838,7 +1835,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt, // memory), except in situations involving unions. case X87Up: case SSE: - HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8); + HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 
return ABIArgInfo::getDirect(HighPart, 8); @@ -2373,7 +2370,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; } - const llvm::Type *STy = + llvm::Type *STy = llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); return ABIArgInfo::getDirect(STy); } diff --git a/test/CXX/except/except.spec/p14-ir.cpp b/test/CXX/except/except.spec/p14-ir.cpp index c681727e8c..81fbf7d970 100644 --- a/test/CXX/except/except.spec/p14-ir.cpp +++ b/test/CXX/except/except.spec/p14-ir.cpp @@ -26,12 +26,12 @@ struct X4 { struct X5 : X0, X4 { }; void test(X2 x2, X3 x3, X5 x5) { - // CHECK: define linkonce_odr void @_ZN2X2C1ERKS_(%struct.X0* %this, %struct.X0*) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X2C1ERKS_(%struct.X2* %this, %struct.X2*) unnamed_addr // CHECK: call void @_ZN2X2C2ERKS_({{.*}}) nounwind // CHECK-NEXT: ret void // CHECK-NEXT: } X2 x2a(x2); - // CHECK: define linkonce_odr void @_ZN2X3C1ERKS_(%struct.X0* %this, %struct.X0*) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X3C1ERKS_(%struct.X3* %this, %struct.X3*) unnamed_addr // CHECK: call void @_ZN2X3C2ERKS_({{.*}}) nounwind // CHECK-NEXT: ret void // CHECK-NEXT: } @@ -55,24 +55,25 @@ struct X8 : X6 { }; struct X9 : X6, X7 { }; void test() { - // CHECK: define linkonce_odr void @_ZN2X8C1Ev(%struct.X0* %this) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X8C1Ev(%struct.X8* %this) unnamed_addr // CHECK: call void @_ZN2X8C2Ev({{.*}}) nounwind // CHECK-NEXT: ret void X8(); - // CHECK: define linkonce_odr void @_ZN2X9C1Ev(%struct.X0* %this) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X9C1Ev(%struct.X9* %this) unnamed_addr // FIXME: check that this is the end of the line here: // CHECK: call void @_ZN2X9C2Ev({{.*}}) // CHECK-NEXT: ret void X9(); - // CHECK: define linkonce_odr void @_ZN2X9C2Ev(%struct.X0* %this) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X9C2Ev(%struct.X9* %this) unnamed_addr // CHECK: call void @_ZN2X6C2Ev({{.*}}) nounwind // FIXME: and here: + // CHECK-NEXT: bitcast // CHECK-NEXT: call void @_ZN2X7C2Ev({{.*}}) // CHECK: ret void - // CHECK: define linkonce_odr void @_ZN2X8C2Ev(%struct.X0* %this) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X8C2Ev(%struct.X8* %this) unnamed_addr // CHECK: call void @_ZN2X6C2Ev({{.*}}) nounwind // CHECK-NEXT: ret void } diff --git a/test/CodeGen/annotate.c b/test/CodeGen/annotate.c index ffaeebbca5..9ed187d1d1 100644 --- a/test/CodeGen/annotate.c +++ b/test/CodeGen/annotate.c @@ -7,4 +7,4 @@ void a(char *a) { // CHECK: private unnamed_addr global // CHECK: private unnamed_addr global -// CHECK: @llvm.global.annotations = appending global [2 x %0] +// CHECK: @llvm.global.annotations = appending global [2 x { i8*, i8*, i8*, i32 }] diff --git a/test/CodeGen/arm-arguments.c b/test/CodeGen/arm-arguments.c index 73bc03dac7..081bc89dcd 100644 --- a/test/CodeGen/arm-arguments.c +++ b/test/CodeGen/arm-arguments.c @@ -61,7 +61,7 @@ struct s10 { int f0; int : 0; int : 0; }; struct s10 f10(void) {} // APCS-GNU: define void @f11( -// APCS-GNU: struct.s10* sret +// APCS-GNU: struct.s11* sret // AAPCS: define arm_aapcscc i32 @f11() struct s11 { int : 0; int f0; }; struct s11 f11(void) {} @@ -80,7 +80,7 @@ struct s13 { float f0; }; struct s13 f13(void) {} // APCS-GNU: define void @f14( -// APCS-GNU: struct.s13* sret +// APCS-GNU: union.u14* sret // AAPCS: define arm_aapcscc i32 @f14() union u14 { float f0; }; union u14 f14(void) {} diff --git 
a/test/CodeGen/arm-asm-variable.c b/test/CodeGen/arm-asm-variable.c index 8088051e24..865d1971e8 100644 --- a/test/CodeGen/arm-asm-variable.c +++ b/test/CodeGen/arm-asm-variable.c @@ -16,7 +16,7 @@ int64_t foo(int64_t v, volatile int64_t *p) : [_rl] "=&r" (rl), [_rh] "=&r" (rh) \ : [_p] "p" (p) : "memory"); - // CHECK: call %0 asm sideeffect "ldrexd$0, $1, [$2]", "={r1},={r2},r,~{memory}"(i64* + // CHECK: call { i32, i32 } asm sideeffect "ldrexd$0, $1, [$2]", "={r1},={r2},r,~{memory}"(i64* return r; } diff --git a/test/CodeGen/asm-errors.c b/test/CodeGen/asm-errors.c index cd4d1ffa52..438c82ba74 100644 --- a/test/CodeGen/asm-errors.c +++ b/test/CodeGen/asm-errors.c @@ -1,6 +1,8 @@ // REQUIRES: x86-registered-target -// RUN: not %clang_cc1 -triple i386-apple-darwin10 -emit-obj %s -o /dev/null > %t 2>&1 -// RUN: FileCheck %s < %t + +// RUN: true +// UN: not %clang_cc1 -triple i386-apple-darwin10 -emit-obj %s -o /dev/null > %t 2>&1 +// UN: FileCheck %s < %t int test1(int X) { // CHECK: error: invalid instruction mnemonic 'abc' diff --git a/test/CodeGen/bitfield-2.c b/test/CodeGen/bitfield-2.c index 8de432faa8..69ed5b11fe 100644 --- a/test/CodeGen/bitfield-2.c +++ b/test/CodeGen/bitfield-2.c @@ -11,7 +11,7 @@ // CHECK-RECORD: *** Dumping IRgen Record Layout // CHECK-RECORD: Record: struct s0 // CHECK-RECORD: Layout: <CGRecordLayout -// CHECK-RECORD: LLVMType:<{ [3 x i8] }> +// CHECK-RECORD: LLVMType:%struct.s0 = type <{ [3 x i8] }> // CHECK-RECORD: IsZeroInitializable:1 // CHECK-RECORD: BitFields:[ // CHECK-RECORD: <CGBitFieldInfo Size:24 IsSigned:1 @@ -56,7 +56,7 @@ unsigned long long test_0() { // CHECK-RECORD: *** Dumping IRgen Record Layout // CHECK-RECORD: Record: struct s1 // CHECK-RECORD: Layout: <CGRecordLayout -// CHECK-RECORD: LLVMType:<{ [2 x i8], i8 }> +// CHECK-RECORD: LLVMType:%struct.s1 = type <{ [2 x i8], i8 }> // CHECK-RECORD: IsZeroInitializable:1 // CHECK-RECORD: BitFields:[ // CHECK-RECORD: <CGBitFieldInfo Size:10 IsSigned:1 @@ -113,7 +113,7 @@ unsigned long long test_1() { // CHECK-RECORD: *** Dumping IRgen Record Layout // CHECK-RECORD: Record: union u2 // CHECK-RECORD: Layout: <CGRecordLayout -// CHECK-RECORD: LLVMType:<{ i8 }> +// CHECK-RECORD: LLVMType:%union.u2 = type <{ i8 }> // CHECK-RECORD: IsZeroInitializable:1 // CHECK-RECORD: BitFields:[ // CHECK-RECORD: <CGBitFieldInfo Size:3 IsSigned:0 @@ -288,7 +288,7 @@ _Bool test_6() { // CHECK-RECORD: *** Dumping IRgen Record Layout // CHECK-RECORD: Record: struct s7 // CHECK-RECORD: Layout: <CGRecordLayout -// CHECK-RECORD: LLVMType:{ i32, i32, i32, i8, [3 x i8], [4 x i8], [12 x i8] } +// CHECK-RECORD: LLVMType:%struct.s7 = type { i32, i32, i32, i8, [3 x i8], [4 x i8], [12 x i8] } // CHECK-RECORD: IsZeroInitializable:1 // CHECK-RECORD: BitFields:[ // CHECK-RECORD: <CGBitFieldInfo Size:5 IsSigned:1 diff --git a/test/CodeGen/blocksignature.c b/test/CodeGen/blocksignature.c index 7526f19468..63fe1248a8 100644 --- a/test/CodeGen/blocksignature.c +++ b/test/CodeGen/blocksignature.c @@ -2,13 +2,13 @@ // RUN: %clang_cc1 -fblocks -triple i686-apple-darwin9 %s -emit-llvm -o - | FileCheck %s -check-prefix=X32 // X64: @.str = private unnamed_addr constant [6 x i8] c"v8@?0\00" -// X64: @__block_literal_global = internal constant %1 { i8** @_NSConcreteGlobalBlock, i32 1342177280, +// X64: @__block_literal_global = internal constant {{.*}} { i8** @_NSConcreteGlobalBlock, i32 1342177280, // X64: @.str1 = private unnamed_addr constant [12 x i8] c"i16@?0c8f12\00" // X64: store i32 1073741824, i32* // X32: [[STR1:@.*]] = private 
unnamed_addr constant [6 x i8] c"v4@?0\00" -// X32: @__block_descriptor_tmp = internal constant [[FULL_DESCRIPTOR_T:%.*]] { i32 0, i32 20, i8* getelementptr inbounds ([6 x i8]* [[STR1]], i32 0, i32 0), i8* null } -// X32: @__block_literal_global = internal constant [[GLOBAL_LITERAL_T:%.*]] { i8** @_NSConcreteGlobalBlock, i32 1342177280, i32 0, i8* bitcast (void (i8*)* @__block_global_{{.*}} to i8*), [[DESCRIPTOR_T:%.*]]* bitcast ([[FULL_DESCRIPTOR_T]]* @__block_descriptor_tmp to {{%.*}}*) } +// X32: @__block_descriptor_tmp = internal constant [[FULL_DESCRIPTOR_T:.*]] { i32 0, i32 20, i8* getelementptr inbounds ([6 x i8]* [[STR1]], i32 0, i32 0), i8* null } +// X32: @__block_literal_global = internal constant [[GLOBAL_LITERAL_T:.*]] { i8** @_NSConcreteGlobalBlock, i32 1342177280, i32 0, i8* bitcast (void (i8*)* @__block_global_{{.*}} to i8*), [[DESCRIPTOR_T:%.*]]* bitcast ([[FULL_DESCRIPTOR_T]]* @__block_descriptor_tmp to {{%.*}}*) } // X32: [[STR2:@.*]] = private unnamed_addr constant [11 x i8] c"i12@?0c4f8\00" // X32: @__block_descriptor_tmp{{.*}} = internal constant [[FULL_DESCRIPTOR_T]] { i32 0, i32 24, i8* getelementptr inbounds ([11 x i8]* [[STR2]], i32 0, i32 0), i8* null } // X32: store i32 1073741824, i32* diff --git a/test/CodeGen/blockstret.c b/test/CodeGen/blockstret.c index e49b52a188..d5dae6f3a6 100644 --- a/test/CodeGen/blockstret.c +++ b/test/CodeGen/blockstret.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -fblocks -triple x86_64-apple-darwin9 %s -emit-llvm -o - | FileCheck %s -check-prefix=X64 // RUN: %clang_cc1 -fblocks -triple i686-apple-darwin9 %s -emit-llvm -o - | FileCheck %s -check-prefix=X32 -// X64: internal constant {{%.*}} { i8** @_NSConcreteGlobalBlock, i32 1879048192 +// X64: internal constant {{.*}} { i8** @_NSConcreteGlobalBlock, i32 1879048192 // X64: store i32 1610612736, i32* %want // X32: @_NSConcreteGlobalBlock, i32 1879048192, i32 0, diff --git a/test/CodeGen/complex-indirect.c b/test/CodeGen/complex-indirect.c index fbe73d6ae2..45eb195484 100644 --- a/test/CodeGen/complex-indirect.c +++ b/test/CodeGen/complex-indirect.c @@ -6,5 +6,5 @@ void a(int,int,int,int,int,int,__complex__ char); void b(__complex__ char *y) { a(0,0,0,0,0,0,*y); } // CHECK: define void @b -// CHECK: alloca %0, align 8 -// CHECK: call void @a(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %0* byval align 8 +// CHECK: alloca { i8, i8 }*, align 8 +// CHECK: call void @a(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, { i8, i8 }* byval align 8 diff --git a/test/CodeGen/const-arithmetic.c b/test/CodeGen/const-arithmetic.c index a28f73f004..5b7a9ec412 100644 --- a/test/CodeGen/const-arithmetic.c +++ b/test/CodeGen/const-arithmetic.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s -// CHECK: @g1 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16 -// CHECK: @g2 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16 +// CHECK: @g1 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %"struct.<anonymous>"]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %"struct.<anonymous>"]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16 +// CHECK: @g2 = 
global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %"struct.<anonymous>"]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %"struct.<anonymous>"]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16 extern struct { unsigned char a, b; } g0[]; void *g1[] = {g0 + -1, g0 + -23 }; diff --git a/test/CodeGen/const-init.c b/test/CodeGen/const-init.c index 9e61fc8c6e..7062fdba6d 100644 --- a/test/CodeGen/const-init.c +++ b/test/CodeGen/const-init.c @@ -26,21 +26,21 @@ union s2 { int g0 = (int)(&(((union s2 *) 0)->f0.f0) - 0); -// CHECK: @g1x = global {{%.}} { double 1.000000e+00{{[0]*}}, double 0.000000e+00{{[0]*}} } +// CHECK: @g1x = global { double, double } { double 1.000000e+00{{[0]*}}, double 0.000000e+00{{[0]*}} } _Complex double g1x = 1.0f; -// CHECK: @g1y = global {{%.}} { double 0.000000e+00{{[0]*}}, double 1.000000e+00{{[0]*}} } +// CHECK: @g1y = global { double, double } { double 0.000000e+00{{[0]*}}, double 1.000000e+00{{[0]*}} } _Complex double g1y = 1.0fi; -// CHECK: @g1 = global {{%.}} { i8 1, i8 10 } +// CHECK: @g1 = global { i8, i8 } { i8 1, i8 10 } _Complex char g1 = (char) 1 + (char) 10 * 1i; -// CHECK: @g2 = global %2 { i32 1, i32 10 } +// CHECK: @g2 = global { i32, i32 } { i32 1, i32 10 } _Complex int g2 = 1 + 10i; -// CHECK: @g3 = global {{%.}} { float 1.000000e+00{{[0]*}}, float 1.000000e+0{{[0]*}}1 } +// CHECK: @g3 = global { float, float } { float 1.000000e+00{{[0]*}}, float 1.000000e+0{{[0]*}}1 } _Complex float g3 = 1.0 + 10.0i; -// CHECK: @g4 = global {{%.}} { double 1.000000e+00{{[0]*}}, double 1.000000e+0{{[0]*}}1 } +// CHECK: @g4 = global { double, double } { double 1.000000e+00{{[0]*}}, double 1.000000e+0{{[0]*}}1 } _Complex double g4 = 1.0 + 10.0i; -// CHECK: @g5 = global %2 zeroinitializer +// CHECK: @g5 = global { i32, i32 } zeroinitializer _Complex int g5 = (2 + 3i) == (5 + 7i); -// CHECK: @g6 = global {{%.}} { double -1.100000e+0{{[0]*}}1, double 2.900000e+0{{[0]*}}1 } +// CHECK: @g6 = global { double, double } { double -1.100000e+0{{[0]*}}1, double 2.900000e+0{{[0]*}}1 } _Complex double g6 = (2.0 + 3.0i) * (5.0 + 7.0i); // CHECK: @g7 = global i32 1 int g7 = (2 + 3i) * (5 + 7i) == (-11 + 29i); @@ -52,14 +52,14 @@ int g9 = (2 + 3i) * (5 + 7i) != (-11 + 29i); int g10 = (2.0 + 3.0i) * (5.0 + 7.0i) != (-11.0 + 29.0i); // PR5108 -// CHECK: @gv1 = global %struct.anon <{ i32 0, i8 7 }>, align 1 +// CHECK: @gv1 = global %"struct.<anonymous>" <{ i32 0, i8 7 }>, align 1 struct { unsigned long a; unsigned long b:3; } __attribute__((__packed__)) gv1 = { .a = 0x0, .b = 7, }; // PR5118 -// CHECK: @gv2 = global %4 <{ i8 1, i8* null }>, align 1 +// CHECK: @gv2 = global %"struct.<anonymous>.0" <{ i8 1, i8* null }>, align 1 struct { unsigned char a; char *b; diff --git a/test/CodeGen/decl.c b/test/CodeGen/decl.c index 29520d77ff..21b75797f0 100644 --- a/test/CodeGen/decl.c +++ b/test/CodeGen/decl.c @@ -2,8 +2,8 @@ // CHECK: @test1.x = internal constant [12 x i32] [i32 1 // CHECK: @test2.x = internal unnamed_addr constant [13 x i32] [i32 1, -// CHECK: @test5w = global %0 { i32 2, [4 x i8] undef } -// CHECK: @test5y = global %union.test5u { double 7.300000e+0{{[0]*}}1 } +// CHECK: @test5w = global { i32, [4 x i8] } { i32 2, [4 x i8] undef } +// CHECK: @test5y = global { double } { double 7.300000e+0{{[0]*}}1 } // CHECK: @test6.x = internal unnamed_addr constant %struct.SelectDest { i8 1, i8 2, i32 3, i32 0 } diff --git a/test/CodeGen/designated-initializers.c b/test/CodeGen/designated-initializers.c index ee1d1701ee..ec96dd3214 
100644 --- a/test/CodeGen/designated-initializers.c +++ b/test/CodeGen/designated-initializers.c @@ -5,13 +5,13 @@ struct foo { int b; }; -// CHECK: @u = global %union.anon zeroinitializer +// CHECK: @u = global %"union.<anonymous>" zeroinitializer union { int i; float f; } u = { }; -// CHECK: @u2 = global %2 { i32 0, [4 x i8] undef } +// CHECK: @u2 = global { i32, [4 x i8] } { i32 0, [4 x i8] undef } union { int i; double f; } u2 = { }; -// CHECK: @u3 = global %3 zeroinitializer +// CHECK: @u3 = global %"union.<anonymous>.1" zeroinitializer union { double f; int i; } u3 = { }; // CHECK: @b = global [2 x i32] [i32 0, i32 22] @@ -39,11 +39,11 @@ struct ds ds0 = { { { .a = 0 } } }; struct ds ds1 = { { .a = 1 } }; struct ds ds2 = { { .b = 1 } }; struct ds ds3 = { .a = 0 }; -// CHECK: @ds4 = global %struct.ds { %1 { %struct.anon zeroinitializer, i16 0, %struct.anon { i16 1 } } } +// CHECK: @ds4 = global %struct.ds { %"struct.ds::<anonymous>" { %"struct.ds::<anonymous struct>::<anonymous>" zeroinitializer, i16 0, %"struct.ds::<anonymous struct>::<anonymous>.2" { i16 1 } } } struct ds ds4 = { .c = 1 }; struct ds ds5 = { { { .a = 0 } }, .b = 1 }; struct ds ds6 = { { .a = 0, .b = 1 } }; -// CHECK: @ds7 = global %struct.ds { %1 { %struct.anon { i16 2 }, i16 3, %struct.anon zeroinitializer } } +// CHECK: @ds7 = global %struct.ds { %"struct.ds::<anonymous>" { %"struct.ds::<anonymous struct>::<anonymous>" { i16 2 }, i16 3, %"struct.ds::<anonymous struct>::<anonymous>.2" zeroinitializer } } struct ds ds7 = { { { .a = 1 @@ -59,7 +59,7 @@ void test1(int argc, char **argv) .b = 1024, }; - // CHECK: bitcast %union.anon* %u2 + // CHECK: bitcast %union.* %u2 // CHECK: call void @llvm.memset union { int i; float f; } u2 = { }; diff --git a/test/CodeGen/flexible-array-init.c b/test/CodeGen/flexible-array-init.c index 36323502a4..d3079dce3d 100644 --- a/test/CodeGen/flexible-array-init.c +++ b/test/CodeGen/flexible-array-init.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -o - %s | FileCheck %s struct { int x; int y[]; } a = { 1, 7, 11 }; -// CHECK: @a = global %0 { i32 1, [2 x i32] [i32 7, i32 11] } +// CHECK: @a = global { i32, [2 x i32] } { i32 1, [2 x i32] [i32 7, i32 11] } struct { int x; int y[]; } b = { 1, { 13, 15 } }; -// CHECK: @b = global %0 { i32 1, [2 x i32] [i32 13, i32 15] } +// CHECK: @b = global { i32, [2 x i32] } { i32 1, [2 x i32] [i32 13, i32 15] } diff --git a/test/CodeGen/global-init.c b/test/CodeGen/global-init.c index f8d2975179..074c2a065a 100644 --- a/test/CodeGen/global-init.c +++ b/test/CodeGen/global-init.c @@ -32,7 +32,7 @@ struct ManyFields FewInits = {1, 2}; // PR6766 -// CHECK: @l = global %0 { [24 x i8] c"f\00\00\00o\00\00\00o\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", i32 1 } +// CHECK: @l = global { [24 x i8], i32 } { [24 x i8] c"f\00\00\00o\00\00\00o\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", i32 1 } typedef __WCHAR_TYPE__ wchar_t; struct K { wchar_t L[6]; diff --git a/test/CodeGen/mmx-inline-asm.c b/test/CodeGen/mmx-inline-asm.c index c473a930ec..5d1371ed90 100644 --- a/test/CodeGen/mmx-inline-asm.c +++ b/test/CodeGen/mmx-inline-asm.c @@ -2,7 +2,7 @@ // <rdar://problem/9091220> #include <mmintrin.h> -// CHECK: type { x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx } +// CHECK: { x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx } void foo(long long fill) { __m64 vfill = _mm_cvtsi64_m64(fill); diff --git a/test/CodeGen/ms-anonymous-struct.c b/test/CodeGen/ms-anonymous-struct.c index 3afe440caf..b41caab030 100644 
--- a/test/CodeGen/ms-anonymous-struct.c +++ b/test/CodeGen/ms-anonymous-struct.c @@ -1,19 +1,19 @@ // RUN: %clang_cc1 -fms-extensions -emit-llvm -o - %s | FileCheck %s +// CHECK: %struct.test = type { i32, %struct.nested2, i32 } +// CHECK: %struct.nested2 = type { i32, %struct.nested1, i32 } // CHECK: %struct.nested1 = type { i32, i32 } typedef struct nested1 { int a1; int b1; } NESTED1; -// CHECK: %struct.nested2 = type { i32, %struct.nested1, i32 } struct nested2 { int a; NESTED1; int b; }; -// CHECK: %struct.test = type { i32, %struct.nested2, i32 } struct test { int x; struct nested2; diff --git a/test/CodeGen/packed-union.c b/test/CodeGen/packed-union.c index 0aeed008b7..31ce614407 100644 --- a/test/CodeGen/packed-union.c +++ b/test/CodeGen/packed-union.c @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm %s -o %t -// RUN: grep "struct._attrs = type <{ i32, i8 }>" %t typedef struct _attrs { unsigned file_attributes; unsigned char filename_length; diff --git a/test/CodeGen/pragma-pack-3.c b/test/CodeGen/pragma-pack-3.c index 676f0d77eb..04b636e9c7 100644 --- a/test/CodeGen/pragma-pack-3.c +++ b/test/CodeGen/pragma-pack-3.c @@ -1,9 +1,7 @@ // RUN: %clang_cc1 -triple i386-apple-darwin9 %s -emit-llvm -o - | FileCheck -check-prefix X32 %s -// CHECK-X32: %struct.menu = type <{ i8*, i8, i8 }> // CHECK-X32: %union.command = type <{ i8*, [2 x i8] }> // RUN: %clang_cc1 -triple x86_64-apple-darwin9 %s -emit-llvm -o - | FileCheck -check-prefix X64 %s -// CHECK-X64: %struct.menu = type <{ i8*, i8, i8 }> // CHECK-X64: %union.command = type <{ i8*, [2 x i8] }> // <rdar://problem/7184250> diff --git a/test/CodeGen/regparm.c b/test/CodeGen/regparm.c index d628b685f9..86931bc245 100644 --- a/test/CodeGen/regparm.c +++ b/test/CodeGen/regparm.c @@ -20,7 +20,7 @@ void f1(int i, int j, int k) { } int main(void) { - // CHECK: call void @reduced(i8 signext inreg 0, {{.*}} %struct.foo* inreg null + // CHECK: call void @reduced(i8 signext inreg 0, {{.*}} %"struct.<anonymous>"* inreg null reduced(0, 0.0, 0, 0.0, 0); // CHECK: call x86_stdcallcc void {{.*}}(i32 inreg 1, i32 inreg 2) bar(1,2); diff --git a/test/CodeGen/transparent-union.c b/test/CodeGen/transparent-union.c index afdb3d6090..7d0c979916 100644 --- a/test/CodeGen/transparent-union.c +++ b/test/CodeGen/transparent-union.c @@ -11,7 +11,7 @@ typedef union { void f0(transp_t0 obj); // CHECK: define void @f1_0(i32* %a0) -// CHECK: call void @f0(%union.transp_t0* byval align 4 %{{.*}}) +// CHECK: call void @f0(%"union.<anonymous>"* byval align 4 %{{.*}}) // CHECK: call void %{{.*}}(i8* %{{[a-z0-9]*}}) // CHECK: } void f1_0(int *a0) { diff --git a/test/CodeGen/trapv.c b/test/CodeGen/trapv.c index 7f192c634c..f52dad5564 100644 --- a/test/CodeGen/trapv.c +++ b/test/CodeGen/trapv.c @@ -1,7 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -ftrapv %s -emit-llvm -o - | FileCheck %s -// CHECK: [[I32O:%.*]] = type { i32, i1 } - unsigned int ui, uj, uk; int i, j, k; @@ -16,9 +14,9 @@ void test0() { // CHECK: [[T1:%.*]] = load i32* @j // CHECK-NEXT: [[T2:%.*]] = load i32* @k - // CHECK-NEXT: [[T3:%.*]] = call [[I32O]] @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 [[T2]]) - // CHECK-NEXT: [[T4:%.*]] = extractvalue [[I32O]] [[T3]], 0 - // CHECK-NEXT: [[T5:%.*]] = extractvalue [[I32O]] [[T3]], 1 + // CHECK-NEXT: [[T3:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 [[T2]]) + // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T3]], 0 + // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T3]], 1 // CHECK-NEXT: 
br i1 [[T5]] // CHECK: call void @llvm.trap() i = j + k; @@ -30,9 +28,9 @@ void test1() { opaque(i++); // CHECK: [[T1:%.*]] = load i32* @i - // CHECK-NEXT: [[T2:%.*]] = call [[I32O]] @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1) - // CHECK-NEXT: [[T3:%.*]] = extractvalue [[I32O]] [[T2]], 0 - // CHECK-NEXT: [[T4:%.*]] = extractvalue [[I32O]] [[T2]], 1 + // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1) + // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0 + // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1 // CHECK-NEXT: br i1 [[T4]] // CHECK: call void @llvm.trap() } @@ -43,9 +41,9 @@ void test2() { opaque(++i); // CHECK: [[T1:%.*]] = load i32* @i - // CHECK-NEXT: [[T2:%.*]] = call [[I32O]] @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1) - // CHECK-NEXT: [[T3:%.*]] = extractvalue [[I32O]] [[T2]], 0 - // CHECK-NEXT: [[T4:%.*]] = extractvalue [[I32O]] [[T2]], 1 + // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1) + // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0 + // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1 // CHECK-NEXT: br i1 [[T4]] // CHECK: call void @llvm.trap() } diff --git a/test/CodeGen/union-init2.c b/test/CodeGen/union-init2.c index 1386c27cc0..bf20696a22 100644 --- a/test/CodeGen/union-init2.c +++ b/test/CodeGen/union-init2.c @@ -1,11 +1,11 @@ // RUN: %clang_cc1 -emit-llvm %s -o - -triple i686-pc-linux-gnu | FileCheck %s // Make sure we generate something sane instead of a ptrtoint -// CHECK: bitcast (%0* @r to %union.x*), [4 x i8] undef +// CHECK: bitcast ({ %union.x*, [4 x i8] }* @r to %union.x*), [4 x i8] undef union x {long long b;union x* a;} r = {.a = &r}; -// CHECK: global %1 { [3 x i8] zeroinitializer, [5 x i8] undef } +// CHECK: global { [3 x i8], [5 x i8] } { [3 x i8] zeroinitializer, [5 x i8] undef } union z { char a[3]; long long b; diff --git a/test/CodeGen/volatile-1.c b/test/CodeGen/volatile-1.c index f54afffdad..7c7a822e88 100644 --- a/test/CodeGen/volatile-1.c +++ b/test/CodeGen/volatile-1.c @@ -4,7 +4,7 @@ volatile int i, j, k; volatile int ar[5]; volatile char c; -// CHECK: @ci = common global [[CINT:%.*]] zeroinitializer +// CHECK: @ci = common global [[CINT:.*]] zeroinitializer volatile _Complex int ci; volatile struct S { #ifdef __cplusplus diff --git a/test/CodeGen/volatile-2.c b/test/CodeGen/volatile-2.c index 1ceaf17dfc..490b7d776b 100644 --- a/test/CodeGen/volatile-2.c +++ b/test/CodeGen/volatile-2.c @@ -3,8 +3,8 @@ void test0() { // CHECK: define void @test0() // CHECK: [[F:%.*]] = alloca float - // CHECK-NEXT: [[REAL:%.*]] = volatile load float* getelementptr inbounds ({{%.*}} @test0_v, i32 0, i32 0) - // CHECK-NEXT: volatile load float* getelementptr inbounds ({{%.*}} @test0_v, i32 0, i32 1) + // CHECK-NEXT: [[REAL:%.*]] = volatile load float* getelementptr inbounds ({ float, float }* @test0_v, i32 0, i32 0) + // CHECK-NEXT: volatile load float* getelementptr inbounds ({{.*}} @test0_v, i32 0, i32 1) // CHECK-NEXT: store float [[REAL]], float* [[F]], align 4 // CHECK-NEXT: ret void extern volatile _Complex float test0_v; @@ -13,10 +13,10 @@ void test0() { void test1() { // CHECK: define void @test1() - // CHECK: [[REAL:%.*]] = volatile load float* getelementptr inbounds ({{%.*}} @test1_v, i32 0, i32 0) - // CHECK-NEXT: [[IMAG:%.*]] = volatile load float* getelementptr inbounds ({{%.*}} @test1_v, i32 0, i32 1) - // CHECK-NEXT: volatile store float [[REAL]], float* getelementptr inbounds ({{%.*}} @test1_v, i32 0, 
i32 0) - // CHECK-NEXT: volatile store float [[IMAG]], float* getelementptr inbounds ({{%.*}} @test1_v, i32 0, i32 1) + // CHECK: [[REAL:%.*]] = volatile load float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0) + // CHECK-NEXT: [[IMAG:%.*]] = volatile load float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1) + // CHECK-NEXT: volatile store float [[REAL]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0) + // CHECK-NEXT: volatile store float [[IMAG]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1) // CHECK-NEXT: ret void extern volatile _Complex float test1_v; test1_v = test1_v; diff --git a/test/CodeGen/x86_32-arguments-darwin.c b/test/CodeGen/x86_32-arguments-darwin.c index 83f0f648a2..731d4f613f 100644 --- a/test/CodeGen/x86_32-arguments-darwin.c +++ b/test/CodeGen/x86_32-arguments-darwin.c @@ -231,7 +231,7 @@ v4i32 f55(v4i32 arg) { return arg+arg; } // CHECK: i8 signext %a0, %struct.s56_0* byval align 4 %a1, // CHECK: x86_mmx %a2.coerce, %struct.s56_1* byval align 4, // CHECK: i64 %a4.coerce, %struct.s56_2* byval align 4, -// CHECK: <4 x i32> %a6, %struct.s39* byval align 16 %a7, +// CHECK: <4 x i32> %a6, %struct.s56_3* byval align 16 %a7, // CHECK: <2 x double> %a8, %struct.s56_4* byval align 16 %a9, // CHECK: <8 x i32> %a10, %struct.s56_5* byval align 4, // CHECK: <4 x double> %a12, %struct.s56_6* byval align 4) @@ -240,7 +240,7 @@ v4i32 f55(v4i32 arg) { return arg+arg; } // CHECK: i32 %{{[^ ]*}}, %struct.s56_0* byval align 4 %{{[^ ]*}}, // CHECK: x86_mmx %{{[^ ]*}}, %struct.s56_1* byval align 4 %{{[^ ]*}}, // CHECK: i64 %{{[^ ]*}}, %struct.s56_2* byval align 4 %{{[^ ]*}}, -// CHECK: <4 x i32> %{{[^ ]*}}, %struct.s39* byval align 16 %{{[^ ]*}}, +// CHECK: <4 x i32> %{{[^ ]*}}, %struct.s56_3* byval align 16 %{{[^ ]*}}, // CHECK: <2 x double> %{{[^ ]*}}, %struct.s56_4* byval align 16 %{{[^ ]*}}, // CHECK: <8 x i32> {{[^ ]*}}, %struct.s56_5* byval align 4 %{{[^ ]*}}, // CHECK: <4 x double> {{[^ ]*}}, %struct.s56_6* byval align 4 %{{[^ ]*}}) diff --git a/test/CodeGen/x86_64-arguments.c b/test/CodeGen/x86_64-arguments.c index 090e70d810..d7a42822c5 100644 --- a/test/CodeGen/x86_64-arguments.c +++ b/test/CodeGen/x86_64-arguments.c @@ -42,8 +42,8 @@ void f7(e7 a0) { // Test merging/passing of upper eightbyte with X87 class. 
 //
-// CHECK: define void @f8_1(%struct.s19* sret %agg.result)
-// CHECK: define void @f8_2(%struct.s19* byval align 16 %a0)
+// CHECK: define void @f8_1(%union.u8* sret %agg.result)
+// CHECK: define void @f8_2(%union.u8* byval align 16 %a0)
 union u8 {
 long double a;
 int b;
@@ -58,7 +58,7 @@ struct s9 { int a; int b; int : 0; } f9(void) { while (1) {} }
 struct s10 { int a; int b; int : 0; };
 void f10(struct s10 a0) {}
-// CHECK: define void @f11(%struct.s19* sret %agg.result)
+// CHECK: define void @f11(%"union.<anonymous>"* sret %agg.result)
 union { long double a; float b; } f11() { while (1) {} }
 // CHECK: define i32 @f12_0()
@@ -147,7 +147,7 @@ struct f24s { long a; int b; };
 struct f23S f24(struct f23S *X, struct f24s *P2) {
 return *X;
- // CHECK: define %struct.f24s @f24(%struct.f23S* %X, %struct.f24s* %P2)
+ // CHECK: define { i64, i32 } @f24(%struct.f23S* %X, %struct.f24s* %P2)
 }
 // rdar://8248065
@@ -169,7 +169,7 @@ struct foo26 {
 };
 struct foo26 f26(struct foo26 *P) {
- // CHECK: define %struct.foo26 @f26(%struct.foo26* %P)
+ // CHECK: define { i32*, float* } @f26(%struct.foo26* %P)
 return *P;
 }
diff --git a/test/CodeGenCXX/blocks.cpp b/test/CodeGenCXX/blocks.cpp
index a4d5b86565..0e310bdbbc 100644
--- a/test/CodeGenCXX/blocks.cpp
+++ b/test/CodeGenCXX/blocks.cpp
@@ -31,7 +31,7 @@ namespace test1 {
 // ...unless they have mutable fields...
 // CHECK: define void @_ZN5test15test3Ev()
- // CHECK: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+ // CHECK: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK: [[T0:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to void ()*
 // CHECK: store void ()* [[T0]], void ()** @out
 struct mut { mutable int x; };
@@ -43,7 +43,7 @@ namespace test1 {
 // ...or non-trivial destructors...
 // CHECK: define void @_ZN5test15test4Ev()
 // CHECK: [[OBJ:%.*]] = alloca
- // CHECK: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+ // CHECK: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK: [[T0:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to void ()*
 // CHECK: store void ()* [[T0]], void ()** @out
 struct scope { int x; ~scope(); };
diff --git a/test/CodeGenCXX/class-layout.cpp b/test/CodeGenCXX/class-layout.cpp
index 96fbae8970..9569f476dd 100644
--- a/test/CodeGenCXX/class-layout.cpp
+++ b/test/CodeGenCXX/class-layout.cpp
@@ -20,8 +20,8 @@ namespace Test3 {
 namespace Test4 {
 // Test from PR5589.
- // CHECK: %"struct.Test4::A" = type { i32, i8, float }
 // CHECK: %"struct.Test4::B" = type { %"struct.Test4::A", i16, double }
+ // CHECK: %"struct.Test4::A" = type { i32, i8, float }
 struct A {
 int a;
 char c;
diff --git a/test/CodeGenCXX/constructors.cpp b/test/CodeGenCXX/constructors.cpp
index 75588ce06c..9e09099ba4 100644
--- a/test/CodeGenCXX/constructors.cpp
+++ b/test/CodeGenCXX/constructors.cpp
@@ -83,12 +83,12 @@ struct D : A {
 D::D(int x, ...) : A(ValueClass(x, x+1)), mem(x*x) {}
-// CHECK: define void @_ZN1DC1Eiz(%struct.B* %this, i32 %x, ...) unnamed_addr
+// CHECK: define void @_ZN1DC1Eiz(%struct.D* %this, i32 %x, ...) unnamed_addr
 // CHECK: call void @_ZN10ValueClassC1Eii(
 // CHECK: call void @_ZN1AC2E10ValueClass(
 // CHECK: call void @_ZN6MemberC1Ei(
-// CHECK: define void @_ZN1DC2Eiz(%struct.B* %this, i32 %x, ...) unnamed_addr
+// CHECK: define void @_ZN1DC2Eiz(%struct.D* %this, i32 %x, ...) unnamed_addr
 // CHECK: call void @_ZN10ValueClassC1Eii(
 // CHECK: call void @_ZN1AC2E10ValueClass(
 // CHECK: call void @_ZN6MemberC1Ei(
diff --git a/test/CodeGenCXX/copy-constructor-elim-2.cpp b/test/CodeGenCXX/copy-constructor-elim-2.cpp
index 50aea94b09..a4a688f737 100644
--- a/test/CodeGenCXX/copy-constructor-elim-2.cpp
+++ b/test/CodeGenCXX/copy-constructor-elim-2.cpp
@@ -21,7 +21,7 @@ namespace no_elide_base {
 Derived(const Other &O);
 };
- // CHECK: define {{.*}} @_ZN13no_elide_base7DerivedC1ERKNS_5OtherE(%"struct.no_elide_base::Derived"* %this, %"struct.PR8683::A"* %O) unnamed_addr
+ // CHECK: define {{.*}} @_ZN13no_elide_base7DerivedC1ERKNS_5OtherE(%"struct.no_elide_base::Derived"* %this, %"struct.no_elide_base::Other"* %O) unnamed_addr
 Derived::Derived(const Other &O)
 // CHECK: call {{.*}} @_ZNK13no_elide_base5OthercvNS_4BaseEEv
 // CHECK: call {{.*}} @_ZN13no_elide_base4BaseC2ERKS0_
diff --git a/test/CodeGenCXX/copy-initialization.cpp b/test/CodeGenCXX/copy-initialization.cpp
index 62b9f2678a..aecd64ec1a 100644
--- a/test/CodeGenCXX/copy-initialization.cpp
+++ b/test/CodeGenCXX/copy-initialization.cpp
@@ -12,7 +12,7 @@ struct Bar {
 void f(Foo);
-// CHECK: define void @_Z1g3Foo(%struct.Bar* %foo)
+// CHECK: define void @_Z1g3Foo(%struct.Foo* %foo)
 void g(Foo foo) {
 // CHECK: call void @_ZN3BarC1Ev
 // CHECK: @_ZNK3BarcvRK3FooEv
diff --git a/test/CodeGenCXX/delete.cpp b/test/CodeGenCXX/delete.cpp
index ddc7bb8dac..51c860e909 100644
--- a/test/CodeGenCXX/delete.cpp
+++ b/test/CodeGenCXX/delete.cpp
@@ -54,7 +54,7 @@ namespace test0 {
 delete a;
 }
- // CHECK: define linkonce_odr void @_ZN5test01AD1Ev(%class.A* %this) unnamed_addr
+ // CHECK: define linkonce_odr void @_ZN5test01AD1Ev(%"struct.test0::A"* %this) unnamed_addr
 // CHECK: define linkonce_odr void @_ZN5test01AdlEPv
 }
diff --git a/test/CodeGenCXX/destructors.cpp b/test/CodeGenCXX/destructors.cpp
index c6f5bedfdf..5e14cbef89 100644
--- a/test/CodeGenCXX/destructors.cpp
+++ b/test/CodeGenCXX/destructors.cpp
@@ -40,11 +40,11 @@ namespace PR7526 {
 struct allocator_derived : allocator { };
- // CHECK: define void @_ZN6PR75269allocatorD2Ev(%"struct.PR5529::A"* %this) unnamed_addr
+ // CHECK: define void @_ZN6PR75269allocatorD2Ev(%"struct.PR7526::allocator"* %this) unnamed_addr
 // CHECK: call void @__cxa_call_unexpected
 allocator::~allocator() throw() { foo(); }
- // CHECK: define linkonce_odr void @_ZN6PR752617allocator_derivedD1Ev(%"struct.PR5529::A"* %this) unnamed_addr
+ // CHECK: define linkonce_odr void @_ZN6PR752617allocator_derivedD1Ev(%"struct.PR7526::allocator_derived"* %this) unnamed_addr
 // CHECK-NOT: call void @__cxa_call_unexpected
 // CHECK: }
 void foo() {
@@ -145,10 +145,10 @@ namespace test1 {
 P::~P() {} // CHECK: define void @_ZN5test11PD2Ev(%"struct.test1::P"* %this) unnamed_addr
 struct Q : A, B { ~Q(); };
- Q::~Q() {} // CHECK: define void @_ZN5test11QD2Ev(%"struct.test1::M"* %this) unnamed_addr
+ Q::~Q() {} // CHECK: define void @_ZN5test11QD2Ev(%"struct.test1::Q"* %this) unnamed_addr
 struct R : A { ~R(); };
- R::~R() { A a; } // CHECK: define void @_ZN5test11RD2Ev(%"struct.test1::M"* %this) unnamed_addr
+ R::~R() { A a; } // CHECK: define void @_ZN5test11RD2Ev(%"struct.test1::R"* %this) unnamed_addr
 struct S : A { ~S(); int x; };
 S::~S() {} // alias tested above
@@ -168,7 +168,7 @@ namespace test2 {
 struct B : A { ~B(); };
 B::~B() {}
- // CHECK: define void @_ZN5test21BD2Ev(%"struct.test1::M"* %this) unnamed_addr
+ // CHECK: define void @_ZN5test21BD2Ev(%"struct.test2::B"* %this) unnamed_addr
 // CHECK: call void @_ZN5test21AD2Ev
 }
diff --git a/test/CodeGenCXX/eh.cpp b/test/CodeGenCXX/eh.cpp
index 44219b4f17..58cb44515d 100644
--- a/test/CodeGenCXX/eh.cpp
+++ b/test/CodeGenCXX/eh.cpp
@@ -14,7 +14,7 @@ void test1() {
 // CHECK-NEXT: [[EXN:%.*]] = bitcast i8* [[EXNOBJ]] to [[DSTAR:%[^*]*\*]]
 // CHECK-NEXT: [[EXN2:%.*]] = bitcast [[DSTAR]] [[EXN]] to i8*
 // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[EXN2]], i8* bitcast ([[DSTAR]] @d1 to i8*), i64 8, i32 8, i1 false)
-// CHECK-NEXT: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast (%0* @_ZTI7test1_D to i8*), i8* null) noreturn
+// CHECK-NEXT: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast ({ i8*, i8* }* @_ZTI7test1_D to i8*), i8* null) noreturn
 // CHECK-NEXT: unreachable
@@ -38,7 +38,7 @@ void test2() {
 // CHECK-NEXT: invoke void @_ZN7test2_DC1ERKS_([[DSTAR]] [[EXN]], [[DSTAR]] @d2)
 // CHECK-NEXT: to label %[[CONT:.*]] unwind label %{{.*}}
 // : [[CONT]]: (can't check this in Release-Asserts builds)
-// CHECK: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast (%{{.*}}* @_ZTI7test2_D to i8*), i8* null) noreturn
+// CHECK: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast ({{.*}}* @_ZTI7test2_D to i8*), i8* null) noreturn
 // CHECK-NEXT: unreachable
@@ -56,7 +56,7 @@ void test3() {
 // CHECK: [[EXNOBJ:%.*]] = call i8* @__cxa_allocate_exception(i64 8)
 // CHECK-NEXT: [[EXN:%.*]] = bitcast i8* [[EXNOBJ]] to [[D:%[^*]+]]**
 // CHECK-NEXT: store [[D]]* null, [[D]]** [[EXN]]
-// CHECK-NEXT: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast (%1* @_ZTIPV7test3_D to i8*), i8* null) noreturn
+// CHECK-NEXT: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast ({ i8*, i8*, i32, i8* }* @_ZTIPV7test3_D to i8*), i8* null) noreturn
 // CHECK-NEXT: unreachable
@@ -84,10 +84,10 @@ namespace test5 {
 // CHECK: [[EXNOBJ:%.*]] = call i8* @__cxa_allocate_exception(i64 1)
 // CHECK: [[EXNCAST:%.*]] = bitcast i8* [[EXNOBJ]] to [[A:%[^*]*]]*
 // CHECK-NEXT: invoke void @_ZN5test51AC1Ev([[A]]* [[EXNCAST]])
-// CHECK: invoke void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast ({{%.*}}* @_ZTIN5test51AE to i8*), i8* bitcast (void ([[A]]*)* @_ZN5test51AD1Ev to i8*)) noreturn
+// CHECK: invoke void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast ({{.*}}* @_ZTIN5test51AE to i8*), i8* bitcast (void ([[A]]*)* @_ZN5test51AD1Ev to i8*)) noreturn
 // CHECK-NEXT: to label {{%.*}} unwind label %[[HANDLER:[^ ]*]]
 // : [[HANDLER]]: (can't check this in Release-Asserts builds)
-// CHECK: {{%.*}} = call i32 @llvm.eh.typeid.for(i8* bitcast ({{%.*}}* @_ZTIN5test51AE to i8*))
+// CHECK: {{%.*}} = call i32 @llvm.eh.typeid.for(i8* bitcast ({{.*}}* @_ZTIN5test51AE to i8*))
 }
 namespace test6 {
@@ -177,11 +177,11 @@ namespace test9 {
 struct A { A(); };
- // CHECK: define void @_ZN5test91AC1Ev(%"struct.test10::A"* %this) unnamed_addr
+ // CHECK: define void @_ZN5test91AC1Ev(%"struct.test9::A"* %this) unnamed_addr
 // CHECK: call void @_ZN5test91AC2Ev
 // CHECK-NEXT: ret void
- // CHECK: define void @_ZN5test91AC2Ev(%"struct.test10::A"* %this) unnamed_addr
+ // CHECK: define void @_ZN5test91AC2Ev(%"struct.test9::A"* %this) unnamed_addr
 A::A() try {
 // CHECK: invoke void @_ZN5test96opaqueEv()
 opaque();
diff --git a/test/CodeGenCXX/for-range.cpp b/test/CodeGenCXX/for-range.cpp
index af46644623..ab1a2317ba 100644
--- a/test/CodeGenCXX/for-range.cpp
+++ b/test/CodeGenCXX/for-range.cpp
@@ -69,8 +69,8 @@ void for_range() {
 A a;
 for (B b : C()) {
 // CHECK: call void @_ZN1CC1Ev(
- // CHECK: = call %struct.A* @_ZSt5beginR1C(
- // CHECK: = call %struct.A* @_ZSt3endR1C(
+ // CHECK: = call %struct.B* @_ZSt5beginR1C(
+ // CHECK: = call %struct.B* @_ZSt3endR1C(
 // CHECK: br label %[[COND:.*]]
 // CHECK: [[COND]]:
@@ -101,8 +101,8 @@ void for_member_range() {
 A a;
 for (B b : D()) {
 // CHECK: call void @_ZN1DC1Ev(
- // CHECK: = call %struct.A* @_ZN1D5beginEv(
- // CHECK: = call %struct.A* @_ZN1D3endEv(
+ // CHECK: = call %struct.B* @_ZN1D5beginEv(
+ // CHECK: = call %struct.B* @_ZN1D3endEv(
 // CHECK: br label %[[COND:.*]]
 // CHECK: [[COND]]:
diff --git a/test/CodeGenCXX/global-init.cpp b/test/CodeGenCXX/global-init.cpp
index fd8734c5fa..053210bdfb 100644
--- a/test/CodeGenCXX/global-init.cpp
+++ b/test/CodeGenCXX/global-init.cpp
@@ -21,21 +21,21 @@ struct D { ~D(); };
 // PR6205: The casts should not require global initializers
 // CHECK: @_ZN6PR59741cE = external global %"struct.PR5974::C"
 // CHECK: @_ZN6PR59741aE = global %"struct.PR5974::A"* getelementptr inbounds (%"struct.PR5974::C"* @_ZN6PR59741cE, i32 0, i32 0)
-// CHECK: @_ZN6PR59741bE = global %"struct.PR5974::A"* bitcast (i8* getelementptr (i8* bitcast (%"struct.PR5974::C"* @_ZN6PR59741cE to i8*), i64 4) to %"struct.PR5974::A"*), align 8
+// CHECK: @_ZN6PR59741bE = global %"struct.PR5974::B"* bitcast (i8* getelementptr (i8* bitcast (%"struct.PR5974::C"* @_ZN6PR59741cE to i8*), i64 4) to %"struct.PR5974::B"*), align 8
 // CHECK: call void @_ZN1AC1Ev(%struct.A* @a)
 // CHECK: call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.A*)* @_ZN1AD1Ev to void (i8*)*), i8* getelementptr inbounds (%struct.A* @a, i32 0, i32 0), i8* bitcast (i8** @__dso_handle to i8*))
 A a;
-// CHECK: call void @_ZN1BC1Ev(%struct.A* @b)
-// CHECK: call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.A*)* @_ZN1BD1Ev to void (i8*)*), i8* getelementptr inbounds (%struct.A* @b, i32 0, i32 0), i8* bitcast (i8** @__dso_handle to i8*))
+// CHECK: call void @_ZN1BC1Ev(%struct.B* @b)
+// CHECK: call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.B*)* @_ZN1BD1Ev to void (i8*)*), i8* getelementptr inbounds (%struct.B* @b, i32 0, i32 0), i8* bitcast (i8** @__dso_handle to i8*))
 B b;
 // PR6205: this should not require a global initializer
 // CHECK-NOT: call void @_ZN1CC1Ev(%struct.C* @c)
 C c;
-// CHECK: call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.A*)* @_ZN1DD1Ev to void (i8*)*), i8* getelementptr inbounds (%struct.A* @d, i32 0, i32 0), i8* bitcast (i8** @__dso_handle to i8*))
+// CHECK: call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.D*)* @_ZN1DD1Ev to void (i8*)*), i8* getelementptr inbounds (%struct.D* @d, i32 0, i32 0), i8* bitcast (i8** @__dso_handle to i8*))
 D d;
 // <rdar://problem/7458115>
diff --git a/test/CodeGenCXX/mangle-subst-std.cpp b/test/CodeGenCXX/mangle-subst-std.cpp
index 837d4fa857..30b579ceb8 100644
--- a/test/CodeGenCXX/mangle-subst-std.cpp
+++ b/test/CodeGenCXX/mangle-subst-std.cpp
@@ -15,8 +15,8 @@ namespace std {
 struct A { A(); };
- // CHECK: define void @_ZNSt1AC1Ev(%"struct.N::std::A"* %this) unnamed_addr
- // CHECK: define void @_ZNSt1AC2Ev(%"struct.N::std::A"* %this) unnamed_addr
+ // CHECK: define void @_ZNSt1AC1Ev(%"struct.std::A"* %this) unnamed_addr
+ // CHECK: define void @_ZNSt1AC2Ev(%"struct.std::A"* %this) unnamed_addr
 A::A() { }
 };
diff --git a/test/CodeGenCXX/mangle-template.cpp b/test/CodeGenCXX/mangle-template.cpp
index d8ba709eb4..f95e152154 100644
--- a/test/CodeGenCXX/mangle-template.cpp
+++ b/test/CodeGenCXX/mangle-template.cpp
@@ -82,7 +82,7 @@ namespace test7 {
 X(U*, typename int_c<(meta<T>::value + meta<U>::value)>::type *) { }
 };
- // CHECK: define weak_odr {{.*}} @_ZN5test71XIiEC1IdEEPT_PNS_5int_cIXplL_ZNS_4metaIiE5valueEEsr4metaIS3_EE5valueEE4typeE(%"class.test1::T"* %this, double*, float*) unnamed_addr
+ // CHECK: define weak_odr {{.*}} @_ZN5test71XIiEC1IdEEPT_PNS_5int_cIXplL_ZNS_4metaIiE5valueEEsr4metaIS3_EE5valueEE4typeE(%"struct.test7::X"* %this, double*, float*) unnamed_addr
 template X<int>::X(double*, float*);
 }
diff --git a/test/CodeGenCXX/member-function-pointers.cpp b/test/CodeGenCXX/member-function-pointers.cpp
index 011e9cd685..4c42bd8283 100644
--- a/test/CodeGenCXX/member-function-pointers.cpp
+++ b/test/CodeGenCXX/member-function-pointers.cpp
@@ -11,56 +11,56 @@ void (A::*volatile vpa)();
 void (B::*pb)();
 void (C::*pc)();
-// CHECK: @pa2 = global %0 { i64 ptrtoint (void (%struct.A*)* @_ZN1A1fEv to i64), i64 0 }, align 8
+// CHECK: @pa2 = global { i64, i64 } { i64 ptrtoint (void (%struct.A*)* @_ZN1A1fEv to i64), i64 0 }, align 8
 void (A::*pa2)() = &A::f;
-// CHECK: @pa3 = global %0 { i64 1, i64 0 }, align 8
-// CHECK-LP32: @pa3 = global %0 { i32 1, i32 0 }, align 4
+// CHECK: @pa3 = global { i64, i64 } { i64 1, i64 0 }, align 8
+// CHECK-LP32: @pa3 = global { i32, i32 } { i32 1, i32 0 }, align 4
 void (A::*pa3)() = &A::vf1;
-// CHECK: @pa4 = global %0 { i64 9, i64 0 }, align 8
-// CHECK-LP32: @pa4 = global %0 { i32 5, i32 0 }, align 4
+// CHECK: @pa4 = global { i64, i64 } { i64 9, i64 0 }, align 8
+// CHECK-LP32: @pa4 = global { i32, i32 } { i32 5, i32 0 }, align 4
 void (A::*pa4)() = &A::vf2;
-// CHECK: @pc2 = global %0 { i64 ptrtoint (void (%struct.A*)* @_ZN1A1fEv to i64), i64 16 }, align 8
+// CHECK: @pc2 = global { i64, i64 } { i64 ptrtoint (void (%struct.A*)* @_ZN1A1fEv to i64), i64 16 }, align 8
 void (C::*pc2)() = &C::f;
-// CHECK: @pc3 = global %0 { i64 1, i64 0 }, align 8
+// CHECK: @pc3 = global { i64, i64 } { i64 1, i64 0 }, align 8
 void (A::*pc3)() = &A::vf1;
 void f() {
- // CHECK: store %0 zeroinitializer, %0* @pa
+ // CHECK: store { i64, i64 } zeroinitializer, { i64, i64 }* @pa
 pa = 0;
 // Is this okay? What are LLVM's volatile semantics for structs?
- // CHECK: volatile store %0 zeroinitializer, %0* @vpa
+ // CHECK: volatile store { i64, i64 } zeroinitializer, { i64, i64 }* @vpa
 vpa = 0;
- // CHECK: [[TMP:%.*]] = load %0* @pa, align 8
- // CHECK: [[TMPADJ:%.*]] = extractvalue %0 [[TMP]], 1
+ // CHECK: [[TMP:%.*]] = load { i64, i64 }* @pa, align 8
+ // CHECK: [[TMPADJ:%.*]] = extractvalue { i64, i64 } [[TMP]], 1
 // CHECK: [[ADJ:%.*]] = add nsw i64 [[TMPADJ]], 16
- // CHECK: [[RES:%.*]] = insertvalue %0 [[TMP]], i64 [[ADJ]], 1
- // CHECK: store %0 [[RES]], %0* @pc, align 8
+ // CHECK: [[RES:%.*]] = insertvalue { i64, i64 } [[TMP]], i64 [[ADJ]], 1
+ // CHECK: store { i64, i64 } [[RES]], { i64, i64 }* @pc, align 8
 pc = pa;
- // CHECK: [[TMP:%.*]] = load %0* @pc, align 8
- // CHECK: [[TMPADJ:%.*]] = extractvalue %0 [[TMP]], 1
+ // CHECK: [[TMP:%.*]] = load { i64, i64 }* @pc, align 8
+ // CHECK: [[TMPADJ:%.*]] = extractvalue { i64, i64 } [[TMP]], 1
 // CHECK: [[ADJ:%.*]] = sub nsw i64 [[TMPADJ]], 16
- // CHECK: [[RES:%.*]] = insertvalue %0 [[TMP]], i64 [[ADJ]], 1
- // CHECK: store %0 [[RES]], %0* @pa, align 8
+ // CHECK: [[RES:%.*]] = insertvalue { i64, i64 } [[TMP]], i64 [[ADJ]], 1
+ // CHECK: store { i64, i64 } [[RES]], { i64, i64 }* @pa, align 8
 pa = static_cast<void (A::*)()>(pc);
 }
 void f2() {
- // CHECK: store %0 { i64 ptrtoint (void (%struct.A*)* @_ZN1A1fEv to i64), i64 0 }
+ // CHECK: store { i64, i64 } { i64 ptrtoint (void (%struct.A*)* @_ZN1A1fEv to i64), i64 0 }
 void (A::*pa2)() = &A::f;
- // CHECK: store %0 { i64 1, i64 0 }
- // CHECK-LP32: store %0 { i32 1, i32 0 }
+ // CHECK: store { i64, i64 } { i64 1, i64 0 }
+ // CHECK-LP32: store { i32, i32 } { i32 1, i32 0 }
 void (A::*pa3)() = &A::vf1;
- // CHECK: store %0 { i64 9, i64 0 }
- // CHECK-LP32: store %0 { i32 5, i32 0 }
+ // CHECK: store { i64, i64 } { i64 9, i64 0 }
+ // CHECK-LP32: store { i32, i32 } { i32 5, i32 0 }
 void (A::*pa4)() = &A::vf2;
 }
diff --git a/test/CodeGenCXX/new-overflow.cpp b/test/CodeGenCXX/new-overflow.cpp
index e16b30e540..fd56d5e902 100644
--- a/test/CodeGenCXX/new-overflow.cpp
+++ b/test/CodeGenCXX/new-overflow.cpp
@@ -2,8 +2,6 @@
 // rdar://problem/9246208
-// CHECK: [[OVR_T:%.*]] = type { i32, i1 }
-
 // Basic test.
 namespace test0 {
 struct A {
@@ -15,9 +13,9 @@ namespace test0 {
 // CHECK: define [[A:%.*]]* @_ZN5test04testEs(i16 signext
 // CHECK: [[N:%.*]] = sext i16 {{%.*}} to i32
- // CHECK-NEXT: [[T0:%.*]] = call [[OVR_T]] @llvm.umul.with.overflow.i32(i32 [[N]], i32 4)
- // CHECK-NEXT: [[T1:%.*]] = extractvalue [[OVR_T]] [[T0]], 1
- // CHECK-NEXT: [[T2:%.*]] = extractvalue [[OVR_T]] [[T0]], 0
+ // CHECK-NEXT: [[T0:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[N]], i32 4)
+ // CHECK-NEXT: [[T1:%.*]] = extractvalue { i32, i1 } [[T0]], 1
+ // CHECK-NEXT: [[T2:%.*]] = extractvalue { i32, i1 } [[T0]], 0
 // CHECK-NEXT: [[T3:%.*]] = select i1 [[T1]], i32 -1, i32 [[T2]]
 // CHECK-NEXT: call noalias i8* @_Znaj(i32 [[T3]])
 // CHECK: icmp ult i32 {{.*}}, [[N]]
@@ -37,9 +35,9 @@ namespace test1 {
 // CHECK: define [100 x [[A:%.*]]]* @_ZN5test14testEs(i16 signext
 // CHECK: [[N:%.*]] = sext i16 {{%.*}} to i32
- // CHECK-NEXT: [[T0:%.*]] = call [[OVR_T]] @llvm.umul.with.overflow.i32(i32 [[N]], i32 400)
- // CHECK-NEXT: [[T1:%.*]] = extractvalue [[OVR_T]] [[T0]], 1
- // CHECK-NEXT: [[T2:%.*]] = extractvalue [[OVR_T]] [[T0]], 0
+ // CHECK-NEXT: [[T0:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[N]], i32 400)
+ // CHECK-NEXT: [[T1:%.*]] = extractvalue { i32, i1 } [[T0]], 1
+ // CHECK-NEXT: [[T2:%.*]] = extractvalue { i32, i1 } [[T0]], 0
 // CHECK-NEXT: [[T3:%.*]] = mul i32 [[N]], 100
 // CHECK-NEXT: [[T4:%.*]] = select i1 [[T1]], i32 -1, i32 [[T2]]
 // CHECK-NEXT: call noalias i8* @_Znaj(i32 [[T4]])
@@ -61,14 +59,14 @@ namespace test2 {
 // CHECK: define [100 x [[A:%.*]]]* @_ZN5test24testEs(i16 signext
 // CHECK: [[N:%.*]] = sext i16 {{%.*}} to i32
- // CHECK-NEXT: [[T0:%.*]] = call [[OVR_T]] @llvm.umul.with.overflow.i32(i32 [[N]], i32 400)
- // CHECK-NEXT: [[T1:%.*]] = extractvalue [[OVR_T]] [[T0]], 1
- // CHECK-NEXT: [[T2:%.*]] = extractvalue [[OVR_T]] [[T0]], 0
+ // CHECK-NEXT: [[T0:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[N]], i32 400)
+ // CHECK-NEXT: [[T1:%.*]] = extractvalue { i32, i1 } [[T0]], 1
+ // CHECK-NEXT: [[T2:%.*]] = extractvalue { i32, i1 } [[T0]], 0
 // CHECK-NEXT: [[T3:%.*]] = mul i32 [[N]], 100
- // CHECK-NEXT: [[T4:%.*]] = call [[OVR_T]] @llvm.uadd.with.overflow.i32(i32 [[T2]], i32 4)
- // CHECK-NEXT: [[T5:%.*]] = extractvalue [[OVR_T]] [[T4]], 1
+ // CHECK-NEXT: [[T4:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[T2]], i32 4)
+ // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T4]], 1
 // CHECK-NEXT: [[T6:%.*]] = or i1 [[T1]], [[T5]]
- // CHECK-NEXT: [[T7:%.*]] = extractvalue [[OVR_T]] [[T4]], 0
+ // CHECK-NEXT: [[T7:%.*]] = extractvalue { i32, i1 } [[T4]], 0
 // CHECK-NEXT: [[T8:%.*]] = select i1 [[T6]], i32 -1, i32 [[T7]]
 // CHECK-NEXT: call noalias i8* @_Znaj(i32 [[T8]])
 // CHECK: icmp ult i32 {{.*}}, [[T3]]
@@ -126,9 +124,9 @@ namespace test6 {
 // CHECK: define [[A:%.*]]* @_ZN5test64testEt(i16 zeroext
 // CHECK: [[N:%.*]] = zext i16 {{%.*}} to i32
- // CHECK-NEXT: [[T0:%.*]] = call [[OVR_T]] @llvm.umul.with.overflow.i32(i32 [[N]], i32 4)
- // CHECK-NEXT: [[T1:%.*]] = extractvalue [[OVR_T]] [[T0]], 1
- // CHECK-NEXT: [[T2:%.*]] = extractvalue [[OVR_T]] [[T0]], 0
+ // CHECK-NEXT: [[T0:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[N]], i32 4)
+ // CHECK-NEXT: [[T1:%.*]] = extractvalue { i32, i1 } [[T0]], 1
+ // CHECK-NEXT: [[T2:%.*]] = extractvalue { i32, i1 } [[T0]], 0
 // CHECK-NEXT: [[T3:%.*]] = select i1 [[T1]], i32 -1, i32 [[T2]]
 // CHECK-NEXT: call noalias i8* @_Znaj(i32 [[T3]])
 // CHECK: icmp ult i32 {{.*}}, [[N]]
@@ -148,9 +146,9 @@ namespace test7 {
 // CHECK: define [100 x [[A:%.*]]]* @_ZN5test74testEt(i16 zeroext
 // CHECK: [[N:%.*]] = zext i16 {{%.*}} to i32
- // CHECK-NEXT: [[T0:%.*]] = call [[OVR_T]] @llvm.umul.with.overflow.i32(i32 [[N]], i32 400)
- // CHECK-NEXT: [[T1:%.*]] = extractvalue [[OVR_T]] [[T0]], 1
- // CHECK-NEXT: [[T2:%.*]] = extractvalue [[OVR_T]] [[T0]], 0
+ // CHECK-NEXT: [[T0:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[N]], i32 400)
+ // CHECK-NEXT: [[T1:%.*]] = extractvalue { i32, i1 } [[T0]], 1
+ // CHECK-NEXT: [[T2:%.*]] = extractvalue { i32, i1 } [[T0]], 0
 // CHECK-NEXT: [[T3:%.*]] = mul i32 [[N]], 100
 // CHECK-NEXT: [[T4:%.*]] = select i1 [[T1]], i32 -1, i32 [[T2]]
 // CHECK-NEXT: call noalias i8* @_Znaj(i32 [[T4]])
@@ -173,10 +171,10 @@ namespace test8 {
 // CHECK: [[N:%.*]] = load i64*
 // CHECK-NEXT: [[T0:%.*]] = icmp uge i64 [[N]], 4294967296
 // CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32
- // CHECK-NEXT: [[T2:%.*]] = call [[OVR_T]] @llvm.umul.with.overflow.i32(i32 [[T1]], i32 4)
- // CHECK-NEXT: [[T3:%.*]] = extractvalue [[OVR_T]] [[T2]], 1
+ // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[T1]], i32 4)
+ // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 1
 // CHECK-NEXT: [[T4:%.*]] = or i1 [[T0]], [[T3]]
- // CHECK-NEXT: [[T5:%.*]] = extractvalue [[OVR_T]] [[T2]], 0
+ // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T2]], 0
 // CHECK-NEXT: [[T6:%.*]] = select i1 [[T4]], i32 -1, i32 [[T5]]
 // CHECK-NEXT: call noalias i8* @_Znaj(i32 [[T6]])
 // CHECK: icmp ult i32 {{.*}}, [[T1]]
@@ -198,10 +196,10 @@ namespace test9 {
 // CHECK: [[N:%.*]] = load i64*
 // CHECK-NEXT: [[T0:%.*]] = icmp uge i64 [[N]], 4294967296
 // CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32
- // CHECK-NEXT: [[T2:%.*]] = call [[OVR_T]] @llvm.umul.with.overflow.i32(i32 [[T1]], i32 4)
- // CHECK-NEXT: [[T3:%.*]] = extractvalue [[OVR_T]] [[T2]], 1
+ // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[T1]], i32 4)
+ // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 1
 // CHECK-NEXT: [[T4:%.*]] = or i1 [[T0]], [[T3]]
- // CHECK-NEXT: [[T5:%.*]] = extractvalue [[OVR_T]] [[T2]], 0
+ // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T2]], 0
 // CHECK-NEXT: [[T6:%.*]] = select i1 [[T4]], i32 -1, i32 [[T5]]
 // CHECK-NEXT: call noalias i8* @_Znaj(i32 [[T6]])
 // CHECK: icmp ult i32 {{.*}}, [[T1]]
diff --git a/test/CodeGenCXX/pointers-to-data-members.cpp b/test/CodeGenCXX/pointers-to-data-members.cpp
index 2a22d23e50..e0250410a4 100644
--- a/test/CodeGenCXX/pointers-to-data-members.cpp
+++ b/test/CodeGenCXX/pointers-to-data-members.cpp
@@ -22,7 +22,7 @@ namespace ZeroInit {
 // CHECK-GLOBAL: @_ZN8ZeroInit1bE = global i64 -1,
 int A::* b = 0;
- // CHECK-GLOBAL: @_ZN8ZeroInit2saE = internal global %struct.anon { i64 -1 }
+ // CHECK-GLOBAL: @_ZN8ZeroInit2saE = internal global %"struct.ZeroInit::<anonymous>" { i64 -1 }
 struct {
 int A::*a;
 } sa;
@@ -35,7 +35,7 @@ namespace ZeroInit {
 } ssa[2];
 void test_ssa() { (void) ssa; }
- // CHECK-GLOBAL: @_ZN8ZeroInit2ssE = internal global %1 { %struct.anon { i64 -1 } }
+ // CHECK-GLOBAL: @_ZN8ZeroInit2ssE = internal global %"struct.ZeroInit::<anonymous>.1" { %"struct.ZeroInit::<anonymous struct>::<anonymous>" { i64 -1 } }
 struct {
 struct {
 int A::*pa;
@@ -55,7 +55,7 @@ namespace ZeroInit {
 };
 struct C : A, B { int j; };
- // CHECK-GLOBAL: @_ZN8ZeroInit1cE = global {{%.*}} { %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.ZeroInit::B" { [10 x %"struct.PR7139::ptr_to_member_struct"] [%"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }, %"struct.PR7139::ptr_to_member_struct" { i64 -1, i32 0 }], i8 0, i64 -1 }, i32 0 }, align 8
+ // CHECK-GLOBAL: @_ZN8ZeroInit1cE = global {{%.*}} { %"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::B" { [10 x %"struct.ZeroInit::A"] [%"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::A" { i64 -1, i32 0 }, %"struct.ZeroInit::A" { i64 -1, i32 0 }], i8 0, i64 -1 }, i32 0 }, align 8
 C c;
 }
@@ -227,6 +227,6 @@ namespace test4 {
 struct C : virtual B { int *C_p; };
 struct D : C { int *D_p; };
- // CHECK-GLOBAL: @_ZN5test41dE = global %"struct.test4::D" { %"struct.test4::C.base" zeroinitializer, i32* null, %"struct.VirtualBases::C.base" { i32 (...)** null, i64 -1 }, %"struct.test4::A" zeroinitializer }, align 8
+ // CHECK-GLOBAL: @_ZN5test41dE = global %"struct.test4::D" { %"struct.test4::C.base" zeroinitializer, i32* null, %"struct.test4::B.base" { i32 (...)** null, i64 -1 }, %"struct.test4::A" zeroinitializer }, align 8
 D d;
 }
diff --git a/test/CodeGenCXX/references.cpp b/test/CodeGenCXX/references.cpp
index d53815ded7..4bbc251ab5 100644
--- a/test/CodeGenCXX/references.cpp
+++ b/test/CodeGenCXX/references.cpp
@@ -235,7 +235,7 @@ struct A {
 };
 // CHECK: define internal void @__cxx_global_var_init
-// CHECK: call void @_ZN2N31AC1Ei(%"class.N2::X"* @_ZGRN2N35sA123E, i32 123)
+// CHECK: call void @_ZN2N31AC1Ei(%"struct.N3::A"* @_ZGRN2N35sA123E, i32 123)
 // CHECK: call i32 @__cxa_atexit
 // CHECK: ret void
 const A &sA123 = A(123);
@@ -250,7 +250,7 @@ struct A {
 void f() {
 // CHECK: define void @_ZN2N41fEv
- // CHECK: call void @_ZN2N41AC1Ev(%"class.N2::X"* @_ZGRZN2N41fEvE2ar)
+ // CHECK: call void @_ZN2N41AC1Ev(%"struct.N4::A"* @_ZGRZN2N41fEvE2ar)
 // CHECK: call i32 @__cxa_atexit
 // CHECK: ret void
 static const A& ar = A();
diff --git a/test/CodeGenCXX/static-init-3.cpp b/test/CodeGenCXX/static-init-3.cpp
index bd717caaa1..dc28d5a32a 100644
--- a/test/CodeGenCXX/static-init-3.cpp
+++ b/test/CodeGenCXX/static-init-3.cpp
@@ -16,8 +16,8 @@ struct X1
 }
 };
-// CHECK: @_ZN2X1I2X2I1BEE8instanceE = weak_odr global %struct.X0* null, align 8
-// CHECJ: @_ZN2X1I2X2I1AEE8instanceE = weak_odr global %struct.X0* null, align 8
+// CHECK: @_ZN2X1I2X2I1BEE8instanceE = weak_odr global %struct.X2* null, align 8
+// CHECJ: @_ZN2X1I2X2I1AEE8instanceE = weak_odr global %struct.X2* null, align 8
 template<class T> T & X1<T>::instance = X1<T>::get();
 class A { };
diff --git a/test/CodeGenCXX/template-anonymous-types.cpp b/test/CodeGenCXX/template-anonymous-types.cpp
index 68bdc0c4a4..c684fa798c 100644
--- a/test/CodeGenCXX/template-anonymous-types.cpp
+++ b/test/CodeGenCXX/template-anonymous-types.cpp
@@ -28,8 +28,8 @@ void test() {
 // reverse order.
 //
 // BAR's instantiation of X:
- // CHECK: define internal i32 @"_ZN1XIN1S3$_1EE1fEv"(%struct.X* %this)
- // CHECK: define internal void @"_ZN1XIN1S3$_1EEC2ES1_"(%struct.X* %this, i32 %t) unnamed_addr
+ // CHECK: define internal i32 @"_ZN1XIN1S3$_1EE1fEv"(%struct.X.0* %this)
+ // CHECK: define internal void @"_ZN1XIN1S3$_1EEC2ES1_"(%struct.X.0* %this, i32 %t) unnamed_addr
 //
 // FOO's instantiation of X:
 // CHECK: define internal i32 @"_ZN1XIN1S3$_0EE1fEv"(%struct.X* %this)
diff --git a/test/CodeGenCXX/virt-call-offsets.cpp b/test/CodeGenCXX/virt-call-offsets.cpp
index 3eb6b5da7d..31c227680b 100644
--- a/test/CodeGenCXX/virt-call-offsets.cpp
+++ b/test/CodeGenCXX/virt-call-offsets.cpp
@@ -5,4 +5,4 @@ struct B : A {};
 struct C : B { virtual void a(); };
 void (C::*x)() = &C::a;
-// CHECK: @x = global %0 { i{{[0-9]+}} 1, i{{[0-9]+}} 0 }
+// CHECK: @x = global { i64, i64 } { i{{[0-9]+}} 1, i{{[0-9]+}} 0 }
diff --git a/test/CodeGenCXX/virtual-bases.cpp b/test/CodeGenCXX/virtual-bases.cpp
index cfb4c83716..c9f13f853d 100644
--- a/test/CodeGenCXX/virtual-bases.cpp
+++ b/test/CodeGenCXX/virtual-bases.cpp
@@ -20,8 +20,8 @@ struct C : virtual A {
 C(bool);
 };
-// CHECK: define void @_ZN1CC1Eb(%struct.B* %this, i1 zeroext) unnamed_addr
-// CHECK: define void @_ZN1CC2Eb(%struct.B* %this, i8** %vtt, i1 zeroext) unnamed_addr
+// CHECK: define void @_ZN1CC1Eb(%struct.C* %this, i1 zeroext) unnamed_addr
+// CHECK: define void @_ZN1CC2Eb(%struct.C* %this, i8** %vtt, i1 zeroext) unnamed_addr
 C::C(bool) { }
 // PR6251
diff --git a/test/CodeGenCXX/virtual-functions-incomplete-types.cpp b/test/CodeGenCXX/virtual-functions-incomplete-types.cpp
index 052a0192d6..afa658f7d3 100644
--- a/test/CodeGenCXX/virtual-functions-incomplete-types.cpp
+++ b/test/CodeGenCXX/virtual-functions-incomplete-types.cpp
@@ -9,7 +9,7 @@ struct B {
 void B::f() { }
-// CHECK: define i32 @_ZN1D1gEv(%struct.B* %this)
+// CHECK: define i32 @_ZN1D1gEv(%struct.D* %this)
 // CHECK: declare void @_ZN1B1gEv()
 struct C;
diff --git a/test/CodeGenCXX/volatile-1.cpp b/test/CodeGenCXX/volatile-1.cpp
index 3ae17bd41b..1a69648d42 100644
--- a/test/CodeGenCXX/volatile-1.cpp
+++ b/test/CodeGenCXX/volatile-1.cpp
@@ -4,7 +4,7 @@ volatile int i, j, k;
 volatile int ar[5];
 volatile char c;
-// CHECK: @ci = global [[CINT:%.*]] zeroinitializer
+// CHECK: @ci = global [[CINT:.*]] zeroinitializer
 volatile _Complex int ci;
 volatile struct S {
 #ifdef __cplusplus
diff --git a/test/CodeGenCXX/vtable-pointer-initialization.cpp b/test/CodeGenCXX/vtable-pointer-initialization.cpp
index f629c2db79..9b1eaa5ba7 100644
--- a/test/CodeGenCXX/vtable-pointer-initialization.cpp
+++ b/test/CodeGenCXX/vtable-pointer-initialization.cpp
@@ -41,16 +41,16 @@ struct B : Base {
 void f() { B b; }
-// CHECK: define linkonce_odr void @_ZN1BC1Ev(%struct.A* %this) unnamed_addr
+// CHECK: define linkonce_odr void @_ZN1BC1Ev(%struct.B* %this) unnamed_addr
 // CHECK: call void @_ZN1BC2Ev(
-// CHECK: define linkonce_odr void @_ZN1BD1Ev(%struct.A* %this) unnamed_addr
+// CHECK: define linkonce_odr void @_ZN1BD1Ev(%struct.B* %this) unnamed_addr
 // CHECK: store i8** getelementptr inbounds ([3 x i8*]* @_ZTV1B, i64 0, i64 2)
 // CHECK: call void @_ZN5FieldD1Ev(
 // CHECK: call void @_ZN4BaseD2Ev(
 // CHECK: ret void
-// CHECK: define linkonce_odr void @_ZN1BC2Ev(%struct.A* %this) unnamed_addr
+// CHECK: define linkonce_odr void @_ZN1BC2Ev(%struct.B* %this) unnamed_addr
 // CHECK: call void @_ZN4BaseC2Ev(
 // CHECK: store i8** getelementptr inbounds ([3 x i8*]* @_ZTV1B, i64 0, i64 2)
 // CHECK: call void @_ZN5FieldC1Ev
diff --git a/test/CodeGenCXX/x86_32-arguments.cpp b/test/CodeGenCXX/x86_32-arguments.cpp
index 4c0603346f..4d4339381d 100644
--- a/test/CodeGenCXX/x86_32-arguments.cpp
+++ b/test/CodeGenCXX/x86_32-arguments.cpp
@@ -89,7 +89,7 @@ struct s5 { s5(); int &x; };
 s5 f5() { return s5(); }
 // CHECK: define i32 @_Z4f6_0M2s6i(i32 %a)
-// CHECK: define i64 @_Z4f6_1M2s6FivE(%{{.*}} byval align 4)
+// CHECK: define i64 @_Z4f6_1M2s6FivE({ i32, i32 }* byval align 4)
 // FIXME: It would be nice to avoid byval on the previous case.
 struct s6 {};
 typedef int s6::* s6_mdp;
diff --git a/test/CodeGenObjC/arc-foreach.m b/test/CodeGenObjC/arc-foreach.m
index 3dd8b690be..89e05d59e9 100644
--- a/test/CodeGenObjC/arc-foreach.m
+++ b/test/CodeGenObjC/arc-foreach.m
@@ -21,7 +21,7 @@ void test0(NSArray *array) {
 // CHECK-LP64-NEXT: [[X:%.*]] = alloca i8*,
 // CHECK-LP64-NEXT: [[STATE:%.*]] = alloca [[STATE_T:%.*]],
 // CHECK-LP64-NEXT: alloca [16 x i8*], align 8
-// CHECK-LP64-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+// CHECK-LP64-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[STATE_T]]* [[STATE]], i32 0, i32 1
 // CHECK-LP64-NEXT: [[T1:%.*]] = load i8*** [[T0]]
@@ -55,7 +55,7 @@ void test1(NSArray *array) {
 // CHECK-LP64-NEXT: [[X:%.*]] = alloca i8*,
 // CHECK-LP64-NEXT: [[STATE:%.*]] = alloca [[STATE_T:%.*]],
 // CHECK-LP64-NEXT: alloca [16 x i8*], align 8
-// CHECK-LP64-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+// CHECK-LP64-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[STATE_T]]* [[STATE]], i32 0, i32 1
 // CHECK-LP64-NEXT: [[T1:%.*]] = load i8*** [[T0]]
diff --git a/test/CodeGenObjC/arc.m b/test/CodeGenObjC/arc.m
index 479f0d2312..ac7dad31d0 100644
--- a/test/CodeGenObjC/arc.m
+++ b/test/CodeGenObjC/arc.m
@@ -616,7 +616,7 @@ void test22(_Bool cond) {
 int (^test25(int x))(void) {
 // CHECK: define i32 ()* @test25(
 // CHECK: [[X:%.*]] = alloca i32,
- // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+ // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK-NEXT: store i32 {{%.*}}, i32* [[X]]
 // CHECK: [[T0:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to i32 ()*
 // CHECK-NEXT: [[T1:%.*]] = bitcast i32 ()* [[T0]] to i8*
@@ -884,7 +884,7 @@ char *helper;
 void test31(id x) {
 // CHECK: define void @test31(
 // CHECK: [[X:%.*]] = alloca i8*,
-// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK-NEXT: [[PARM:%.*]] = call i8* @objc_retain(i8* {{%.*}})
 // CHECK-NEXT: store i8* [[PARM]], i8** [[X]]
 // CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
@@ -1159,7 +1159,7 @@ void test38(void) {
 // CHECK: define void @test38()
 // CHECK: [[VAR:%.*]] = alloca [[BYREF_T:%.*]],
- // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+ // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK: [[T0:%.*]] = getelementptr inbounds [[BYREF_T]]* [[VAR]], i32 0, i32 2
 // 0x02000000 - has copy/dispose helpers
 // CHECK-NEXT: store i32 33554432, i32* [[T0]]
@@ -1194,7 +1194,7 @@ void test38(void) {
 // CHECK-NEXT: call void @objc_release(i8* [[T1]])
 // CHECK: define internal void @__test38_block_invoke_
- // CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BYREF_T]]* {{%.*}}, i32 0, i32 6
+ // CHECK: [[SLOT:%.*]] = getelementptr inbounds {{.*}}, i32 0, i32 6
 // CHECK-NEXT: [[T0:%.*]] = load i8** [[SLOT]], align 8
 // CHECK-NEXT: store i8* null, i8** [[SLOT]],
 // CHECK-NEXT: call void @objc_release(i8* [[T0]])
@@ -1215,7 +1215,7 @@ void test39(void) {
 // CHECK: define void @test39()
 // CHECK: [[VAR:%.*]] = alloca i8*
- // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+ // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK-NEXT: [[T0:%.*]] = call i8* @test39_source()
 // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]])
 // CHECK-NEXT: store i8* [[T1]], i8** [[VAR]],
@@ -1238,7 +1238,7 @@ void test40(void) {
 // CHECK: define void @test40()
 // CHECK: [[VAR:%.*]] = alloca [[BYREF_T:%.*]],
- // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+ // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK: [[T0:%.*]] = getelementptr inbounds [[BYREF_T]]* [[VAR]], i32 0, i32 2
 // 0x02000000 - has copy/dispose helpers
 // CHECK-NEXT: store i32 33554432, i32* [[T0]]
@@ -1270,7 +1270,7 @@ void test40(void) {
 // CHECK-NEXT: call void @objc_destroyWeak(i8** [[T0]])
 // CHECK: define internal void @__test40_block_invoke_
- // CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BYREF_T]]* {{%.*}}, i32 0, i32 6
+ // CHECK: [[SLOT:%.*]] = getelementptr inbounds {{.*}}, i32 0, i32 6
 // CHECK-NEXT: call i8* @objc_storeWeak(i8** [[SLOT]], i8* null)
 // CHECK-NEXT: ret void
@@ -1292,7 +1292,7 @@ void test41(void) {
 // CHECK: define void @test41()
 // CHECK: [[VAR:%.*]] = alloca i8*,
- // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+ // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK: [[T0:%.*]] = call i8* @test41_source()
 // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]])
 // CHECK-NEXT: call i8* @objc_initWeak(i8** [[VAR]], i8* [[T1]])
@@ -1329,7 +1329,7 @@ void test41(void) {
 // CHECK: define internal void @"\01-[Test42 test]"
 // CHECK: [[SELF:%.*]] = alloca [[TEST42:%.*]]*,
 // CHECK-NEXT: alloca i8*
-// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK-NEXT: store
 // CHECK-NEXT: store
 // CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
diff --git a/test/CodeGenObjC/blocks.m b/test/CodeGenObjC/blocks.m
index 151c162342..47d47cca2e 100644
--- a/test/CodeGenObjC/blocks.m
+++ b/test/CodeGenObjC/blocks.m
@@ -46,7 +46,7 @@ void test2(Test2 *x) {
 // CHECK: define void @test2(
 // CHECK: [[X:%.*]] = alloca [[TEST2:%.*]]*,
 // CHECK-NEXT: [[WEAKX:%.*]] = alloca [[WEAK_T:%.*]],
- // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:%.*]],
+ // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
 // CHECK-NEXT: store [[TEST2]]*
 // isa=1 for weak byrefs.
@@ -95,8 +95,8 @@ void test2(Test2 *x) {
 // CHECK: [[BLOCK:%.*]] = bitcast i8* {{%.*}} to [[BLOCK_T]]*
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
 // CHECK-NEXT: [[T1:%.*]] = load i8** [[T0]]
-// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[WEAK_T]]*
-// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [[WEAK_T]]* [[T2]], i32 0, i32 1
-// CHECK-NEXT: [[T4:%.*]] = load [[WEAK_T]]** [[T3]]
-// CHECK-NEXT: [[WEAKX:%.*]] = getelementptr inbounds [[WEAK_T]]* [[T4]], i32 0, i32 6
+// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[WEAK_T]]{{.*}}*
+// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [[WEAK_T]]{{.*}}* [[T2]], i32 0, i32 1
+// CHECK-NEXT: [[T4:%.*]] = load [[WEAK_T]]{{.*}}** [[T3]]
+// CHECK-NEXT: [[WEAKX:%.*]] = getelementptr inbounds [[WEAK_T]]{{.*}}* [[T4]], i32 0, i32 6
 // CHECK-NEXT: [[T0:%.*]] = load [[TEST2]]** [[WEAKX]], align 4
diff --git a/test/CodeGenObjC/property-list-in-class.m b/test/CodeGenObjC/property-list-in-class.m
index a5d0dc851d..05210588ba 100644
--- a/test/CodeGenObjC/property-list-in-class.m
+++ b/test/CodeGenObjC/property-list-in-class.m
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fobjc-nonfragile-abi -emit-llvm -o %t %s
-// RUN: grep -F 'l_OBJC_$_PROP_LIST_C2" = internal global %8 { i32 16, i32 3' %t
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fobjc-nonfragile-abi -emit-llvm %s -o - | FileCheck %s
+// CHECK: l_OBJC_$_PROP_LIST_C2" = internal global { i32, i32, [3 x %struct._prop_t] } { i32 16, i32 3
 @protocol P
 @property int i;
diff --git a/test/CodeGenObjC/variadic-sends.m b/test/CodeGenObjC/variadic-sends.m
index ea13823bc7..6b04b50ca1 100644
--- a/test/CodeGenObjC/variadic-sends.m
+++ b/test/CodeGenObjC/variadic-sends.m
@@ -28,13 +28,13 @@ void f2(A *a) {
 @interface B : A
 @end
 @implementation B : A
 -(void) foo {
- // CHECK-X86-32: call void bitcast (i8* (%struct._objc_method_description*, i8*, ...)* @objc_msgSendSuper to void (%struct._objc_method_description*, i8*, i32)*)
- // CHECK-X86-64: call void bitcast (i8* (%struct._objc_method_description*, i8*, ...)* @objc_msgSendSuper to void (%struct._objc_method_description*, i8*, i32)*)
+ // CHECK-X86-32: call void bitcast (i8* (%struct._objc_super*, i8*, ...)* @objc_msgSendSuper to void (%struct._objc_super*, i8*, i32)*)
+ // CHECK-X86-64: call void bitcast (i8* (%struct._objc_super*, i8*, ...)* @objc_msgSendSuper to void (%struct._objc_super*, i8*, i32)*)
 [super im1: 1];
 }
 -(void) bar {
- // CHECK-X86-32: call void (%struct._objc_method_description*, i8*, i32, i32, ...)* bitcast (i8* (%struct._objc_method_description*, i8*, ...)* @objc_msgSendSuper to void (%struct._objc_method_description*, i8*, i32, i32, ...)*)
- // CHECK-X86-64: call void (%struct._objc_method_description*, i8*, i32, i32, ...)* bitcast (i8* (%struct._objc_method_description*, i8*, ...)* @objc_msgSendSuper to void (%struct._objc_method_description*, i8*, i32, i32, ...)*)
+ // CHECK-X86-32: call void (%struct._objc_super*, i8*, i32, i32, ...)* bitcast (i8* (%struct._objc_super*, i8*, ...)* @objc_msgSendSuper to void (%struct._objc_super*, i8*, i32, i32, ...)*)
+ // CHECK-X86-64: call void (%struct._objc_super*, i8*, i32, i32, ...)* bitcast (i8* (%struct._objc_super*, i8*, ...)* @objc_msgSendSuper to void (%struct._objc_super*, i8*, i32, i32, ...)*)
 [super im2: 1, 2];
 }
diff --git a/test/CodeGenObjCXX/property-object-conditional-exp.mm b/test/CodeGenObjCXX/property-object-conditional-exp.mm
index a3c1027ed7..5d8a882635 100644
--- a/test/CodeGenObjCXX/property-object-conditional-exp.mm
+++ b/test/CodeGenObjCXX/property-object-conditional-exp.mm
@@ -22,11 +22,12 @@ extern "C" bool CGRectIsEmpty(CGRect);
 CGRect dataRect;
 CGRect virtualBounds;
-// CHECK: [[SRC:%.*]] = call %struct.CGRect bitcast (i8* (i8*, i8*, ...)* @objc_msgSend
-// CHECK-NEXT:getelementptr %struct.CGRect* [[SRC:%.*]]
+// CHECK: [[SRC:%.*]] = call { i8*, i32 } bitcast (i8* (i8*, i8*, ...)* @objc_msgSend
+// CHECK-NEXT: bitcast
+// CHECK-NEXT:getelementptr { i8*, i32 }* [[SRC:%.*]]
 // CHECK-NEXT:extractvalue
 // CHECK-NEXT:store
-// CHECK-NEXT:getelementptr %struct.CGRect* [[SRC:%.*]]
+// CHECK-NEXT:getelementptr { i8*, i32 }* [[SRC:%.*]]
 // CHECK-NEXT:extractvalue
 // CHECK-NEXT:store
 dataRect = CGRectIsEmpty(virtualBounds) ? self.bounds : virtualBounds;
diff --git a/test/CodeGenObjCXX/property-objects.mm b/test/CodeGenObjCXX/property-objects.mm
index 8e98b0dae7..1f43117635 100644
--- a/test/CodeGenObjCXX/property-objects.mm
+++ b/test/CodeGenObjCXX/property-objects.mm
@@ -2,7 +2,7 @@
 // CHECK-NOT: callq _objc_msgSend_stret
 // CHECK: call void @_ZN1SC1ERKS_
 // CHECK: call %class.S* @_ZN1SaSERKS_
-// CHECK: call %class.S* @_ZN6CGRectaSERKS_
+// CHECK: call %struct.CGRect* @_ZN6CGRectaSERKS_
 class S {
 public: