//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
56 #include "clang/Sema/Initialization.h" 57 #include "clang/Sema/Lookup.h" 58 #include "clang/Sema/Ownership.h" 59 #include "clang/Sema/Scope.h" 60 #include "clang/Sema/ScopeInfo.h" 61 #include "clang/Sema/Sema.h" 62 #include "clang/Sema/SemaInternal.h" 63 #include "llvm/ADT/APFloat.h" 64 #include "llvm/ADT/APInt.h" 65 #include "llvm/ADT/APSInt.h" 66 #include "llvm/ADT/ArrayRef.h" 67 #include "llvm/ADT/DenseMap.h" 68 #include "llvm/ADT/FoldingSet.h" 69 #include "llvm/ADT/None.h" 70 #include "llvm/ADT/Optional.h" 71 #include "llvm/ADT/STLExtras.h" 72 #include "llvm/ADT/SmallBitVector.h" 73 #include "llvm/ADT/SmallPtrSet.h" 74 #include "llvm/ADT/SmallString.h" 75 #include "llvm/ADT/SmallVector.h" 76 #include "llvm/ADT/StringRef.h" 77 #include "llvm/ADT/StringSwitch.h" 78 #include "llvm/ADT/Triple.h" 79 #include "llvm/Support/AtomicOrdering.h" 80 #include "llvm/Support/Casting.h" 81 #include "llvm/Support/Compiler.h" 82 #include "llvm/Support/ConvertUTF.h" 83 #include "llvm/Support/ErrorHandling.h" 84 #include "llvm/Support/Format.h" 85 #include "llvm/Support/Locale.h" 86 #include "llvm/Support/MathExtras.h" 87 #include "llvm/Support/raw_ostream.h" 88 #include <algorithm> 89 #include <cassert> 90 #include <cstddef> 91 #include <cstdint> 92 #include <functional> 93 #include <limits> 94 #include <string> 95 #include <tuple> 96 #include <utility> 97 98 using namespace clang; 99 using namespace sema; 100 101 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 102 unsigned ByteNo) const { 103 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 104 Context.getTargetInfo()); 105 } 106 107 /// Checks that a call expression's argument count is the desired number. 108 /// This is useful when doing custom type-checking. Returns true on error. 109 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { 110 unsigned argCount = call->getNumArgs(); 111 if (argCount == desiredArgCount) return false; 112 113 if (argCount < desiredArgCount) 114 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args) 115 << 0 /*function call*/ << desiredArgCount << argCount 116 << call->getSourceRange(); 117 118 // Highlight all the excess arguments. 119 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(), 120 call->getArg(argCount - 1)->getEndLoc()); 121 122 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) 123 << 0 /*function call*/ << desiredArgCount << argCount 124 << call->getArg(1)->getSourceRange(); 125 } 126 127 /// Check that the first argument to __builtin_annotation is an integer 128 /// and the second argument is a non-wide string literal. 129 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 130 if (checkArgCount(S, TheCall, 2)) 131 return true; 132 133 // First argument should be an integer. 134 Expr *ValArg = TheCall->getArg(0); 135 QualType Ty = ValArg->getType(); 136 if (!Ty->isIntegerType()) { 137 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 138 << ValArg->getSourceRange(); 139 return true; 140 } 141 142 // Second argument should be a constant string. 
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = TheCall->getArg(I);
    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = TheCall->getArg(2);
    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
          !PtrTy->getPointeeType().isConstQualified())) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(2, Arg.get());
  }
  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in the
/// source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Analyze the format string of sprintf to see how much of buffer is used.
  //  - Evaluate strlen of strcpy arguments, use as object size.

  if (TheCall->isValueDependent() || TheCall->isTypeDependent())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the caller
  // (usually using __builtin_object_size). Use that value to check this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    const TargetInfo &TI = getASTContext().getTargetInfo();
    unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  Expr::EvalResult Result;
  Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
  if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
    return;
  llvm::APSInt UsedSize = Result.Val.getInt();

  if (UsedSize.ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.toString(/*Radix=*/10));
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args);
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // Check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // We have a block type; check the prototype.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // We can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // The last two cases have either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // Check the common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // Check the remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns the OpenCL access qualifier of the argument declaration, if any.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier does not match the read/write builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates that the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (Call->getNumArgs() != 1) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() ||
      RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  // * The type of the argument if it's not an array or function type,
  // Otherwise,
  // * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //  template <class T> struct Foo { T value; };
  //  Foo<int> *p = nullptr;
  //  auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI_setjmp:
  case Builtin::BI_setjmpex:
    if (checkArgCount(*this, TheCall, 1))
      return true;
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct: {
    // We first want to ensure we are called with 2 arguments
    if (checkArgCount(*this, TheCall, 2))
      return ExprError();
    // Ensure that the first argument is of type 'struct XX *'
    const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
    const QualType PtrArgType = PtrArg->getType();
    if (!PtrArgType->isPointerType() ||
        !PtrArgType->getPointeeType()->isRecordType()) {
      Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
          << "structure pointer";
      return ExprError();
    }

    // Ensure that the second argument is of type 'FunctionType'
    const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
    const QualType FnPtrArgType = FnPtrArg->getType();
    if (!FnPtrArgType->isPointerType()) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    const auto *FuncType =
        FnPtrArgType->getPointeeType()->getAs<FunctionType>();

    if (!FuncType) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
      if (!FT->getNumParams()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
      QualType PT = FT->getParamType(0);
      if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
          !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
          !PT->getPointeeType().isConstQualified()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
    }

    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::arm:
    case llvm::Triple::armeb:
    case llvm::Triple::thumb:
    case llvm::Triple::thumbeb:
      if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::aarch64:
    case llvm::Triple::aarch64_be:
      if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::hexagon:
      if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
      if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::systemz:
      if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::ppc:
    case llvm::Triple::ppc64:
    case llvm::Triple::ppc64le:
      if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    default:
      break;
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
Context.UnsignedLongLongTy 1603 : Context.LongLongTy; 1604 case NeonTypeFlags::Poly8: 1605 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 1606 case NeonTypeFlags::Poly16: 1607 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 1608 case NeonTypeFlags::Poly64: 1609 if (IsInt64Long) 1610 return Context.UnsignedLongTy; 1611 else 1612 return Context.UnsignedLongLongTy; 1613 case NeonTypeFlags::Poly128: 1614 break; 1615 case NeonTypeFlags::Float16: 1616 return Context.HalfTy; 1617 case NeonTypeFlags::Float32: 1618 return Context.FloatTy; 1619 case NeonTypeFlags::Float64: 1620 return Context.DoubleTy; 1621 } 1622 llvm_unreachable("Invalid NeonTypeFlag!"); 1623 } 1624 1625 bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 1626 llvm::APSInt Result; 1627 uint64_t mask = 0; 1628 unsigned TV = 0; 1629 int PtrArgNum = -1; 1630 bool HasConstPtr = false; 1631 switch (BuiltinID) { 1632 #define GET_NEON_OVERLOAD_CHECK 1633 #include "clang/Basic/arm_neon.inc" 1634 #include "clang/Basic/arm_fp16.inc" 1635 #undef GET_NEON_OVERLOAD_CHECK 1636 } 1637 1638 // For NEON intrinsics which are overloaded on vector element type, validate 1639 // the immediate which specifies which variant to emit. 1640 unsigned ImmArg = TheCall->getNumArgs()-1; 1641 if (mask) { 1642 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 1643 return true; 1644 1645 TV = Result.getLimitedValue(64); 1646 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 1647 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 1648 << TheCall->getArg(ImmArg)->getSourceRange(); 1649 } 1650 1651 if (PtrArgNum >= 0) { 1652 // Check that pointer arguments have the specified type. 1653 Expr *Arg = TheCall->getArg(PtrArgNum); 1654 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 1655 Arg = ICE->getSubExpr(); 1656 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 1657 QualType RHSTy = RHS.get()->getType(); 1658 1659 llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch(); 1660 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 1661 Arch == llvm::Triple::aarch64_be; 1662 bool IsInt64Long = 1663 Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong; 1664 QualType EltTy = 1665 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 1666 if (HasConstPtr) 1667 EltTy = EltTy.withConst(); 1668 QualType LHSTy = Context.getPointerType(EltTy); 1669 AssignConvertType ConvTy; 1670 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 1671 if (RHS.isInvalid()) 1672 return true; 1673 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 1674 RHS.get(), AA_Assigning)) 1675 return true; 1676 } 1677 1678 // For NEON intrinsics which take an immediate value as part of the 1679 // instruction, range check them here. 
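  // Illustrative example (user-level NEON code, not part of this file): the
  // lane operand of vgetq_lane_s32 must be a constant in [0, 3], so a call
  // such as
  //   int32x4_t V = vdupq_n_s32(0);
  //   int X = vgetq_lane_s32(V, 7);   // lane out of range
  // is rejected once the GET_NEON_IMMEDIATE_CHECK table below supplies the
  // (i, l, u) triple for that builtin and SemaBuiltinConstantArgRange flags
  // the violation.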
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
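  // For illustration (hypothetical user code): given
  //   volatile int *IP;            // integer pointee: accepted
  //   struct S { int A[4]; } *SP;  // aggregate pointee: rejected below
  // __builtin_arm_ldrex(IP) passes this check, while __builtin_arm_ldrex(SP)
  // is diagnosed with err_atomic_builtin_must_be_pointer_intfltptr.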
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}

bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
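  // As a concrete example of the ranges in the switch below:
  // __builtin_arm_ssat(X, N) requires the signed saturation position N to be
  // a constant in [1, 32], so __builtin_arm_ssat(X, 0) is rejected, whereas
  // the unsigned form __builtin_arm_usat(X, 0) is accepted because its range
  // is [0, 31].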
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  }
}

bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
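  // For example, the AArch64 barrier builtins take a 4-bit immediate:
  //   __builtin_arm_dmb(11);   // accepted: 11 is within [0, 15]
  //   __builtin_arm_dmb(16);   // rejected: outside [0, 15]
  // which is what the (l, u) = (0, 15) entry below encodes.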
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
  struct BuiltinAndString {
    unsigned BuiltinID;
    const char *Str;
  };

  static BuiltinAndString ValidCPU[] = {
    { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" },
    { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" },
    { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" },
    { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" },
  };

  static BuiltinAndString ValidHVX[] = {
    { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" },
    { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B,
"v60,v62,v65,v66" }, 1973 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" }, 1974 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" }, 1975 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" }, 1976 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" }, 1977 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" }, 1978 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" }, 1979 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" }, 1980 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" }, 1981 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" }, 1982 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" }, 1983 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" }, 1984 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" }, 1985 { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" }, 1986 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" }, 1987 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" }, 1988 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" }, 1989 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" }, 1990 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" }, 1991 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" }, 1992 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" }, 1993 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" }, 1994 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" }, 1995 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" }, 1996 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" }, 1997 { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" }, 1998 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" }, 1999 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" }, 2000 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" }, 2001 { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" }, 2002 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" }, 2003 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" }, 2004 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" }, 2005 { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" }, 2006 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" }, 2007 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" }, 2008 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" }, 2009 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" }, 2010 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" }, 2011 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" }, 2012 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" }, 2013 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" }, 2014 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" }, 2015 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" }, 2016 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" }, 2017 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" }, 2018 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" }, 2019 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" }, 2020 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" }, 2021 { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" }, 2022 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" }, 
2023 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" }, 2024 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" }, 2025 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" }, 2026 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" }, 2027 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" }, 2028 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" }, 2029 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" }, 2030 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" }, 2031 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" }, 2032 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" }, 2033 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" }, 2034 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" }, 2035 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" }, 2036 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" }, 2037 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" }, 2038 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" }, 2039 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" }, 2040 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" }, 2041 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" }, 2042 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" }, 2043 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" }, 2044 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" }, 2045 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" }, 2046 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" }, 2047 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" }, 2048 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" }, 2049 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" }, 2050 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" }, 2051 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" }, 2052 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" }, 2053 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" }, 2054 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" }, 2055 { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" }, 2056 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" }, 2057 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" }, 2058 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" }, 2059 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" }, 2060 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" }, 2061 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" }, 2062 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" }, 2063 { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" }, 2064 { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" }, 2065 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" }, 2066 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" }, 2067 { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" }, 2068 { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" }, 2069 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" }, 2070 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" }, 2071 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" }, 2072 { 
Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" }, 2073 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" }, 2074 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" }, 2075 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" }, 2076 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" }, 2077 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" }, 2078 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" }, 2079 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" }, 2080 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" }, 2081 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" }, 2082 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" }, 2083 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" }, 2084 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" }, 2085 { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" }, 2086 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" }, 2087 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" }, 2088 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" }, 2089 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" }, 2090 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" }, 2091 { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" }, 2092 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" }, 2093 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" }, 2094 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" }, 2095 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" }, 2096 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" }, 2097 { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" }, 2098 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" }, 2099 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" }, 2100 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" }, 2101 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" }, 2102 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" }, 2103 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" }, 2104 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" }, 2105 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" }, 2106 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" }, 2107 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" }, 2108 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" }, 2109 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" }, 2110 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" }, 2111 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" }, 2112 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" }, 2113 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" }, 2114 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" }, 2115 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" }, 2116 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" }, 2117 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" }, 2118 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" }, 2119 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" }, 2120 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" }, 2121 { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" }, 2122 { 
Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" }, 2123 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" }, 2124 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" }, 2125 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" }, 2126 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" }, 2127 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" }, 2128 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" }, 2129 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" }, 2130 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" }, 2131 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" }, 2132 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" }, 2133 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" }, 2134 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" }, 2135 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" }, 2136 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" }, 2137 { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" }, 2138 { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" }, 2139 { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" }, 2140 { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" }, 2141 { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" }, 2142 { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" }, 2143 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" }, 2144 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" }, 2145 { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" }, 2146 { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" }, 2147 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" }, 2148 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" }, 2149 { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" }, 2150 { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" }, 2151 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" }, 2152 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" }, 2153 { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" }, 2154 { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" }, 2155 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" }, 2156 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" }, 2157 { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" }, 2158 { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" }, 2159 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" }, 2160 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" }, 2161 { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" }, 2162 { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" }, 2163 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" }, 2164 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" }, 2165 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" }, 2166 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" }, 2167 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" }, 2168 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" }, 2169 { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" }, 2170 { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" }, 2171 { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" }, 2172 { 
Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" }, 2173 { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" }, 2174 { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" }, 2175 { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" }, 2176 { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" }, 2177 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" }, 2178 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" }, 2179 { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" }, 2180 { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" }, 2181 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" }, 2182 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" }, 2183 { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" }, 2184 { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" }, 2185 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" }, 2186 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" }, 2187 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" }, 2188 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" }, 2189 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" }, 2190 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" }, 2191 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" }, 2192 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" }, 2193 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" }, 2194 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" }, 2195 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" }, 2196 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" }, 2197 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" }, 2198 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" }, 2199 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" }, 2200 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" }, 2201 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" }, 2202 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" }, 2203 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" }, 2204 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" }, 2205 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" }, 2206 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" }, 2207 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" }, 2208 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" }, 2209 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" }, 2210 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" }, 2211 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" }, 2212 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" }, 2213 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" }, 2214 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" }, 2215 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" }, 2216 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" }, 2217 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" }, 2218 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" }, 2219 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, 
"v60,v62,v65,v66" }, 2220 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" }, 2221 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" }, 2222 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" }, 2223 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" }, 2224 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" }, 2225 { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" }, 2226 { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" }, 2227 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" }, 2228 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" }, 2229 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" }, 2230 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" }, 2231 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" }, 2232 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" }, 2233 { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" }, 2234 { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" }, 2235 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" }, 2236 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" }, 2237 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" }, 2238 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" }, 2239 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" }, 2240 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" }, 2241 { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" }, 2242 { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" }, 2243 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" }, 2244 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" }, 2245 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" }, 2246 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" }, 2247 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" }, 2248 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" }, 2249 { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" }, 2250 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" }, 2251 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" }, 2252 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" }, 2253 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" }, 2254 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" }, 2255 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" }, 2256 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" }, 2257 { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" }, 2258 { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" }, 2259 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" }, 2260 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" }, 2261 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" }, 2262 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" }, 2263 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" }, 2264 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" }, 2265 { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" }, 2266 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" }, 2267 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" }, 2268 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" }, 2269 { 
Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" }, 2270 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" }, 2271 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" }, 2272 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" }, 2273 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" }, 2274 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" }, 2275 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" }, 2276 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" }, 2277 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" }, 2278 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" }, 2279 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" }, 2280 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" }, 2281 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" }, 2282 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" }, 2283 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" }, 2284 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" }, 2285 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" }, 2286 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" }, 2287 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" }, 2288 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" }, 2289 { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" }, 2290 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" }, 2291 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" }, 2292 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" }, 2293 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" }, 2294 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" }, 2295 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" }, 2296 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" }, 2297 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" }, 2298 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" }, 2299 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" }, 2300 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" }, 2301 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" }, 2302 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" }, 2303 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" }, 2304 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" }, 2305 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" }, 2306 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" }, 2307 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" }, 2308 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" }, 2309 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" }, 2310 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" }, 2311 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" }, 2312 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" }, 2313 { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" }, 2314 { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" }, 2315 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" }, 2316 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" }, 2317 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" }, 2318 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" }, 2319 { 
Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" }, 2320 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" }, 2321 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" }, 2322 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" }, 2323 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" }, 2324 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" }, 2325 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" }, 2326 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" }, 2327 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" }, 2328 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" }, 2329 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" }, 2330 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" }, 2331 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" }, 2332 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" }, 2333 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" }, 2334 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" }, 2335 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" }, 2336 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" }, 2337 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" }, 2338 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" }, 2339 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" }, 2340 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" }, 2341 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" }, 2342 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" }, 2343 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" }, 2344 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" }, 2345 { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" }, 2346 { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" }, 2347 { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" }, 2348 { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" }, 2349 { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" }, 2350 { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" }, 2351 { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" }, 2352 { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" }, 2353 { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" }, 2354 { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" }, 2355 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" }, 2356 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" }, 2357 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" }, 2358 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" }, 2359 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" }, 2360 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" }, 2361 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" }, 2362 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" }, 2363 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" }, 2364 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" }, 2365 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" }, 2366 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" }, 2367 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" }, 2368 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" }, 2369 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" }, 2370 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" }, 2371 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" }, 2372 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" }, 2373 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" }, 2374 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" }, 2375 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" }, 2376 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" }, 2377 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" }, 2378 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" }, 2379 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" }, 2380 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" }, 2381 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" }, 2382 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" }, 2383 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" }, 2384 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" }, 2385 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" }, 2386 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" }, 2387 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" }, 2388 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" }, 2389 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" }, 2390 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" }, 2391 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" }, 2392 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" }, 2393 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" }, 2394 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" }, 2395 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" }, 2396 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" }, 2397 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" }, 2398 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" }, 2399 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" }, 2400 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" }, 2401 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" }, 2402 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" }, 2403 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" }, 2404 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" }, 2405 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" }, 2406 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" }, 2407 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" }, 2408 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" }, 2409 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" }, 2410 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" }, 2411 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" }, 2412 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" }, 2413 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" }, 2414 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" }, 2415 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" }, 2416 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" }, 2417 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" }, 2418 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" }, 2419 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" }, 2420 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" }, 2421 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" }, 2422 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" }, 2423 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" }, 2424 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" }, 2425 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" }, 2426 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" }, 2427 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" }, 2428 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" }, 2429 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" }, 2430 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" }, 2431 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" }, 2432 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" }, 2433 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" }, 2434 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" }, 2435 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" }, 2436 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" }, 2437 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" }, 2438 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" }, 2439 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" }, 2440 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" }, 2441 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" }, 2442 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" }, 2443 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" }, 2444 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" }, 2445 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" }, 2446 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" }, 2447 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" }, 2448 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65,v66" }, 2449 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" }, 2450 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" }, 2451 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" }, 2452 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" }, 2453 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" }, 2454 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" }, 2455 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" }, 2456 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" }, 2457 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" }, 2458 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" }, 2459 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" }, 2460 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" }, 2461 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" }, 2462 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" }, 2463 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" }, 2464 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" }, 2465 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" }, 2466 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" }, 2467 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" }, 2468 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" }, 2469 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" }, 2470 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" }, 2471 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" }, 2472 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" }, 2473 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" }, 2474 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" }, 2475 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" }, 2476 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" }, 2477 { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" }, 2478 { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" }, 2479 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" }, 2480 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" }, 2481 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" }, 2482 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" }, 2483 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" }, 2484 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" }, 2485 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" }, 2486 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" }, 2487 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" }, 2488 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" }, 2489 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" }, 2490 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" }, 2491 { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" }, 2492 { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" }, 2493 { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" }, 2494 { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" }, 2495 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" }, 2496 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" }, 2497 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" }, 2498 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" }, 2499 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" }, 2500 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" }, 2501 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" }, 2502 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" }, 2503 { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" }, 2504 { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" }, 2505 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" }, 2506 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" }, 2507 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" }, 2508 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" }, 2509 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" }, 2510 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" }, 2511 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" }, 2512 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" }, 2513 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" }, 2514 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" }, 2515 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" }, 2516 { 
Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" }, 2517 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" }, 2518 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" }, 2519 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" }, 2520 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" }, 2521 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" }, 2522 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" }, 2523 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" }, 2524 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" }, 2525 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" }, 2526 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" }, 2527 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" }, 2528 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" }, 2529 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" }, 2530 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" }, 2531 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" }, 2532 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" }, 2533 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" }, 2534 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" }, 2535 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" }, 2536 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" }, 2537 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" }, 2538 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" }, 2539 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" }, 2540 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" }, 2541 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" }, 2542 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" }, 2543 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" }, 2544 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" }, 2545 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" }, 2546 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" }, 2547 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" }, 2548 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" }, 2549 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" }, 2550 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" }, 2551 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" }, 2552 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" }, 2553 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" }, 2554 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" }, 2555 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" }, 2556 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" }, 2557 { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" }, 2558 { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" }, 2559 { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" }, 2560 { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" }, 2561 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" }, 2562 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" }, 2563 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" }, 2564 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" }, 2565 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, 
"v62,v65,v66" }, 2566 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" }, 2567 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" }, 2568 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" }, 2569 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" }, 2570 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" }, 2571 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" }, 2572 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" }, 2573 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" }, 2574 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" }, 2575 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" }, 2576 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" }, 2577 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" }, 2578 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" }, 2579 { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" }, 2580 { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" }, 2581 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" }, 2582 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" }, 2583 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" }, 2584 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" }, 2585 { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" }, 2586 { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" }, 2587 { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" }, 2588 { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" }, 2589 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" }, 2590 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" }, 2591 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" }, 2592 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" }, 2593 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" }, 2594 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" }, 2595 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" }, 2596 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" }, 2597 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65,v66" }, 2598 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" }, 2599 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" }, 2600 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" }, 2601 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" }, 2602 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" }, 2603 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" }, 2604 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" }, 2605 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" }, 2606 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" }, 2607 { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" }, 2608 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" }, 2609 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" }, 2610 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" }, 2611 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" }, 2612 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" }, 2613 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" }, 2614 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" }, 2615 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, 
"v62,v65,v66" }, 2616 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" }, 2617 { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" }, 2618 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" }, 2619 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" }, 2620 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" }, 2621 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" }, 2622 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" }, 2623 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" }, 2624 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" }, 2625 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" }, 2626 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" }, 2627 { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" }, 2628 { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" }, 2629 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" }, 2630 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" }, 2631 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" }, 2632 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" }, 2633 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" }, 2634 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" }, 2635 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" }, 2636 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" }, 2637 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" }, 2638 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" }, 2639 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" }, 2640 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" }, 2641 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" }, 2642 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" }, 2643 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" }, 2644 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" }, 2645 { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" }, 2646 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" }, 2647 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" }, 2648 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" }, 2649 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" }, 2650 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" }, 2651 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" }, 2652 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" }, 2653 { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" }, 2654 { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" }, 2655 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" }, 2656 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" }, 2657 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" }, 2658 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" }, 2659 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" }, 2660 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" }, 2661 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" }, 2662 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" }, 2663 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" }, 2664 { 
Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" }, 2665 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" }, 2666 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" }, 2667 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" }, 2668 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" }, 2669 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" }, 2670 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" }, 2671 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" }, 2672 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" }, 2673 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" }, 2674 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" }, 2675 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" }, 2676 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" }, 2677 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" }, 2678 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" }, 2679 { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" }, 2680 { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" }, 2681 { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" }, 2682 { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" }, 2683 { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" }, 2684 { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" }, 2685 }; 2686 2687 // Sort the tables on first execution so we can binary search them. 2688 auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) { 2689 return LHS.BuiltinID < RHS.BuiltinID; 2690 }; 2691 static const bool SortOnce = 2692 (llvm::sort(ValidCPU, SortCmp), 2693 llvm::sort(ValidHVX, SortCmp), true); 2694 (void)SortOnce; 2695 auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) { 2696 return BI.BuiltinID < BuiltinID; 2697 }; 2698 2699 const TargetInfo &TI = Context.getTargetInfo(); 2700 2701 const BuiltinAndString *FC = 2702 std::lower_bound(std::begin(ValidCPU), std::end(ValidCPU), BuiltinID, 2703 LowerBoundCmp); 2704 if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) { 2705 const TargetOptions &Opts = TI.getTargetOpts(); 2706 StringRef CPU = Opts.CPU; 2707 if (!CPU.empty()) { 2708 assert(CPU.startswith("hexagon") && "Unexpected CPU name"); 2709 CPU.consume_front("hexagon"); 2710 SmallVector<StringRef, 3> CPUs; 2711 StringRef(FC->Str).split(CPUs, ','); 2712 if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; })) 2713 return Diag(TheCall->getBeginLoc(), 2714 diag::err_hexagon_builtin_unsupported_cpu); 2715 } 2716 } 2717 2718 const BuiltinAndString *FH = 2719 std::lower_bound(std::begin(ValidHVX), std::end(ValidHVX), BuiltinID, 2720 LowerBoundCmp); 2721 if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) { 2722 if (!TI.hasFeature("hvx")) 2723 return Diag(TheCall->getBeginLoc(), 2724 diag::err_hexagon_builtin_requires_hvx); 2725 2726 SmallVector<StringRef, 3> HVXs; 2727 StringRef(FH->Str).split(HVXs, ','); 2728 bool IsValid = llvm::any_of(HVXs, 2729 [&TI] (StringRef V) { 2730 std::string F = "hvx" + V.str(); 2731 return TI.hasFeature(F); 2732 }); 2733 if (!IsValid) 2734 return Diag(TheCall->getBeginLoc(), 2735 diag::err_hexagon_builtin_unsupported_hvx); 2736 } 2737 2738 return false; 2739 } 2740 2741 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2742 struct ArgInfo { 2743 uint8_t OpNum; 2744 bool IsSigned; 
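    // BitWidth is the width of the immediate operand in bits (0 marks an
    // unused slot); a nonzero Align scales the accepted range by 1 << Align
    // and additionally requires the value to be a multiple of that power of
    // two (see the range check at the end of this function).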
2745 uint8_t BitWidth; 2746 uint8_t Align; 2747 }; 2748 struct BuiltinInfo { 2749 unsigned BuiltinID; 2750 ArgInfo Infos[2]; 2751 }; 2752 2753 static BuiltinInfo Infos[] = { 2754 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2755 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2756 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2757 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} }, 2758 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2759 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2760 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2761 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2762 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2763 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2764 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2765 2766 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2767 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2768 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2769 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2770 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2771 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2772 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2773 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2774 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2775 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2776 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2777 2778 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2779 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2780 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2781 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2782 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2783 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2784 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2785 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2786 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2787 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2788 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2791 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2792 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2793 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2797 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2798 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2799 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2800 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2803 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 
5, 0 }} }, 2804 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2805 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2806 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2807 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2808 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2809 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2810 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2811 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2812 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2813 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2814 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2815 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2816 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2817 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2818 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2819 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2820 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2821 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2822 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2823 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2824 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2825 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2826 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2827 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2828 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2829 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2830 {{ 1, false, 6, 0 }} }, 2831 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2832 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2833 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2834 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2835 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2836 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2838 {{ 1, false, 5, 0 }} }, 2839 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2841 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2842 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2844 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2845 { 2, false, 5, 0 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2847 { 2, false, 6, 0 }} }, 2848 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2849 { 3, false, 5, 0 }} }, 2850 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2851 { 3, false, 6, 0 }} }, 2852 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2853 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2854 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2855 { 
Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2856 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2857 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2858 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2859 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2860 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2861 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2863 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2865 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2867 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2868 {{ 2, false, 4, 0 }, 2869 { 3, false, 5, 0 }} }, 2870 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2871 {{ 2, false, 4, 0 }, 2872 { 3, false, 5, 0 }} }, 2873 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2874 {{ 2, false, 4, 0 }, 2875 { 3, false, 5, 0 }} }, 2876 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2877 {{ 2, false, 4, 0 }, 2878 { 3, false, 5, 0 }} }, 2879 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2881 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2882 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2883 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2885 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2886 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2888 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2889 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2890 { 2, false, 5, 0 }} }, 2891 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2892 { 2, false, 6, 0 }} }, 2893 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2894 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2896 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2897 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2898 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2899 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2900 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2902 {{ 1, false, 4, 0 }} }, 2903 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2904 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2905 {{ 1, false, 4, 0 }} }, 2906 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2907 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2908 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2909 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2910 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2911 { 
Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
      {{ 3, false, 1, 0 }} },
  };

  // Use a dynamically initialized static to sort the table exactly once on
  // first run.
  static const bool SortOnce =
      (llvm::sort(Infos,
                  [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
                    return LHS.BuiltinID < RHS.BuiltinID;
                  }),
       true);
  (void)SortOnce;

  const BuiltinInfo *F =
      std::lower_bound(std::begin(Infos), std::end(Infos), BuiltinID,
                       [](const BuiltinInfo &BI, unsigned BuiltinID) {
                         return BI.BuiltinID < BuiltinID;
                       });
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return false;

  bool Error = false;

  for (const ArgInfo &A : F->Infos) {
    // Ignore empty ArgInfo elements.
    if (A.BitWidth == 0)
      continue;

    int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
    int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
               SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}

bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinCpu(BuiltinID, TheCall) ||
         CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}


// CheckMipsBuiltinFunctionCall - Checks that the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction, i.e., df/m, df/n, and then by size.
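//
// Each case below records the index (i) of the immediate operand, the
// inclusive range [l, u] it must lie in, and, for the memory forms, a
// multiple (m) the value must be divisible by; the actual checks are
// delegated to SemaBuiltinConstantArgRange / SemaBuiltinConstantArgMultiple
// at the end of the function. For example (illustrative only),
//   __builtin_msa_slli_b(v, 8);
// is rejected because the shift amount must be in [0, 7].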
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics; we should
// be too.
bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics map to) that use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually has an unsigned 5 bit field,
  // not a df/n field.
3037 case Mips::BI__builtin_msa_cfcmsa: 3038 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3039 case Mips::BI__builtin_msa_clei_u_b: 3040 case Mips::BI__builtin_msa_clei_u_h: 3041 case Mips::BI__builtin_msa_clei_u_w: 3042 case Mips::BI__builtin_msa_clei_u_d: 3043 case Mips::BI__builtin_msa_clti_u_b: 3044 case Mips::BI__builtin_msa_clti_u_h: 3045 case Mips::BI__builtin_msa_clti_u_w: 3046 case Mips::BI__builtin_msa_clti_u_d: 3047 case Mips::BI__builtin_msa_maxi_u_b: 3048 case Mips::BI__builtin_msa_maxi_u_h: 3049 case Mips::BI__builtin_msa_maxi_u_w: 3050 case Mips::BI__builtin_msa_maxi_u_d: 3051 case Mips::BI__builtin_msa_mini_u_b: 3052 case Mips::BI__builtin_msa_mini_u_h: 3053 case Mips::BI__builtin_msa_mini_u_w: 3054 case Mips::BI__builtin_msa_mini_u_d: 3055 case Mips::BI__builtin_msa_addvi_b: 3056 case Mips::BI__builtin_msa_addvi_h: 3057 case Mips::BI__builtin_msa_addvi_w: 3058 case Mips::BI__builtin_msa_addvi_d: 3059 case Mips::BI__builtin_msa_bclri_w: 3060 case Mips::BI__builtin_msa_bnegi_w: 3061 case Mips::BI__builtin_msa_bseti_w: 3062 case Mips::BI__builtin_msa_sat_s_w: 3063 case Mips::BI__builtin_msa_sat_u_w: 3064 case Mips::BI__builtin_msa_slli_w: 3065 case Mips::BI__builtin_msa_srai_w: 3066 case Mips::BI__builtin_msa_srari_w: 3067 case Mips::BI__builtin_msa_srli_w: 3068 case Mips::BI__builtin_msa_srlri_w: 3069 case Mips::BI__builtin_msa_subvi_b: 3070 case Mips::BI__builtin_msa_subvi_h: 3071 case Mips::BI__builtin_msa_subvi_w: 3072 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3073 case Mips::BI__builtin_msa_binsli_w: 3074 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3075 // These intrinsics take an unsigned 6 bit immediate. 3076 case Mips::BI__builtin_msa_bclri_d: 3077 case Mips::BI__builtin_msa_bnegi_d: 3078 case Mips::BI__builtin_msa_bseti_d: 3079 case Mips::BI__builtin_msa_sat_s_d: 3080 case Mips::BI__builtin_msa_sat_u_d: 3081 case Mips::BI__builtin_msa_slli_d: 3082 case Mips::BI__builtin_msa_srai_d: 3083 case Mips::BI__builtin_msa_srari_d: 3084 case Mips::BI__builtin_msa_srli_d: 3085 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3086 case Mips::BI__builtin_msa_binsli_d: 3087 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3088 // These intrinsics take a signed 5 bit immediate. 3089 case Mips::BI__builtin_msa_ceqi_b: 3090 case Mips::BI__builtin_msa_ceqi_h: 3091 case Mips::BI__builtin_msa_ceqi_w: 3092 case Mips::BI__builtin_msa_ceqi_d: 3093 case Mips::BI__builtin_msa_clti_s_b: 3094 case Mips::BI__builtin_msa_clti_s_h: 3095 case Mips::BI__builtin_msa_clti_s_w: 3096 case Mips::BI__builtin_msa_clti_s_d: 3097 case Mips::BI__builtin_msa_clei_s_b: 3098 case Mips::BI__builtin_msa_clei_s_h: 3099 case Mips::BI__builtin_msa_clei_s_w: 3100 case Mips::BI__builtin_msa_clei_s_d: 3101 case Mips::BI__builtin_msa_maxi_s_b: 3102 case Mips::BI__builtin_msa_maxi_s_h: 3103 case Mips::BI__builtin_msa_maxi_s_w: 3104 case Mips::BI__builtin_msa_maxi_s_d: 3105 case Mips::BI__builtin_msa_mini_s_b: 3106 case Mips::BI__builtin_msa_mini_s_h: 3107 case Mips::BI__builtin_msa_mini_s_w: 3108 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3109 // These intrinsics take an unsigned 8 bit immediate. 
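  // For example (illustrative only), __builtin_msa_andi_b(v, 0x1ff) would be
  // rejected here because the mask operand must fit in [0, 255].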
3110 case Mips::BI__builtin_msa_andi_b: 3111 case Mips::BI__builtin_msa_nori_b: 3112 case Mips::BI__builtin_msa_ori_b: 3113 case Mips::BI__builtin_msa_shf_b: 3114 case Mips::BI__builtin_msa_shf_h: 3115 case Mips::BI__builtin_msa_shf_w: 3116 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3117 case Mips::BI__builtin_msa_bseli_b: 3118 case Mips::BI__builtin_msa_bmnzi_b: 3119 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3120 // df/n format 3121 // These intrinsics take an unsigned 4 bit immediate. 3122 case Mips::BI__builtin_msa_copy_s_b: 3123 case Mips::BI__builtin_msa_copy_u_b: 3124 case Mips::BI__builtin_msa_insve_b: 3125 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3126 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3127 // These intrinsics take an unsigned 3 bit immediate. 3128 case Mips::BI__builtin_msa_copy_s_h: 3129 case Mips::BI__builtin_msa_copy_u_h: 3130 case Mips::BI__builtin_msa_insve_h: 3131 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3132 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3133 // These intrinsics take an unsigned 2 bit immediate. 3134 case Mips::BI__builtin_msa_copy_s_w: 3135 case Mips::BI__builtin_msa_copy_u_w: 3136 case Mips::BI__builtin_msa_insve_w: 3137 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3138 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3139 // These intrinsics take an unsigned 1 bit immediate. 3140 case Mips::BI__builtin_msa_copy_s_d: 3141 case Mips::BI__builtin_msa_copy_u_d: 3142 case Mips::BI__builtin_msa_insve_d: 3143 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3144 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3145 // Memory offsets and immediate loads. 3146 // These intrinsics take a signed 10 bit immediate. 
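  // For the ld_*/st_* forms below, the byte offset must be a multiple of the
  // element size m (1, 2, 4 or 8) and lie in the signed 10-bit range scaled
  // by m; both conditions are enforced via SemaBuiltinConstantArgRange and
  // SemaBuiltinConstantArgMultiple at the end of this function.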
3147 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3148 case Mips::BI__builtin_msa_ldi_h: 3149 case Mips::BI__builtin_msa_ldi_w: 3150 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3151 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3152 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3153 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3154 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3155 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3156 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3157 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3158 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3159 } 3160 3161 if (!m) 3162 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3163 3164 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3165 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3166 } 3167 3168 bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3169 unsigned i = 0, l = 0, u = 0; 3170 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3171 BuiltinID == PPC::BI__builtin_divdeu || 3172 BuiltinID == PPC::BI__builtin_bpermd; 3173 bool IsTarget64Bit = Context.getTargetInfo() 3174 .getTypeWidth(Context 3175 .getTargetInfo() 3176 .getIntPtrType()) == 64; 3177 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3178 BuiltinID == PPC::BI__builtin_divweu || 3179 BuiltinID == PPC::BI__builtin_divde || 3180 BuiltinID == PPC::BI__builtin_divdeu; 3181 3182 if (Is64BitBltin && !IsTarget64Bit) 3183 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3184 << TheCall->getSourceRange(); 3185 3186 if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) || 3187 (BuiltinID == PPC::BI__builtin_bpermd && 3188 !Context.getTargetInfo().hasFeature("bpermd"))) 3189 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3190 << TheCall->getSourceRange(); 3191 3192 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3193 if (!Context.getTargetInfo().hasFeature("vsx")) 3194 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3195 << TheCall->getSourceRange(); 3196 return false; 3197 }; 3198 3199 switch (BuiltinID) { 3200 default: return false; 3201 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3202 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3203 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3204 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3205 case PPC::BI__builtin_tbegin: 3206 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3207 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3208 case PPC::BI__builtin_tabortwc: 3209 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3210 case PPC::BI__builtin_tabortwci: 3211 case PPC::BI__builtin_tabortdci: 3212 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3213 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3214 case PPC::BI__builtin_vsx_xxpermdi: 3215 case PPC::BI__builtin_vsx_xxsldwi: 3216 return SemaBuiltinVSX(TheCall); 3217 case PPC::BI__builtin_unpack_vector_int128: 3218 return SemaVSXCheck(TheCall) || 3219 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3220 case PPC::BI__builtin_pack_vector_int128: 3221 return SemaVSXCheck(TheCall); 3222 } 3223 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3224 } 3225 3226 bool 
Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3227 CallExpr *TheCall) { 3228 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3229 Expr *Arg = TheCall->getArg(0); 3230 llvm::APSInt AbortCode(32); 3231 if (Arg->isIntegerConstantExpr(AbortCode, Context) && 3232 AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256) 3233 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3234 << Arg->getSourceRange(); 3235 } 3236 3237 // For intrinsics which take an immediate value as part of the instruction, 3238 // range check them here. 3239 unsigned i = 0, l = 0, u = 0; 3240 switch (BuiltinID) { 3241 default: return false; 3242 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3243 case SystemZ::BI__builtin_s390_verimb: 3244 case SystemZ::BI__builtin_s390_verimh: 3245 case SystemZ::BI__builtin_s390_verimf: 3246 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3247 case SystemZ::BI__builtin_s390_vfaeb: 3248 case SystemZ::BI__builtin_s390_vfaeh: 3249 case SystemZ::BI__builtin_s390_vfaef: 3250 case SystemZ::BI__builtin_s390_vfaebs: 3251 case SystemZ::BI__builtin_s390_vfaehs: 3252 case SystemZ::BI__builtin_s390_vfaefs: 3253 case SystemZ::BI__builtin_s390_vfaezb: 3254 case SystemZ::BI__builtin_s390_vfaezh: 3255 case SystemZ::BI__builtin_s390_vfaezf: 3256 case SystemZ::BI__builtin_s390_vfaezbs: 3257 case SystemZ::BI__builtin_s390_vfaezhs: 3258 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3259 case SystemZ::BI__builtin_s390_vfisb: 3260 case SystemZ::BI__builtin_s390_vfidb: 3261 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3262 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3263 case SystemZ::BI__builtin_s390_vftcisb: 3264 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3265 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3266 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3267 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3268 case SystemZ::BI__builtin_s390_vstrcb: 3269 case SystemZ::BI__builtin_s390_vstrch: 3270 case SystemZ::BI__builtin_s390_vstrcf: 3271 case SystemZ::BI__builtin_s390_vstrczb: 3272 case SystemZ::BI__builtin_s390_vstrczh: 3273 case SystemZ::BI__builtin_s390_vstrczf: 3274 case SystemZ::BI__builtin_s390_vstrcbs: 3275 case SystemZ::BI__builtin_s390_vstrchs: 3276 case SystemZ::BI__builtin_s390_vstrcfs: 3277 case SystemZ::BI__builtin_s390_vstrczbs: 3278 case SystemZ::BI__builtin_s390_vstrczhs: 3279 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3280 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3281 case SystemZ::BI__builtin_s390_vfminsb: 3282 case SystemZ::BI__builtin_s390_vfmaxsb: 3283 case SystemZ::BI__builtin_s390_vfmindb: 3284 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3285 } 3286 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3287 } 3288 3289 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3290 /// This checks that the target supports __builtin_cpu_supports and 3291 /// that the string argument is constant and valid. 3292 static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) { 3293 Expr *Arg = TheCall->getArg(0); 3294 3295 // Check if the argument is a string literal. 3296 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3297 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3298 << Arg->getSourceRange(); 3299 3300 // Check the contents of the string. 
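  // The set of names accepted here is target-defined; for example
  // (illustrative only), on X86 __builtin_cpu_supports("avx2") passes
  // validateCpuSupports, while an unrecognized string is diagnosed with
  // err_invalid_cpu_supports below.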
3301 StringRef Feature = 3302 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3303 if (!S.Context.getTargetInfo().validateCpuSupports(Feature)) 3304 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3305 << Arg->getSourceRange(); 3306 return false; 3307 } 3308 3309 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3310 /// This checks that the target supports __builtin_cpu_is and 3311 /// that the string argument is constant and valid. 3312 static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) { 3313 Expr *Arg = TheCall->getArg(0); 3314 3315 // Check if the argument is a string literal. 3316 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3317 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3318 << Arg->getSourceRange(); 3319 3320 // Check the contents of the string. 3321 StringRef Feature = 3322 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3323 if (!S.Context.getTargetInfo().validateCpuIs(Feature)) 3324 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3325 << Arg->getSourceRange(); 3326 return false; 3327 } 3328 3329 // Check if the rounding mode is legal. 3330 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3331 // Indicates if this instruction has rounding control or just SAE. 3332 bool HasRC = false; 3333 3334 unsigned ArgNum = 0; 3335 switch (BuiltinID) { 3336 default: 3337 return false; 3338 case X86::BI__builtin_ia32_vcvttsd2si32: 3339 case X86::BI__builtin_ia32_vcvttsd2si64: 3340 case X86::BI__builtin_ia32_vcvttsd2usi32: 3341 case X86::BI__builtin_ia32_vcvttsd2usi64: 3342 case X86::BI__builtin_ia32_vcvttss2si32: 3343 case X86::BI__builtin_ia32_vcvttss2si64: 3344 case X86::BI__builtin_ia32_vcvttss2usi32: 3345 case X86::BI__builtin_ia32_vcvttss2usi64: 3346 ArgNum = 1; 3347 break; 3348 case X86::BI__builtin_ia32_maxpd512: 3349 case X86::BI__builtin_ia32_maxps512: 3350 case X86::BI__builtin_ia32_minpd512: 3351 case X86::BI__builtin_ia32_minps512: 3352 ArgNum = 2; 3353 break; 3354 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3355 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3356 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3357 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3358 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3359 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3360 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3361 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3362 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3363 case X86::BI__builtin_ia32_exp2pd_mask: 3364 case X86::BI__builtin_ia32_exp2ps_mask: 3365 case X86::BI__builtin_ia32_getexppd512_mask: 3366 case X86::BI__builtin_ia32_getexpps512_mask: 3367 case X86::BI__builtin_ia32_rcp28pd_mask: 3368 case X86::BI__builtin_ia32_rcp28ps_mask: 3369 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3370 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3371 case X86::BI__builtin_ia32_vcomisd: 3372 case X86::BI__builtin_ia32_vcomiss: 3373 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3374 ArgNum = 3; 3375 break; 3376 case X86::BI__builtin_ia32_cmppd512_mask: 3377 case X86::BI__builtin_ia32_cmpps512_mask: 3378 case X86::BI__builtin_ia32_cmpsd_mask: 3379 case X86::BI__builtin_ia32_cmpss_mask: 3380 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3381 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3382 case X86::BI__builtin_ia32_getexpss128_round_mask: 3383 case X86::BI__builtin_ia32_getmantpd512_mask: 3384 case X86::BI__builtin_ia32_getmantps512_mask: 3385 case X86::BI__builtin_ia32_maxsd_round_mask: 
3386 case X86::BI__builtin_ia32_maxss_round_mask: 3387 case X86::BI__builtin_ia32_minsd_round_mask: 3388 case X86::BI__builtin_ia32_minss_round_mask: 3389 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3390 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3391 case X86::BI__builtin_ia32_reducepd512_mask: 3392 case X86::BI__builtin_ia32_reduceps512_mask: 3393 case X86::BI__builtin_ia32_rndscalepd_mask: 3394 case X86::BI__builtin_ia32_rndscaleps_mask: 3395 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3396 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3397 ArgNum = 4; 3398 break; 3399 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3400 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3401 case X86::BI__builtin_ia32_fixupimmps512_mask: 3402 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3403 case X86::BI__builtin_ia32_fixupimmsd_mask: 3404 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3405 case X86::BI__builtin_ia32_fixupimmss_mask: 3406 case X86::BI__builtin_ia32_fixupimmss_maskz: 3407 case X86::BI__builtin_ia32_getmantsd_round_mask: 3408 case X86::BI__builtin_ia32_getmantss_round_mask: 3409 case X86::BI__builtin_ia32_rangepd512_mask: 3410 case X86::BI__builtin_ia32_rangeps512_mask: 3411 case X86::BI__builtin_ia32_rangesd128_round_mask: 3412 case X86::BI__builtin_ia32_rangess128_round_mask: 3413 case X86::BI__builtin_ia32_reducesd_mask: 3414 case X86::BI__builtin_ia32_reducess_mask: 3415 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3416 case X86::BI__builtin_ia32_rndscaless_round_mask: 3417 ArgNum = 5; 3418 break; 3419 case X86::BI__builtin_ia32_vcvtsd2si64: 3420 case X86::BI__builtin_ia32_vcvtsd2si32: 3421 case X86::BI__builtin_ia32_vcvtsd2usi32: 3422 case X86::BI__builtin_ia32_vcvtsd2usi64: 3423 case X86::BI__builtin_ia32_vcvtss2si32: 3424 case X86::BI__builtin_ia32_vcvtss2si64: 3425 case X86::BI__builtin_ia32_vcvtss2usi32: 3426 case X86::BI__builtin_ia32_vcvtss2usi64: 3427 case X86::BI__builtin_ia32_sqrtpd512: 3428 case X86::BI__builtin_ia32_sqrtps512: 3429 ArgNum = 1; 3430 HasRC = true; 3431 break; 3432 case X86::BI__builtin_ia32_addpd512: 3433 case X86::BI__builtin_ia32_addps512: 3434 case X86::BI__builtin_ia32_divpd512: 3435 case X86::BI__builtin_ia32_divps512: 3436 case X86::BI__builtin_ia32_mulpd512: 3437 case X86::BI__builtin_ia32_mulps512: 3438 case X86::BI__builtin_ia32_subpd512: 3439 case X86::BI__builtin_ia32_subps512: 3440 case X86::BI__builtin_ia32_cvtsi2sd64: 3441 case X86::BI__builtin_ia32_cvtsi2ss32: 3442 case X86::BI__builtin_ia32_cvtsi2ss64: 3443 case X86::BI__builtin_ia32_cvtusi2sd64: 3444 case X86::BI__builtin_ia32_cvtusi2ss32: 3445 case X86::BI__builtin_ia32_cvtusi2ss64: 3446 ArgNum = 2; 3447 HasRC = true; 3448 break; 3449 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3450 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3451 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3452 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3453 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3454 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3455 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3456 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3457 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3458 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3459 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3460 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3461 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3462 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3463 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3464 ArgNum = 3; 3465 HasRC = true; 3466 break; 3467 case X86::BI__builtin_ia32_addss_round_mask: 3468 
  case X86::BI__builtin_ia32_addsd_round_mask:
  case X86::BI__builtin_ia32_divss_round_mask:
  case X86::BI__builtin_ia32_divsd_round_mask:
  case X86::BI__builtin_ia32_mulss_round_mask:
  case X86::BI__builtin_ia32_mulsd_round_mask:
  case X86::BI__builtin_ia32_subss_round_mask:
  case X86::BI__builtin_ia32_subsd_round_mask:
  case X86::BI__builtin_ia32_scalefpd512_mask:
  case X86::BI__builtin_ia32_scalefps512_mask:
  case X86::BI__builtin_ia32_scalefsd_round_mask:
  case X86::BI__builtin_ia32_scalefss_round_mask:
  case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
  case X86::BI__builtin_ia32_sqrtsd_round_mask:
  case X86::BI__builtin_ia32_sqrtss_round_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
  // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
  // make sure it is only combined with ROUND_NO_EXC.
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
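// The scale immediate corresponds to the SIB addressing-mode scale, so only
// the values 1, 2, 4 and 8 are accepted (checked at the end of this function).
// For example (illustrative only), _mm_i32gather_ps(base, idx, 4) is fine,
// while a scale of 3 is rejected with err_x86_builtin_invalid_scale.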
3533 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3534 CallExpr *TheCall) { 3535 unsigned ArgNum = 0; 3536 switch (BuiltinID) { 3537 default: 3538 return false; 3539 case X86::BI__builtin_ia32_gatherpfdpd: 3540 case X86::BI__builtin_ia32_gatherpfdps: 3541 case X86::BI__builtin_ia32_gatherpfqpd: 3542 case X86::BI__builtin_ia32_gatherpfqps: 3543 case X86::BI__builtin_ia32_scatterpfdpd: 3544 case X86::BI__builtin_ia32_scatterpfdps: 3545 case X86::BI__builtin_ia32_scatterpfqpd: 3546 case X86::BI__builtin_ia32_scatterpfqps: 3547 ArgNum = 3; 3548 break; 3549 case X86::BI__builtin_ia32_gatherd_pd: 3550 case X86::BI__builtin_ia32_gatherd_pd256: 3551 case X86::BI__builtin_ia32_gatherq_pd: 3552 case X86::BI__builtin_ia32_gatherq_pd256: 3553 case X86::BI__builtin_ia32_gatherd_ps: 3554 case X86::BI__builtin_ia32_gatherd_ps256: 3555 case X86::BI__builtin_ia32_gatherq_ps: 3556 case X86::BI__builtin_ia32_gatherq_ps256: 3557 case X86::BI__builtin_ia32_gatherd_q: 3558 case X86::BI__builtin_ia32_gatherd_q256: 3559 case X86::BI__builtin_ia32_gatherq_q: 3560 case X86::BI__builtin_ia32_gatherq_q256: 3561 case X86::BI__builtin_ia32_gatherd_d: 3562 case X86::BI__builtin_ia32_gatherd_d256: 3563 case X86::BI__builtin_ia32_gatherq_d: 3564 case X86::BI__builtin_ia32_gatherq_d256: 3565 case X86::BI__builtin_ia32_gather3div2df: 3566 case X86::BI__builtin_ia32_gather3div2di: 3567 case X86::BI__builtin_ia32_gather3div4df: 3568 case X86::BI__builtin_ia32_gather3div4di: 3569 case X86::BI__builtin_ia32_gather3div4sf: 3570 case X86::BI__builtin_ia32_gather3div4si: 3571 case X86::BI__builtin_ia32_gather3div8sf: 3572 case X86::BI__builtin_ia32_gather3div8si: 3573 case X86::BI__builtin_ia32_gather3siv2df: 3574 case X86::BI__builtin_ia32_gather3siv2di: 3575 case X86::BI__builtin_ia32_gather3siv4df: 3576 case X86::BI__builtin_ia32_gather3siv4di: 3577 case X86::BI__builtin_ia32_gather3siv4sf: 3578 case X86::BI__builtin_ia32_gather3siv4si: 3579 case X86::BI__builtin_ia32_gather3siv8sf: 3580 case X86::BI__builtin_ia32_gather3siv8si: 3581 case X86::BI__builtin_ia32_gathersiv8df: 3582 case X86::BI__builtin_ia32_gathersiv16sf: 3583 case X86::BI__builtin_ia32_gatherdiv8df: 3584 case X86::BI__builtin_ia32_gatherdiv16sf: 3585 case X86::BI__builtin_ia32_gathersiv8di: 3586 case X86::BI__builtin_ia32_gathersiv16si: 3587 case X86::BI__builtin_ia32_gatherdiv8di: 3588 case X86::BI__builtin_ia32_gatherdiv16si: 3589 case X86::BI__builtin_ia32_scatterdiv2df: 3590 case X86::BI__builtin_ia32_scatterdiv2di: 3591 case X86::BI__builtin_ia32_scatterdiv4df: 3592 case X86::BI__builtin_ia32_scatterdiv4di: 3593 case X86::BI__builtin_ia32_scatterdiv4sf: 3594 case X86::BI__builtin_ia32_scatterdiv4si: 3595 case X86::BI__builtin_ia32_scatterdiv8sf: 3596 case X86::BI__builtin_ia32_scatterdiv8si: 3597 case X86::BI__builtin_ia32_scattersiv2df: 3598 case X86::BI__builtin_ia32_scattersiv2di: 3599 case X86::BI__builtin_ia32_scattersiv4df: 3600 case X86::BI__builtin_ia32_scattersiv4di: 3601 case X86::BI__builtin_ia32_scattersiv4sf: 3602 case X86::BI__builtin_ia32_scattersiv4si: 3603 case X86::BI__builtin_ia32_scattersiv8sf: 3604 case X86::BI__builtin_ia32_scattersiv8si: 3605 case X86::BI__builtin_ia32_scattersiv8df: 3606 case X86::BI__builtin_ia32_scattersiv16sf: 3607 case X86::BI__builtin_ia32_scatterdiv8df: 3608 case X86::BI__builtin_ia32_scatterdiv16sf: 3609 case X86::BI__builtin_ia32_scattersiv8di: 3610 case X86::BI__builtin_ia32_scattersiv16si: 3611 case X86::BI__builtin_ia32_scatterdiv8di: 3612 case X86::BI__builtin_ia32_scatterdiv16si: 3613 
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = Context.getTargetInfo().getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE, make sure it's valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate, make sure it's
  // valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
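  // As for the other targets, i is the index of the immediate operand and
  // [l, u] its inclusive range. For example (illustrative only),
  // __builtin_ia32_blendpd takes a 2-bit immediate as its third operand, so
  // it is handled below with i = 2, l = 0, u = 3.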
3669 int i = 0, l = 0, u = 0; 3670 switch (BuiltinID) { 3671 default: 3672 return false; 3673 case X86::BI__builtin_ia32_vec_ext_v2si: 3674 case X86::BI__builtin_ia32_vec_ext_v2di: 3675 case X86::BI__builtin_ia32_vextractf128_pd256: 3676 case X86::BI__builtin_ia32_vextractf128_ps256: 3677 case X86::BI__builtin_ia32_vextractf128_si256: 3678 case X86::BI__builtin_ia32_extract128i256: 3679 case X86::BI__builtin_ia32_extractf64x4_mask: 3680 case X86::BI__builtin_ia32_extracti64x4_mask: 3681 case X86::BI__builtin_ia32_extractf32x8_mask: 3682 case X86::BI__builtin_ia32_extracti32x8_mask: 3683 case X86::BI__builtin_ia32_extractf64x2_256_mask: 3684 case X86::BI__builtin_ia32_extracti64x2_256_mask: 3685 case X86::BI__builtin_ia32_extractf32x4_256_mask: 3686 case X86::BI__builtin_ia32_extracti32x4_256_mask: 3687 i = 1; l = 0; u = 1; 3688 break; 3689 case X86::BI__builtin_ia32_vec_set_v2di: 3690 case X86::BI__builtin_ia32_vinsertf128_pd256: 3691 case X86::BI__builtin_ia32_vinsertf128_ps256: 3692 case X86::BI__builtin_ia32_vinsertf128_si256: 3693 case X86::BI__builtin_ia32_insert128i256: 3694 case X86::BI__builtin_ia32_insertf32x8: 3695 case X86::BI__builtin_ia32_inserti32x8: 3696 case X86::BI__builtin_ia32_insertf64x4: 3697 case X86::BI__builtin_ia32_inserti64x4: 3698 case X86::BI__builtin_ia32_insertf64x2_256: 3699 case X86::BI__builtin_ia32_inserti64x2_256: 3700 case X86::BI__builtin_ia32_insertf32x4_256: 3701 case X86::BI__builtin_ia32_inserti32x4_256: 3702 i = 2; l = 0; u = 1; 3703 break; 3704 case X86::BI__builtin_ia32_vpermilpd: 3705 case X86::BI__builtin_ia32_vec_ext_v4hi: 3706 case X86::BI__builtin_ia32_vec_ext_v4si: 3707 case X86::BI__builtin_ia32_vec_ext_v4sf: 3708 case X86::BI__builtin_ia32_vec_ext_v4di: 3709 case X86::BI__builtin_ia32_extractf32x4_mask: 3710 case X86::BI__builtin_ia32_extracti32x4_mask: 3711 case X86::BI__builtin_ia32_extractf64x2_512_mask: 3712 case X86::BI__builtin_ia32_extracti64x2_512_mask: 3713 i = 1; l = 0; u = 3; 3714 break; 3715 case X86::BI_mm_prefetch: 3716 case X86::BI__builtin_ia32_vec_ext_v8hi: 3717 case X86::BI__builtin_ia32_vec_ext_v8si: 3718 i = 1; l = 0; u = 7; 3719 break; 3720 case X86::BI__builtin_ia32_sha1rnds4: 3721 case X86::BI__builtin_ia32_blendpd: 3722 case X86::BI__builtin_ia32_shufpd: 3723 case X86::BI__builtin_ia32_vec_set_v4hi: 3724 case X86::BI__builtin_ia32_vec_set_v4si: 3725 case X86::BI__builtin_ia32_vec_set_v4di: 3726 case X86::BI__builtin_ia32_shuf_f32x4_256: 3727 case X86::BI__builtin_ia32_shuf_f64x2_256: 3728 case X86::BI__builtin_ia32_shuf_i32x4_256: 3729 case X86::BI__builtin_ia32_shuf_i64x2_256: 3730 case X86::BI__builtin_ia32_insertf64x2_512: 3731 case X86::BI__builtin_ia32_inserti64x2_512: 3732 case X86::BI__builtin_ia32_insertf32x4: 3733 case X86::BI__builtin_ia32_inserti32x4: 3734 i = 2; l = 0; u = 3; 3735 break; 3736 case X86::BI__builtin_ia32_vpermil2pd: 3737 case X86::BI__builtin_ia32_vpermil2pd256: 3738 case X86::BI__builtin_ia32_vpermil2ps: 3739 case X86::BI__builtin_ia32_vpermil2ps256: 3740 i = 3; l = 0; u = 3; 3741 break; 3742 case X86::BI__builtin_ia32_cmpb128_mask: 3743 case X86::BI__builtin_ia32_cmpw128_mask: 3744 case X86::BI__builtin_ia32_cmpd128_mask: 3745 case X86::BI__builtin_ia32_cmpq128_mask: 3746 case X86::BI__builtin_ia32_cmpb256_mask: 3747 case X86::BI__builtin_ia32_cmpw256_mask: 3748 case X86::BI__builtin_ia32_cmpd256_mask: 3749 case X86::BI__builtin_ia32_cmpq256_mask: 3750 case X86::BI__builtin_ia32_cmpb512_mask: 3751 case X86::BI__builtin_ia32_cmpw512_mask: 3752 case X86::BI__builtin_ia32_cmpd512_mask: 
3753 case X86::BI__builtin_ia32_cmpq512_mask: 3754 case X86::BI__builtin_ia32_ucmpb128_mask: 3755 case X86::BI__builtin_ia32_ucmpw128_mask: 3756 case X86::BI__builtin_ia32_ucmpd128_mask: 3757 case X86::BI__builtin_ia32_ucmpq128_mask: 3758 case X86::BI__builtin_ia32_ucmpb256_mask: 3759 case X86::BI__builtin_ia32_ucmpw256_mask: 3760 case X86::BI__builtin_ia32_ucmpd256_mask: 3761 case X86::BI__builtin_ia32_ucmpq256_mask: 3762 case X86::BI__builtin_ia32_ucmpb512_mask: 3763 case X86::BI__builtin_ia32_ucmpw512_mask: 3764 case X86::BI__builtin_ia32_ucmpd512_mask: 3765 case X86::BI__builtin_ia32_ucmpq512_mask: 3766 case X86::BI__builtin_ia32_vpcomub: 3767 case X86::BI__builtin_ia32_vpcomuw: 3768 case X86::BI__builtin_ia32_vpcomud: 3769 case X86::BI__builtin_ia32_vpcomuq: 3770 case X86::BI__builtin_ia32_vpcomb: 3771 case X86::BI__builtin_ia32_vpcomw: 3772 case X86::BI__builtin_ia32_vpcomd: 3773 case X86::BI__builtin_ia32_vpcomq: 3774 case X86::BI__builtin_ia32_vec_set_v8hi: 3775 case X86::BI__builtin_ia32_vec_set_v8si: 3776 i = 2; l = 0; u = 7; 3777 break; 3778 case X86::BI__builtin_ia32_vpermilpd256: 3779 case X86::BI__builtin_ia32_roundps: 3780 case X86::BI__builtin_ia32_roundpd: 3781 case X86::BI__builtin_ia32_roundps256: 3782 case X86::BI__builtin_ia32_roundpd256: 3783 case X86::BI__builtin_ia32_getmantpd128_mask: 3784 case X86::BI__builtin_ia32_getmantpd256_mask: 3785 case X86::BI__builtin_ia32_getmantps128_mask: 3786 case X86::BI__builtin_ia32_getmantps256_mask: 3787 case X86::BI__builtin_ia32_getmantpd512_mask: 3788 case X86::BI__builtin_ia32_getmantps512_mask: 3789 case X86::BI__builtin_ia32_vec_ext_v16qi: 3790 case X86::BI__builtin_ia32_vec_ext_v16hi: 3791 i = 1; l = 0; u = 15; 3792 break; 3793 case X86::BI__builtin_ia32_pblendd128: 3794 case X86::BI__builtin_ia32_blendps: 3795 case X86::BI__builtin_ia32_blendpd256: 3796 case X86::BI__builtin_ia32_shufpd256: 3797 case X86::BI__builtin_ia32_roundss: 3798 case X86::BI__builtin_ia32_roundsd: 3799 case X86::BI__builtin_ia32_rangepd128_mask: 3800 case X86::BI__builtin_ia32_rangepd256_mask: 3801 case X86::BI__builtin_ia32_rangepd512_mask: 3802 case X86::BI__builtin_ia32_rangeps128_mask: 3803 case X86::BI__builtin_ia32_rangeps256_mask: 3804 case X86::BI__builtin_ia32_rangeps512_mask: 3805 case X86::BI__builtin_ia32_getmantsd_round_mask: 3806 case X86::BI__builtin_ia32_getmantss_round_mask: 3807 case X86::BI__builtin_ia32_vec_set_v16qi: 3808 case X86::BI__builtin_ia32_vec_set_v16hi: 3809 i = 2; l = 0; u = 15; 3810 break; 3811 case X86::BI__builtin_ia32_vec_ext_v32qi: 3812 i = 1; l = 0; u = 31; 3813 break; 3814 case X86::BI__builtin_ia32_cmpps: 3815 case X86::BI__builtin_ia32_cmpss: 3816 case X86::BI__builtin_ia32_cmppd: 3817 case X86::BI__builtin_ia32_cmpsd: 3818 case X86::BI__builtin_ia32_cmpps256: 3819 case X86::BI__builtin_ia32_cmppd256: 3820 case X86::BI__builtin_ia32_cmpps128_mask: 3821 case X86::BI__builtin_ia32_cmppd128_mask: 3822 case X86::BI__builtin_ia32_cmpps256_mask: 3823 case X86::BI__builtin_ia32_cmppd256_mask: 3824 case X86::BI__builtin_ia32_cmpps512_mask: 3825 case X86::BI__builtin_ia32_cmppd512_mask: 3826 case X86::BI__builtin_ia32_cmpsd_mask: 3827 case X86::BI__builtin_ia32_cmpss_mask: 3828 case X86::BI__builtin_ia32_vec_set_v32qi: 3829 i = 2; l = 0; u = 31; 3830 break; 3831 case X86::BI__builtin_ia32_permdf256: 3832 case X86::BI__builtin_ia32_permdi256: 3833 case X86::BI__builtin_ia32_permdf512: 3834 case X86::BI__builtin_ia32_permdi512: 3835 case X86::BI__builtin_ia32_vpermilps: 3836 case X86::BI__builtin_ia32_vpermilps256: 
3837 case X86::BI__builtin_ia32_vpermilpd512: 3838 case X86::BI__builtin_ia32_vpermilps512: 3839 case X86::BI__builtin_ia32_pshufd: 3840 case X86::BI__builtin_ia32_pshufd256: 3841 case X86::BI__builtin_ia32_pshufd512: 3842 case X86::BI__builtin_ia32_pshufhw: 3843 case X86::BI__builtin_ia32_pshufhw256: 3844 case X86::BI__builtin_ia32_pshufhw512: 3845 case X86::BI__builtin_ia32_pshuflw: 3846 case X86::BI__builtin_ia32_pshuflw256: 3847 case X86::BI__builtin_ia32_pshuflw512: 3848 case X86::BI__builtin_ia32_vcvtps2ph: 3849 case X86::BI__builtin_ia32_vcvtps2ph_mask: 3850 case X86::BI__builtin_ia32_vcvtps2ph256: 3851 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 3852 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 3853 case X86::BI__builtin_ia32_rndscaleps_128_mask: 3854 case X86::BI__builtin_ia32_rndscalepd_128_mask: 3855 case X86::BI__builtin_ia32_rndscaleps_256_mask: 3856 case X86::BI__builtin_ia32_rndscalepd_256_mask: 3857 case X86::BI__builtin_ia32_rndscaleps_mask: 3858 case X86::BI__builtin_ia32_rndscalepd_mask: 3859 case X86::BI__builtin_ia32_reducepd128_mask: 3860 case X86::BI__builtin_ia32_reducepd256_mask: 3861 case X86::BI__builtin_ia32_reducepd512_mask: 3862 case X86::BI__builtin_ia32_reduceps128_mask: 3863 case X86::BI__builtin_ia32_reduceps256_mask: 3864 case X86::BI__builtin_ia32_reduceps512_mask: 3865 case X86::BI__builtin_ia32_prold512: 3866 case X86::BI__builtin_ia32_prolq512: 3867 case X86::BI__builtin_ia32_prold128: 3868 case X86::BI__builtin_ia32_prold256: 3869 case X86::BI__builtin_ia32_prolq128: 3870 case X86::BI__builtin_ia32_prolq256: 3871 case X86::BI__builtin_ia32_prord512: 3872 case X86::BI__builtin_ia32_prorq512: 3873 case X86::BI__builtin_ia32_prord128: 3874 case X86::BI__builtin_ia32_prord256: 3875 case X86::BI__builtin_ia32_prorq128: 3876 case X86::BI__builtin_ia32_prorq256: 3877 case X86::BI__builtin_ia32_fpclasspd128_mask: 3878 case X86::BI__builtin_ia32_fpclasspd256_mask: 3879 case X86::BI__builtin_ia32_fpclassps128_mask: 3880 case X86::BI__builtin_ia32_fpclassps256_mask: 3881 case X86::BI__builtin_ia32_fpclassps512_mask: 3882 case X86::BI__builtin_ia32_fpclasspd512_mask: 3883 case X86::BI__builtin_ia32_fpclasssd_mask: 3884 case X86::BI__builtin_ia32_fpclassss_mask: 3885 case X86::BI__builtin_ia32_pslldqi128_byteshift: 3886 case X86::BI__builtin_ia32_pslldqi256_byteshift: 3887 case X86::BI__builtin_ia32_pslldqi512_byteshift: 3888 case X86::BI__builtin_ia32_psrldqi128_byteshift: 3889 case X86::BI__builtin_ia32_psrldqi256_byteshift: 3890 case X86::BI__builtin_ia32_psrldqi512_byteshift: 3891 case X86::BI__builtin_ia32_kshiftliqi: 3892 case X86::BI__builtin_ia32_kshiftlihi: 3893 case X86::BI__builtin_ia32_kshiftlisi: 3894 case X86::BI__builtin_ia32_kshiftlidi: 3895 case X86::BI__builtin_ia32_kshiftriqi: 3896 case X86::BI__builtin_ia32_kshiftrihi: 3897 case X86::BI__builtin_ia32_kshiftrisi: 3898 case X86::BI__builtin_ia32_kshiftridi: 3899 i = 1; l = 0; u = 255; 3900 break; 3901 case X86::BI__builtin_ia32_vperm2f128_pd256: 3902 case X86::BI__builtin_ia32_vperm2f128_ps256: 3903 case X86::BI__builtin_ia32_vperm2f128_si256: 3904 case X86::BI__builtin_ia32_permti256: 3905 case X86::BI__builtin_ia32_pblendw128: 3906 case X86::BI__builtin_ia32_pblendw256: 3907 case X86::BI__builtin_ia32_blendps256: 3908 case X86::BI__builtin_ia32_pblendd256: 3909 case X86::BI__builtin_ia32_palignr128: 3910 case X86::BI__builtin_ia32_palignr256: 3911 case X86::BI__builtin_ia32_palignr512: 3912 case X86::BI__builtin_ia32_alignq512: 3913 case X86::BI__builtin_ia32_alignd512: 3914 case 
X86::BI__builtin_ia32_alignd128: 3915 case X86::BI__builtin_ia32_alignd256: 3916 case X86::BI__builtin_ia32_alignq128: 3917 case X86::BI__builtin_ia32_alignq256: 3918 case X86::BI__builtin_ia32_vcomisd: 3919 case X86::BI__builtin_ia32_vcomiss: 3920 case X86::BI__builtin_ia32_shuf_f32x4: 3921 case X86::BI__builtin_ia32_shuf_f64x2: 3922 case X86::BI__builtin_ia32_shuf_i32x4: 3923 case X86::BI__builtin_ia32_shuf_i64x2: 3924 case X86::BI__builtin_ia32_shufpd512: 3925 case X86::BI__builtin_ia32_shufps: 3926 case X86::BI__builtin_ia32_shufps256: 3927 case X86::BI__builtin_ia32_shufps512: 3928 case X86::BI__builtin_ia32_dbpsadbw128: 3929 case X86::BI__builtin_ia32_dbpsadbw256: 3930 case X86::BI__builtin_ia32_dbpsadbw512: 3931 case X86::BI__builtin_ia32_vpshldd128: 3932 case X86::BI__builtin_ia32_vpshldd256: 3933 case X86::BI__builtin_ia32_vpshldd512: 3934 case X86::BI__builtin_ia32_vpshldq128: 3935 case X86::BI__builtin_ia32_vpshldq256: 3936 case X86::BI__builtin_ia32_vpshldq512: 3937 case X86::BI__builtin_ia32_vpshldw128: 3938 case X86::BI__builtin_ia32_vpshldw256: 3939 case X86::BI__builtin_ia32_vpshldw512: 3940 case X86::BI__builtin_ia32_vpshrdd128: 3941 case X86::BI__builtin_ia32_vpshrdd256: 3942 case X86::BI__builtin_ia32_vpshrdd512: 3943 case X86::BI__builtin_ia32_vpshrdq128: 3944 case X86::BI__builtin_ia32_vpshrdq256: 3945 case X86::BI__builtin_ia32_vpshrdq512: 3946 case X86::BI__builtin_ia32_vpshrdw128: 3947 case X86::BI__builtin_ia32_vpshrdw256: 3948 case X86::BI__builtin_ia32_vpshrdw512: 3949 i = 2; l = 0; u = 255; 3950 break; 3951 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3952 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3953 case X86::BI__builtin_ia32_fixupimmps512_mask: 3954 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3955 case X86::BI__builtin_ia32_fixupimmsd_mask: 3956 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3957 case X86::BI__builtin_ia32_fixupimmss_mask: 3958 case X86::BI__builtin_ia32_fixupimmss_maskz: 3959 case X86::BI__builtin_ia32_fixupimmpd128_mask: 3960 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 3961 case X86::BI__builtin_ia32_fixupimmpd256_mask: 3962 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 3963 case X86::BI__builtin_ia32_fixupimmps128_mask: 3964 case X86::BI__builtin_ia32_fixupimmps128_maskz: 3965 case X86::BI__builtin_ia32_fixupimmps256_mask: 3966 case X86::BI__builtin_ia32_fixupimmps256_maskz: 3967 case X86::BI__builtin_ia32_pternlogd512_mask: 3968 case X86::BI__builtin_ia32_pternlogd512_maskz: 3969 case X86::BI__builtin_ia32_pternlogq512_mask: 3970 case X86::BI__builtin_ia32_pternlogq512_maskz: 3971 case X86::BI__builtin_ia32_pternlogd128_mask: 3972 case X86::BI__builtin_ia32_pternlogd128_maskz: 3973 case X86::BI__builtin_ia32_pternlogd256_mask: 3974 case X86::BI__builtin_ia32_pternlogd256_maskz: 3975 case X86::BI__builtin_ia32_pternlogq128_mask: 3976 case X86::BI__builtin_ia32_pternlogq128_maskz: 3977 case X86::BI__builtin_ia32_pternlogq256_mask: 3978 case X86::BI__builtin_ia32_pternlogq256_maskz: 3979 i = 3; l = 0; u = 255; 3980 break; 3981 case X86::BI__builtin_ia32_gatherpfdpd: 3982 case X86::BI__builtin_ia32_gatherpfdps: 3983 case X86::BI__builtin_ia32_gatherpfqpd: 3984 case X86::BI__builtin_ia32_gatherpfqps: 3985 case X86::BI__builtin_ia32_scatterpfdpd: 3986 case X86::BI__builtin_ia32_scatterpfdps: 3987 case X86::BI__builtin_ia32_scatterpfqpd: 3988 case X86::BI__builtin_ia32_scatterpfqps: 3989 i = 4; l = 2; u = 3; 3990 break; 3991 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3992 case X86::BI__builtin_ia32_rndscaless_round_mask: 
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have
  // out-of-range values. Such code needs to code generate, but doesn't
  // necessarily need to make any sense. We use a warning that defaults to an
  // error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the
/// FormatStringInfo parameter with the FormatAttr's correct format_idx and
/// firstDataArg. Returns true when the format fits the function and the
/// FormatStringInfo has been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability =
          Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
              dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
                dyn_cast<InitListExpr>(CLE->getInitializer()))
          Expr = ILE->getInit(0);
  }

  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
}

bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as a format string to a formatting method.
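/// For example (illustrative), with the usual CoreFoundation prototype the
/// format string of CFStringCreateWithFormat is argument index 2, so a call
/// like
///   CFStringCreateWithFormat(NULL, NULL, CFSTR("name: %s"), name);
/// is diagnosed here because the CFString format contains a %s directive.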
4075 static void 4076 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 4077 const NamedDecl *FDecl, 4078 Expr **Args, 4079 unsigned NumArgs) { 4080 unsigned Idx = 0; 4081 bool Format = false; 4082 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 4083 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 4084 Idx = 2; 4085 Format = true; 4086 } 4087 else 4088 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4089 if (S.GetFormatNSStringIdx(I, Idx)) { 4090 Format = true; 4091 break; 4092 } 4093 } 4094 if (!Format || NumArgs <= Idx) 4095 return; 4096 const Expr *FormatExpr = Args[Idx]; 4097 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 4098 FormatExpr = CSCE->getSubExpr(); 4099 const StringLiteral *FormatString; 4100 if (const ObjCStringLiteral *OSL = 4101 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 4102 FormatString = OSL->getString(); 4103 else 4104 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 4105 if (!FormatString) 4106 return; 4107 if (S.FormatStringHasSArg(FormatString)) { 4108 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 4109 << "%s" << 1 << 1; 4110 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 4111 << FDecl->getDeclName(); 4112 } 4113 } 4114 4115 /// Determine whether the given type has a non-null nullability annotation. 4116 static bool isNonNullType(ASTContext &ctx, QualType type) { 4117 if (auto nullability = type->getNullability(ctx)) 4118 return *nullability == NullabilityKind::NonNull; 4119 4120 return false; 4121 } 4122 4123 static void CheckNonNullArguments(Sema &S, 4124 const NamedDecl *FDecl, 4125 const FunctionProtoType *Proto, 4126 ArrayRef<const Expr *> Args, 4127 SourceLocation CallSiteLoc) { 4128 assert((FDecl || Proto) && "Need a function declaration or prototype"); 4129 4130 // Check the attributes attached to the method/function itself. 4131 llvm::SmallBitVector NonNullArgs; 4132 if (FDecl) { 4133 // Handle the nonnull attribute on the function/method declaration itself. 4134 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 4135 if (!NonNull->args_size()) { 4136 // Easy case: all pointer arguments are nonnull. 4137 for (const auto *Arg : Args) 4138 if (S.isValidPointerAttrType(Arg->getType())) 4139 CheckNonNullArgument(S, Arg, CallSiteLoc); 4140 return; 4141 } 4142 4143 for (const ParamIdx &Idx : NonNull->args()) { 4144 unsigned IdxAST = Idx.getASTIndex(); 4145 if (IdxAST >= Args.size()) 4146 continue; 4147 if (NonNullArgs.empty()) 4148 NonNullArgs.resize(Args.size()); 4149 NonNullArgs.set(IdxAST); 4150 } 4151 } 4152 } 4153 4154 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4155 // Handle the nonnull attribute on the parameters of the 4156 // function/method. 
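    // For example (illustrative), for declarations such as
    //   void use(int *p __attribute__((nonnull)));
    //   void use2(int * _Nonnull p);
    // the corresponding parameter positions are recorded in NonNullArgs below
    // so that passing a null constant can be diagnosed at the call site.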
4157 ArrayRef<ParmVarDecl*> parms; 4158 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4159 parms = FD->parameters(); 4160 else 4161 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4162 4163 unsigned ParamIndex = 0; 4164 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4165 I != E; ++I, ++ParamIndex) { 4166 const ParmVarDecl *PVD = *I; 4167 if (PVD->hasAttr<NonNullAttr>() || 4168 isNonNullType(S.Context, PVD->getType())) { 4169 if (NonNullArgs.empty()) 4170 NonNullArgs.resize(Args.size()); 4171 4172 NonNullArgs.set(ParamIndex); 4173 } 4174 } 4175 } else { 4176 // If we have a non-function, non-method declaration but no 4177 // function prototype, try to dig out the function prototype. 4178 if (!Proto) { 4179 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4180 QualType type = VD->getType().getNonReferenceType(); 4181 if (auto pointerType = type->getAs<PointerType>()) 4182 type = pointerType->getPointeeType(); 4183 else if (auto blockType = type->getAs<BlockPointerType>()) 4184 type = blockType->getPointeeType(); 4185 // FIXME: data member pointers? 4186 4187 // Dig out the function prototype, if there is one. 4188 Proto = type->getAs<FunctionProtoType>(); 4189 } 4190 } 4191 4192 // Fill in non-null argument information from the nullability 4193 // information on the parameter types (if we have them). 4194 if (Proto) { 4195 unsigned Index = 0; 4196 for (auto paramType : Proto->getParamTypes()) { 4197 if (isNonNullType(S.Context, paramType)) { 4198 if (NonNullArgs.empty()) 4199 NonNullArgs.resize(Args.size()); 4200 4201 NonNullArgs.set(Index); 4202 } 4203 4204 ++Index; 4205 } 4206 } 4207 } 4208 4209 // Check for non-null arguments. 4210 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4211 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4212 if (NonNullArgs[ArgIndex]) 4213 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4214 } 4215 } 4216 4217 /// Handles the checks for format strings, non-POD arguments to vararg 4218 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4219 /// attributes. 4220 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4221 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4222 bool IsMemberFunction, SourceLocation Loc, 4223 SourceRange Range, VariadicCallType CallType) { 4224 // FIXME: We should check as much as we can in the template definition. 4225 if (CurContext->isDependentContext()) 4226 return; 4227 4228 // Printf and scanf checking. 4229 llvm::SmallBitVector CheckedVarArgs; 4230 if (FDecl) { 4231 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4232 // Only create vector if there are format attributes. 4233 CheckedVarArgs.resize(Args.size()); 4234 4235 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4236 CheckedVarArgs); 4237 } 4238 } 4239 4240 // Refuse POD arguments that weren't caught by the format string 4241 // checks above. 4242 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4243 if (CallType != VariadicDoesNotApply && 4244 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4245 unsigned NumParams = Proto ? Proto->getNumParams() 4246 : FDecl && isa<FunctionDecl>(FDecl) 4247 ? cast<FunctionDecl>(FDecl)->getNumParams() 4248 : FDecl && isa<ObjCMethodDecl>(FDecl) 4249 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4250 : 0; 4251 4252 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4253 // Args[ArgIdx] can be null in malformed code. 
4254 if (const Expr *Arg = Args[ArgIdx]) { 4255 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4256 checkVariadicArgument(Arg, CallType); 4257 } 4258 } 4259 } 4260 4261 if (FDecl || Proto) { 4262 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4263 4264 // Type safety checking. 4265 if (FDecl) { 4266 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4267 CheckArgumentWithTypeTag(I, Args, Loc); 4268 } 4269 } 4270 4271 if (FD) 4272 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4273 } 4274 4275 /// CheckConstructorCall - Check a constructor call for correctness and safety 4276 /// properties not enforced by the C type system. 4277 void Sema::CheckConstructorCall(FunctionDecl *FDecl, 4278 ArrayRef<const Expr *> Args, 4279 const FunctionProtoType *Proto, 4280 SourceLocation Loc) { 4281 VariadicCallType CallType = 4282 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4283 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4284 Loc, SourceRange(), CallType); 4285 } 4286 4287 /// CheckFunctionCall - Check a direct function call for various correctness 4288 /// and safety properties not strictly enforced by the C type system. 4289 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4290 const FunctionProtoType *Proto) { 4291 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4292 isa<CXXMethodDecl>(FDecl); 4293 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4294 IsMemberOperatorCall; 4295 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4296 TheCall->getCallee()); 4297 Expr** Args = TheCall->getArgs(); 4298 unsigned NumArgs = TheCall->getNumArgs(); 4299 4300 Expr *ImplicitThis = nullptr; 4301 if (IsMemberOperatorCall) { 4302 // If this is a call to a member operator, hide the first argument 4303 // from checkCall. 4304 // FIXME: Our choice of AST representation here is less than ideal. 4305 ImplicitThis = Args[0]; 4306 ++Args; 4307 --NumArgs; 4308 } else if (IsMemberFunction) 4309 ImplicitThis = 4310 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4311 4312 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4313 IsMemberFunction, TheCall->getRParenLoc(), 4314 TheCall->getCallee()->getSourceRange(), CallType); 4315 4316 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4317 // None of the checks below are needed for functions that don't have 4318 // simple names (e.g., C++ conversion functions). 4319 if (!FnInfo) 4320 return false; 4321 4322 CheckAbsoluteValueFunction(TheCall, FDecl); 4323 CheckMaxUnsignedZero(TheCall, FDecl); 4324 4325 if (getLangOpts().ObjC) 4326 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4327 4328 unsigned CMId = FDecl->getMemoryFunctionKind(); 4329 if (CMId == 0) 4330 return false; 4331 4332 // Handle memory setting and copying functions. 4333 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) 4334 CheckStrlcpycatArguments(TheCall, FnInfo); 4335 else if (CMId == Builtin::BIstrncat) 4336 CheckStrncatArguments(TheCall, FnInfo); 4337 else 4338 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4339 4340 return false; 4341 } 4342 4343 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4344 ArrayRef<const Expr *> Args) { 4345 VariadicCallType CallType = 4346 Method->isVariadic() ? 
VariadicMethod : VariadicDoesNotApply; 4347 4348 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4349 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4350 CallType); 4351 4352 return false; 4353 } 4354 4355 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4356 const FunctionProtoType *Proto) { 4357 QualType Ty; 4358 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4359 Ty = V->getType().getNonReferenceType(); 4360 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4361 Ty = F->getType().getNonReferenceType(); 4362 else 4363 return false; 4364 4365 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4366 !Ty->isFunctionProtoType()) 4367 return false; 4368 4369 VariadicCallType CallType; 4370 if (!Proto || !Proto->isVariadic()) { 4371 CallType = VariadicDoesNotApply; 4372 } else if (Ty->isBlockPointerType()) { 4373 CallType = VariadicBlock; 4374 } else { // Ty->isFunctionPointerType() 4375 CallType = VariadicFunction; 4376 } 4377 4378 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4379 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4380 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4381 TheCall->getCallee()->getSourceRange(), CallType); 4382 4383 return false; 4384 } 4385 4386 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4387 /// such as function pointers returned from functions. 4388 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4389 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4390 TheCall->getCallee()); 4391 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4392 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4393 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4394 TheCall->getCallee()->getSourceRange(), CallType); 4395 4396 return false; 4397 } 4398 4399 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4400 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4401 return false; 4402 4403 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4404 switch (Op) { 4405 case AtomicExpr::AO__c11_atomic_init: 4406 case AtomicExpr::AO__opencl_atomic_init: 4407 llvm_unreachable("There is no ordering argument for an init"); 4408 4409 case AtomicExpr::AO__c11_atomic_load: 4410 case AtomicExpr::AO__opencl_atomic_load: 4411 case AtomicExpr::AO__atomic_load_n: 4412 case AtomicExpr::AO__atomic_load: 4413 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4414 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4415 4416 case AtomicExpr::AO__c11_atomic_store: 4417 case AtomicExpr::AO__opencl_atomic_store: 4418 case AtomicExpr::AO__atomic_store: 4419 case AtomicExpr::AO__atomic_store_n: 4420 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4421 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4422 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4423 4424 default: 4425 return true; 4426 } 4427 } 4428 4429 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4430 AtomicExpr::AtomicOp Op) { 4431 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4432 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4433 4434 // All the non-OpenCL operations take one of the following forms. 4435 // The OpenCL operations take the __c11 forms with one extra argument for 4436 // synchronization scope. 
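  // For example (illustrative):
  //   __c11_atomic_load(p, __ATOMIC_SEQ_CST)                        // Load
  //   __opencl_atomic_load(p, __ATOMIC_SEQ_CST,
  //                        __OPENCL_MEMORY_SCOPE_DEVICE)            // Load + scope
  //   __atomic_compare_exchange(p, &expected, &desired, false,
  //                             __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) // GNUCmpXchg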
4437 enum { 4438 // C __c11_atomic_init(A *, C) 4439 Init, 4440 4441 // C __c11_atomic_load(A *, int) 4442 Load, 4443 4444 // void __atomic_load(A *, CP, int) 4445 LoadCopy, 4446 4447 // void __atomic_store(A *, CP, int) 4448 Copy, 4449 4450 // C __c11_atomic_add(A *, M, int) 4451 Arithmetic, 4452 4453 // C __atomic_exchange_n(A *, CP, int) 4454 Xchg, 4455 4456 // void __atomic_exchange(A *, C *, CP, int) 4457 GNUXchg, 4458 4459 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4460 C11CmpXchg, 4461 4462 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4463 GNUCmpXchg 4464 } Form = Init; 4465 4466 const unsigned NumForm = GNUCmpXchg + 1; 4467 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4468 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4469 // where: 4470 // C is an appropriate type, 4471 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4472 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4473 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4474 // the int parameters are for orderings. 4475 4476 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4477 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4478 "need to update code for modified forms"); 4479 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4480 AtomicExpr::AO__c11_atomic_fetch_xor + 1 == 4481 AtomicExpr::AO__atomic_load, 4482 "need to update code for modified C11 atomics"); 4483 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4484 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4485 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4486 Op <= AtomicExpr::AO__c11_atomic_fetch_xor) || 4487 IsOpenCL; 4488 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4489 Op == AtomicExpr::AO__atomic_store_n || 4490 Op == AtomicExpr::AO__atomic_exchange_n || 4491 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4492 bool IsAddSub = false; 4493 bool IsMinMax = false; 4494 4495 switch (Op) { 4496 case AtomicExpr::AO__c11_atomic_init: 4497 case AtomicExpr::AO__opencl_atomic_init: 4498 Form = Init; 4499 break; 4500 4501 case AtomicExpr::AO__c11_atomic_load: 4502 case AtomicExpr::AO__opencl_atomic_load: 4503 case AtomicExpr::AO__atomic_load_n: 4504 Form = Load; 4505 break; 4506 4507 case AtomicExpr::AO__atomic_load: 4508 Form = LoadCopy; 4509 break; 4510 4511 case AtomicExpr::AO__c11_atomic_store: 4512 case AtomicExpr::AO__opencl_atomic_store: 4513 case AtomicExpr::AO__atomic_store: 4514 case AtomicExpr::AO__atomic_store_n: 4515 Form = Copy; 4516 break; 4517 4518 case AtomicExpr::AO__c11_atomic_fetch_add: 4519 case AtomicExpr::AO__c11_atomic_fetch_sub: 4520 case AtomicExpr::AO__opencl_atomic_fetch_add: 4521 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4522 case AtomicExpr::AO__opencl_atomic_fetch_min: 4523 case AtomicExpr::AO__opencl_atomic_fetch_max: 4524 case AtomicExpr::AO__atomic_fetch_add: 4525 case AtomicExpr::AO__atomic_fetch_sub: 4526 case AtomicExpr::AO__atomic_add_fetch: 4527 case AtomicExpr::AO__atomic_sub_fetch: 4528 IsAddSub = true; 4529 LLVM_FALLTHROUGH; 4530 case AtomicExpr::AO__c11_atomic_fetch_and: 4531 case AtomicExpr::AO__c11_atomic_fetch_or: 4532 case AtomicExpr::AO__c11_atomic_fetch_xor: 4533 case AtomicExpr::AO__opencl_atomic_fetch_and: 4534 case AtomicExpr::AO__opencl_atomic_fetch_or: 4535 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4536 case AtomicExpr::AO__atomic_fetch_and: 4537 case AtomicExpr::AO__atomic_fetch_or: 4538 case AtomicExpr::AO__atomic_fetch_xor: 4539 
case AtomicExpr::AO__atomic_fetch_nand: 4540 case AtomicExpr::AO__atomic_and_fetch: 4541 case AtomicExpr::AO__atomic_or_fetch: 4542 case AtomicExpr::AO__atomic_xor_fetch: 4543 case AtomicExpr::AO__atomic_nand_fetch: 4544 Form = Arithmetic; 4545 break; 4546 4547 case AtomicExpr::AO__atomic_fetch_min: 4548 case AtomicExpr::AO__atomic_fetch_max: 4549 IsMinMax = true; 4550 Form = Arithmetic; 4551 break; 4552 4553 case AtomicExpr::AO__c11_atomic_exchange: 4554 case AtomicExpr::AO__opencl_atomic_exchange: 4555 case AtomicExpr::AO__atomic_exchange_n: 4556 Form = Xchg; 4557 break; 4558 4559 case AtomicExpr::AO__atomic_exchange: 4560 Form = GNUXchg; 4561 break; 4562 4563 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4564 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4565 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4566 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4567 Form = C11CmpXchg; 4568 break; 4569 4570 case AtomicExpr::AO__atomic_compare_exchange: 4571 case AtomicExpr::AO__atomic_compare_exchange_n: 4572 Form = GNUCmpXchg; 4573 break; 4574 } 4575 4576 unsigned AdjustedNumArgs = NumArgs[Form]; 4577 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4578 ++AdjustedNumArgs; 4579 // Check we have the right number of arguments. 4580 if (TheCall->getNumArgs() < AdjustedNumArgs) { 4581 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 4582 << 0 << AdjustedNumArgs << TheCall->getNumArgs() 4583 << TheCall->getCallee()->getSourceRange(); 4584 return ExprError(); 4585 } else if (TheCall->getNumArgs() > AdjustedNumArgs) { 4586 Diag(TheCall->getArg(AdjustedNumArgs)->getBeginLoc(), 4587 diag::err_typecheck_call_too_many_args) 4588 << 0 << AdjustedNumArgs << TheCall->getNumArgs() 4589 << TheCall->getCallee()->getSourceRange(); 4590 return ExprError(); 4591 } 4592 4593 // Inspect the first argument of the atomic operation. 4594 Expr *Ptr = TheCall->getArg(0); 4595 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4596 if (ConvertedPtr.isInvalid()) 4597 return ExprError(); 4598 4599 Ptr = ConvertedPtr.get(); 4600 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4601 if (!pointerType) { 4602 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 4603 << Ptr->getType() << Ptr->getSourceRange(); 4604 return ExprError(); 4605 } 4606 4607 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4608 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4609 QualType ValType = AtomTy; // 'C' 4610 if (IsC11) { 4611 if (!AtomTy->isAtomicType()) { 4612 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic) 4613 << Ptr->getType() << Ptr->getSourceRange(); 4614 return ExprError(); 4615 } 4616 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4617 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4618 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_atomic) 4619 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 4620 << Ptr->getSourceRange(); 4621 return ExprError(); 4622 } 4623 ValType = AtomTy->getAs<AtomicType>()->getValueType(); 4624 } else if (Form != Load && Form != LoadCopy) { 4625 if (ValType.isConstQualified()) { 4626 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_pointer) 4627 << Ptr->getType() << Ptr->getSourceRange(); 4628 return ExprError(); 4629 } 4630 } 4631 4632 // For an arithmetic operation, the implied arithmetic must be well-formed. 
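  // For instance, a fetch_add on a pointer value type takes a ptrdiff_t
  // operand, the bitwise fetch_and/or/xor forms require an integer value
  // type, and the __atomic_fetch_min/max forms are restricted to 'int' and
  // 'unsigned int' below.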
4633 if (Form == Arithmetic) { 4634 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4635 if (IsAddSub && !ValType->isIntegerType() 4636 && !ValType->isPointerType()) { 4637 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4638 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4639 return ExprError(); 4640 } 4641 if (IsMinMax) { 4642 const BuiltinType *BT = ValType->getAs<BuiltinType>(); 4643 if (!BT || (BT->getKind() != BuiltinType::Int && 4644 BT->getKind() != BuiltinType::UInt)) { 4645 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_int32_or_ptr); 4646 return ExprError(); 4647 } 4648 } 4649 if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) { 4650 Diag(DRE->getBeginLoc(), diag::err_atomic_op_bitwise_needs_atomic_int) 4651 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4652 return ExprError(); 4653 } 4654 if (IsC11 && ValType->isPointerType() && 4655 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4656 diag::err_incomplete_type)) { 4657 return ExprError(); 4658 } 4659 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4660 // For __atomic_*_n operations, the value type must be a scalar integral or 4661 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 4662 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4663 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4664 return ExprError(); 4665 } 4666 4667 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4668 !AtomTy->isScalarType()) { 4669 // For GNU atomics, require a trivially-copyable type. This is not part of 4670 // the GNU atomics specification, but we enforce it for sanity. 4671 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_trivial_copy) 4672 << Ptr->getType() << Ptr->getSourceRange(); 4673 return ExprError(); 4674 } 4675 4676 switch (ValType.getObjCLifetime()) { 4677 case Qualifiers::OCL_None: 4678 case Qualifiers::OCL_ExplicitNone: 4679 // okay 4680 break; 4681 4682 case Qualifiers::OCL_Weak: 4683 case Qualifiers::OCL_Strong: 4684 case Qualifiers::OCL_Autoreleasing: 4685 // FIXME: Can this happen? By this point, ValType should be known 4686 // to be trivially copyable. 4687 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 4688 << ValType << Ptr->getSourceRange(); 4689 return ExprError(); 4690 } 4691 4692 // All atomic operations have an overload which takes a pointer to a volatile 4693 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4694 // into the result or the other operands. Similarly atomic_load takes a 4695 // pointer to a const 'A'. 4696 ValType.removeLocalVolatile(); 4697 ValType.removeLocalConst(); 4698 QualType ResultType = ValType; 4699 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4700 Form == Init) 4701 ResultType = Context.VoidTy; 4702 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4703 ResultType = Context.BoolTy; 4704 4705 // The type of a parameter passed 'by value'. In the GNU atomics, such 4706 // arguments are actually passed as pointers. 
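  // For example (illustrative), __atomic_exchange_n(p, desired, order) takes
  // 'desired' by value, whereas __atomic_exchange(p, &desired, &result, order)
  // passes both the desired value and the result slot by address.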
4707 QualType ByValType = ValType; // 'CP' 4708 bool IsPassedByAddress = false; 4709 if (!IsC11 && !IsN) { 4710 ByValType = Ptr->getType(); 4711 IsPassedByAddress = true; 4712 } 4713 4714 // The first argument's non-CV pointer type is used to deduce the type of 4715 // subsequent arguments, except for: 4716 // - weak flag (always converted to bool) 4717 // - memory order (always converted to int) 4718 // - scope (always converted to int) 4719 for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) { 4720 QualType Ty; 4721 if (i < NumVals[Form] + 1) { 4722 switch (i) { 4723 case 0: 4724 // The first argument is always a pointer. It has a fixed type. 4725 // It is always dereferenced, a nullptr is undefined. 4726 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4727 // Nothing else to do: we already know all we want about this pointer. 4728 continue; 4729 case 1: 4730 // The second argument is the non-atomic operand. For arithmetic, this 4731 // is always passed by value, and for a compare_exchange it is always 4732 // passed by address. For the rest, GNU uses by-address and C11 uses 4733 // by-value. 4734 assert(Form != Load); 4735 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 4736 Ty = ValType; 4737 else if (Form == Copy || Form == Xchg) { 4738 if (IsPassedByAddress) 4739 // The value pointer is always dereferenced, a nullptr is undefined. 4740 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4741 Ty = ByValType; 4742 } else if (Form == Arithmetic) 4743 Ty = Context.getPointerDiffType(); 4744 else { 4745 Expr *ValArg = TheCall->getArg(i); 4746 // The value pointer is always dereferenced, a nullptr is undefined. 4747 CheckNonNullArgument(*this, ValArg, DRE->getBeginLoc()); 4748 LangAS AS = LangAS::Default; 4749 // Keep address space of non-atomic pointer type. 4750 if (const PointerType *PtrTy = 4751 ValArg->getType()->getAs<PointerType>()) { 4752 AS = PtrTy->getPointeeType().getAddressSpace(); 4753 } 4754 Ty = Context.getPointerType( 4755 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 4756 } 4757 break; 4758 case 2: 4759 // The third argument to compare_exchange / GNU exchange is the desired 4760 // value, either by-value (for the C11 and *_n variant) or as a pointer. 4761 if (IsPassedByAddress) 4762 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4763 Ty = ByValType; 4764 break; 4765 case 3: 4766 // The fourth argument to GNU compare_exchange is a 'weak' flag. 4767 Ty = Context.BoolTy; 4768 break; 4769 } 4770 } else { 4771 // The order(s) and scope are always converted to int. 4772 Ty = Context.IntTy; 4773 } 4774 4775 InitializedEntity Entity = 4776 InitializedEntity::InitializeParameter(Context, Ty, false); 4777 ExprResult Arg = TheCall->getArg(i); 4778 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 4779 if (Arg.isInvalid()) 4780 return true; 4781 TheCall->setArg(i, Arg.get()); 4782 } 4783 4784 // Permute the arguments into a 'consistent' order. 4785 SmallVector<Expr*, 5> SubExprs; 4786 SubExprs.push_back(Ptr); 4787 switch (Form) { 4788 case Init: 4789 // Note, AtomicExpr::getVal1() has a special case for this atomic. 
4790 SubExprs.push_back(TheCall->getArg(1)); // Val1 4791 break; 4792 case Load: 4793 SubExprs.push_back(TheCall->getArg(1)); // Order 4794 break; 4795 case LoadCopy: 4796 case Copy: 4797 case Arithmetic: 4798 case Xchg: 4799 SubExprs.push_back(TheCall->getArg(2)); // Order 4800 SubExprs.push_back(TheCall->getArg(1)); // Val1 4801 break; 4802 case GNUXchg: 4803 // Note, AtomicExpr::getVal2() has a special case for this atomic. 4804 SubExprs.push_back(TheCall->getArg(3)); // Order 4805 SubExprs.push_back(TheCall->getArg(1)); // Val1 4806 SubExprs.push_back(TheCall->getArg(2)); // Val2 4807 break; 4808 case C11CmpXchg: 4809 SubExprs.push_back(TheCall->getArg(3)); // Order 4810 SubExprs.push_back(TheCall->getArg(1)); // Val1 4811 SubExprs.push_back(TheCall->getArg(4)); // OrderFail 4812 SubExprs.push_back(TheCall->getArg(2)); // Val2 4813 break; 4814 case GNUCmpXchg: 4815 SubExprs.push_back(TheCall->getArg(4)); // Order 4816 SubExprs.push_back(TheCall->getArg(1)); // Val1 4817 SubExprs.push_back(TheCall->getArg(5)); // OrderFail 4818 SubExprs.push_back(TheCall->getArg(2)); // Val2 4819 SubExprs.push_back(TheCall->getArg(3)); // Weak 4820 break; 4821 } 4822 4823 if (SubExprs.size() >= 2 && Form != Init) { 4824 llvm::APSInt Result(32); 4825 if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && 4826 !isValidOrderingForOp(Result.getSExtValue(), Op)) 4827 Diag(SubExprs[1]->getBeginLoc(), 4828 diag::warn_atomic_op_has_invalid_memory_order) 4829 << SubExprs[1]->getSourceRange(); 4830 } 4831 4832 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 4833 auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1); 4834 llvm::APSInt Result(32); 4835 if (Scope->isIntegerConstantExpr(Result, Context) && 4836 !ScopeModel->isValid(Result.getZExtValue())) { 4837 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 4838 << Scope->getSourceRange(); 4839 } 4840 SubExprs.push_back(Scope); 4841 } 4842 4843 AtomicExpr *AE = 4844 new (Context) AtomicExpr(TheCall->getCallee()->getBeginLoc(), SubExprs, 4845 ResultType, Op, TheCall->getRParenLoc()); 4846 4847 if ((Op == AtomicExpr::AO__c11_atomic_load || 4848 Op == AtomicExpr::AO__c11_atomic_store || 4849 Op == AtomicExpr::AO__opencl_atomic_load || 4850 Op == AtomicExpr::AO__opencl_atomic_store ) && 4851 Context.AtomicUsesUnsupportedLibcall(AE)) 4852 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 4853 << ((Op == AtomicExpr::AO__c11_atomic_load || 4854 Op == AtomicExpr::AO__opencl_atomic_load) 4855 ? 0 4856 : 1); 4857 4858 return AE; 4859 } 4860 4861 /// checkBuiltinArgument - Given a call to a builtin function, perform 4862 /// normal type-checking on the given argument, updating the call in 4863 /// place. This is useful when a builtin function requires custom 4864 /// type-checking for some of its arguments but not necessarily all of 4865 /// them. 4866 /// 4867 /// Returns true on error. 
static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
  FunctionDecl *Fn = E->getDirectCallee();
  assert(Fn && "builtin call without direct callee!");

  ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, Param);

  // Type-check the argument at ArgIndex, the same argument we update below.
  ExprResult Arg = E->getArg(ArgIndex);
  Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
  if (Arg.isInvalid())
    return true;

  E->setArg(ArgIndex, Arg.get());
  return false;
}

/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
/// arguments because all of these calls are prototyped as void(...).
///
/// This function goes through and does final semantic checking for these
/// builtins, as well as generating any warnings.
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
  Expr *Callee = TheCall->getCallee();
  DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // FIXME: We don't allow floating point scalars as input.
  Expr *FirstArg = TheCall->getArg(0);
  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
  if (FirstArgResult.isInvalid())
    return ExprError();
  FirstArg = FirstArgResult.get();
  TheCall->setArg(0, FirstArg);

  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  if (ValType.isConstQualified()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << FirstArg->getSourceRange();
    return ExprError();
  }

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto. For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number
  // of values (0, 1 or 2) followed by a potentially empty varargs list of
  // stuff that we ignore. Find out which row of BuiltinIndices to read from
  // as well as the number of fixed args.
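  // For example (illustrative), with an 'int *' first argument:
  //   __sync_fetch_and_add(&i, 1)                  // one fixed value operand
  //   __sync_val_compare_and_swap(&i, oldval, newval)  // two fixed operands
  //   __sync_lock_release(&i)                      // no fixed value operands
  // all resolve to the _4-suffixed concrete builtins for a 4-byte int.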
5008 unsigned BuiltinID = FDecl->getBuiltinID(); 5009 unsigned BuiltinIndex, NumFixed = 1; 5010 bool WarnAboutSemanticsChange = false; 5011 switch (BuiltinID) { 5012 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5013 case Builtin::BI__sync_fetch_and_add: 5014 case Builtin::BI__sync_fetch_and_add_1: 5015 case Builtin::BI__sync_fetch_and_add_2: 5016 case Builtin::BI__sync_fetch_and_add_4: 5017 case Builtin::BI__sync_fetch_and_add_8: 5018 case Builtin::BI__sync_fetch_and_add_16: 5019 BuiltinIndex = 0; 5020 break; 5021 5022 case Builtin::BI__sync_fetch_and_sub: 5023 case Builtin::BI__sync_fetch_and_sub_1: 5024 case Builtin::BI__sync_fetch_and_sub_2: 5025 case Builtin::BI__sync_fetch_and_sub_4: 5026 case Builtin::BI__sync_fetch_and_sub_8: 5027 case Builtin::BI__sync_fetch_and_sub_16: 5028 BuiltinIndex = 1; 5029 break; 5030 5031 case Builtin::BI__sync_fetch_and_or: 5032 case Builtin::BI__sync_fetch_and_or_1: 5033 case Builtin::BI__sync_fetch_and_or_2: 5034 case Builtin::BI__sync_fetch_and_or_4: 5035 case Builtin::BI__sync_fetch_and_or_8: 5036 case Builtin::BI__sync_fetch_and_or_16: 5037 BuiltinIndex = 2; 5038 break; 5039 5040 case Builtin::BI__sync_fetch_and_and: 5041 case Builtin::BI__sync_fetch_and_and_1: 5042 case Builtin::BI__sync_fetch_and_and_2: 5043 case Builtin::BI__sync_fetch_and_and_4: 5044 case Builtin::BI__sync_fetch_and_and_8: 5045 case Builtin::BI__sync_fetch_and_and_16: 5046 BuiltinIndex = 3; 5047 break; 5048 5049 case Builtin::BI__sync_fetch_and_xor: 5050 case Builtin::BI__sync_fetch_and_xor_1: 5051 case Builtin::BI__sync_fetch_and_xor_2: 5052 case Builtin::BI__sync_fetch_and_xor_4: 5053 case Builtin::BI__sync_fetch_and_xor_8: 5054 case Builtin::BI__sync_fetch_and_xor_16: 5055 BuiltinIndex = 4; 5056 break; 5057 5058 case Builtin::BI__sync_fetch_and_nand: 5059 case Builtin::BI__sync_fetch_and_nand_1: 5060 case Builtin::BI__sync_fetch_and_nand_2: 5061 case Builtin::BI__sync_fetch_and_nand_4: 5062 case Builtin::BI__sync_fetch_and_nand_8: 5063 case Builtin::BI__sync_fetch_and_nand_16: 5064 BuiltinIndex = 5; 5065 WarnAboutSemanticsChange = true; 5066 break; 5067 5068 case Builtin::BI__sync_add_and_fetch: 5069 case Builtin::BI__sync_add_and_fetch_1: 5070 case Builtin::BI__sync_add_and_fetch_2: 5071 case Builtin::BI__sync_add_and_fetch_4: 5072 case Builtin::BI__sync_add_and_fetch_8: 5073 case Builtin::BI__sync_add_and_fetch_16: 5074 BuiltinIndex = 6; 5075 break; 5076 5077 case Builtin::BI__sync_sub_and_fetch: 5078 case Builtin::BI__sync_sub_and_fetch_1: 5079 case Builtin::BI__sync_sub_and_fetch_2: 5080 case Builtin::BI__sync_sub_and_fetch_4: 5081 case Builtin::BI__sync_sub_and_fetch_8: 5082 case Builtin::BI__sync_sub_and_fetch_16: 5083 BuiltinIndex = 7; 5084 break; 5085 5086 case Builtin::BI__sync_and_and_fetch: 5087 case Builtin::BI__sync_and_and_fetch_1: 5088 case Builtin::BI__sync_and_and_fetch_2: 5089 case Builtin::BI__sync_and_and_fetch_4: 5090 case Builtin::BI__sync_and_and_fetch_8: 5091 case Builtin::BI__sync_and_and_fetch_16: 5092 BuiltinIndex = 8; 5093 break; 5094 5095 case Builtin::BI__sync_or_and_fetch: 5096 case Builtin::BI__sync_or_and_fetch_1: 5097 case Builtin::BI__sync_or_and_fetch_2: 5098 case Builtin::BI__sync_or_and_fetch_4: 5099 case Builtin::BI__sync_or_and_fetch_8: 5100 case Builtin::BI__sync_or_and_fetch_16: 5101 BuiltinIndex = 9; 5102 break; 5103 5104 case Builtin::BI__sync_xor_and_fetch: 5105 case Builtin::BI__sync_xor_and_fetch_1: 5106 case Builtin::BI__sync_xor_and_fetch_2: 5107 case Builtin::BI__sync_xor_and_fetch_4: 5108 case 
Builtin::BI__sync_xor_and_fetch_8: 5109 case Builtin::BI__sync_xor_and_fetch_16: 5110 BuiltinIndex = 10; 5111 break; 5112 5113 case Builtin::BI__sync_nand_and_fetch: 5114 case Builtin::BI__sync_nand_and_fetch_1: 5115 case Builtin::BI__sync_nand_and_fetch_2: 5116 case Builtin::BI__sync_nand_and_fetch_4: 5117 case Builtin::BI__sync_nand_and_fetch_8: 5118 case Builtin::BI__sync_nand_and_fetch_16: 5119 BuiltinIndex = 11; 5120 WarnAboutSemanticsChange = true; 5121 break; 5122 5123 case Builtin::BI__sync_val_compare_and_swap: 5124 case Builtin::BI__sync_val_compare_and_swap_1: 5125 case Builtin::BI__sync_val_compare_and_swap_2: 5126 case Builtin::BI__sync_val_compare_and_swap_4: 5127 case Builtin::BI__sync_val_compare_and_swap_8: 5128 case Builtin::BI__sync_val_compare_and_swap_16: 5129 BuiltinIndex = 12; 5130 NumFixed = 2; 5131 break; 5132 5133 case Builtin::BI__sync_bool_compare_and_swap: 5134 case Builtin::BI__sync_bool_compare_and_swap_1: 5135 case Builtin::BI__sync_bool_compare_and_swap_2: 5136 case Builtin::BI__sync_bool_compare_and_swap_4: 5137 case Builtin::BI__sync_bool_compare_and_swap_8: 5138 case Builtin::BI__sync_bool_compare_and_swap_16: 5139 BuiltinIndex = 13; 5140 NumFixed = 2; 5141 ResultType = Context.BoolTy; 5142 break; 5143 5144 case Builtin::BI__sync_lock_test_and_set: 5145 case Builtin::BI__sync_lock_test_and_set_1: 5146 case Builtin::BI__sync_lock_test_and_set_2: 5147 case Builtin::BI__sync_lock_test_and_set_4: 5148 case Builtin::BI__sync_lock_test_and_set_8: 5149 case Builtin::BI__sync_lock_test_and_set_16: 5150 BuiltinIndex = 14; 5151 break; 5152 5153 case Builtin::BI__sync_lock_release: 5154 case Builtin::BI__sync_lock_release_1: 5155 case Builtin::BI__sync_lock_release_2: 5156 case Builtin::BI__sync_lock_release_4: 5157 case Builtin::BI__sync_lock_release_8: 5158 case Builtin::BI__sync_lock_release_16: 5159 BuiltinIndex = 15; 5160 NumFixed = 0; 5161 ResultType = Context.VoidTy; 5162 break; 5163 5164 case Builtin::BI__sync_swap: 5165 case Builtin::BI__sync_swap_1: 5166 case Builtin::BI__sync_swap_2: 5167 case Builtin::BI__sync_swap_4: 5168 case Builtin::BI__sync_swap_8: 5169 case Builtin::BI__sync_swap_16: 5170 BuiltinIndex = 16; 5171 break; 5172 } 5173 5174 // Now that we know how many fixed arguments we expect, first check that we 5175 // have at least that many. 5176 if (TheCall->getNumArgs() < 1+NumFixed) { 5177 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5178 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5179 << Callee->getSourceRange(); 5180 return ExprError(); 5181 } 5182 5183 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5184 << Callee->getSourceRange(); 5185 5186 if (WarnAboutSemanticsChange) { 5187 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5188 << Callee->getSourceRange(); 5189 } 5190 5191 // Get the decl for the concrete builtin from this, we can tell what the 5192 // concrete integer type we should convert to is. 5193 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5194 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5195 FunctionDecl *NewBuiltinDecl; 5196 if (NewBuiltinID == BuiltinID) 5197 NewBuiltinDecl = FDecl; 5198 else { 5199 // Perform builtin lookup to avoid redeclaring it. 
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly. Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
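  // That is, two arguments for __builtin_nontemporal_store(value, ptr) and a
  // single pointer argument for __builtin_nontemporal_load(ptr).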
5273 if (checkArgCount(*this, TheCall, numArgs))
5274 return ExprError();
5275
5276 // Inspect the last argument of the nontemporal builtin. This should always
5277 // be a pointer type, from which we imply the type of the memory access.
5278 // Because it is a pointer type, we don't have to worry about any implicit
5279 // casts here.
5280 Expr *PointerArg = TheCall->getArg(numArgs - 1);
5281 ExprResult PointerArgResult =
5282 DefaultFunctionArrayLvalueConversion(PointerArg);
5283
5284 if (PointerArgResult.isInvalid())
5285 return ExprError();
5286 PointerArg = PointerArgResult.get();
5287 TheCall->setArg(numArgs - 1, PointerArg);
5288
5289 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
5290 if (!pointerType) {
5291 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
5292 << PointerArg->getType() << PointerArg->getSourceRange();
5293 return ExprError();
5294 }
5295
5296 QualType ValType = pointerType->getPointeeType();
5297
5298 // Strip any qualifiers off ValType.
5299 ValType = ValType.getUnqualifiedType();
5300 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5301 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
5302 !ValType->isVectorType()) {
5303 Diag(DRE->getBeginLoc(),
5304 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
5305 << PointerArg->getType() << PointerArg->getSourceRange();
5306 return ExprError();
5307 }
5308
5309 if (!isStore) {
5310 TheCall->setType(ValType);
5311 return TheCallResult;
5312 }
5313
5314 ExprResult ValArg = TheCall->getArg(0);
5315 InitializedEntity Entity = InitializedEntity::InitializeParameter(
5316 Context, ValType, /*consume*/ false);
5317 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
5318 if (ValArg.isInvalid())
5319 return ExprError();
5320
5321 TheCall->setArg(0, ValArg.get());
5322 TheCall->setType(Context.VoidTy);
5323 return TheCallResult;
5324 }
5325
5326 /// CheckObjCString - Checks that the argument to the builtin
5327 /// CFString constructor is correct
5328 /// Note: It might also make sense to do the UTF-16 conversion here (would
5329 /// simplify the backend).
5330 bool Sema::CheckObjCString(Expr *Arg) {
5331 Arg = Arg->IgnoreParenCasts();
5332 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
5333
5334 if (!Literal || !Literal->isAscii()) {
5335 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
5336 << Arg->getSourceRange();
5337 return true;
5338 }
5339
5340 if (Literal->containsNonAsciiOrNull()) {
5341 StringRef String = Literal->getString();
5342 unsigned NumBytes = String.size();
5343 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
5344 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5345 llvm::UTF16 *ToPtr = &ToBuf[0];
5346
5347 llvm::ConversionResult Result =
5348 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5349 ToPtr + NumBytes, llvm::strictConversion);
5350 // Check for conversion failure.
5351 if (Result != llvm::conversionOK)
5352 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
5353 << Arg->getSourceRange();
5354 }
5355 return false;
5356 }
5357
5358 /// CheckOSLogFormatStringArg - Checks that the format string argument to the
5359 /// os_log() and os_trace() functions is correct, and converts it to const char *.
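/// For illustration (names hypothetical): in a call written as
/// __builtin_os_log_format(buf, "%d", x), the "%d" literal is the argument
/// that reaches this check.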
5360 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 5361 Arg = Arg->IgnoreParenCasts(); 5362 auto *Literal = dyn_cast<StringLiteral>(Arg); 5363 if (!Literal) { 5364 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 5365 Literal = ObjcLiteral->getString(); 5366 } 5367 } 5368 5369 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 5370 return ExprError( 5371 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 5372 << Arg->getSourceRange()); 5373 } 5374 5375 ExprResult Result(Literal); 5376 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 5377 InitializedEntity Entity = 5378 InitializedEntity::InitializeParameter(Context, ResultTy, false); 5379 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 5380 return Result; 5381 } 5382 5383 /// Check that the user is calling the appropriate va_start builtin for the 5384 /// target and calling convention. 5385 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 5386 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 5387 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 5388 bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64; 5389 bool IsWindows = TT.isOSWindows(); 5390 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 5391 if (IsX64 || IsAArch64) { 5392 CallingConv CC = CC_C; 5393 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 5394 CC = FD->getType()->getAs<FunctionType>()->getCallConv(); 5395 if (IsMSVAStart) { 5396 // Don't allow this in System V ABI functions. 5397 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 5398 return S.Diag(Fn->getBeginLoc(), 5399 diag::err_ms_va_start_used_in_sysv_function); 5400 } else { 5401 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 5402 // On x64 Windows, don't allow this in System V ABI functions. 5403 // (Yes, that means there's no corresponding way to support variadic 5404 // System V ABI functions on Windows.) 5405 if ((IsWindows && CC == CC_X86_64SysV) || 5406 (!IsWindows && CC == CC_Win64)) 5407 return S.Diag(Fn->getBeginLoc(), 5408 diag::err_va_start_used_in_wrong_abi_function) 5409 << !IsWindows; 5410 } 5411 return false; 5412 } 5413 5414 if (IsMSVAStart) 5415 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5416 return false; 5417 } 5418 5419 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5420 ParmVarDecl **LastParam = nullptr) { 5421 // Determine whether the current function, block, or obj-c method is variadic 5422 // and get its parameter list. 5423 bool IsVariadic = false; 5424 ArrayRef<ParmVarDecl *> Params; 5425 DeclContext *Caller = S.CurContext; 5426 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5427 IsVariadic = Block->isVariadic(); 5428 Params = Block->parameters(); 5429 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5430 IsVariadic = FD->isVariadic(); 5431 Params = FD->parameters(); 5432 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5433 IsVariadic = MD->isVariadic(); 5434 // FIXME: This isn't correct for methods (results in bogus warning). 5435 Params = MD->parameters(); 5436 } else if (isa<CapturedDecl>(Caller)) { 5437 // We don't support va_start in a CapturedDecl. 5438 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5439 return true; 5440 } else { 5441 // This must be some other declcontext that parses exprs. 
5442 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5443 return true; 5444 } 5445 5446 if (!IsVariadic) { 5447 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5448 return true; 5449 } 5450 5451 if (LastParam) 5452 *LastParam = Params.empty() ? nullptr : Params.back(); 5453 5454 return false; 5455 } 5456 5457 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5458 /// for validity. Emit an error and return true on failure; return false 5459 /// on success. 5460 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5461 Expr *Fn = TheCall->getCallee(); 5462 5463 if (checkVAStartABI(*this, BuiltinID, Fn)) 5464 return true; 5465 5466 if (TheCall->getNumArgs() > 2) { 5467 Diag(TheCall->getArg(2)->getBeginLoc(), 5468 diag::err_typecheck_call_too_many_args) 5469 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5470 << Fn->getSourceRange() 5471 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5472 (*(TheCall->arg_end() - 1))->getEndLoc()); 5473 return true; 5474 } 5475 5476 if (TheCall->getNumArgs() < 2) { 5477 return Diag(TheCall->getEndLoc(), 5478 diag::err_typecheck_call_too_few_args_at_least) 5479 << 0 /*function call*/ << 2 << TheCall->getNumArgs(); 5480 } 5481 5482 // Type-check the first argument normally. 5483 if (checkBuiltinArgument(*this, TheCall, 0)) 5484 return true; 5485 5486 // Check that the current function is variadic, and get its last parameter. 5487 ParmVarDecl *LastParam; 5488 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5489 return true; 5490 5491 // Verify that the second argument to the builtin is the last argument of the 5492 // current function or method. 5493 bool SecondArgIsLastNamedArgument = false; 5494 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5495 5496 // These are valid if SecondArgIsLastNamedArgument is false after the next 5497 // block. 5498 QualType Type; 5499 SourceLocation ParamLoc; 5500 bool IsCRegister = false; 5501 5502 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 5503 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 5504 SecondArgIsLastNamedArgument = PV == LastParam; 5505 5506 Type = PV->getType(); 5507 ParamLoc = PV->getLocation(); 5508 IsCRegister = 5509 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 5510 } 5511 } 5512 5513 if (!SecondArgIsLastNamedArgument) 5514 Diag(TheCall->getArg(1)->getBeginLoc(), 5515 diag::warn_second_arg_of_va_start_not_last_named_param); 5516 else if (IsCRegister || Type->isReferenceType() || 5517 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 5518 // Promotable integers are UB, but enumerations need a bit of 5519 // extra checking to see what their promotable type actually is. 
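// (Illustrative case: for 'void f(short last, ...)', 'va_start(ap, last)'
//  is undefined because 'short' promotes to 'int'; an enumeration whose
//  promoted type is compatible with the enumeration itself is not flagged.)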
5520 if (!Type->isPromotableIntegerType()) 5521 return false; 5522 if (!Type->isEnumeralType()) 5523 return true; 5524 const EnumDecl *ED = Type->getAs<EnumType>()->getDecl(); 5525 return !(ED && 5526 Context.typesAreCompatible(ED->getPromotionType(), Type)); 5527 }()) { 5528 unsigned Reason = 0; 5529 if (Type->isReferenceType()) Reason = 1; 5530 else if (IsCRegister) Reason = 2; 5531 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 5532 Diag(ParamLoc, diag::note_parameter_type) << Type; 5533 } 5534 5535 TheCall->setType(Context.VoidTy); 5536 return false; 5537 } 5538 5539 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 5540 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 5541 // const char *named_addr); 5542 5543 Expr *Func = Call->getCallee(); 5544 5545 if (Call->getNumArgs() < 3) 5546 return Diag(Call->getEndLoc(), 5547 diag::err_typecheck_call_too_few_args_at_least) 5548 << 0 /*function call*/ << 3 << Call->getNumArgs(); 5549 5550 // Type-check the first argument normally. 5551 if (checkBuiltinArgument(*this, Call, 0)) 5552 return true; 5553 5554 // Check that the current function is variadic. 5555 if (checkVAStartIsInVariadicFunction(*this, Func)) 5556 return true; 5557 5558 // __va_start on Windows does not validate the parameter qualifiers 5559 5560 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 5561 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 5562 5563 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 5564 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 5565 5566 const QualType &ConstCharPtrTy = 5567 Context.getPointerType(Context.CharTy.withConst()); 5568 if (!Arg1Ty->isPointerType() || 5569 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 5570 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5571 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 5572 << 0 /* qualifier difference */ 5573 << 3 /* parameter mismatch */ 5574 << 2 << Arg1->getType() << ConstCharPtrTy; 5575 5576 const QualType SizeTy = Context.getSizeType(); 5577 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 5578 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5579 << Arg2->getType() << SizeTy << 1 /* different class */ 5580 << 0 /* qualifier difference */ 5581 << 3 /* parameter mismatch */ 5582 << 3 << Arg2->getType() << SizeTy; 5583 5584 return false; 5585 } 5586 5587 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 5588 /// friends. This is declared to take (...), so we have to check everything. 5589 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 5590 if (TheCall->getNumArgs() < 2) 5591 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5592 << 0 << 2 << TheCall->getNumArgs() /*function call*/; 5593 if (TheCall->getNumArgs() > 2) 5594 return Diag(TheCall->getArg(2)->getBeginLoc(), 5595 diag::err_typecheck_call_too_many_args) 5596 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5597 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5598 (*(TheCall->arg_end() - 1))->getEndLoc()); 5599 5600 ExprResult OrigArg0 = TheCall->getArg(0); 5601 ExprResult OrigArg1 = TheCall->getArg(1); 5602 5603 // Do standard promotions between the two arguments, returning their common 5604 // type. 
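// (e.g. for __builtin_isgreater(f, d) with 'float f' and 'double d', the
//  common type computed here is 'double'; names are illustrative.)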
5605 QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
5606 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
5607 return true;
5608
5609 // Make sure any conversions are pushed back into the call; this is
5610 // type safe since unordered compare builtins are declared as "_Bool
5611 // foo(...)".
5612 TheCall->setArg(0, OrigArg0.get());
5613 TheCall->setArg(1, OrigArg1.get());
5614
5615 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
5616 return false;
5617
5618 // If the common type isn't a real floating type, then the arguments were
5619 // invalid for this operation.
5620 if (Res.isNull() || !Res->isRealFloatingType())
5621 return Diag(OrigArg0.get()->getBeginLoc(),
5622 diag::err_typecheck_call_invalid_ordered_compare)
5623 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
5624 << SourceRange(OrigArg0.get()->getBeginLoc(),
5625 OrigArg1.get()->getEndLoc());
5626
5627 return false;
5628 }
5629
5630 /// SemaBuiltinFPClassification - Handle functions like
5631 /// __builtin_isnan and friends. This is declared to take (...), so we have
5632 /// to check everything. We expect the last argument to be a floating point
5633 /// value.
5634 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
5635 if (TheCall->getNumArgs() < NumArgs)
5636 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
5637 << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
5638 if (TheCall->getNumArgs() > NumArgs)
5639 return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
5640 diag::err_typecheck_call_too_many_args)
5641 << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
5642 << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
5643 (*(TheCall->arg_end() - 1))->getEndLoc());
5644
5645 Expr *OrigArg = TheCall->getArg(NumArgs-1);
5646
5647 if (OrigArg->isTypeDependent())
5648 return false;
5649
5650 // This operation requires a non-_Complex floating-point number.
5651 if (!OrigArg->getType()->isRealFloatingType())
5652 return Diag(OrigArg->getBeginLoc(),
5653 diag::err_typecheck_call_invalid_unary_fp)
5654 << OrigArg->getType() << OrigArg->getSourceRange();
5655
5656 // If this is an implicit conversion from float -> float, double, or
5657 // long double, remove it.
5658 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
5659 // Only remove standard FloatCasts, leaving other casts in place
5660 if (Cast->getCastKind() == CK_FloatingCast) {
5661 Expr *CastArg = Cast->getSubExpr();
5662 if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
5663 assert(
5664 (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
5665 Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) ||
5666 Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) &&
5667 "promotion from float to either float, double, or long double is "
5668 "the only expected cast here");
5669 Cast->setSubExpr(nullptr);
5670 TheCall->setArg(NumArgs-1, CastArg);
5671 }
5672 }
5673 }
5674
5675 return false;
5676 }
5677
5678 // Customized Sema Checking for VSX builtins that have the following signature:
5679 // vector [...] builtinName(vector [...], vector [...], const int);
5680 // which take the same type of vectors (any legal vector type) for the first
5681 // two arguments and a compile-time constant for the third argument.
5682 // Example builtins are : 5683 // vector double vec_xxpermdi(vector double, vector double, int); 5684 // vector short vec_xxsldwi(vector short, vector short, int); 5685 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 5686 unsigned ExpectedNumArgs = 3; 5687 if (TheCall->getNumArgs() < ExpectedNumArgs) 5688 return Diag(TheCall->getEndLoc(), 5689 diag::err_typecheck_call_too_few_args_at_least) 5690 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5691 << TheCall->getSourceRange(); 5692 5693 if (TheCall->getNumArgs() > ExpectedNumArgs) 5694 return Diag(TheCall->getEndLoc(), 5695 diag::err_typecheck_call_too_many_args_at_most) 5696 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5697 << TheCall->getSourceRange(); 5698 5699 // Check the third argument is a compile time constant 5700 llvm::APSInt Value; 5701 if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context)) 5702 return Diag(TheCall->getBeginLoc(), 5703 diag::err_vsx_builtin_nonconstant_argument) 5704 << 3 /* argument index */ << TheCall->getDirectCallee() 5705 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5706 TheCall->getArg(2)->getEndLoc()); 5707 5708 QualType Arg1Ty = TheCall->getArg(0)->getType(); 5709 QualType Arg2Ty = TheCall->getArg(1)->getType(); 5710 5711 // Check the type of argument 1 and argument 2 are vectors. 5712 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 5713 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 5714 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 5715 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 5716 << TheCall->getDirectCallee() 5717 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5718 TheCall->getArg(1)->getEndLoc()); 5719 } 5720 5721 // Check the first two arguments are the same type. 5722 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 5723 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 5724 << TheCall->getDirectCallee() 5725 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5726 TheCall->getArg(1)->getEndLoc()); 5727 } 5728 5729 // When default clang type checking is turned off and the customized type 5730 // checking is used, the returning type of the function must be explicitly 5731 // set. Otherwise it is _Bool by default. 5732 TheCall->setType(Arg1Ty); 5733 5734 return false; 5735 } 5736 5737 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 5738 // This is declared to take (...), so we have to check everything. 
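// For illustration, with 4-element vectors 'a' and 'b' (hypothetical names),
//   __builtin_shufflevector(a, b, 0, 1, 4, 5)
// yields a 4-element result taking lanes 0-1 from 'a' and lanes 0-1 from 'b';
// indices in [numElements, 2*numElements) select from the second vector.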
5739 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 5740 if (TheCall->getNumArgs() < 2) 5741 return ExprError(Diag(TheCall->getEndLoc(), 5742 diag::err_typecheck_call_too_few_args_at_least) 5743 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5744 << TheCall->getSourceRange()); 5745 5746 // Determine which of the following types of shufflevector we're checking: 5747 // 1) unary, vector mask: (lhs, mask) 5748 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 5749 QualType resType = TheCall->getArg(0)->getType(); 5750 unsigned numElements = 0; 5751 5752 if (!TheCall->getArg(0)->isTypeDependent() && 5753 !TheCall->getArg(1)->isTypeDependent()) { 5754 QualType LHSType = TheCall->getArg(0)->getType(); 5755 QualType RHSType = TheCall->getArg(1)->getType(); 5756 5757 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 5758 return ExprError( 5759 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 5760 << TheCall->getDirectCallee() 5761 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5762 TheCall->getArg(1)->getEndLoc())); 5763 5764 numElements = LHSType->getAs<VectorType>()->getNumElements(); 5765 unsigned numResElements = TheCall->getNumArgs() - 2; 5766 5767 // Check to see if we have a call with 2 vector arguments, the unary shuffle 5768 // with mask. If so, verify that RHS is an integer vector type with the 5769 // same number of elts as lhs. 5770 if (TheCall->getNumArgs() == 2) { 5771 if (!RHSType->hasIntegerRepresentation() || 5772 RHSType->getAs<VectorType>()->getNumElements() != numElements) 5773 return ExprError(Diag(TheCall->getBeginLoc(), 5774 diag::err_vec_builtin_incompatible_vector) 5775 << TheCall->getDirectCallee() 5776 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 5777 TheCall->getArg(1)->getEndLoc())); 5778 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 5779 return ExprError(Diag(TheCall->getBeginLoc(), 5780 diag::err_vec_builtin_incompatible_vector) 5781 << TheCall->getDirectCallee() 5782 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5783 TheCall->getArg(1)->getEndLoc())); 5784 } else if (numElements != numResElements) { 5785 QualType eltType = LHSType->getAs<VectorType>()->getElementType(); 5786 resType = Context.getVectorType(eltType, numResElements, 5787 VectorType::GenericVector); 5788 } 5789 } 5790 5791 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 5792 if (TheCall->getArg(i)->isTypeDependent() || 5793 TheCall->getArg(i)->isValueDependent()) 5794 continue; 5795 5796 llvm::APSInt Result(32); 5797 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) 5798 return ExprError(Diag(TheCall->getBeginLoc(), 5799 diag::err_shufflevector_nonconstant_argument) 5800 << TheCall->getArg(i)->getSourceRange()); 5801 5802 // Allow -1 which will be translated to undef in the IR. 
5803 if (Result.isSigned() && Result.isAllOnesValue()) 5804 continue; 5805 5806 if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2) 5807 return ExprError(Diag(TheCall->getBeginLoc(), 5808 diag::err_shufflevector_argument_too_large) 5809 << TheCall->getArg(i)->getSourceRange()); 5810 } 5811 5812 SmallVector<Expr*, 32> exprs; 5813 5814 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 5815 exprs.push_back(TheCall->getArg(i)); 5816 TheCall->setArg(i, nullptr); 5817 } 5818 5819 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 5820 TheCall->getCallee()->getBeginLoc(), 5821 TheCall->getRParenLoc()); 5822 } 5823 5824 /// SemaConvertVectorExpr - Handle __builtin_convertvector 5825 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 5826 SourceLocation BuiltinLoc, 5827 SourceLocation RParenLoc) { 5828 ExprValueKind VK = VK_RValue; 5829 ExprObjectKind OK = OK_Ordinary; 5830 QualType DstTy = TInfo->getType(); 5831 QualType SrcTy = E->getType(); 5832 5833 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 5834 return ExprError(Diag(BuiltinLoc, 5835 diag::err_convertvector_non_vector) 5836 << E->getSourceRange()); 5837 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 5838 return ExprError(Diag(BuiltinLoc, 5839 diag::err_convertvector_non_vector_type)); 5840 5841 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 5842 unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements(); 5843 unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements(); 5844 if (SrcElts != DstElts) 5845 return ExprError(Diag(BuiltinLoc, 5846 diag::err_convertvector_incompatible_vector) 5847 << E->getSourceRange()); 5848 } 5849 5850 return new (Context) 5851 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 5852 } 5853 5854 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 5855 // This is declared to take (const void*, ...) and can take two 5856 // optional constant int args. 5857 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 5858 unsigned NumArgs = TheCall->getNumArgs(); 5859 5860 if (NumArgs > 3) 5861 return Diag(TheCall->getEndLoc(), 5862 diag::err_typecheck_call_too_many_args_at_most) 5863 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5864 5865 // Argument 0 is checked for us and the remaining arguments must be 5866 // constant integers. 5867 for (unsigned i = 1; i != NumArgs; ++i) 5868 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 5869 return true; 5870 5871 return false; 5872 } 5873 5874 /// SemaBuiltinAssume - Handle __assume (MS Extension). 5875 // __assume does not evaluate its arguments, and should warn if its argument 5876 // has side effects. 5877 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 5878 Expr *Arg = TheCall->getArg(0); 5879 if (Arg->isInstantiationDependent()) return false; 5880 5881 if (Arg->HasSideEffects(Context)) 5882 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 5883 << Arg->getSourceRange() 5884 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 5885 5886 return false; 5887 } 5888 5889 /// Handle __builtin_alloca_with_align. This is declared 5890 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 5891 /// than 8. 5892 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 5893 // The alignment must be a constant integer. 5894 Expr *Arg = TheCall->getArg(1); 5895 5896 // We can't check the value of a dependent argument. 
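// (Illustrative: __builtin_alloca_with_align(n, 32) requests 32-bit, i.e.
//  4-byte, alignment; the checks below require a constant power of two that
//  is at least CHAR_BIT and no larger than INT32_MAX.)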
5897 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 5898 if (const auto *UE = 5899 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 5900 if (UE->getKind() == UETT_AlignOf || 5901 UE->getKind() == UETT_PreferredAlignOf) 5902 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 5903 << Arg->getSourceRange(); 5904 5905 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 5906 5907 if (!Result.isPowerOf2()) 5908 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 5909 << Arg->getSourceRange(); 5910 5911 if (Result < Context.getCharWidth()) 5912 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 5913 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 5914 5915 if (Result > std::numeric_limits<int32_t>::max()) 5916 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 5917 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 5918 } 5919 5920 return false; 5921 } 5922 5923 /// Handle __builtin_assume_aligned. This is declared 5924 /// as (const void*, size_t, ...) and can take one optional constant int arg. 5925 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 5926 unsigned NumArgs = TheCall->getNumArgs(); 5927 5928 if (NumArgs > 3) 5929 return Diag(TheCall->getEndLoc(), 5930 diag::err_typecheck_call_too_many_args_at_most) 5931 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5932 5933 // The alignment must be a constant integer. 5934 Expr *Arg = TheCall->getArg(1); 5935 5936 // We can't check the value of a dependent argument. 5937 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 5938 llvm::APSInt Result; 5939 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 5940 return true; 5941 5942 if (!Result.isPowerOf2()) 5943 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 5944 << Arg->getSourceRange(); 5945 } 5946 5947 if (NumArgs > 2) { 5948 ExprResult Arg(TheCall->getArg(2)); 5949 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 5950 Context.getSizeType(), false); 5951 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5952 if (Arg.isInvalid()) return true; 5953 TheCall->setArg(2, Arg.get()); 5954 } 5955 5956 return false; 5957 } 5958 5959 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 5960 unsigned BuiltinID = 5961 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 5962 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 5963 5964 unsigned NumArgs = TheCall->getNumArgs(); 5965 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 5966 if (NumArgs < NumRequiredArgs) { 5967 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5968 << 0 /* function call */ << NumRequiredArgs << NumArgs 5969 << TheCall->getSourceRange(); 5970 } 5971 if (NumArgs >= NumRequiredArgs + 0x100) { 5972 return Diag(TheCall->getEndLoc(), 5973 diag::err_typecheck_call_too_many_args_at_most) 5974 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 5975 << TheCall->getSourceRange(); 5976 } 5977 unsigned i = 0; 5978 5979 // For formatting call, check buffer arg. 5980 if (!IsSizeCall) { 5981 ExprResult Arg(TheCall->getArg(i)); 5982 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5983 Context, Context.VoidPtrTy, false); 5984 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5985 if (Arg.isInvalid()) 5986 return true; 5987 TheCall->setArg(i, Arg.get()); 5988 i++; 5989 } 5990 5991 // Check string literal arg. 
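// (Illustrative call forms handled here, with hypothetical arguments:
//    __builtin_os_log_format_buffer_size("%d", x);  // size query, 1 fixed arg
//    __builtin_os_log_format(buf, "%d", x);         // formatting, 2 fixed args
//  in either form, 'i' now indexes the format-string argument.)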
5992 unsigned FormatIdx = i; 5993 { 5994 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 5995 if (Arg.isInvalid()) 5996 return true; 5997 TheCall->setArg(i, Arg.get()); 5998 i++; 5999 } 6000 6001 // Make sure variadic args are scalar. 6002 unsigned FirstDataArg = i; 6003 while (i < NumArgs) { 6004 ExprResult Arg = DefaultVariadicArgumentPromotion( 6005 TheCall->getArg(i), VariadicFunction, nullptr); 6006 if (Arg.isInvalid()) 6007 return true; 6008 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6009 if (ArgSize.getQuantity() >= 0x100) { 6010 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6011 << i << (int)ArgSize.getQuantity() << 0xff 6012 << TheCall->getSourceRange(); 6013 } 6014 TheCall->setArg(i, Arg.get()); 6015 i++; 6016 } 6017 6018 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6019 // call to avoid duplicate diagnostics. 6020 if (!IsSizeCall) { 6021 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6022 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6023 bool Success = CheckFormatArguments( 6024 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6025 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6026 CheckedVarArgs); 6027 if (!Success) 6028 return true; 6029 } 6030 6031 if (IsSizeCall) { 6032 TheCall->setType(Context.getSizeType()); 6033 } else { 6034 TheCall->setType(Context.VoidPtrTy); 6035 } 6036 return false; 6037 } 6038 6039 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6040 /// TheCall is a constant expression. 6041 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6042 llvm::APSInt &Result) { 6043 Expr *Arg = TheCall->getArg(ArgNum); 6044 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6045 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6046 6047 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6048 6049 if (!Arg->isIntegerConstantExpr(Result, Context)) 6050 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6051 << FDecl->getDeclName() << Arg->getSourceRange(); 6052 6053 return false; 6054 } 6055 6056 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6057 /// TheCall is a constant expression in the range [Low, High]. 6058 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6059 int Low, int High, bool RangeIsError) { 6060 llvm::APSInt Result; 6061 6062 // We can't check the value of a dependent argument. 6063 Expr *Arg = TheCall->getArg(ArgNum); 6064 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6065 return false; 6066 6067 // Check constant-ness first. 6068 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6069 return true; 6070 6071 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 6072 if (RangeIsError) 6073 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 6074 << Result.toString(10) << Low << High << Arg->getSourceRange(); 6075 else 6076 // Defer the warning until we know if the code will be emitted so that 6077 // dead code can ignore this. 
6078 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
6079 PDiag(diag::warn_argument_invalid_range)
6080 << Result.toString(10) << Low << High
6081 << Arg->getSourceRange());
6082 }
6083
6084 return false;
6085 }
6086
6087 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
6088 /// TheCall is a constant expression that is a multiple of Num.
6089 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
6090 unsigned Num) {
6091 llvm::APSInt Result;
6092
6093 // We can't check the value of a dependent argument.
6094 Expr *Arg = TheCall->getArg(ArgNum);
6095 if (Arg->isTypeDependent() || Arg->isValueDependent())
6096 return false;
6097
6098 // Check constant-ness first.
6099 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6100 return true;
6101
6102 if (Result.getSExtValue() % Num != 0)
6103 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
6104 << Num << Arg->getSourceRange();
6105
6106 return false;
6107 }
6108
6109 /// SemaBuiltinARMMemoryTaggingCall - Handle calls to memory tagging extensions
6110 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
6111 if (BuiltinID == AArch64::BI__builtin_arm_irg) {
6112 if (checkArgCount(*this, TheCall, 2))
6113 return true;
6114 Expr *Arg0 = TheCall->getArg(0);
6115 Expr *Arg1 = TheCall->getArg(1);
6116
6117 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6118 if (FirstArg.isInvalid())
6119 return true;
6120 QualType FirstArgType = FirstArg.get()->getType();
6121 if (!FirstArgType->isAnyPointerType())
6122 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6123 << "first" << FirstArgType << Arg0->getSourceRange();
6124 TheCall->setArg(0, FirstArg.get());
6125
6126 ExprResult SecArg = DefaultLvalueConversion(Arg1);
6127 if (SecArg.isInvalid())
6128 return true;
6129 QualType SecArgType = SecArg.get()->getType();
6130 if (!SecArgType->isIntegerType())
6131 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
6132 << "second" << SecArgType << Arg1->getSourceRange();
6133
6134 // Derive the return type from the pointer argument.
6135 TheCall->setType(FirstArgType);
6136 return false;
6137 }
6138
6139 if (BuiltinID == AArch64::BI__builtin_arm_addg) {
6140 if (checkArgCount(*this, TheCall, 2))
6141 return true;
6142
6143 Expr *Arg0 = TheCall->getArg(0);
6144 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6145 if (FirstArg.isInvalid())
6146 return true;
6147 QualType FirstArgType = FirstArg.get()->getType();
6148 if (!FirstArgType->isAnyPointerType())
6149 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6150 << "first" << FirstArgType << Arg0->getSourceRange();
6151 TheCall->setArg(0, FirstArg.get());
6152
6153 // Derive the return type from the pointer argument.
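// (Illustrative: __builtin_arm_addg(p, 10) yields a value of p's pointer
//  type; the immediate 10 must be a constant in [0, 15], verified below.)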
6154 TheCall->setType(FirstArgType); 6155 6156 // Second arg must be an constant in range [0,15] 6157 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6158 } 6159 6160 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 6161 if (checkArgCount(*this, TheCall, 2)) 6162 return true; 6163 Expr *Arg0 = TheCall->getArg(0); 6164 Expr *Arg1 = TheCall->getArg(1); 6165 6166 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6167 if (FirstArg.isInvalid()) 6168 return true; 6169 QualType FirstArgType = FirstArg.get()->getType(); 6170 if (!FirstArgType->isAnyPointerType()) 6171 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6172 << "first" << FirstArgType << Arg0->getSourceRange(); 6173 6174 QualType SecArgType = Arg1->getType(); 6175 if (!SecArgType->isIntegerType()) 6176 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6177 << "second" << SecArgType << Arg1->getSourceRange(); 6178 TheCall->setType(Context.IntTy); 6179 return false; 6180 } 6181 6182 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 6183 BuiltinID == AArch64::BI__builtin_arm_stg) { 6184 if (checkArgCount(*this, TheCall, 1)) 6185 return true; 6186 Expr *Arg0 = TheCall->getArg(0); 6187 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6188 if (FirstArg.isInvalid()) 6189 return true; 6190 6191 QualType FirstArgType = FirstArg.get()->getType(); 6192 if (!FirstArgType->isAnyPointerType()) 6193 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6194 << "first" << FirstArgType << Arg0->getSourceRange(); 6195 TheCall->setArg(0, FirstArg.get()); 6196 6197 // Derive the return type from the pointer argument. 6198 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6199 TheCall->setType(FirstArgType); 6200 return false; 6201 } 6202 6203 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6204 Expr *ArgA = TheCall->getArg(0); 6205 Expr *ArgB = TheCall->getArg(1); 6206 6207 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6208 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6209 6210 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6211 return true; 6212 6213 QualType ArgTypeA = ArgExprA.get()->getType(); 6214 QualType ArgTypeB = ArgExprB.get()->getType(); 6215 6216 auto isNull = [&] (Expr *E) -> bool { 6217 return E->isNullPointerConstant( 6218 Context, Expr::NPC_ValueDependentIsNotNull); }; 6219 6220 // argument should be either a pointer or null 6221 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6222 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6223 << "first" << ArgTypeA << ArgA->getSourceRange(); 6224 6225 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6226 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6227 << "second" << ArgTypeB << ArgB->getSourceRange(); 6228 6229 // Ensure Pointee types are compatible 6230 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6231 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6232 QualType pointeeA = ArgTypeA->getPointeeType(); 6233 QualType pointeeB = ArgTypeB->getPointeeType(); 6234 if (!Context.typesAreCompatible( 6235 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6236 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6237 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6238 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6239 << ArgB->getSourceRange(); 6240 } 6241 } 6242 6243 // at least one argument should be pointer type 6244 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6245 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6246 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6247 6248 if (isNull(ArgA)) // adopt type of the other pointer 6249 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6250 6251 if (isNull(ArgB)) 6252 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6253 6254 TheCall->setArg(0, ArgExprA.get()); 6255 TheCall->setArg(1, ArgExprB.get()); 6256 TheCall->setType(Context.LongLongTy); 6257 return false; 6258 } 6259 assert(false && "Unhandled ARM MTE intrinsic"); 6260 return true; 6261 } 6262 6263 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6264 /// TheCall is an ARM/AArch64 special register string literal. 6265 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 6266 int ArgNum, unsigned ExpectedFieldNum, 6267 bool AllowName) { 6268 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 6269 BuiltinID == ARM::BI__builtin_arm_wsr64 || 6270 BuiltinID == ARM::BI__builtin_arm_rsr || 6271 BuiltinID == ARM::BI__builtin_arm_rsrp || 6272 BuiltinID == ARM::BI__builtin_arm_wsr || 6273 BuiltinID == ARM::BI__builtin_arm_wsrp; 6274 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 6275 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 6276 BuiltinID == AArch64::BI__builtin_arm_rsr || 6277 BuiltinID == AArch64::BI__builtin_arm_rsrp || 6278 BuiltinID == AArch64::BI__builtin_arm_wsr || 6279 BuiltinID == AArch64::BI__builtin_arm_wsrp; 6280 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 6281 6282 // We can't check the value of a dependent argument. 6283 Expr *Arg = TheCall->getArg(ArgNum); 6284 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6285 return false; 6286 6287 // Check if the argument is a string literal. 6288 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 6289 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 6290 << Arg->getSourceRange(); 6291 6292 // Check the type of special register given. 6293 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 6294 SmallVector<StringRef, 6> Fields; 6295 Reg.split(Fields, ":"); 6296 6297 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 6298 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6299 << Arg->getSourceRange(); 6300 6301 // If the string is the name of a register then we cannot check that it is 6302 // valid here but if the string is of one the forms described in ACLE then we 6303 // can check that the supplied fields are integers and within the valid 6304 // ranges. 6305 if (Fields.size() > 1) { 6306 bool FiveFields = Fields.size() == 5; 6307 6308 bool ValidString = true; 6309 if (IsARMBuiltin) { 6310 ValidString &= Fields[0].startswith_lower("cp") || 6311 Fields[0].startswith_lower("p"); 6312 if (ValidString) 6313 Fields[0] = 6314 Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1); 6315 6316 ValidString &= Fields[2].startswith_lower("c"); 6317 if (ValidString) 6318 Fields[2] = Fields[2].drop_front(1); 6319 6320 if (FiveFields) { 6321 ValidString &= Fields[3].startswith_lower("c"); 6322 if (ValidString) 6323 Fields[3] = Fields[3].drop_front(1); 6324 } 6325 } 6326 6327 SmallVector<int, 5> Ranges; 6328 if (FiveFields) 6329 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 6330 else 6331 Ranges.append({15, 7, 15}); 6332 6333 for (unsigned i=0; i<Fields.size(); ++i) { 6334 int IntField; 6335 ValidString &= !Fields[i].getAsInteger(10, IntField); 6336 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 6337 } 6338 6339 if (!ValidString) 6340 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6341 << Arg->getSourceRange(); 6342 } else if (IsAArch64Builtin && Fields.size() == 1) { 6343 // If the register name is one of those that appear in the condition below 6344 // and the special register builtin being used is one of the write builtins, 6345 // then we require that the argument provided for writing to the register 6346 // is an integer constant expression. This is because it will be lowered to 6347 // an MSR (immediate) instruction, so we need to know the immediate at 6348 // compile time. 6349 if (TheCall->getNumArgs() != 2) 6350 return false; 6351 6352 std::string RegLower = Reg.lower(); 6353 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 6354 RegLower != "pan" && RegLower != "uao") 6355 return false; 6356 6357 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6358 } 6359 6360 return false; 6361 } 6362 6363 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 6364 /// This checks that the target supports __builtin_longjmp and 6365 /// that val is a constant 1. 6366 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 6367 if (!Context.getTargetInfo().hasSjLjLowering()) 6368 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 6369 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6370 6371 Expr *Arg = TheCall->getArg(1); 6372 llvm::APSInt Result; 6373 6374 // TODO: This is less than ideal. Overload this to take a value. 6375 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6376 return true; 6377 6378 if (Result != 1) 6379 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 6380 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 6381 6382 return false; 6383 } 6384 6385 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 6386 /// This checks that the target supports __builtin_setjmp. 6387 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 6388 if (!Context.getTargetInfo().hasSjLjLowering()) 6389 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 6390 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6391 return false; 6392 } 6393 6394 namespace { 6395 6396 class UncoveredArgHandler { 6397 enum { Unknown = -1, AllCovered = -2 }; 6398 6399 signed FirstUncoveredArg = Unknown; 6400 SmallVector<const Expr *, 4> DiagnosticExprs; 6401 6402 public: 6403 UncoveredArgHandler() = default; 6404 6405 bool hasUncoveredArg() const { 6406 return (FirstUncoveredArg >= 0); 6407 } 6408 6409 unsigned getUncoveredArg() const { 6410 assert(hasUncoveredArg() && "no uncovered argument"); 6411 return FirstUncoveredArg; 6412 } 6413 6414 void setAllCovered() { 6415 // A string has been found with all arguments covered, so clear out 6416 // the diagnostics. 6417 DiagnosticExprs.clear(); 6418 FirstUncoveredArg = AllCovered; 6419 } 6420 6421 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 6422 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 6423 6424 // Don't update if a previous string covers all arguments. 
6425 if (FirstUncoveredArg == AllCovered) 6426 return; 6427 6428 // UncoveredArgHandler tracks the highest uncovered argument index 6429 // and with it all the strings that match this index. 6430 if (NewFirstUncoveredArg == FirstUncoveredArg) 6431 DiagnosticExprs.push_back(StrExpr); 6432 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 6433 DiagnosticExprs.clear(); 6434 DiagnosticExprs.push_back(StrExpr); 6435 FirstUncoveredArg = NewFirstUncoveredArg; 6436 } 6437 } 6438 6439 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 6440 }; 6441 6442 enum StringLiteralCheckType { 6443 SLCT_NotALiteral, 6444 SLCT_UncheckedLiteral, 6445 SLCT_CheckedLiteral 6446 }; 6447 6448 } // namespace 6449 6450 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 6451 BinaryOperatorKind BinOpKind, 6452 bool AddendIsRight) { 6453 unsigned BitWidth = Offset.getBitWidth(); 6454 unsigned AddendBitWidth = Addend.getBitWidth(); 6455 // There might be negative interim results. 6456 if (Addend.isUnsigned()) { 6457 Addend = Addend.zext(++AddendBitWidth); 6458 Addend.setIsSigned(true); 6459 } 6460 // Adjust the bit width of the APSInts. 6461 if (AddendBitWidth > BitWidth) { 6462 Offset = Offset.sext(AddendBitWidth); 6463 BitWidth = AddendBitWidth; 6464 } else if (BitWidth > AddendBitWidth) { 6465 Addend = Addend.sext(BitWidth); 6466 } 6467 6468 bool Ov = false; 6469 llvm::APSInt ResOffset = Offset; 6470 if (BinOpKind == BO_Add) 6471 ResOffset = Offset.sadd_ov(Addend, Ov); 6472 else { 6473 assert(AddendIsRight && BinOpKind == BO_Sub && 6474 "operator must be add or sub with addend on the right"); 6475 ResOffset = Offset.ssub_ov(Addend, Ov); 6476 } 6477 6478 // We add an offset to a pointer here so we should support an offset as big as 6479 // possible. 6480 if (Ov) { 6481 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 6482 "index (intermediate) result too big"); 6483 Offset = Offset.sext(2 * BitWidth); 6484 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 6485 return; 6486 } 6487 6488 Offset = ResOffset; 6489 } 6490 6491 namespace { 6492 6493 // This is a wrapper class around StringLiteral to support offsetted string 6494 // literals as format strings. It takes the offset into account when returning 6495 // the string and its length or the source locations to display notes correctly. 
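// For example, when the format argument is written as "ab%d" + 1, the wrapper
// exposes "b%d" while still mapping diagnostic locations back into the
// original literal.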
6496 class FormatStringLiteral { 6497 const StringLiteral *FExpr; 6498 int64_t Offset; 6499 6500 public: 6501 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 6502 : FExpr(fexpr), Offset(Offset) {} 6503 6504 StringRef getString() const { 6505 return FExpr->getString().drop_front(Offset); 6506 } 6507 6508 unsigned getByteLength() const { 6509 return FExpr->getByteLength() - getCharByteWidth() * Offset; 6510 } 6511 6512 unsigned getLength() const { return FExpr->getLength() - Offset; } 6513 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 6514 6515 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 6516 6517 QualType getType() const { return FExpr->getType(); } 6518 6519 bool isAscii() const { return FExpr->isAscii(); } 6520 bool isWide() const { return FExpr->isWide(); } 6521 bool isUTF8() const { return FExpr->isUTF8(); } 6522 bool isUTF16() const { return FExpr->isUTF16(); } 6523 bool isUTF32() const { return FExpr->isUTF32(); } 6524 bool isPascal() const { return FExpr->isPascal(); } 6525 6526 SourceLocation getLocationOfByte( 6527 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 6528 const TargetInfo &Target, unsigned *StartToken = nullptr, 6529 unsigned *StartTokenByteOffset = nullptr) const { 6530 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 6531 StartToken, StartTokenByteOffset); 6532 } 6533 6534 SourceLocation getBeginLoc() const LLVM_READONLY { 6535 return FExpr->getBeginLoc().getLocWithOffset(Offset); 6536 } 6537 6538 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 6539 }; 6540 6541 } // namespace 6542 6543 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 6544 const Expr *OrigFormatExpr, 6545 ArrayRef<const Expr *> Args, 6546 bool HasVAListArg, unsigned format_idx, 6547 unsigned firstDataArg, 6548 Sema::FormatStringType Type, 6549 bool inFunctionCall, 6550 Sema::VariadicCallType CallType, 6551 llvm::SmallBitVector &CheckedVarArgs, 6552 UncoveredArgHandler &UncoveredArg); 6553 6554 // Determine if an expression is a string literal or constant string. 6555 // If this function returns false on the arguments to a function expecting a 6556 // format string, we will usually need to emit a warning. 6557 // True string literals are then checked by CheckFormatString. 6558 static StringLiteralCheckType 6559 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 6560 bool HasVAListArg, unsigned format_idx, 6561 unsigned firstDataArg, Sema::FormatStringType Type, 6562 Sema::VariadicCallType CallType, bool InFunctionCall, 6563 llvm::SmallBitVector &CheckedVarArgs, 6564 UncoveredArgHandler &UncoveredArg, 6565 llvm::APSInt Offset) { 6566 tryAgain: 6567 assert(Offset.isSigned() && "invalid offset"); 6568 6569 if (E->isTypeDependent() || E->isValueDependent()) 6570 return SLCT_NotALiteral; 6571 6572 E = E->IgnoreParenCasts(); 6573 6574 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 6575 // Technically -Wformat-nonliteral does not warn about this case. 6576 // The behavior of printf and friends in this case is implementation 6577 // dependent. Ideally if the format string cannot be null then 6578 // it should have a 'nonnull' attribute in the function prototype. 
6579 return SLCT_UncheckedLiteral; 6580 6581 switch (E->getStmtClass()) { 6582 case Stmt::BinaryConditionalOperatorClass: 6583 case Stmt::ConditionalOperatorClass: { 6584 // The expression is a literal if both sub-expressions were, and it was 6585 // completely checked only if both sub-expressions were checked. 6586 const AbstractConditionalOperator *C = 6587 cast<AbstractConditionalOperator>(E); 6588 6589 // Determine whether it is necessary to check both sub-expressions, for 6590 // example, because the condition expression is a constant that can be 6591 // evaluated at compile time. 6592 bool CheckLeft = true, CheckRight = true; 6593 6594 bool Cond; 6595 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext())) { 6596 if (Cond) 6597 CheckRight = false; 6598 else 6599 CheckLeft = false; 6600 } 6601 6602 // We need to maintain the offsets for the right and the left hand side 6603 // separately to check if every possible indexed expression is a valid 6604 // string literal. They might have different offsets for different string 6605 // literals in the end. 6606 StringLiteralCheckType Left; 6607 if (!CheckLeft) 6608 Left = SLCT_UncheckedLiteral; 6609 else { 6610 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 6611 HasVAListArg, format_idx, firstDataArg, 6612 Type, CallType, InFunctionCall, 6613 CheckedVarArgs, UncoveredArg, Offset); 6614 if (Left == SLCT_NotALiteral || !CheckRight) { 6615 return Left; 6616 } 6617 } 6618 6619 StringLiteralCheckType Right = 6620 checkFormatStringExpr(S, C->getFalseExpr(), Args, 6621 HasVAListArg, format_idx, firstDataArg, 6622 Type, CallType, InFunctionCall, CheckedVarArgs, 6623 UncoveredArg, Offset); 6624 6625 return (CheckLeft && Left < Right) ? Left : Right; 6626 } 6627 6628 case Stmt::ImplicitCastExprClass: 6629 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 6630 goto tryAgain; 6631 6632 case Stmt::OpaqueValueExprClass: 6633 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 6634 E = src; 6635 goto tryAgain; 6636 } 6637 return SLCT_NotALiteral; 6638 6639 case Stmt::PredefinedExprClass: 6640 // While __func__, etc., are technically not string literals, they 6641 // cannot contain format specifiers and thus are not a security 6642 // liability. 6643 return SLCT_UncheckedLiteral; 6644 6645 case Stmt::DeclRefExprClass: { 6646 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 6647 6648 // As an exception, do not flag errors for variables binding to 6649 // const string literals. 6650 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 6651 bool isConstant = false; 6652 QualType T = DR->getType(); 6653 6654 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 6655 isConstant = AT->getElementType().isConstant(S.Context); 6656 } else if (const PointerType *PT = T->getAs<PointerType>()) { 6657 isConstant = T.isConstant(S.Context) && 6658 PT->getPointeeType().isConstant(S.Context); 6659 } else if (T->isObjCObjectPointerType()) { 6660 // In ObjC, there is usually no "const ObjectPointer" type, 6661 // so don't check if the pointee type is constant. 
6662 isConstant = T.isConstant(S.Context); 6663 } 6664 6665 if (isConstant) { 6666 if (const Expr *Init = VD->getAnyInitializer()) { 6667 // Look through initializers like const char c[] = { "foo" } 6668 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 6669 if (InitList->isStringLiteralInit()) 6670 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 6671 } 6672 return checkFormatStringExpr(S, Init, Args, 6673 HasVAListArg, format_idx, 6674 firstDataArg, Type, CallType, 6675 /*InFunctionCall*/ false, CheckedVarArgs, 6676 UncoveredArg, Offset); 6677 } 6678 } 6679 6680 // For vprintf* functions (i.e., HasVAListArg==true), we add a 6681 // special check to see if the format string is a function parameter 6682 // of the function calling the printf function. If the function 6683 // has an attribute indicating it is a printf-like function, then we 6684 // should suppress warnings concerning non-literals being used in a call 6685 // to a vprintf function. For example: 6686 // 6687 // void 6688 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 6689 // va_list ap; 6690 // va_start(ap, fmt); 6691 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 6692 // ... 6693 // } 6694 if (HasVAListArg) { 6695 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 6696 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 6697 int PVIndex = PV->getFunctionScopeIndex() + 1; 6698 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 6699 // adjust for implicit parameter 6700 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 6701 if (MD->isInstance()) 6702 ++PVIndex; 6703 // We also check if the formats are compatible. 6704 // We can't pass a 'scanf' string to a 'printf' function. 6705 if (PVIndex == PVFormat->getFormatIdx() && 6706 Type == S.GetFormatStringType(PVFormat)) 6707 return SLCT_UncheckedLiteral; 6708 } 6709 } 6710 } 6711 } 6712 } 6713 6714 return SLCT_NotALiteral; 6715 } 6716 6717 case Stmt::CallExprClass: 6718 case Stmt::CXXMemberCallExprClass: { 6719 const CallExpr *CE = cast<CallExpr>(E); 6720 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 6721 bool IsFirst = true; 6722 StringLiteralCheckType CommonResult; 6723 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 6724 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 6725 StringLiteralCheckType Result = checkFormatStringExpr( 6726 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 6727 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset); 6728 if (IsFirst) { 6729 CommonResult = Result; 6730 IsFirst = false; 6731 } 6732 } 6733 if (!IsFirst) 6734 return CommonResult; 6735 6736 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 6737 unsigned BuiltinID = FD->getBuiltinID(); 6738 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 6739 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 6740 const Expr *Arg = CE->getArg(0); 6741 return checkFormatStringExpr(S, Arg, Args, 6742 HasVAListArg, format_idx, 6743 firstDataArg, Type, CallType, 6744 InFunctionCall, CheckedVarArgs, 6745 UncoveredArg, Offset); 6746 } 6747 } 6748 } 6749 6750 return SLCT_NotALiteral; 6751 } 6752 case Stmt::ObjCMessageExprClass: { 6753 const auto *ME = cast<ObjCMessageExpr>(E); 6754 if (const auto *ND = ME->getMethodDecl()) { 6755 if (const auto *FA = ND->getAttr<FormatArgAttr>()) { 6756 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 6757 return checkFormatStringExpr( 6758 S, 
Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 6759 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset); 6760 } 6761 } 6762 6763 return SLCT_NotALiteral; 6764 } 6765 case Stmt::ObjCStringLiteralClass: 6766 case Stmt::StringLiteralClass: { 6767 const StringLiteral *StrE = nullptr; 6768 6769 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 6770 StrE = ObjCFExpr->getString(); 6771 else 6772 StrE = cast<StringLiteral>(E); 6773 6774 if (StrE) { 6775 if (Offset.isNegative() || Offset > StrE->getLength()) { 6776 // TODO: It would be better to have an explicit warning for out of 6777 // bounds literals. 6778 return SLCT_NotALiteral; 6779 } 6780 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 6781 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 6782 firstDataArg, Type, InFunctionCall, CallType, 6783 CheckedVarArgs, UncoveredArg); 6784 return SLCT_CheckedLiteral; 6785 } 6786 6787 return SLCT_NotALiteral; 6788 } 6789 case Stmt::BinaryOperatorClass: { 6790 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 6791 6792 // A string literal + an int offset is still a string literal. 6793 if (BinOp->isAdditiveOp()) { 6794 Expr::EvalResult LResult, RResult; 6795 6796 bool LIsInt = BinOp->getLHS()->EvaluateAsInt(LResult, S.Context); 6797 bool RIsInt = BinOp->getRHS()->EvaluateAsInt(RResult, S.Context); 6798 6799 if (LIsInt != RIsInt) { 6800 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 6801 6802 if (LIsInt) { 6803 if (BinOpKind == BO_Add) { 6804 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 6805 E = BinOp->getRHS(); 6806 goto tryAgain; 6807 } 6808 } else { 6809 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 6810 E = BinOp->getLHS(); 6811 goto tryAgain; 6812 } 6813 } 6814 } 6815 6816 return SLCT_NotALiteral; 6817 } 6818 case Stmt::UnaryOperatorClass: { 6819 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 6820 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 6821 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 6822 Expr::EvalResult IndexResult; 6823 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context)) { 6824 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 6825 /*RHS is int*/ true); 6826 E = ASE->getBase(); 6827 goto tryAgain; 6828 } 6829 } 6830 6831 return SLCT_NotALiteral; 6832 } 6833 6834 default: 6835 return SLCT_NotALiteral; 6836 } 6837 } 6838 6839 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 6840 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 6841 .Case("scanf", FST_Scanf) 6842 .Cases("printf", "printf0", FST_Printf) 6843 .Cases("NSString", "CFString", FST_NSString) 6844 .Case("strftime", FST_Strftime) 6845 .Case("strfmon", FST_Strfmon) 6846 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 6847 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 6848 .Case("os_trace", FST_OSLog) 6849 .Case("os_log", FST_OSLog) 6850 .Default(FST_Unknown); 6851 } 6852 6853 /// CheckFormatArguments - Check calls to printf and scanf (and similar 6854 /// functions) for correct use of format strings. 6855 /// Returns true if a format string has been fully checked. 
6856 bool Sema::CheckFormatArguments(const FormatAttr *Format, 6857 ArrayRef<const Expr *> Args, 6858 bool IsCXXMember, 6859 VariadicCallType CallType, 6860 SourceLocation Loc, SourceRange Range, 6861 llvm::SmallBitVector &CheckedVarArgs) { 6862 FormatStringInfo FSI; 6863 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 6864 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 6865 FSI.FirstDataArg, GetFormatStringType(Format), 6866 CallType, Loc, Range, CheckedVarArgs); 6867 return false; 6868 } 6869 6870 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 6871 bool HasVAListArg, unsigned format_idx, 6872 unsigned firstDataArg, FormatStringType Type, 6873 VariadicCallType CallType, 6874 SourceLocation Loc, SourceRange Range, 6875 llvm::SmallBitVector &CheckedVarArgs) { 6876 // CHECK: printf/scanf-like function is called with no format string. 6877 if (format_idx >= Args.size()) { 6878 Diag(Loc, diag::warn_missing_format_string) << Range; 6879 return false; 6880 } 6881 6882 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 6883 6884 // CHECK: format string is not a string literal. 6885 // 6886 // Dynamically generated format strings are difficult to 6887 // automatically vet at compile time. Requiring that format strings 6888 // are string literals: (1) permits the checking of format strings by 6889 // the compiler and thereby (2) can practically remove the source of 6890 // many format string exploits. 6891 6892 // Format string can be either ObjC string (e.g. @"%d") or 6893 // C string (e.g. "%d") 6894 // ObjC string uses the same format specifiers as C string, so we can use 6895 // the same format string checking logic for both ObjC and C strings. 6896 UncoveredArgHandler UncoveredArg; 6897 StringLiteralCheckType CT = 6898 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 6899 format_idx, firstDataArg, Type, CallType, 6900 /*IsFunctionCall*/ true, CheckedVarArgs, 6901 UncoveredArg, 6902 /*no string offset*/ llvm::APSInt(64, false) = 0); 6903 6904 // Generate a diagnostic where an uncovered argument is detected. 6905 if (UncoveredArg.hasUncoveredArg()) { 6906 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 6907 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 6908 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 6909 } 6910 6911 if (CT != SLCT_NotALiteral) 6912 // Literal format string found, check done! 6913 return CT == SLCT_CheckedLiteral; 6914 6915 // Strftime is particular as it always uses a single 'time' argument, 6916 // so it is safe to pass a non-literal string. 6917 if (Type == FST_Strftime) 6918 return false; 6919 6920 // Do not emit diag when the string param is a macro expansion and the 6921 // format is either NSString or CFString. This is a hack to prevent 6922 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 6923 // which are usually used in place of NS and CF string literals. 6924 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 6925 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 6926 return false; 6927 6928 // If there are no arguments specified, warn with -Wformat-security, otherwise 6929 // warn only with -Wformat-nonliteral. 
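  // Illustrative (hypothetical) calls showing the two cases described above,
  // assuming 'fmt' is not a string literal:
  //   printf(fmt);        // no data arguments  -> -Wformat-security
  //   printf(fmt, value); // has data arguments -> -Wformat-nonliteral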
6930 if (Args.size() == firstDataArg) { 6931 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 6932 << OrigFormatExpr->getSourceRange(); 6933 switch (Type) { 6934 default: 6935 break; 6936 case FST_Kprintf: 6937 case FST_FreeBSDKPrintf: 6938 case FST_Printf: 6939 Diag(FormatLoc, diag::note_format_security_fixit) 6940 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 6941 break; 6942 case FST_NSString: 6943 Diag(FormatLoc, diag::note_format_security_fixit) 6944 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 6945 break; 6946 } 6947 } else { 6948 Diag(FormatLoc, diag::warn_format_nonliteral) 6949 << OrigFormatExpr->getSourceRange(); 6950 } 6951 return false; 6952 } 6953 6954 namespace { 6955 6956 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 6957 protected: 6958 Sema &S; 6959 const FormatStringLiteral *FExpr; 6960 const Expr *OrigFormatExpr; 6961 const Sema::FormatStringType FSType; 6962 const unsigned FirstDataArg; 6963 const unsigned NumDataArgs; 6964 const char *Beg; // Start of format string. 6965 const bool HasVAListArg; 6966 ArrayRef<const Expr *> Args; 6967 unsigned FormatIdx; 6968 llvm::SmallBitVector CoveredArgs; 6969 bool usesPositionalArgs = false; 6970 bool atFirstArg = true; 6971 bool inFunctionCall; 6972 Sema::VariadicCallType CallType; 6973 llvm::SmallBitVector &CheckedVarArgs; 6974 UncoveredArgHandler &UncoveredArg; 6975 6976 public: 6977 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 6978 const Expr *origFormatExpr, 6979 const Sema::FormatStringType type, unsigned firstDataArg, 6980 unsigned numDataArgs, const char *beg, bool hasVAListArg, 6981 ArrayRef<const Expr *> Args, unsigned formatIdx, 6982 bool inFunctionCall, Sema::VariadicCallType callType, 6983 llvm::SmallBitVector &CheckedVarArgs, 6984 UncoveredArgHandler &UncoveredArg) 6985 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 6986 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 6987 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 6988 inFunctionCall(inFunctionCall), CallType(callType), 6989 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 6990 CoveredArgs.resize(numDataArgs); 6991 CoveredArgs.reset(); 6992 } 6993 6994 void DoneProcessing(); 6995 6996 void HandleIncompleteSpecifier(const char *startSpecifier, 6997 unsigned specifierLen) override; 6998 6999 void HandleInvalidLengthModifier( 7000 const analyze_format_string::FormatSpecifier &FS, 7001 const analyze_format_string::ConversionSpecifier &CS, 7002 const char *startSpecifier, unsigned specifierLen, 7003 unsigned DiagID); 7004 7005 void HandleNonStandardLengthModifier( 7006 const analyze_format_string::FormatSpecifier &FS, 7007 const char *startSpecifier, unsigned specifierLen); 7008 7009 void HandleNonStandardConversionSpecifier( 7010 const analyze_format_string::ConversionSpecifier &CS, 7011 const char *startSpecifier, unsigned specifierLen); 7012 7013 void HandlePosition(const char *startPos, unsigned posLen) override; 7014 7015 void HandleInvalidPosition(const char *startSpecifier, 7016 unsigned specifierLen, 7017 analyze_format_string::PositionContext p) override; 7018 7019 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7020 7021 void HandleNullChar(const char *nullCharacter) override; 7022 7023 template <typename Range> 7024 static void 7025 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7026 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7027 bool IsStringLocation, 
Range StringRange, 7028 ArrayRef<FixItHint> Fixit = None); 7029 7030 protected: 7031 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7032 const char *startSpec, 7033 unsigned specifierLen, 7034 const char *csStart, unsigned csLen); 7035 7036 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7037 const char *startSpec, 7038 unsigned specifierLen); 7039 7040 SourceRange getFormatStringRange(); 7041 CharSourceRange getSpecifierRange(const char *startSpecifier, 7042 unsigned specifierLen); 7043 SourceLocation getLocationOfByte(const char *x); 7044 7045 const Expr *getDataArg(unsigned i) const; 7046 7047 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7048 const analyze_format_string::ConversionSpecifier &CS, 7049 const char *startSpecifier, unsigned specifierLen, 7050 unsigned argIndex); 7051 7052 template <typename Range> 7053 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7054 bool IsStringLocation, Range StringRange, 7055 ArrayRef<FixItHint> Fixit = None); 7056 }; 7057 7058 } // namespace 7059 7060 SourceRange CheckFormatHandler::getFormatStringRange() { 7061 return OrigFormatExpr->getSourceRange(); 7062 } 7063 7064 CharSourceRange CheckFormatHandler:: 7065 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7066 SourceLocation Start = getLocationOfByte(startSpecifier); 7067 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7068 7069 // Advance the end SourceLocation by one due to half-open ranges. 7070 End = End.getLocWithOffset(1); 7071 7072 return CharSourceRange::getCharRange(Start, End); 7073 } 7074 7075 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7076 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7077 S.getLangOpts(), S.Context.getTargetInfo()); 7078 } 7079 7080 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7081 unsigned specifierLen){ 7082 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7083 getLocationOfByte(startSpecifier), 7084 /*IsStringLocation*/true, 7085 getSpecifierRange(startSpecifier, specifierLen)); 7086 } 7087 7088 void CheckFormatHandler::HandleInvalidLengthModifier( 7089 const analyze_format_string::FormatSpecifier &FS, 7090 const analyze_format_string::ConversionSpecifier &CS, 7091 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7092 using namespace analyze_format_string; 7093 7094 const LengthModifier &LM = FS.getLengthModifier(); 7095 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7096 7097 // See if we know how to fix this length modifier. 
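  // Illustrative example (not from the original source): a length modifier
  // that cannot apply to its conversion, e.g. the 'h' in
  //   printf("%hf", someFloat);
  // ends up here; the code below suggests a corrected modifier when one is
  // known and otherwise offers a removal fix-it for nonsensical modifiers.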
7098 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7099 if (FixedLM) { 7100 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7101 getLocationOfByte(LM.getStart()), 7102 /*IsStringLocation*/true, 7103 getSpecifierRange(startSpecifier, specifierLen)); 7104 7105 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7106 << FixedLM->toString() 7107 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7108 7109 } else { 7110 FixItHint Hint; 7111 if (DiagID == diag::warn_format_nonsensical_length) 7112 Hint = FixItHint::CreateRemoval(LMRange); 7113 7114 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7115 getLocationOfByte(LM.getStart()), 7116 /*IsStringLocation*/true, 7117 getSpecifierRange(startSpecifier, specifierLen), 7118 Hint); 7119 } 7120 } 7121 7122 void CheckFormatHandler::HandleNonStandardLengthModifier( 7123 const analyze_format_string::FormatSpecifier &FS, 7124 const char *startSpecifier, unsigned specifierLen) { 7125 using namespace analyze_format_string; 7126 7127 const LengthModifier &LM = FS.getLengthModifier(); 7128 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7129 7130 // See if we know how to fix this length modifier. 7131 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7132 if (FixedLM) { 7133 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7134 << LM.toString() << 0, 7135 getLocationOfByte(LM.getStart()), 7136 /*IsStringLocation*/true, 7137 getSpecifierRange(startSpecifier, specifierLen)); 7138 7139 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7140 << FixedLM->toString() 7141 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7142 7143 } else { 7144 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7145 << LM.toString() << 0, 7146 getLocationOfByte(LM.getStart()), 7147 /*IsStringLocation*/true, 7148 getSpecifierRange(startSpecifier, specifierLen)); 7149 } 7150 } 7151 7152 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7153 const analyze_format_string::ConversionSpecifier &CS, 7154 const char *startSpecifier, unsigned specifierLen) { 7155 using namespace analyze_format_string; 7156 7157 // See if we know how to fix this conversion specifier. 
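  // Illustrative example (not from the original source): a non-standard
  // conversion such as '%D' in
  //   printf("%D\n", n);
  // is reported as non-standard here, and when a standard equivalent is
  // known a fix-it note suggesting it is emitted below.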
7158 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7159 if (FixedCS) { 7160 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7161 << CS.toString() << /*conversion specifier*/1, 7162 getLocationOfByte(CS.getStart()), 7163 /*IsStringLocation*/true, 7164 getSpecifierRange(startSpecifier, specifierLen)); 7165 7166 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7167 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7168 << FixedCS->toString() 7169 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7170 } else { 7171 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7172 << CS.toString() << /*conversion specifier*/1, 7173 getLocationOfByte(CS.getStart()), 7174 /*IsStringLocation*/true, 7175 getSpecifierRange(startSpecifier, specifierLen)); 7176 } 7177 } 7178 7179 void CheckFormatHandler::HandlePosition(const char *startPos, 7180 unsigned posLen) { 7181 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7182 getLocationOfByte(startPos), 7183 /*IsStringLocation*/true, 7184 getSpecifierRange(startPos, posLen)); 7185 } 7186 7187 void 7188 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7189 analyze_format_string::PositionContext p) { 7190 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7191 << (unsigned) p, 7192 getLocationOfByte(startPos), /*IsStringLocation*/true, 7193 getSpecifierRange(startPos, posLen)); 7194 } 7195 7196 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7197 unsigned posLen) { 7198 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7199 getLocationOfByte(startPos), 7200 /*IsStringLocation*/true, 7201 getSpecifierRange(startPos, posLen)); 7202 } 7203 7204 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7205 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7206 // The presence of a null character is likely an error. 7207 EmitFormatDiagnostic( 7208 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7209 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7210 getFormatStringRange()); 7211 } 7212 } 7213 7214 // Note that this may return NULL if there was an error parsing or building 7215 // one of the argument expressions. 7216 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 7217 return Args[FirstDataArg + i]; 7218 } 7219 7220 void CheckFormatHandler::DoneProcessing() { 7221 // Does the number of data arguments exceed the number of 7222 // format conversions in the format string? 7223 if (!HasVAListArg) { 7224 // Find any arguments that weren't covered. 
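    // Illustrative example (not from the original source): in
    //   printf("%d\n", x, y);
    // 'y' is never consumed by any conversion, so it is flagged below as an
    // unused data argument.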
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
                                  S, IsFunctionCall, DiagnosticExprs[0],
                                  PDiag, Loc, /*IsStringLocation*/false,
                                  DiagnosticExprs[0]->getSourceRange());
}

bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
7286 std::string CodePointStr; 7287 if (!llvm::sys::locale::isPrint(*csStart)) { 7288 llvm::UTF32 CodePoint; 7289 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 7290 const llvm::UTF8 *E = 7291 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 7292 llvm::ConversionResult Result = 7293 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 7294 7295 if (Result != llvm::conversionOK) { 7296 unsigned char FirstChar = *csStart; 7297 CodePoint = (llvm::UTF32)FirstChar; 7298 } 7299 7300 llvm::raw_string_ostream OS(CodePointStr); 7301 if (CodePoint < 256) 7302 OS << "\\x" << llvm::format("%02x", CodePoint); 7303 else if (CodePoint <= 0xFFFF) 7304 OS << "\\u" << llvm::format("%04x", CodePoint); 7305 else 7306 OS << "\\U" << llvm::format("%08x", CodePoint); 7307 OS.flush(); 7308 Specifier = CodePointStr; 7309 } 7310 7311 EmitFormatDiagnostic( 7312 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 7313 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 7314 7315 return keepGoing; 7316 } 7317 7318 void 7319 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 7320 const char *startSpec, 7321 unsigned specifierLen) { 7322 EmitFormatDiagnostic( 7323 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 7324 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 7325 } 7326 7327 bool 7328 CheckFormatHandler::CheckNumArgs( 7329 const analyze_format_string::FormatSpecifier &FS, 7330 const analyze_format_string::ConversionSpecifier &CS, 7331 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 7332 7333 if (argIndex >= NumDataArgs) { 7334 PartialDiagnostic PDiag = FS.usesPositionalArg() 7335 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 7336 << (argIndex+1) << NumDataArgs) 7337 : S.PDiag(diag::warn_printf_insufficient_data_args); 7338 EmitFormatDiagnostic( 7339 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 7340 getSpecifierRange(startSpecifier, specifierLen)); 7341 7342 // Since more arguments than conversion tokens are given, by extension 7343 // all arguments are covered, so mark this as so. 7344 UncoveredArg.setAllCovered(); 7345 return false; 7346 } 7347 return true; 7348 } 7349 7350 template<typename Range> 7351 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 7352 SourceLocation Loc, 7353 bool IsStringLocation, 7354 Range StringRange, 7355 ArrayRef<FixItHint> FixIt) { 7356 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 7357 Loc, IsStringLocation, StringRange, FixIt); 7358 } 7359 7360 /// If the format string is not within the function call, emit a note 7361 /// so that the function call and string are in diagnostic messages. 7362 /// 7363 /// \param InFunctionCall if true, the format string is within the function 7364 /// call and only one diagnostic message will be produced. Otherwise, an 7365 /// extra note will be emitted pointing to location of the format string. 7366 /// 7367 /// \param ArgumentExpr the expression that is passed as the format string 7368 /// argument in the function call. Used for getting locations when two 7369 /// diagnostics are emitted. 7370 /// 7371 /// \param PDiag the callee should already have provided any strings for the 7372 /// diagnostic message. This function only adds locations and fixits 7373 /// to diagnostics. 7374 /// 7375 /// \param Loc primary location for diagnostic. 
If two diagnostics are 7376 /// required, one will be at Loc and a new SourceLocation will be created for 7377 /// the other one. 7378 /// 7379 /// \param IsStringLocation if true, Loc points to the format string should be 7380 /// used for the note. Otherwise, Loc points to the argument list and will 7381 /// be used with PDiag. 7382 /// 7383 /// \param StringRange some or all of the string to highlight. This is 7384 /// templated so it can accept either a CharSourceRange or a SourceRange. 7385 /// 7386 /// \param FixIt optional fix it hint for the format string. 7387 template <typename Range> 7388 void CheckFormatHandler::EmitFormatDiagnostic( 7389 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 7390 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 7391 Range StringRange, ArrayRef<FixItHint> FixIt) { 7392 if (InFunctionCall) { 7393 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 7394 D << StringRange; 7395 D << FixIt; 7396 } else { 7397 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 7398 << ArgumentExpr->getSourceRange(); 7399 7400 const Sema::SemaDiagnosticBuilder &Note = 7401 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 7402 diag::note_format_string_defined); 7403 7404 Note << StringRange; 7405 Note << FixIt; 7406 } 7407 } 7408 7409 //===--- CHECK: Printf format string checking ------------------------------===// 7410 7411 namespace { 7412 7413 class CheckPrintfHandler : public CheckFormatHandler { 7414 public: 7415 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 7416 const Expr *origFormatExpr, 7417 const Sema::FormatStringType type, unsigned firstDataArg, 7418 unsigned numDataArgs, bool isObjC, const char *beg, 7419 bool hasVAListArg, ArrayRef<const Expr *> Args, 7420 unsigned formatIdx, bool inFunctionCall, 7421 Sema::VariadicCallType CallType, 7422 llvm::SmallBitVector &CheckedVarArgs, 7423 UncoveredArgHandler &UncoveredArg) 7424 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 7425 numDataArgs, beg, hasVAListArg, Args, formatIdx, 7426 inFunctionCall, CallType, CheckedVarArgs, 7427 UncoveredArg) {} 7428 7429 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 7430 7431 /// Returns true if '%@' specifiers are allowed in the format string. 
7432 bool allowsObjCArg() const { 7433 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7434 FSType == Sema::FST_OSTrace; 7435 } 7436 7437 bool HandleInvalidPrintfConversionSpecifier( 7438 const analyze_printf::PrintfSpecifier &FS, 7439 const char *startSpecifier, 7440 unsigned specifierLen) override; 7441 7442 void handleInvalidMaskType(StringRef MaskType) override; 7443 7444 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7445 const char *startSpecifier, 7446 unsigned specifierLen) override; 7447 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7448 const char *StartSpecifier, 7449 unsigned SpecifierLen, 7450 const Expr *E); 7451 7452 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7453 const char *startSpecifier, unsigned specifierLen); 7454 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7455 const analyze_printf::OptionalAmount &Amt, 7456 unsigned type, 7457 const char *startSpecifier, unsigned specifierLen); 7458 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7459 const analyze_printf::OptionalFlag &flag, 7460 const char *startSpecifier, unsigned specifierLen); 7461 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 7462 const analyze_printf::OptionalFlag &ignoredFlag, 7463 const analyze_printf::OptionalFlag &flag, 7464 const char *startSpecifier, unsigned specifierLen); 7465 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 7466 const Expr *E); 7467 7468 void HandleEmptyObjCModifierFlag(const char *startFlag, 7469 unsigned flagLen) override; 7470 7471 void HandleInvalidObjCModifierFlag(const char *startFlag, 7472 unsigned flagLen) override; 7473 7474 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 7475 const char *flagsEnd, 7476 const char *conversionPosition) 7477 override; 7478 }; 7479 7480 } // namespace 7481 7482 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 7483 const analyze_printf::PrintfSpecifier &FS, 7484 const char *startSpecifier, 7485 unsigned specifierLen) { 7486 const analyze_printf::PrintfConversionSpecifier &CS = 7487 FS.getConversionSpecifier(); 7488 7489 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 7490 getLocationOfByte(CS.getStart()), 7491 startSpecifier, specifierLen, 7492 CS.getStart(), CS.getLength()); 7493 } 7494 7495 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 7496 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 7497 } 7498 7499 bool CheckPrintfHandler::HandleAmount( 7500 const analyze_format_string::OptionalAmount &Amt, 7501 unsigned k, const char *startSpecifier, 7502 unsigned specifierLen) { 7503 if (Amt.hasDataArgument()) { 7504 if (!HasVAListArg) { 7505 unsigned argIndex = Amt.getArgIndex(); 7506 if (argIndex >= NumDataArgs) { 7507 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 7508 << k, 7509 getLocationOfByte(Amt.getStart()), 7510 /*IsStringLocation*/true, 7511 getSpecifierRange(startSpecifier, specifierLen)); 7512 // Don't do any more checking. We will just emit 7513 // spurious errors. 7514 return false; 7515 } 7516 7517 // Type check the data argument. It should be an 'int'. 7518 // Although not in conformance with C99, we also allow the argument to be 7519 // an 'unsigned int' as that is a reasonably safe case. GCC also 7520 // doesn't emit a warning for that case. 
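      // Illustrative example (not from the original source): in
      //   printf("%*d", width, value);
      // 'width' supplies the field width and must be an 'int' (or, per the
      // note above, an 'unsigned int'); any other type is diagnosed below.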
7521 CoveredArgs.set(argIndex); 7522 const Expr *Arg = getDataArg(argIndex); 7523 if (!Arg) 7524 return false; 7525 7526 QualType T = Arg->getType(); 7527 7528 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 7529 assert(AT.isValid()); 7530 7531 if (!AT.matchesType(S.Context, T)) { 7532 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 7533 << k << AT.getRepresentativeTypeName(S.Context) 7534 << T << Arg->getSourceRange(), 7535 getLocationOfByte(Amt.getStart()), 7536 /*IsStringLocation*/true, 7537 getSpecifierRange(startSpecifier, specifierLen)); 7538 // Don't do any more checking. We will just emit 7539 // spurious errors. 7540 return false; 7541 } 7542 } 7543 } 7544 return true; 7545 } 7546 7547 void CheckPrintfHandler::HandleInvalidAmount( 7548 const analyze_printf::PrintfSpecifier &FS, 7549 const analyze_printf::OptionalAmount &Amt, 7550 unsigned type, 7551 const char *startSpecifier, 7552 unsigned specifierLen) { 7553 const analyze_printf::PrintfConversionSpecifier &CS = 7554 FS.getConversionSpecifier(); 7555 7556 FixItHint fixit = 7557 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 7558 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 7559 Amt.getConstantLength())) 7560 : FixItHint(); 7561 7562 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 7563 << type << CS.toString(), 7564 getLocationOfByte(Amt.getStart()), 7565 /*IsStringLocation*/true, 7566 getSpecifierRange(startSpecifier, specifierLen), 7567 fixit); 7568 } 7569 7570 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7571 const analyze_printf::OptionalFlag &flag, 7572 const char *startSpecifier, 7573 unsigned specifierLen) { 7574 // Warn about pointless flag with a fixit removal. 7575 const analyze_printf::PrintfConversionSpecifier &CS = 7576 FS.getConversionSpecifier(); 7577 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 7578 << flag.toString() << CS.toString(), 7579 getLocationOfByte(flag.getPosition()), 7580 /*IsStringLocation*/true, 7581 getSpecifierRange(startSpecifier, specifierLen), 7582 FixItHint::CreateRemoval( 7583 getSpecifierRange(flag.getPosition(), 1))); 7584 } 7585 7586 void CheckPrintfHandler::HandleIgnoredFlag( 7587 const analyze_printf::PrintfSpecifier &FS, 7588 const analyze_printf::OptionalFlag &ignoredFlag, 7589 const analyze_printf::OptionalFlag &flag, 7590 const char *startSpecifier, 7591 unsigned specifierLen) { 7592 // Warn about ignored flag with a fixit removal. 7593 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 7594 << ignoredFlag.toString() << flag.toString(), 7595 getLocationOfByte(ignoredFlag.getPosition()), 7596 /*IsStringLocation*/true, 7597 getSpecifierRange(startSpecifier, specifierLen), 7598 FixItHint::CreateRemoval( 7599 getSpecifierRange(ignoredFlag.getPosition(), 1))); 7600 } 7601 7602 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 7603 unsigned flagLen) { 7604 // Warn about an empty flag. 7605 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 7606 getLocationOfByte(startFlag), 7607 /*IsStringLocation*/true, 7608 getSpecifierRange(startFlag, flagLen)); 7609 } 7610 7611 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 7612 unsigned flagLen) { 7613 // Warn about an invalid flag. 
7614 auto Range = getSpecifierRange(startFlag, flagLen); 7615 StringRef flag(startFlag, flagLen); 7616 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 7617 getLocationOfByte(startFlag), 7618 /*IsStringLocation*/true, 7619 Range, FixItHint::CreateRemoval(Range)); 7620 } 7621 7622 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 7623 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 7624 // Warn about using '[...]' without a '@' conversion. 7625 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 7626 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 7627 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 7628 getLocationOfByte(conversionPosition), 7629 /*IsStringLocation*/true, 7630 Range, FixItHint::CreateRemoval(Range)); 7631 } 7632 7633 // Determines if the specified is a C++ class or struct containing 7634 // a member with the specified name and kind (e.g. a CXXMethodDecl named 7635 // "c_str()"). 7636 template<typename MemberKind> 7637 static llvm::SmallPtrSet<MemberKind*, 1> 7638 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 7639 const RecordType *RT = Ty->getAs<RecordType>(); 7640 llvm::SmallPtrSet<MemberKind*, 1> Results; 7641 7642 if (!RT) 7643 return Results; 7644 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 7645 if (!RD || !RD->getDefinition()) 7646 return Results; 7647 7648 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 7649 Sema::LookupMemberName); 7650 R.suppressDiagnostics(); 7651 7652 // We just need to include all members of the right kind turned up by the 7653 // filter, at this point. 7654 if (S.LookupQualifiedName(R, RT->getDecl())) 7655 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 7656 NamedDecl *decl = (*I)->getUnderlyingDecl(); 7657 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 7658 Results.insert(FK); 7659 } 7660 return Results; 7661 } 7662 7663 /// Check if we could call '.c_str()' on an object. 7664 /// 7665 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 7666 /// allow the call, or if it would be ambiguous). 7667 bool Sema::hasCStrMethod(const Expr *E) { 7668 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 7669 7670 MethodSet Results = 7671 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 7672 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 7673 MI != ME; ++MI) 7674 if ((*MI)->getMinRequiredArguments() == 0) 7675 return true; 7676 return false; 7677 } 7678 7679 // Check if a (w)string was passed when a (w)char* was needed, and offer a 7680 // better diagnostic if so. AT is assumed to be valid. 7681 // Returns true when a c_str() conversion method is found. 7682 bool CheckPrintfHandler::checkForCStrMembers( 7683 const analyze_printf::ArgType &AT, const Expr *E) { 7684 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 7685 7686 MethodSet Results = 7687 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 7688 7689 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 7690 MI != ME; ++MI) { 7691 const CXXMethodDecl *Method = *MI; 7692 if (Method->getMinRequiredArguments() == 0 && 7693 AT.matchesType(S.Context, Method->getReturnType())) { 7694 // FIXME: Suggest parens if the expression needs them. 
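      // Illustrative example (not from the original source): given
      //   std::string S; printf("%s", S);
      // the note emitted below suggests rewriting the argument as 'S.c_str()'.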
7695 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 7696 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 7697 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 7698 return true; 7699 } 7700 } 7701 7702 return false; 7703 } 7704 7705 bool 7706 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 7707 &FS, 7708 const char *startSpecifier, 7709 unsigned specifierLen) { 7710 using namespace analyze_format_string; 7711 using namespace analyze_printf; 7712 7713 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 7714 7715 if (FS.consumesDataArgument()) { 7716 if (atFirstArg) { 7717 atFirstArg = false; 7718 usesPositionalArgs = FS.usesPositionalArg(); 7719 } 7720 else if (usesPositionalArgs != FS.usesPositionalArg()) { 7721 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 7722 startSpecifier, specifierLen); 7723 return false; 7724 } 7725 } 7726 7727 // First check if the field width, precision, and conversion specifier 7728 // have matching data arguments. 7729 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 7730 startSpecifier, specifierLen)) { 7731 return false; 7732 } 7733 7734 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 7735 startSpecifier, specifierLen)) { 7736 return false; 7737 } 7738 7739 if (!CS.consumesDataArgument()) { 7740 // FIXME: Technically specifying a precision or field width here 7741 // makes no sense. Worth issuing a warning at some point. 7742 return true; 7743 } 7744 7745 // Consume the argument. 7746 unsigned argIndex = FS.getArgIndex(); 7747 if (argIndex < NumDataArgs) { 7748 // The check to see if the argIndex is valid will come later. 7749 // We set the bit here because we may exit early from this 7750 // function if we encounter some other error. 7751 CoveredArgs.set(argIndex); 7752 } 7753 7754 // FreeBSD kernel extensions. 7755 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 7756 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 7757 // We need at least two arguments. 7758 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 7759 return false; 7760 7761 // Claim the second argument. 7762 CoveredArgs.set(argIndex + 1); 7763 7764 // Type check the first argument (int for %b, pointer for %D) 7765 const Expr *Ex = getDataArg(argIndex); 7766 const analyze_printf::ArgType &AT = 7767 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 7768 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 7769 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 7770 EmitFormatDiagnostic( 7771 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7772 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 7773 << false << Ex->getSourceRange(), 7774 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7775 getSpecifierRange(startSpecifier, specifierLen)); 7776 7777 // Type check the second argument (char * for both %b and %D) 7778 Ex = getDataArg(argIndex + 1); 7779 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 7780 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 7781 EmitFormatDiagnostic( 7782 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7783 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 7784 << false << Ex->getSourceRange(), 7785 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7786 getSpecifierRange(startSpecifier, specifierLen)); 7787 7788 return true; 7789 } 7790 7791 // Check for using an Objective-C specific conversion specifier 7792 // in a non-ObjC literal. 
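  // Illustrative example (not from the original source):
  //   printf("%@", obj);
  // '%@' is only meaningful for NSString/CFString/os_log-style format strings,
  // so in a plain printf it is diagnosed as an invalid conversion specifier.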
7793 if (!allowsObjCArg() && CS.isObjCArg()) { 7794 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7795 specifierLen); 7796 } 7797 7798 // %P can only be used with os_log. 7799 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 7800 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7801 specifierLen); 7802 } 7803 7804 // %n is not allowed with os_log. 7805 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 7806 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 7807 getLocationOfByte(CS.getStart()), 7808 /*IsStringLocation*/ false, 7809 getSpecifierRange(startSpecifier, specifierLen)); 7810 7811 return true; 7812 } 7813 7814 // Only scalars are allowed for os_trace. 7815 if (FSType == Sema::FST_OSTrace && 7816 (CS.getKind() == ConversionSpecifier::PArg || 7817 CS.getKind() == ConversionSpecifier::sArg || 7818 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 7819 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7820 specifierLen); 7821 } 7822 7823 // Check for use of public/private annotation outside of os_log(). 7824 if (FSType != Sema::FST_OSLog) { 7825 if (FS.isPublic().isSet()) { 7826 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7827 << "public", 7828 getLocationOfByte(FS.isPublic().getPosition()), 7829 /*IsStringLocation*/ false, 7830 getSpecifierRange(startSpecifier, specifierLen)); 7831 } 7832 if (FS.isPrivate().isSet()) { 7833 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7834 << "private", 7835 getLocationOfByte(FS.isPrivate().getPosition()), 7836 /*IsStringLocation*/ false, 7837 getSpecifierRange(startSpecifier, specifierLen)); 7838 } 7839 } 7840 7841 // Check for invalid use of field width 7842 if (!FS.hasValidFieldWidth()) { 7843 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 7844 startSpecifier, specifierLen); 7845 } 7846 7847 // Check for invalid use of precision 7848 if (!FS.hasValidPrecision()) { 7849 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 7850 startSpecifier, specifierLen); 7851 } 7852 7853 // Precision is mandatory for %P specifier. 7854 if (CS.getKind() == ConversionSpecifier::PArg && 7855 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 7856 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 7857 getLocationOfByte(startSpecifier), 7858 /*IsStringLocation*/ false, 7859 getSpecifierRange(startSpecifier, specifierLen)); 7860 } 7861 7862 // Check each flag does not conflict with any other component. 
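  // Illustrative examples (not from the original source):
  //   printf("%+s", str);   // '+' is meaningless with 's'      -> HandleFlag
  //   printf("% +d", n);    // ' ' is ignored when '+' is given -> HandleIgnoredFlag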
7863 if (!FS.hasValidThousandsGroupingPrefix()) 7864 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 7865 if (!FS.hasValidLeadingZeros()) 7866 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 7867 if (!FS.hasValidPlusPrefix()) 7868 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 7869 if (!FS.hasValidSpacePrefix()) 7870 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 7871 if (!FS.hasValidAlternativeForm()) 7872 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 7873 if (!FS.hasValidLeftJustified()) 7874 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 7875 7876 // Check that flags are not ignored by another flag 7877 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 7878 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 7879 startSpecifier, specifierLen); 7880 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 7881 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 7882 startSpecifier, specifierLen); 7883 7884 // Check the length modifier is valid with the given conversion specifier. 7885 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 7886 S.getLangOpts())) 7887 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7888 diag::warn_format_nonsensical_length); 7889 else if (!FS.hasStandardLengthModifier()) 7890 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 7891 else if (!FS.hasStandardLengthConversionCombination()) 7892 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7893 diag::warn_format_non_standard_conversion_spec); 7894 7895 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 7896 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 7897 7898 // The remaining checks depend on the data arguments. 7899 if (HasVAListArg) 7900 return true; 7901 7902 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 7903 return false; 7904 7905 const Expr *Arg = getDataArg(argIndex); 7906 if (!Arg) 7907 return true; 7908 7909 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 7910 } 7911 7912 static bool requiresParensToAddCast(const Expr *E) { 7913 // FIXME: We should have a general way to reason about operator 7914 // precedence and whether parens are actually needed here. 7915 // Take care of a few common cases where they aren't. 
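  // Illustrative examples (not from the original source): arguments such as
  //   x    f(y)    a[i]    obj.member    "str"
  // can have a cast glued on directly, whereas 'a + b' or 'c ? a : b' fall
  // through to the default case below and get parenthesized as well.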
7916 const Expr *Inside = E->IgnoreImpCasts(); 7917 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 7918 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 7919 7920 switch (Inside->getStmtClass()) { 7921 case Stmt::ArraySubscriptExprClass: 7922 case Stmt::CallExprClass: 7923 case Stmt::CharacterLiteralClass: 7924 case Stmt::CXXBoolLiteralExprClass: 7925 case Stmt::DeclRefExprClass: 7926 case Stmt::FloatingLiteralClass: 7927 case Stmt::IntegerLiteralClass: 7928 case Stmt::MemberExprClass: 7929 case Stmt::ObjCArrayLiteralClass: 7930 case Stmt::ObjCBoolLiteralExprClass: 7931 case Stmt::ObjCBoxedExprClass: 7932 case Stmt::ObjCDictionaryLiteralClass: 7933 case Stmt::ObjCEncodeExprClass: 7934 case Stmt::ObjCIvarRefExprClass: 7935 case Stmt::ObjCMessageExprClass: 7936 case Stmt::ObjCPropertyRefExprClass: 7937 case Stmt::ObjCStringLiteralClass: 7938 case Stmt::ObjCSubscriptRefExprClass: 7939 case Stmt::ParenExprClass: 7940 case Stmt::StringLiteralClass: 7941 case Stmt::UnaryOperatorClass: 7942 return false; 7943 default: 7944 return true; 7945 } 7946 } 7947 7948 static std::pair<QualType, StringRef> 7949 shouldNotPrintDirectly(const ASTContext &Context, 7950 QualType IntendedTy, 7951 const Expr *E) { 7952 // Use a 'while' to peel off layers of typedefs. 7953 QualType TyTy = IntendedTy; 7954 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 7955 StringRef Name = UserTy->getDecl()->getName(); 7956 QualType CastTy = llvm::StringSwitch<QualType>(Name) 7957 .Case("CFIndex", Context.getNSIntegerType()) 7958 .Case("NSInteger", Context.getNSIntegerType()) 7959 .Case("NSUInteger", Context.getNSUIntegerType()) 7960 .Case("SInt32", Context.IntTy) 7961 .Case("UInt32", Context.UnsignedIntTy) 7962 .Default(QualType()); 7963 7964 if (!CastTy.isNull()) 7965 return std::make_pair(CastTy, Name); 7966 7967 TyTy = UserTy->desugar(); 7968 } 7969 7970 // Strip parens if necessary. 7971 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 7972 return shouldNotPrintDirectly(Context, 7973 PE->getSubExpr()->getType(), 7974 PE->getSubExpr()); 7975 7976 // If this is a conditional expression, then its result type is constructed 7977 // via usual arithmetic conversions and thus there might be no necessary 7978 // typedef sugar there. Recurse to operands to check for NSInteger & 7979 // Co. usage condition. 7980 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 7981 QualType TrueTy, FalseTy; 7982 StringRef TrueName, FalseName; 7983 7984 std::tie(TrueTy, TrueName) = 7985 shouldNotPrintDirectly(Context, 7986 CO->getTrueExpr()->getType(), 7987 CO->getTrueExpr()); 7988 std::tie(FalseTy, FalseName) = 7989 shouldNotPrintDirectly(Context, 7990 CO->getFalseExpr()->getType(), 7991 CO->getFalseExpr()); 7992 7993 if (TrueTy == FalseTy) 7994 return std::make_pair(TrueTy, TrueName); 7995 else if (TrueTy.isNull()) 7996 return std::make_pair(FalseTy, FalseName); 7997 else if (FalseTy.isNull()) 7998 return std::make_pair(TrueTy, TrueName); 7999 } 8000 8001 return std::make_pair(QualType(), StringRef()); 8002 } 8003 8004 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8005 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8006 /// type do not count. 8007 static bool 8008 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8009 QualType From = ICE->getSubExpr()->getType(); 8010 QualType To = ICE->getType(); 8011 // It's an integer promotion if the destination type is the promoted 8012 // source type. 
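  // Illustrative example (not from the original source): passing a 'short' to
  // a variadic printf promotes it to 'int' via an implicit cast; that is the
  // kind of promotion recognized here, so later diagnostics can talk about the
  // original 'short' argument rather than the promoted 'int'.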
8013 if (ICE->getCastKind() == CK_IntegralCast && 8014 From->isPromotableIntegerType() && 8015 S.Context.getPromotedIntegerType(From) == To) 8016 return true; 8017 // Look through vector types, since we do default argument promotion for 8018 // those in OpenCL. 8019 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8020 From = VecTy->getElementType(); 8021 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8022 To = VecTy->getElementType(); 8023 // It's a floating promotion if the source type is a lower rank. 8024 return ICE->getCastKind() == CK_FloatingCast && 8025 S.Context.getFloatingTypeOrder(From, To) < 0; 8026 } 8027 8028 bool 8029 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8030 const char *StartSpecifier, 8031 unsigned SpecifierLen, 8032 const Expr *E) { 8033 using namespace analyze_format_string; 8034 using namespace analyze_printf; 8035 8036 // Now type check the data expression that matches the 8037 // format specifier. 8038 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8039 if (!AT.isValid()) 8040 return true; 8041 8042 QualType ExprTy = E->getType(); 8043 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8044 ExprTy = TET->getUnderlyingExpr()->getType(); 8045 } 8046 8047 const analyze_printf::ArgType::MatchKind Match = 8048 AT.matchesType(S.Context, ExprTy); 8049 bool Pedantic = Match == analyze_printf::ArgType::NoMatchPedantic; 8050 if (Match == analyze_printf::ArgType::Match) 8051 return true; 8052 8053 // Look through argument promotions for our error message's reported type. 8054 // This includes the integral and floating promotions, but excludes array 8055 // and function pointer decay (seeing that an argument intended to be a 8056 // string has type 'char [6]' is probably more confusing than 'char *') and 8057 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8058 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8059 if (isArithmeticArgumentPromotion(S, ICE)) { 8060 E = ICE->getSubExpr(); 8061 ExprTy = E->getType(); 8062 8063 // Check if we didn't match because of an implicit cast from a 'char' 8064 // or 'short' to an 'int'. This is done because printf is a varargs 8065 // function. 8066 if (ICE->getType() == S.Context.IntTy || 8067 ICE->getType() == S.Context.UnsignedIntTy) { 8068 // All further checking is done on the subexpression. 8069 if (AT.matchesType(S.Context, ExprTy)) 8070 return true; 8071 } 8072 } 8073 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8074 // Special case for 'a', which has type 'int' in C. 8075 // Note, however, that we do /not/ want to treat multibyte constants like 8076 // 'MooV' as characters! This form is deprecated but still exists. 8077 if (ExprTy == S.Context.IntTy) 8078 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8079 ExprTy = S.Context.CharTy; 8080 } 8081 8082 // Look through enums to their underlying type. 8083 bool IsEnum = false; 8084 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8085 ExprTy = EnumTy->getDecl()->getIntegerType(); 8086 IsEnum = true; 8087 } 8088 8089 // %C in an Objective-C context prints a unichar, not a wchar_t. 8090 // If the argument is an integer of some kind, believe the %C and suggest 8091 // a cast instead of changing the conversion specifier. 
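  // Illustrative example (not from the original source): for
  //   NSLog(@"%C", someInt);
  // the '%C' is kept and a cast of 'someInt' to 'unichar' is suggested
  // (falling back to 'unsigned short' when the typedef is not visible).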
8092 QualType IntendedTy = ExprTy; 8093 if (isObjCContext() && 8094 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8095 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8096 !ExprTy->isCharType()) { 8097 // 'unichar' is defined as a typedef of unsigned short, but we should 8098 // prefer using the typedef if it is visible. 8099 IntendedTy = S.Context.UnsignedShortTy; 8100 8101 // While we are here, check if the value is an IntegerLiteral that happens 8102 // to be within the valid range. 8103 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8104 const llvm::APInt &V = IL->getValue(); 8105 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8106 return true; 8107 } 8108 8109 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8110 Sema::LookupOrdinaryName); 8111 if (S.LookupName(Result, S.getCurScope())) { 8112 NamedDecl *ND = Result.getFoundDecl(); 8113 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8114 if (TD->getUnderlyingType() == IntendedTy) 8115 IntendedTy = S.Context.getTypedefType(TD); 8116 } 8117 } 8118 } 8119 8120 // Special-case some of Darwin's platform-independence types by suggesting 8121 // casts to primitive types that are known to be large enough. 8122 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8123 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8124 QualType CastTy; 8125 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8126 if (!CastTy.isNull()) { 8127 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8128 // (long in ASTContext). Only complain to pedants. 8129 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8130 (AT.isSizeT() || AT.isPtrdiffT()) && 8131 AT.matchesType(S.Context, CastTy)) 8132 Pedantic = true; 8133 IntendedTy = CastTy; 8134 ShouldNotPrintDirectly = true; 8135 } 8136 } 8137 8138 // We may be able to offer a FixItHint if it is a supported type. 8139 PrintfSpecifier fixedFS = FS; 8140 bool Success = 8141 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8142 8143 if (Success) { 8144 // Get the fix string from the fixed format specifier 8145 SmallString<16> buf; 8146 llvm::raw_svector_ostream os(buf); 8147 fixedFS.toString(os); 8148 8149 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8150 8151 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8152 unsigned Diag = 8153 Pedantic 8154 ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8155 : diag::warn_format_conversion_argument_type_mismatch; 8156 // In this case, the specifier is wrong and should be changed to match 8157 // the argument. 8158 EmitFormatDiagnostic(S.PDiag(Diag) 8159 << AT.getRepresentativeTypeName(S.Context) 8160 << IntendedTy << IsEnum << E->getSourceRange(), 8161 E->getBeginLoc(), 8162 /*IsStringLocation*/ false, SpecRange, 8163 FixItHint::CreateReplacement(SpecRange, os.str())); 8164 } else { 8165 // The canonical type for formatting this value is different from the 8166 // actual type of the expression. (This occurs, for example, with Darwin's 8167 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8168 // should be printed as 'long' for 64-bit compatibility.) 8169 // Rather than emitting a normal format/argument mismatch, we want to 8170 // add a cast to the recommended type (and correct the format string 8171 // if necessary). 
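      // Illustrative example (not from the original source): with
      //   NSInteger i; NSLog(@"%ld", i);
      // on a target where NSInteger is 'int', the fix-its built below cast the
      // argument to the suggested type so one format string works everywhere.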
8172 SmallString<16> CastBuf; 8173 llvm::raw_svector_ostream CastFix(CastBuf); 8174 CastFix << "("; 8175 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8176 CastFix << ")"; 8177 8178 SmallVector<FixItHint,4> Hints; 8179 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8180 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8181 8182 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8183 // If there's already a cast present, just replace it. 8184 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8185 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8186 8187 } else if (!requiresParensToAddCast(E)) { 8188 // If the expression has high enough precedence, 8189 // just write the C-style cast. 8190 Hints.push_back( 8191 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8192 } else { 8193 // Otherwise, add parens around the expression as well as the cast. 8194 CastFix << "("; 8195 Hints.push_back( 8196 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8197 8198 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8199 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8200 } 8201 8202 if (ShouldNotPrintDirectly) { 8203 // The expression has a type that should not be printed directly. 8204 // We extract the name from the typedef because we don't want to show 8205 // the underlying type in the diagnostic. 8206 StringRef Name; 8207 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8208 Name = TypedefTy->getDecl()->getName(); 8209 else 8210 Name = CastTyName; 8211 unsigned Diag = Pedantic 8212 ? diag::warn_format_argument_needs_cast_pedantic 8213 : diag::warn_format_argument_needs_cast; 8214 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8215 << E->getSourceRange(), 8216 E->getBeginLoc(), /*IsStringLocation=*/false, 8217 SpecRange, Hints); 8218 } else { 8219 // In this case, the expression could be printed using a different 8220 // specifier, but we've decided that the specifier is probably correct 8221 // and we should cast instead. Just use the normal warning message. 8222 EmitFormatDiagnostic( 8223 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8224 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8225 << E->getSourceRange(), 8226 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8227 } 8228 } 8229 } else { 8230 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8231 SpecifierLen); 8232 // Since the warning for passing non-POD types to variadic functions 8233 // was deferred until now, we emit a warning for non-POD 8234 // arguments here. 8235 switch (S.isValidVarArgType(ExprTy)) { 8236 case Sema::VAK_Valid: 8237 case Sema::VAK_ValidInCXX11: { 8238 unsigned Diag = 8239 Pedantic 8240 ? 
diag::warn_format_conversion_argument_type_mismatch_pedantic 8241 : diag::warn_format_conversion_argument_type_mismatch; 8242 8243 EmitFormatDiagnostic( 8244 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 8245 << IsEnum << CSR << E->getSourceRange(), 8246 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8247 break; 8248 } 8249 case Sema::VAK_Undefined: 8250 case Sema::VAK_MSVCUndefined: 8251 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 8252 << S.getLangOpts().CPlusPlus11 << ExprTy 8253 << CallType 8254 << AT.getRepresentativeTypeName(S.Context) << CSR 8255 << E->getSourceRange(), 8256 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8257 checkForCStrMembers(AT, E); 8258 break; 8259 8260 case Sema::VAK_Invalid: 8261 if (ExprTy->isObjCObjectType()) 8262 EmitFormatDiagnostic( 8263 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 8264 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 8265 << AT.getRepresentativeTypeName(S.Context) << CSR 8266 << E->getSourceRange(), 8267 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8268 else 8269 // FIXME: If this is an initializer list, suggest removing the braces 8270 // or inserting a cast to the target type. 8271 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 8272 << isa<InitListExpr>(E) << ExprTy << CallType 8273 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 8274 break; 8275 } 8276 8277 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 8278 "format string specifier index out of range"); 8279 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 8280 } 8281 8282 return true; 8283 } 8284 8285 //===--- CHECK: Scanf format string checking ------------------------------===// 8286 8287 namespace { 8288 8289 class CheckScanfHandler : public CheckFormatHandler { 8290 public: 8291 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 8292 const Expr *origFormatExpr, Sema::FormatStringType type, 8293 unsigned firstDataArg, unsigned numDataArgs, 8294 const char *beg, bool hasVAListArg, 8295 ArrayRef<const Expr *> Args, unsigned formatIdx, 8296 bool inFunctionCall, Sema::VariadicCallType CallType, 8297 llvm::SmallBitVector &CheckedVarArgs, 8298 UncoveredArgHandler &UncoveredArg) 8299 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8300 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8301 inFunctionCall, CallType, CheckedVarArgs, 8302 UncoveredArg) {} 8303 8304 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 8305 const char *startSpecifier, 8306 unsigned specifierLen) override; 8307 8308 bool HandleInvalidScanfConversionSpecifier( 8309 const analyze_scanf::ScanfSpecifier &FS, 8310 const char *startSpecifier, 8311 unsigned specifierLen) override; 8312 8313 void HandleIncompleteScanList(const char *start, const char *end) override; 8314 }; 8315 8316 } // namespace 8317 8318 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 8319 const char *end) { 8320 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 8321 getLocationOfByte(end), /*IsStringLocation*/true, 8322 getSpecifierRange(start, end - start)); 8323 } 8324 8325 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 8326 const analyze_scanf::ScanfSpecifier &FS, 8327 const char *startSpecifier, 8328 unsigned specifierLen) { 8329 const analyze_scanf::ScanfConversionSpecifier &CS = 8330 FS.getConversionSpecifier(); 8331 8332 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8333 
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

bool CheckScanfHandler::HandleScanfSpecifier(
                                       const analyze_scanf::ScanfSpecifier &FS,
                                       const char *startSpecifier,
                                       unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument. These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check if the field width is non-zero.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/true, R,
                           FixItHint::CreateRemoval(R));
    }
  }

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense. Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // Check that the length modifier is valid with the given conversion
  // specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
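  // For illustration only (hypothetical user code):
  //   long n;
  //   scanf("%d", &n);
  // The argument is 'long *' while '%d' expects 'int *'; the mismatch logic
  // below reports it and, when fixType() succeeds, suggests "%ld" instead.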
8411 const Expr *Ex = getDataArg(argIndex); 8412 if (!Ex) 8413 return true; 8414 8415 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 8416 8417 if (!AT.isValid()) { 8418 return true; 8419 } 8420 8421 analyze_format_string::ArgType::MatchKind Match = 8422 AT.matchesType(S.Context, Ex->getType()); 8423 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 8424 if (Match == analyze_format_string::ArgType::Match) 8425 return true; 8426 8427 ScanfSpecifier fixedFS = FS; 8428 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 8429 S.getLangOpts(), S.Context); 8430 8431 unsigned Diag = 8432 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8433 : diag::warn_format_conversion_argument_type_mismatch; 8434 8435 if (Success) { 8436 // Get the fix string from the fixed format specifier. 8437 SmallString<128> buf; 8438 llvm::raw_svector_ostream os(buf); 8439 fixedFS.toString(os); 8440 8441 EmitFormatDiagnostic( 8442 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 8443 << Ex->getType() << false << Ex->getSourceRange(), 8444 Ex->getBeginLoc(), 8445 /*IsStringLocation*/ false, 8446 getSpecifierRange(startSpecifier, specifierLen), 8447 FixItHint::CreateReplacement( 8448 getSpecifierRange(startSpecifier, specifierLen), os.str())); 8449 } else { 8450 EmitFormatDiagnostic(S.PDiag(Diag) 8451 << AT.getRepresentativeTypeName(S.Context) 8452 << Ex->getType() << false << Ex->getSourceRange(), 8453 Ex->getBeginLoc(), 8454 /*IsStringLocation*/ false, 8455 getSpecifierRange(startSpecifier, specifierLen)); 8456 } 8457 8458 return true; 8459 } 8460 8461 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8462 const Expr *OrigFormatExpr, 8463 ArrayRef<const Expr *> Args, 8464 bool HasVAListArg, unsigned format_idx, 8465 unsigned firstDataArg, 8466 Sema::FormatStringType Type, 8467 bool inFunctionCall, 8468 Sema::VariadicCallType CallType, 8469 llvm::SmallBitVector &CheckedVarArgs, 8470 UncoveredArgHandler &UncoveredArg) { 8471 // CHECK: is the format string a wide literal? 8472 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 8473 CheckFormatHandler::EmitFormatDiagnostic( 8474 S, inFunctionCall, Args[format_idx], 8475 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 8476 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 8477 return; 8478 } 8479 8480 // Str - The format string. NOTE: this is NOT null-terminated! 8481 StringRef StrRef = FExpr->getString(); 8482 const char *Str = StrRef.data(); 8483 // Account for cases where the string literal is truncated in a declaration. 8484 const ConstantArrayType *T = 8485 S.Context.getAsConstantArrayType(FExpr->getType()); 8486 assert(T && "String literal not of constant array type!"); 8487 size_t TypeSize = T->getSize().getZExtValue(); 8488 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 8489 const unsigned numDataArgs = Args.size() - firstDataArg; 8490 8491 // Emit a warning if the string literal is truncated and does not contain an 8492 // embedded null character. 8493 if (TypeSize <= StrRef.size() && 8494 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 8495 CheckFormatHandler::EmitFormatDiagnostic( 8496 S, inFunctionCall, Args[format_idx], 8497 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 8498 FExpr->getBeginLoc(), 8499 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 8500 return; 8501 } 8502 8503 // CHECK: empty format string? 
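  // e.g. (illustrative): printf("", 42) -- no directives but one data
  // argument -- is diagnosed just below.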
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
        HasVAListArg, Args, format_idx, inFunctionCall, CallType,
        CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
                                                  S.getLangOpts(),
                                                  S.Context.getTargetInfo(),
                                            Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, HasVAListArg, Args, format_idx,
                        inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
                                                 S.getLangOpts(),
                                                 S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}

bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
}

//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
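// For illustration, the promotion chains below are abs -> labs -> llabs,
// fabsf -> fabs -> fabsl, and cabsf -> cabs -> cabsl (plus their __builtin_
// counterparts); the widest member of each chain maps to 0.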
8556 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 8557 switch (AbsFunction) { 8558 default: 8559 return 0; 8560 8561 case Builtin::BI__builtin_abs: 8562 return Builtin::BI__builtin_labs; 8563 case Builtin::BI__builtin_labs: 8564 return Builtin::BI__builtin_llabs; 8565 case Builtin::BI__builtin_llabs: 8566 return 0; 8567 8568 case Builtin::BI__builtin_fabsf: 8569 return Builtin::BI__builtin_fabs; 8570 case Builtin::BI__builtin_fabs: 8571 return Builtin::BI__builtin_fabsl; 8572 case Builtin::BI__builtin_fabsl: 8573 return 0; 8574 8575 case Builtin::BI__builtin_cabsf: 8576 return Builtin::BI__builtin_cabs; 8577 case Builtin::BI__builtin_cabs: 8578 return Builtin::BI__builtin_cabsl; 8579 case Builtin::BI__builtin_cabsl: 8580 return 0; 8581 8582 case Builtin::BIabs: 8583 return Builtin::BIlabs; 8584 case Builtin::BIlabs: 8585 return Builtin::BIllabs; 8586 case Builtin::BIllabs: 8587 return 0; 8588 8589 case Builtin::BIfabsf: 8590 return Builtin::BIfabs; 8591 case Builtin::BIfabs: 8592 return Builtin::BIfabsl; 8593 case Builtin::BIfabsl: 8594 return 0; 8595 8596 case Builtin::BIcabsf: 8597 return Builtin::BIcabs; 8598 case Builtin::BIcabs: 8599 return Builtin::BIcabsl; 8600 case Builtin::BIcabsl: 8601 return 0; 8602 } 8603 } 8604 8605 // Returns the argument type of the absolute value function. 8606 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 8607 unsigned AbsType) { 8608 if (AbsType == 0) 8609 return QualType(); 8610 8611 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 8612 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 8613 if (Error != ASTContext::GE_None) 8614 return QualType(); 8615 8616 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 8617 if (!FT) 8618 return QualType(); 8619 8620 if (FT->getNumParams() != 1) 8621 return QualType(); 8622 8623 return FT->getParamType(0); 8624 } 8625 8626 // Returns the best absolute value function, or zero, based on type and 8627 // current absolute value function. 8628 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 8629 unsigned AbsFunctionKind) { 8630 unsigned BestKind = 0; 8631 uint64_t ArgSize = Context.getTypeSize(ArgType); 8632 for (unsigned Kind = AbsFunctionKind; Kind != 0; 8633 Kind = getLargerAbsoluteValueFunction(Kind)) { 8634 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 8635 if (Context.getTypeSize(ParamType) >= ArgSize) { 8636 if (BestKind == 0) 8637 BestKind = Kind; 8638 else if (Context.hasSameType(ParamType, ArgType)) { 8639 BestKind = Kind; 8640 break; 8641 } 8642 } 8643 } 8644 return BestKind; 8645 } 8646 8647 enum AbsoluteValueKind { 8648 AVK_Integer, 8649 AVK_Floating, 8650 AVK_Complex 8651 }; 8652 8653 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 8654 if (T->isIntegralOrEnumerationType()) 8655 return AVK_Integer; 8656 if (T->isRealFloatingType()) 8657 return AVK_Floating; 8658 if (T->isAnyComplexType()) 8659 return AVK_Complex; 8660 8661 llvm_unreachable("Type not integer, floating, or complex"); 8662 } 8663 8664 // Changes the absolute value function to a different type. Preserves whether 8665 // the function is a builtin. 
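// e.g. (illustrative): changeAbsFunction(Builtin::BIfabsf, AVK_Integer)
// yields Builtin::BIabs; the caller then runs the result through
// getBestAbsFunction() to pick a variant wide enough for the argument.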
8666 static unsigned changeAbsFunction(unsigned AbsKind, 8667 AbsoluteValueKind ValueKind) { 8668 switch (ValueKind) { 8669 case AVK_Integer: 8670 switch (AbsKind) { 8671 default: 8672 return 0; 8673 case Builtin::BI__builtin_fabsf: 8674 case Builtin::BI__builtin_fabs: 8675 case Builtin::BI__builtin_fabsl: 8676 case Builtin::BI__builtin_cabsf: 8677 case Builtin::BI__builtin_cabs: 8678 case Builtin::BI__builtin_cabsl: 8679 return Builtin::BI__builtin_abs; 8680 case Builtin::BIfabsf: 8681 case Builtin::BIfabs: 8682 case Builtin::BIfabsl: 8683 case Builtin::BIcabsf: 8684 case Builtin::BIcabs: 8685 case Builtin::BIcabsl: 8686 return Builtin::BIabs; 8687 } 8688 case AVK_Floating: 8689 switch (AbsKind) { 8690 default: 8691 return 0; 8692 case Builtin::BI__builtin_abs: 8693 case Builtin::BI__builtin_labs: 8694 case Builtin::BI__builtin_llabs: 8695 case Builtin::BI__builtin_cabsf: 8696 case Builtin::BI__builtin_cabs: 8697 case Builtin::BI__builtin_cabsl: 8698 return Builtin::BI__builtin_fabsf; 8699 case Builtin::BIabs: 8700 case Builtin::BIlabs: 8701 case Builtin::BIllabs: 8702 case Builtin::BIcabsf: 8703 case Builtin::BIcabs: 8704 case Builtin::BIcabsl: 8705 return Builtin::BIfabsf; 8706 } 8707 case AVK_Complex: 8708 switch (AbsKind) { 8709 default: 8710 return 0; 8711 case Builtin::BI__builtin_abs: 8712 case Builtin::BI__builtin_labs: 8713 case Builtin::BI__builtin_llabs: 8714 case Builtin::BI__builtin_fabsf: 8715 case Builtin::BI__builtin_fabs: 8716 case Builtin::BI__builtin_fabsl: 8717 return Builtin::BI__builtin_cabsf; 8718 case Builtin::BIabs: 8719 case Builtin::BIlabs: 8720 case Builtin::BIllabs: 8721 case Builtin::BIfabsf: 8722 case Builtin::BIfabs: 8723 case Builtin::BIfabsl: 8724 return Builtin::BIcabsf; 8725 } 8726 } 8727 llvm_unreachable("Unable to convert function"); 8728 } 8729 8730 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 8731 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 8732 if (!FnInfo) 8733 return 0; 8734 8735 switch (FDecl->getBuiltinID()) { 8736 default: 8737 return 0; 8738 case Builtin::BI__builtin_abs: 8739 case Builtin::BI__builtin_fabs: 8740 case Builtin::BI__builtin_fabsf: 8741 case Builtin::BI__builtin_fabsl: 8742 case Builtin::BI__builtin_labs: 8743 case Builtin::BI__builtin_llabs: 8744 case Builtin::BI__builtin_cabs: 8745 case Builtin::BI__builtin_cabsf: 8746 case Builtin::BI__builtin_cabsl: 8747 case Builtin::BIabs: 8748 case Builtin::BIlabs: 8749 case Builtin::BIllabs: 8750 case Builtin::BIfabs: 8751 case Builtin::BIfabsf: 8752 case Builtin::BIfabsl: 8753 case Builtin::BIcabs: 8754 case Builtin::BIcabsf: 8755 case Builtin::BIcabsl: 8756 return FDecl->getBuiltinID(); 8757 } 8758 llvm_unreachable("Unknown Builtin type"); 8759 } 8760 8761 // If the replacement is valid, emit a note with replacement function. 8762 // Additionally, suggest including the proper header if not already included. 
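// Illustrative example (assumed C++ user code): for
//   double d = -1.5;
//   int i = abs(d);
// the note suggests replacing the call with 'std::abs', and a second note
// recommends including <cmath> when no suitable std::abs overload is
// already visible.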
8763 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 8764 unsigned AbsKind, QualType ArgType) { 8765 bool EmitHeaderHint = true; 8766 const char *HeaderName = nullptr; 8767 const char *FunctionName = nullptr; 8768 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 8769 FunctionName = "std::abs"; 8770 if (ArgType->isIntegralOrEnumerationType()) { 8771 HeaderName = "cstdlib"; 8772 } else if (ArgType->isRealFloatingType()) { 8773 HeaderName = "cmath"; 8774 } else { 8775 llvm_unreachable("Invalid Type"); 8776 } 8777 8778 // Lookup all std::abs 8779 if (NamespaceDecl *Std = S.getStdNamespace()) { 8780 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 8781 R.suppressDiagnostics(); 8782 S.LookupQualifiedName(R, Std); 8783 8784 for (const auto *I : R) { 8785 const FunctionDecl *FDecl = nullptr; 8786 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 8787 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 8788 } else { 8789 FDecl = dyn_cast<FunctionDecl>(I); 8790 } 8791 if (!FDecl) 8792 continue; 8793 8794 // Found std::abs(), check that they are the right ones. 8795 if (FDecl->getNumParams() != 1) 8796 continue; 8797 8798 // Check that the parameter type can handle the argument. 8799 QualType ParamType = FDecl->getParamDecl(0)->getType(); 8800 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 8801 S.Context.getTypeSize(ArgType) <= 8802 S.Context.getTypeSize(ParamType)) { 8803 // Found a function, don't need the header hint. 8804 EmitHeaderHint = false; 8805 break; 8806 } 8807 } 8808 } 8809 } else { 8810 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 8811 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 8812 8813 if (HeaderName) { 8814 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 8815 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 8816 R.suppressDiagnostics(); 8817 S.LookupName(R, S.getCurScope()); 8818 8819 if (R.isSingleResult()) { 8820 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 8821 if (FD && FD->getBuiltinID() == AbsKind) { 8822 EmitHeaderHint = false; 8823 } else { 8824 return; 8825 } 8826 } else if (!R.empty()) { 8827 return; 8828 } 8829 } 8830 } 8831 8832 S.Diag(Loc, diag::note_replace_abs_function) 8833 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 8834 8835 if (!HeaderName) 8836 return; 8837 8838 if (!EmitHeaderHint) 8839 return; 8840 8841 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 8842 << FunctionName; 8843 } 8844 8845 template <std::size_t StrLen> 8846 static bool IsStdFunction(const FunctionDecl *FDecl, 8847 const char (&Str)[StrLen]) { 8848 if (!FDecl) 8849 return false; 8850 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 8851 return false; 8852 if (!FDecl->isInStdNamespace()) 8853 return false; 8854 8855 return true; 8856 } 8857 8858 // Warn when using the wrong abs() function. 8859 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 8860 const FunctionDecl *FDecl) { 8861 if (Call->getNumArgs() != 1) 8862 return; 8863 8864 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 8865 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 8866 if (AbsKind == 0 && !IsStdAbs) 8867 return; 8868 8869 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 8870 QualType ParamType = Call->getArg(0)->getType(); 8871 8872 // Unsigned types cannot be negative. Suggest removing the absolute value 8873 // function call. 
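  // e.g. (illustrative): 'unsigned u = ...; if (abs(u) > 5)' -- the operand
  // can never be negative, so the note below offers a fix-it that removes
  // the 'abs' call and leaves the value untouched.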
8874 if (ArgType->isUnsignedIntegerType()) { 8875 const char *FunctionName = 8876 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 8877 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 8878 Diag(Call->getExprLoc(), diag::note_remove_abs) 8879 << FunctionName 8880 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 8881 return; 8882 } 8883 8884 // Taking the absolute value of a pointer is very suspicious, they probably 8885 // wanted to index into an array, dereference a pointer, call a function, etc. 8886 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 8887 unsigned DiagType = 0; 8888 if (ArgType->isFunctionType()) 8889 DiagType = 1; 8890 else if (ArgType->isArrayType()) 8891 DiagType = 2; 8892 8893 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 8894 return; 8895 } 8896 8897 // std::abs has overloads which prevent most of the absolute value problems 8898 // from occurring. 8899 if (IsStdAbs) 8900 return; 8901 8902 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 8903 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 8904 8905 // The argument and parameter are the same kind. Check if they are the right 8906 // size. 8907 if (ArgValueKind == ParamValueKind) { 8908 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 8909 return; 8910 8911 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 8912 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 8913 << FDecl << ArgType << ParamType; 8914 8915 if (NewAbsKind == 0) 8916 return; 8917 8918 emitReplacement(*this, Call->getExprLoc(), 8919 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 8920 return; 8921 } 8922 8923 // ArgValueKind != ParamValueKind 8924 // The wrong type of absolute value function was used. Attempt to find the 8925 // proper one. 8926 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 8927 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 8928 if (NewAbsKind == 0) 8929 return; 8930 8931 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 8932 << FDecl << ParamValueKind << ArgValueKind; 8933 8934 emitReplacement(*this, Call->getExprLoc(), 8935 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 8936 } 8937 8938 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 8939 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 8940 const FunctionDecl *FDecl) { 8941 if (!Call || !FDecl) return; 8942 8943 // Ignore template specializations and macros. 8944 if (inTemplateInstantiation()) return; 8945 if (Call->getExprLoc().isMacroID()) return; 8946 8947 // Only care about the one template argument, two function parameter std::max 8948 if (Call->getNumArgs() != 2) return; 8949 if (!IsStdFunction(FDecl, "max")) return; 8950 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 8951 if (!ArgList) return; 8952 if (ArgList->size() != 1) return; 8953 8954 // Check that template type argument is unsigned integer. 8955 const auto& TA = ArgList->get(0); 8956 if (TA.getKind() != TemplateArgument::Type) return; 8957 QualType ArgType = TA.getAsType(); 8958 if (!ArgType->isUnsignedIntegerType()) return; 8959 8960 // See if either argument is a literal zero. 
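  // Illustrative case (hypothetical user code): for unsigned 'n',
  // 'std::max(0u, n)' always yields 'n', so the warning below suggests
  // dropping the call; the removal ranges computed next turn it into '(n)'.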
8961 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 8962 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 8963 if (!MTE) return false; 8964 const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr()); 8965 if (!Num) return false; 8966 if (Num->getValue() != 0) return false; 8967 return true; 8968 }; 8969 8970 const Expr *FirstArg = Call->getArg(0); 8971 const Expr *SecondArg = Call->getArg(1); 8972 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 8973 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 8974 8975 // Only warn when exactly one argument is zero. 8976 if (IsFirstArgZero == IsSecondArgZero) return; 8977 8978 SourceRange FirstRange = FirstArg->getSourceRange(); 8979 SourceRange SecondRange = SecondArg->getSourceRange(); 8980 8981 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 8982 8983 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 8984 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 8985 8986 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 8987 SourceRange RemovalRange; 8988 if (IsFirstArgZero) { 8989 RemovalRange = SourceRange(FirstRange.getBegin(), 8990 SecondRange.getBegin().getLocWithOffset(-1)); 8991 } else { 8992 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 8993 SecondRange.getEnd()); 8994 } 8995 8996 Diag(Call->getExprLoc(), diag::note_remove_max_call) 8997 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 8998 << FixItHint::CreateRemoval(RemovalRange); 8999 } 9000 9001 //===--- CHECK: Standard memory functions ---------------------------------===// 9002 9003 /// Takes the expression passed to the size_t parameter of functions 9004 /// such as memcmp, strncat, etc and warns if it's a comparison. 9005 /// 9006 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9007 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9008 IdentifierInfo *FnName, 9009 SourceLocation FnLoc, 9010 SourceLocation RParenLoc) { 9011 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9012 if (!Size) 9013 return false; 9014 9015 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9016 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9017 return false; 9018 9019 SourceRange SizeRange = Size->getSourceRange(); 9020 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9021 << SizeRange << FnName; 9022 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9023 << FnName 9024 << FixItHint::CreateInsertion( 9025 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9026 << FixItHint::CreateRemoval(RParenLoc); 9027 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9028 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9029 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9030 ")"); 9031 9032 return true; 9033 } 9034 9035 /// Determine whether the given type is or contains a dynamic class type 9036 /// (e.g., whether it has a vtable). 9037 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9038 bool &IsContained) { 9039 // Look through array types while ignoring qualifiers. 9040 const Type *Ty = T->getBaseElementTypeUnsafe(); 9041 IsContained = false; 9042 9043 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9044 RD = RD ? RD->getDefinition() : nullptr; 9045 if (!RD || RD->isInvalidDecl()) 9046 return nullptr; 9047 9048 if (RD->isDynamicClass()) 9049 return RD; 9050 9051 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9052 // It's impossible for a class to transitively contain itself by value, so 9053 // infinite recursion is impossible. 9054 for (auto *FD : RD->fields()) { 9055 bool SubContained; 9056 if (const CXXRecordDecl *ContainedRD = 9057 getContainedDynamicClass(FD->getType(), SubContained)) { 9058 IsContained = true; 9059 return ContainedRD; 9060 } 9061 } 9062 9063 return nullptr; 9064 } 9065 9066 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9067 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9068 if (Unary->getKind() == UETT_SizeOf) 9069 return Unary; 9070 return nullptr; 9071 } 9072 9073 /// If E is a sizeof expression, returns its argument expression, 9074 /// otherwise returns NULL. 9075 static const Expr *getSizeOfExprArg(const Expr *E) { 9076 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9077 if (!SizeOf->isArgumentType()) 9078 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9079 return nullptr; 9080 } 9081 9082 /// If E is a sizeof expression, returns its argument type. 9083 static QualType getSizeOfArgType(const Expr *E) { 9084 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9085 return SizeOf->getTypeOfArgument(); 9086 return QualType(); 9087 } 9088 9089 namespace { 9090 9091 struct SearchNonTrivialToInitializeField 9092 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9093 using Super = 9094 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9095 9096 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9097 9098 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9099 SourceLocation SL) { 9100 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9101 asDerived().visitArray(PDIK, AT, SL); 9102 return; 9103 } 9104 9105 Super::visitWithKind(PDIK, FT, SL); 9106 } 9107 9108 void visitARCStrong(QualType FT, SourceLocation SL) { 9109 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9110 } 9111 void visitARCWeak(QualType FT, SourceLocation SL) { 9112 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9113 } 9114 void visitStruct(QualType FT, SourceLocation SL) { 9115 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9116 visit(FD->getType(), FD->getLocation()); 9117 } 9118 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9119 const ArrayType *AT, SourceLocation SL) { 9120 visit(getContext().getBaseElementType(AT), SL); 9121 } 9122 void visitTrivial(QualType FT, SourceLocation SL) {} 9123 9124 static void diag(QualType RT, const Expr *E, Sema &S) { 9125 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9126 } 9127 9128 ASTContext &getContext() { return S.getASTContext(); } 9129 9130 const Expr *E; 9131 Sema &S; 9132 }; 9133 9134 struct SearchNonTrivialToCopyField 9135 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9136 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9137 9138 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9139 9140 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9141 SourceLocation SL) { 9142 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9143 asDerived().visitArray(PCK, AT, SL); 9144 return; 9145 } 9146 9147 Super::visitWithKind(PCK, FT, SL); 9148 } 9149 9150 void visitARCStrong(QualType FT, SourceLocation SL) { 9151 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9152 } 9153 void visitARCWeak(QualType FT, SourceLocation SL) { 9154 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9155 } 9156 void visitStruct(QualType FT, SourceLocation SL) { 9157 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9158 visit(FD->getType(), FD->getLocation()); 9159 } 9160 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9161 SourceLocation SL) { 9162 visit(getContext().getBaseElementType(AT), SL); 9163 } 9164 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9165 SourceLocation SL) {} 9166 void visitTrivial(QualType FT, SourceLocation SL) {} 9167 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9168 9169 static void diag(QualType RT, const Expr *E, Sema &S) { 9170 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9171 } 9172 9173 ASTContext &getContext() { return S.getASTContext(); } 9174 9175 const Expr *E; 9176 Sema &S; 9177 }; 9178 9179 } 9180 9181 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9182 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9183 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9184 9185 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9186 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9187 return false; 9188 9189 return doesExprLikelyComputeSize(BO->getLHS()) || 9190 doesExprLikelyComputeSize(BO->getRHS()); 9191 } 9192 9193 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9194 } 9195 9196 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9197 /// 9198 /// \code 9199 /// #define MACRO 0 9200 /// foo(MACRO); 9201 /// foo(0); 9202 /// \endcode 9203 /// 9204 /// This should return true for the first call to foo, but not for the second 9205 /// (regardless of whether foo is a macro or function). 9206 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9207 SourceLocation CallLoc, 9208 SourceLocation ArgLoc) { 9209 if (!CallLoc.isMacroID()) 9210 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9211 9212 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9213 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9214 } 9215 9216 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 9217 /// last two arguments transposed. 9218 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 9219 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 9220 return; 9221 9222 const Expr *SizeArg = 9223 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 9224 9225 auto isLiteralZero = [](const Expr *E) { 9226 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 9227 }; 9228 9229 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 9230 SourceLocation CallLoc = Call->getRParenLoc(); 9231 SourceManager &SM = S.getSourceManager(); 9232 if (isLiteralZero(SizeArg) && 9233 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 9234 9235 SourceLocation DiagLoc = SizeArg->getExprLoc(); 9236 9237 // Some platforms #define bzero to __builtin_memset. See if this is the 9238 // case, and if so, emit a better diagnostic. 
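    // e.g. (illustrative): 'bzero(buf, 0)' clears zero bytes and is reported
    // here; checking the immediate macro name keeps the bzero-specific
    // wording on platforms that #define bzero in terms of __builtin_memset.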
9239 if (BId == Builtin::BIbzero || 9240 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 9241 CallLoc, SM, S.getLangOpts()) == "bzero")) { 9242 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 9243 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 9244 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 9245 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 9246 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 9247 } 9248 return; 9249 } 9250 9251 // If the second argument to a memset is a sizeof expression and the third 9252 // isn't, this is also likely an error. This should catch 9253 // 'memset(buf, sizeof(buf), 0xff)'. 9254 if (BId == Builtin::BImemset && 9255 doesExprLikelyComputeSize(Call->getArg(1)) && 9256 !doesExprLikelyComputeSize(Call->getArg(2))) { 9257 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 9258 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 9259 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 9260 return; 9261 } 9262 } 9263 9264 /// Check for dangerous or invalid arguments to memset(). 9265 /// 9266 /// This issues warnings on known problematic, dangerous or unspecified 9267 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 9268 /// function calls. 9269 /// 9270 /// \param Call The call expression to diagnose. 9271 void Sema::CheckMemaccessArguments(const CallExpr *Call, 9272 unsigned BId, 9273 IdentifierInfo *FnName) { 9274 assert(BId != 0); 9275 9276 // It is possible to have a non-standard definition of memset. Validate 9277 // we have enough arguments, and if not, abort further checking. 9278 unsigned ExpectedNumArgs = 9279 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 9280 if (Call->getNumArgs() < ExpectedNumArgs) 9281 return; 9282 9283 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 9284 BId == Builtin::BIstrndup ? 1 : 2); 9285 unsigned LenArg = 9286 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 9287 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 9288 9289 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 9290 Call->getBeginLoc(), Call->getRParenLoc())) 9291 return; 9292 9293 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 9294 CheckMemaccessSize(*this, BId, Call); 9295 9296 // We have special checking when the length is a sizeof expression. 9297 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 9298 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 9299 llvm::FoldingSetNodeID SizeOfArgID; 9300 9301 // Although widely used, 'bzero' is not a standard function. Be more strict 9302 // with the argument types before allowing diagnostics and only allow the 9303 // form bzero(ptr, sizeof(...)). 9304 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9305 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 9306 return; 9307 9308 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 9309 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 9310 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 9311 9312 QualType DestTy = Dest->getType(); 9313 QualType PointeeTy; 9314 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 9315 PointeeTy = DestPtrTy->getPointeeType(); 9316 9317 // Never warn about void type pointers. This can be used to suppress 9318 // false positives. 
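      // e.g. (illustrative): spelling the call as
      //   memset((void *)&obj, 0, sizeof(obj));
      // keeps the pointee type 'void' here and opts out of the class-specific
      // diagnostics issued further down.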
9319 if (PointeeTy->isVoidType()) 9320 continue; 9321 9322 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 9323 // actually comparing the expressions for equality. Because computing the 9324 // expression IDs can be expensive, we only do this if the diagnostic is 9325 // enabled. 9326 if (SizeOfArg && 9327 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 9328 SizeOfArg->getExprLoc())) { 9329 // We only compute IDs for expressions if the warning is enabled, and 9330 // cache the sizeof arg's ID. 9331 if (SizeOfArgID == llvm::FoldingSetNodeID()) 9332 SizeOfArg->Profile(SizeOfArgID, Context, true); 9333 llvm::FoldingSetNodeID DestID; 9334 Dest->Profile(DestID, Context, true); 9335 if (DestID == SizeOfArgID) { 9336 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 9337 // over sizeof(src) as well. 9338 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 9339 StringRef ReadableName = FnName->getName(); 9340 9341 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 9342 if (UnaryOp->getOpcode() == UO_AddrOf) 9343 ActionIdx = 1; // If its an address-of operator, just remove it. 9344 if (!PointeeTy->isIncompleteType() && 9345 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 9346 ActionIdx = 2; // If the pointee's size is sizeof(char), 9347 // suggest an explicit length. 9348 9349 // If the function is defined as a builtin macro, do not show macro 9350 // expansion. 9351 SourceLocation SL = SizeOfArg->getExprLoc(); 9352 SourceRange DSR = Dest->getSourceRange(); 9353 SourceRange SSR = SizeOfArg->getSourceRange(); 9354 SourceManager &SM = getSourceManager(); 9355 9356 if (SM.isMacroArgExpansion(SL)) { 9357 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 9358 SL = SM.getSpellingLoc(SL); 9359 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 9360 SM.getSpellingLoc(DSR.getEnd())); 9361 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 9362 SM.getSpellingLoc(SSR.getEnd())); 9363 } 9364 9365 DiagRuntimeBehavior(SL, SizeOfArg, 9366 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 9367 << ReadableName 9368 << PointeeTy 9369 << DestTy 9370 << DSR 9371 << SSR); 9372 DiagRuntimeBehavior(SL, SizeOfArg, 9373 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 9374 << ActionIdx 9375 << SSR); 9376 9377 break; 9378 } 9379 } 9380 9381 // Also check for cases where the sizeof argument is the exact same 9382 // type as the memory argument, and where it points to a user-defined 9383 // record type. 9384 if (SizeOfArgTy != QualType()) { 9385 if (PointeeTy->isRecordType() && 9386 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 9387 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 9388 PDiag(diag::warn_sizeof_pointer_type_memaccess) 9389 << FnName << SizeOfArgTy << ArgIdx 9390 << PointeeTy << Dest->getSourceRange() 9391 << LenExpr->getSourceRange()); 9392 break; 9393 } 9394 } 9395 } else if (DestTy->isArrayType()) { 9396 PointeeTy = DestTy; 9397 } 9398 9399 if (PointeeTy == QualType()) 9400 continue; 9401 9402 // Always complain about dynamic classes. 9403 bool IsContained; 9404 if (const CXXRecordDecl *ContainedRD = 9405 getContainedDynamicClass(PointeeTy, IsContained)) { 9406 9407 unsigned OperationType = 0; 9408 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 9409 // "overwritten" if we're warning about the destination for any call 9410 // but memcmp; otherwise a verb appropriate to the call. 
9411 if (ArgIdx != 0 || IsCmp) { 9412 if (BId == Builtin::BImemcpy) 9413 OperationType = 1; 9414 else if(BId == Builtin::BImemmove) 9415 OperationType = 2; 9416 else if (IsCmp) 9417 OperationType = 3; 9418 } 9419 9420 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9421 PDiag(diag::warn_dyn_class_memaccess) 9422 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 9423 << IsContained << ContainedRD << OperationType 9424 << Call->getCallee()->getSourceRange()); 9425 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 9426 BId != Builtin::BImemset) 9427 DiagRuntimeBehavior( 9428 Dest->getExprLoc(), Dest, 9429 PDiag(diag::warn_arc_object_memaccess) 9430 << ArgIdx << FnName << PointeeTy 9431 << Call->getCallee()->getSourceRange()); 9432 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 9433 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 9434 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 9435 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9436 PDiag(diag::warn_cstruct_memaccess) 9437 << ArgIdx << FnName << PointeeTy << 0); 9438 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 9439 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 9440 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 9441 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9442 PDiag(diag::warn_cstruct_memaccess) 9443 << ArgIdx << FnName << PointeeTy << 1); 9444 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 9445 } else { 9446 continue; 9447 } 9448 } else 9449 continue; 9450 9451 DiagRuntimeBehavior( 9452 Dest->getExprLoc(), Dest, 9453 PDiag(diag::note_bad_memaccess_silence) 9454 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 9455 break; 9456 } 9457 } 9458 9459 // A little helper routine: ignore addition and subtraction of integer literals. 9460 // This intentionally does not ignore all integer constant expressions because 9461 // we don't want to remove sizeof(). 9462 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 9463 Ex = Ex->IgnoreParenCasts(); 9464 9465 while (true) { 9466 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 9467 if (!BO || !BO->isAdditiveOp()) 9468 break; 9469 9470 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 9471 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 9472 9473 if (isa<IntegerLiteral>(RHS)) 9474 Ex = LHS; 9475 else if (isa<IntegerLiteral>(LHS)) 9476 Ex = RHS; 9477 else 9478 break; 9479 } 9480 9481 return Ex; 9482 } 9483 9484 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 9485 ASTContext &Context) { 9486 // Only handle constant-sized or VLAs, but not flexible members. 9487 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 9488 // Only issue the FIXIT for arrays of size > 1. 9489 if (CAT->getSize().getSExtValue() <= 1) 9490 return false; 9491 } else if (!Ty->isVariableArrayType()) { 9492 return false; 9493 } 9494 return true; 9495 } 9496 9497 // Warn if the user has made the 'size' argument to strlcpy or strlcat 9498 // be the size of the source, instead of the destination. 
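// e.g. (illustrative): 'strlcpy(dst, src, sizeof(src))' and
// 'strlcpy(dst, src, strlen(src))' both bound the copy by the source; when
// 'dst' is a constant-size array, the note below offers 'sizeof(dst)' as a
// replacement.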
9499 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 9500 IdentifierInfo *FnName) { 9501 9502 // Don't crash if the user has the wrong number of arguments 9503 unsigned NumArgs = Call->getNumArgs(); 9504 if ((NumArgs != 3) && (NumArgs != 4)) 9505 return; 9506 9507 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 9508 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 9509 const Expr *CompareWithSrc = nullptr; 9510 9511 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 9512 Call->getBeginLoc(), Call->getRParenLoc())) 9513 return; 9514 9515 // Look for 'strlcpy(dst, x, sizeof(x))' 9516 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 9517 CompareWithSrc = Ex; 9518 else { 9519 // Look for 'strlcpy(dst, x, strlen(x))' 9520 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 9521 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 9522 SizeCall->getNumArgs() == 1) 9523 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 9524 } 9525 } 9526 9527 if (!CompareWithSrc) 9528 return; 9529 9530 // Determine if the argument to sizeof/strlen is equal to the source 9531 // argument. In principle there's all kinds of things you could do 9532 // here, for instance creating an == expression and evaluating it with 9533 // EvaluateAsBooleanCondition, but this uses a more direct technique: 9534 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 9535 if (!SrcArgDRE) 9536 return; 9537 9538 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 9539 if (!CompareWithSrcDRE || 9540 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 9541 return; 9542 9543 const Expr *OriginalSizeArg = Call->getArg(2); 9544 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 9545 << OriginalSizeArg->getSourceRange() << FnName; 9546 9547 // Output a FIXIT hint if the destination is an array (rather than a 9548 // pointer to an array). This could be enhanced to handle some 9549 // pointers if we know the actual size, like if DstArg is 'array+2' 9550 // we could say 'sizeof(array)-2'. 9551 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 9552 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 9553 return; 9554 9555 SmallString<128> sizeString; 9556 llvm::raw_svector_ostream OS(sizeString); 9557 OS << "sizeof("; 9558 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9559 OS << ")"; 9560 9561 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 9562 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 9563 OS.str()); 9564 } 9565 9566 /// Check if two expressions refer to the same declaration. 9567 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 9568 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 9569 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 9570 return D1->getDecl() == D2->getDecl(); 9571 return false; 9572 } 9573 9574 static const Expr *getStrlenExprArg(const Expr *E) { 9575 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 9576 const FunctionDecl *FD = CE->getDirectCallee(); 9577 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 9578 return nullptr; 9579 return CE->getArg(0)->IgnoreParenCasts(); 9580 } 9581 return nullptr; 9582 } 9583 9584 // Warn on anti-patterns as the 'size' argument to strncat. 
9585 // The correct size argument should look like following: 9586 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 9587 void Sema::CheckStrncatArguments(const CallExpr *CE, 9588 IdentifierInfo *FnName) { 9589 // Don't crash if the user has the wrong number of arguments. 9590 if (CE->getNumArgs() < 3) 9591 return; 9592 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 9593 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 9594 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 9595 9596 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 9597 CE->getRParenLoc())) 9598 return; 9599 9600 // Identify common expressions, which are wrongly used as the size argument 9601 // to strncat and may lead to buffer overflows. 9602 unsigned PatternType = 0; 9603 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 9604 // - sizeof(dst) 9605 if (referToTheSameDecl(SizeOfArg, DstArg)) 9606 PatternType = 1; 9607 // - sizeof(src) 9608 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 9609 PatternType = 2; 9610 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 9611 if (BE->getOpcode() == BO_Sub) { 9612 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 9613 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 9614 // - sizeof(dst) - strlen(dst) 9615 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 9616 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 9617 PatternType = 1; 9618 // - sizeof(src) - (anything) 9619 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 9620 PatternType = 2; 9621 } 9622 } 9623 9624 if (PatternType == 0) 9625 return; 9626 9627 // Generate the diagnostic. 9628 SourceLocation SL = LenArg->getBeginLoc(); 9629 SourceRange SR = LenArg->getSourceRange(); 9630 SourceManager &SM = getSourceManager(); 9631 9632 // If the function is defined as a builtin macro, do not show macro expansion. 9633 if (SM.isMacroArgExpansion(SL)) { 9634 SL = SM.getSpellingLoc(SL); 9635 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 9636 SM.getSpellingLoc(SR.getEnd())); 9637 } 9638 9639 // Check if the destination is an array (rather than a pointer to an array). 9640 QualType DstTy = DstArg->getType(); 9641 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 9642 Context); 9643 if (!isKnownSizeArray) { 9644 if (PatternType == 1) 9645 Diag(SL, diag::warn_strncat_wrong_size) << SR; 9646 else 9647 Diag(SL, diag::warn_strncat_src_size) << SR; 9648 return; 9649 } 9650 9651 if (PatternType == 1) 9652 Diag(SL, diag::warn_strncat_large_size) << SR; 9653 else 9654 Diag(SL, diag::warn_strncat_src_size) << SR; 9655 9656 SmallString<128> sizeString; 9657 llvm::raw_svector_ostream OS(sizeString); 9658 OS << "sizeof("; 9659 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9660 OS << ") - "; 9661 OS << "strlen("; 9662 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9663 OS << ") - 1"; 9664 9665 Diag(SL, diag::note_strncat_wrong_size) 9666 << FixItHint::CreateReplacement(SR, OS.str()); 9667 } 9668 9669 void 9670 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 9671 SourceLocation ReturnLoc, 9672 bool isObjCMethod, 9673 const AttrVec *Attrs, 9674 const FunctionDecl *FD) { 9675 // Check if the return value is null but should not be. 9676 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 9677 (!isObjCMethod && isNonNullType(Context, lhsType))) && 9678 CheckNonNullExpr(*this, RetValExp)) 9679 Diag(ReturnLoc, diag::warn_null_ret) 9680 << (isObjCMethod ? 
1 : 0) << RetValExp->getSourceRange(); 9681 9682 // C++11 [basic.stc.dynamic.allocation]p4: 9683 // If an allocation function declared with a non-throwing 9684 // exception-specification fails to allocate storage, it shall return 9685 // a null pointer. Any other allocation function that fails to allocate 9686 // storage shall indicate failure only by throwing an exception [...] 9687 if (FD) { 9688 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 9689 if (Op == OO_New || Op == OO_Array_New) { 9690 const FunctionProtoType *Proto 9691 = FD->getType()->castAs<FunctionProtoType>(); 9692 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 9693 CheckNonNullExpr(*this, RetValExp)) 9694 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 9695 << FD << getLangOpts().CPlusPlus11; 9696 } 9697 } 9698 } 9699 9700 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 9701 9702 /// Check for comparisons of floating point operands using != and ==. 9703 /// Issue a warning if these are no self-comparisons, as they are not likely 9704 /// to do what the programmer intended. 9705 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) { 9706 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 9707 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 9708 9709 // Special case: check for x == x (which is OK). 9710 // Do not emit warnings for such cases. 9711 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 9712 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 9713 if (DRL->getDecl() == DRR->getDecl()) 9714 return; 9715 9716 // Special case: check for comparisons against literals that can be exactly 9717 // represented by APFloat. In such cases, do not emit a warning. This 9718 // is a heuristic: often comparison against such literals are used to 9719 // detect if a value in a variable has not changed. This clearly can 9720 // lead to false negatives. 9721 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 9722 if (FLL->isExact()) 9723 return; 9724 } else 9725 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 9726 if (FLR->isExact()) 9727 return; 9728 9729 // Check for comparisons with builtin types. 9730 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 9731 if (CL->getBuiltinCallee()) 9732 return; 9733 9734 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 9735 if (CR->getBuiltinCallee()) 9736 return; 9737 9738 // Emit the diagnostic. 9739 Diag(Loc, diag::warn_floatingpoint_eq) 9740 << LHS->getSourceRange() << RHS->getSourceRange(); 9741 } 9742 9743 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 9744 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 9745 9746 namespace { 9747 9748 /// Structure recording the 'active' range of an integer-valued 9749 /// expression. 9750 struct IntRange { 9751 /// The number of bits active in the int. 9752 unsigned Width; 9753 9754 /// True if the int is known not to have negative values. 9755 bool NonNegative; 9756 9757 IntRange(unsigned Width, bool NonNegative) 9758 : Width(Width), NonNegative(NonNegative) {} 9759 9760 /// Returns the range of the bool type. 9761 static IntRange forBoolType() { 9762 return IntRange(1, true); 9763 } 9764 9765 /// Returns the range of an opaque value of the given integral type. 
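  /// For illustration (assuming the usual 8-bit char): 'unsigned char' maps
  /// to { Width = 8, NonNegative = true }, while 'signed char' maps to
  /// { Width = 8, NonNegative = false }.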
9766 static IntRange forValueOfType(ASTContext &C, QualType T) { 9767 return forValueOfCanonicalType(C, 9768 T->getCanonicalTypeInternal().getTypePtr()); 9769 } 9770 9771 /// Returns the range of an opaque value of a canonical integral type. 9772 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 9773 assert(T->isCanonicalUnqualified()); 9774 9775 if (const VectorType *VT = dyn_cast<VectorType>(T)) 9776 T = VT->getElementType().getTypePtr(); 9777 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 9778 T = CT->getElementType().getTypePtr(); 9779 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 9780 T = AT->getValueType().getTypePtr(); 9781 9782 if (!C.getLangOpts().CPlusPlus) { 9783 // For enum types in C code, use the underlying datatype. 9784 if (const EnumType *ET = dyn_cast<EnumType>(T)) 9785 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 9786 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 9787 // For enum types in C++, use the known bit width of the enumerators. 9788 EnumDecl *Enum = ET->getDecl(); 9789 // In C++11, enums can have a fixed underlying type. Use this type to 9790 // compute the range. 9791 if (Enum->isFixed()) { 9792 return IntRange(C.getIntWidth(QualType(T, 0)), 9793 !ET->isSignedIntegerOrEnumerationType()); 9794 } 9795 9796 unsigned NumPositive = Enum->getNumPositiveBits(); 9797 unsigned NumNegative = Enum->getNumNegativeBits(); 9798 9799 if (NumNegative == 0) 9800 return IntRange(NumPositive, true/*NonNegative*/); 9801 else 9802 return IntRange(std::max(NumPositive + 1, NumNegative), 9803 false/*NonNegative*/); 9804 } 9805 9806 const BuiltinType *BT = cast<BuiltinType>(T); 9807 assert(BT->isInteger()); 9808 9809 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 9810 } 9811 9812 /// Returns the "target" range of a canonical integral type, i.e. 9813 /// the range of values expressible in the type. 9814 /// 9815 /// This matches forValueOfCanonicalType except that enums have the 9816 /// full range of their type, not the range of their enumerators. 9817 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 9818 assert(T->isCanonicalUnqualified()); 9819 9820 if (const VectorType *VT = dyn_cast<VectorType>(T)) 9821 T = VT->getElementType().getTypePtr(); 9822 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 9823 T = CT->getElementType().getTypePtr(); 9824 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 9825 T = AT->getValueType().getTypePtr(); 9826 if (const EnumType *ET = dyn_cast<EnumType>(T)) 9827 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 9828 9829 const BuiltinType *BT = cast<BuiltinType>(T); 9830 assert(BT->isInteger()); 9831 9832 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 9833 } 9834 9835 /// Returns the supremum of two ranges: i.e. their conservative merge. 9836 static IntRange join(IntRange L, IntRange R) { 9837 return IntRange(std::max(L.Width, R.Width), 9838 L.NonNegative && R.NonNegative); 9839 } 9840 9841 /// Returns the infinum of two ranges: i.e. their aggressive merge. 
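  /// For example (illustrative): meeting a 32-bit possibly-negative range
  /// with an 8-bit non-negative one gives { Width = 8, NonNegative = true },
  /// which is how the bitwise-and handling later on narrows its result.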
9842 static IntRange meet(IntRange L, IntRange R) { 9843 return IntRange(std::min(L.Width, R.Width), 9844 L.NonNegative || R.NonNegative); 9845 } 9846 }; 9847 9848 } // namespace 9849 9850 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 9851 unsigned MaxWidth) { 9852 if (value.isSigned() && value.isNegative()) 9853 return IntRange(value.getMinSignedBits(), false); 9854 9855 if (value.getBitWidth() > MaxWidth) 9856 value = value.trunc(MaxWidth); 9857 9858 // isNonNegative() just checks the sign bit without considering 9859 // signedness. 9860 return IntRange(value.getActiveBits(), true); 9861 } 9862 9863 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 9864 unsigned MaxWidth) { 9865 if (result.isInt()) 9866 return GetValueRange(C, result.getInt(), MaxWidth); 9867 9868 if (result.isVector()) { 9869 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 9870 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 9871 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 9872 R = IntRange::join(R, El); 9873 } 9874 return R; 9875 } 9876 9877 if (result.isComplexInt()) { 9878 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 9879 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 9880 return IntRange::join(R, I); 9881 } 9882 9883 // This can happen with lossless casts to intptr_t of "based" lvalues. 9884 // Assume it might use arbitrary bits. 9885 // FIXME: The only reason we need to pass the type in here is to get 9886 // the sign right on this one case. It would be nice if APValue 9887 // preserved this. 9888 assert(result.isLValue() || result.isAddrLabelDiff()); 9889 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 9890 } 9891 9892 static QualType GetExprType(const Expr *E) { 9893 QualType Ty = E->getType(); 9894 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 9895 Ty = AtomicRHS->getValueType(); 9896 return Ty; 9897 } 9898 9899 /// Pseudo-evaluate the given integer expression, estimating the 9900 /// range of values it might take. 9901 /// 9902 /// \param MaxWidth - the width to which the value will be truncated 9903 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) { 9904 E = E->IgnoreParens(); 9905 9906 // Try a full evaluation first. 9907 Expr::EvalResult result; 9908 if (E->EvaluateAsRValue(result, C)) 9909 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 9910 9911 // I think we only want to look through implicit casts here; if the 9912 // user has an explicit widening cast, we should treat the value as 9913 // being of the new, wider type. 9914 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 9915 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 9916 return GetExprRange(C, CE->getSubExpr(), MaxWidth); 9917 9918 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 9919 9920 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 9921 CE->getCastKind() == CK_BooleanToSignedIntegral; 9922 9923 // Assume that non-integer casts can span the full range of the type. 9924 if (!isIntegerCast) 9925 return OutputTypeRange; 9926 9927 IntRange SubRange 9928 = GetExprRange(C, CE->getSubExpr(), 9929 std::min(MaxWidth, OutputTypeRange.Width)); 9930 9931 // Bail out if the subexpr's range is as wide as the cast type. 
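// Illustrative sketch of the two outcomes below: when an 'int' value is
// implicitly converted to 'unsigned char', the subexpression range
// (32, false) is at least as wide as the 8-bit cast type, so the cast's own
// range (8, true) is returned. When an 'unsigned char' value is implicitly
// converted to 'int', the subexpression range (8, true) is narrower, so the
// result keeps the 8-bit width and stays non-negative.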
9932 if (SubRange.Width >= OutputTypeRange.Width) 9933 return OutputTypeRange; 9934 9935 // Otherwise, we take the smaller width, and we're non-negative if 9936 // either the output type or the subexpr is. 9937 return IntRange(SubRange.Width, 9938 SubRange.NonNegative || OutputTypeRange.NonNegative); 9939 } 9940 9941 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 9942 // If we can fold the condition, just take that operand. 9943 bool CondResult; 9944 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 9945 return GetExprRange(C, CondResult ? CO->getTrueExpr() 9946 : CO->getFalseExpr(), 9947 MaxWidth); 9948 9949 // Otherwise, conservatively merge. 9950 IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth); 9951 IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth); 9952 return IntRange::join(L, R); 9953 } 9954 9955 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 9956 switch (BO->getOpcode()) { 9957 case BO_Cmp: 9958 llvm_unreachable("builtin <=> should have class type"); 9959 9960 // Boolean-valued operations are single-bit and positive. 9961 case BO_LAnd: 9962 case BO_LOr: 9963 case BO_LT: 9964 case BO_GT: 9965 case BO_LE: 9966 case BO_GE: 9967 case BO_EQ: 9968 case BO_NE: 9969 return IntRange::forBoolType(); 9970 9971 // The type of the assignments is the type of the LHS, so the RHS 9972 // is not necessarily the same type. 9973 case BO_MulAssign: 9974 case BO_DivAssign: 9975 case BO_RemAssign: 9976 case BO_AddAssign: 9977 case BO_SubAssign: 9978 case BO_XorAssign: 9979 case BO_OrAssign: 9980 // TODO: bitfields? 9981 return IntRange::forValueOfType(C, GetExprType(E)); 9982 9983 // Simple assignments just pass through the RHS, which will have 9984 // been coerced to the LHS type. 9985 case BO_Assign: 9986 // TODO: bitfields? 9987 return GetExprRange(C, BO->getRHS(), MaxWidth); 9988 9989 // Operations with opaque sources are black-listed. 9990 case BO_PtrMemD: 9991 case BO_PtrMemI: 9992 return IntRange::forValueOfType(C, GetExprType(E)); 9993 9994 // Bitwise-and uses the *infinum* of the two source ranges. 9995 case BO_And: 9996 case BO_AndAssign: 9997 return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth), 9998 GetExprRange(C, BO->getRHS(), MaxWidth)); 9999 10000 // Left shift gets black-listed based on a judgement call. 10001 case BO_Shl: 10002 // ...except that we want to treat '1 << (blah)' as logically 10003 // positive. It's an important idiom. 10004 if (IntegerLiteral *I 10005 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10006 if (I->getValue() == 1) { 10007 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10008 return IntRange(R.Width, /*NonNegative*/ true); 10009 } 10010 } 10011 LLVM_FALLTHROUGH; 10012 10013 case BO_ShlAssign: 10014 return IntRange::forValueOfType(C, GetExprType(E)); 10015 10016 // Right shift by a constant can narrow its left argument. 10017 case BO_Shr: 10018 case BO_ShrAssign: { 10019 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth); 10020 10021 // If the shift amount is a positive constant, drop the width by 10022 // that much. 10023 llvm::APSInt shift; 10024 if (BO->getRHS()->isIntegerConstantExpr(shift, C) && 10025 shift.isNonNegative()) { 10026 unsigned zext = shift.getZExtValue(); 10027 if (zext >= L.Width) 10028 L.Width = (L.NonNegative ? 0 : 1); 10029 else 10030 L.Width -= zext; 10031 } 10032 10033 return L; 10034 } 10035 10036 // Comma acts as its right operand. 10037 case BO_Comma: 10038 return GetExprRange(C, BO->getRHS(), MaxWidth); 10039 10040 // Black-list pointer subtractions. 
10041 case BO_Sub: 10042 if (BO->getLHS()->getType()->isPointerType()) 10043 return IntRange::forValueOfType(C, GetExprType(E)); 10044 break; 10045 10046 // The width of a division result is mostly determined by the size 10047 // of the LHS. 10048 case BO_Div: { 10049 // Don't 'pre-truncate' the operands. 10050 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10051 IntRange L = GetExprRange(C, BO->getLHS(), opWidth); 10052 10053 // If the divisor is constant, use that. 10054 llvm::APSInt divisor; 10055 if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) { 10056 unsigned log2 = divisor.logBase2(); // floor(log_2(divisor)) 10057 if (log2 >= L.Width) 10058 L.Width = (L.NonNegative ? 0 : 1); 10059 else 10060 L.Width = std::min(L.Width - log2, MaxWidth); 10061 return L; 10062 } 10063 10064 // Otherwise, just use the LHS's width. 10065 IntRange R = GetExprRange(C, BO->getRHS(), opWidth); 10066 return IntRange(L.Width, L.NonNegative && R.NonNegative); 10067 } 10068 10069 // The result of a remainder can't be larger than the result of 10070 // either side. 10071 case BO_Rem: { 10072 // Don't 'pre-truncate' the operands. 10073 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10074 IntRange L = GetExprRange(C, BO->getLHS(), opWidth); 10075 IntRange R = GetExprRange(C, BO->getRHS(), opWidth); 10076 10077 IntRange meet = IntRange::meet(L, R); 10078 meet.Width = std::min(meet.Width, MaxWidth); 10079 return meet; 10080 } 10081 10082 // The default behavior is okay for these. 10083 case BO_Mul: 10084 case BO_Add: 10085 case BO_Xor: 10086 case BO_Or: 10087 break; 10088 } 10089 10090 // The default case is to treat the operation as if it were closed 10091 // on the narrowest type that encompasses both operands. 10092 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth); 10093 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth); 10094 return IntRange::join(L, R); 10095 } 10096 10097 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 10098 switch (UO->getOpcode()) { 10099 // Boolean-valued operations are white-listed. 10100 case UO_LNot: 10101 return IntRange::forBoolType(); 10102 10103 // Operations with opaque sources are black-listed. 10104 case UO_Deref: 10105 case UO_AddrOf: // should be impossible 10106 return IntRange::forValueOfType(C, GetExprType(E)); 10107 10108 default: 10109 return GetExprRange(C, UO->getSubExpr(), MaxWidth); 10110 } 10111 } 10112 10113 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 10114 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth); 10115 10116 if (const auto *BitField = E->getSourceBitField()) 10117 return IntRange(BitField->getBitWidthValue(C), 10118 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 10119 10120 return IntRange::forValueOfType(C, GetExprType(E)); 10121 } 10122 10123 static IntRange GetExprRange(ASTContext &C, const Expr *E) { 10124 return GetExprRange(C, E, C.getIntWidth(GetExprType(E))); 10125 } 10126 10127 /// Checks whether the given value, which currently has the given 10128 /// source semantics, has the same value when coerced through the 10129 /// target semantics. 
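// For example (illustrative): the value 1.5 converts from 'double' to
// 'float' and back without change, so it is "the same float after cast",
// whereas 0.1 (not exactly representable in either format) loses bits when
// narrowed to 'float' and compares unequal after the round trip.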
10130 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 10131 const llvm::fltSemantics &Src, 10132 const llvm::fltSemantics &Tgt) { 10133 llvm::APFloat truncated = value; 10134 10135 bool ignored; 10136 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 10137 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 10138 10139 return truncated.bitwiseIsEqual(value); 10140 } 10141 10142 /// Checks whether the given value, which currently has the given 10143 /// source semantics, has the same value when coerced through the 10144 /// target semantics. 10145 /// 10146 /// The value might be a vector of floats (or a complex number). 10147 static bool IsSameFloatAfterCast(const APValue &value, 10148 const llvm::fltSemantics &Src, 10149 const llvm::fltSemantics &Tgt) { 10150 if (value.isFloat()) 10151 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 10152 10153 if (value.isVector()) { 10154 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 10155 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 10156 return false; 10157 return true; 10158 } 10159 10160 assert(value.isComplexFloat()); 10161 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 10162 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 10163 } 10164 10165 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC); 10166 10167 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 10168 // Suppress cases where we are comparing against an enum constant. 10169 if (const DeclRefExpr *DR = 10170 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 10171 if (isa<EnumConstantDecl>(DR->getDecl())) 10172 return true; 10173 10174 // Suppress cases where the '0' value is expanded from a macro. 10175 if (E->getBeginLoc().isMacroID()) 10176 return true; 10177 10178 return false; 10179 } 10180 10181 static bool isKnownToHaveUnsignedValue(Expr *E) { 10182 return E->getType()->isIntegerType() && 10183 (!E->getType()->isSignedIntegerType() || 10184 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 10185 } 10186 10187 namespace { 10188 /// The promoted range of values of a type. In general this has the 10189 /// following structure: 10190 /// 10191 /// |-----------| . . . |-----------| 10192 /// ^ ^ ^ ^ 10193 /// Min HoleMin HoleMax Max 10194 /// 10195 /// ... where there is only a hole if a signed type is promoted to unsigned 10196 /// (in which case Min and Max are the smallest and largest representable 10197 /// values). 10198 struct PromotedRange { 10199 // Min, or HoleMax if there is a hole. 10200 llvm::APSInt PromotedMin; 10201 // Max, or HoleMin if there is a hole. 10202 llvm::APSInt PromotedMax; 10203 10204 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 10205 if (R.Width == 0) 10206 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 10207 else if (R.Width >= BitWidth && !Unsigned) { 10208 // Promotion made the type *narrower*. This happens when promoting 10209 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 10210 // Treat all values of 'signed int' as being in range for now. 
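// Worked example (illustrative only) for the general case handled in the
// 'else' branch below: promoting the range of a 'signed char'
// (IntRange(8, false)) for comparison against an 'unsigned int' constant
// (BitWidth == 32, Unsigned == true) produces
//
//   PromotedMin == 4294967168   // (unsigned)-128
//   PromotedMax == 127
//
// so PromotedMin > PromotedMax and the range is discontiguous: the "hole"
// 128 .. 4294967167 contains values no promoted 'signed char' can take.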
10211 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 10212 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 10213 } else { 10214 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 10215 .extOrTrunc(BitWidth); 10216 PromotedMin.setIsUnsigned(Unsigned); 10217 10218 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 10219 .extOrTrunc(BitWidth); 10220 PromotedMax.setIsUnsigned(Unsigned); 10221 } 10222 } 10223 10224 // Determine whether this range is contiguous (has no hole). 10225 bool isContiguous() const { return PromotedMin <= PromotedMax; } 10226 10227 // Where a constant value is within the range. 10228 enum ComparisonResult { 10229 LT = 0x1, 10230 LE = 0x2, 10231 GT = 0x4, 10232 GE = 0x8, 10233 EQ = 0x10, 10234 NE = 0x20, 10235 InRangeFlag = 0x40, 10236 10237 Less = LE | LT | NE, 10238 Min = LE | InRangeFlag, 10239 InRange = InRangeFlag, 10240 Max = GE | InRangeFlag, 10241 Greater = GE | GT | NE, 10242 10243 OnlyValue = LE | GE | EQ | InRangeFlag, 10244 InHole = NE 10245 }; 10246 10247 ComparisonResult compare(const llvm::APSInt &Value) const { 10248 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 10249 Value.isUnsigned() == PromotedMin.isUnsigned()); 10250 if (!isContiguous()) { 10251 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 10252 if (Value.isMinValue()) return Min; 10253 if (Value.isMaxValue()) return Max; 10254 if (Value >= PromotedMin) return InRange; 10255 if (Value <= PromotedMax) return InRange; 10256 return InHole; 10257 } 10258 10259 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 10260 case -1: return Less; 10261 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 10262 case 1: 10263 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 10264 case -1: return InRange; 10265 case 0: return Max; 10266 case 1: return Greater; 10267 } 10268 } 10269 10270 llvm_unreachable("impossible compare result"); 10271 } 10272 10273 static llvm::Optional<StringRef> 10274 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 10275 if (Op == BO_Cmp) { 10276 ComparisonResult LTFlag = LT, GTFlag = GT; 10277 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 10278 10279 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 10280 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 10281 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 10282 return llvm::None; 10283 } 10284 10285 ComparisonResult TrueFlag, FalseFlag; 10286 if (Op == BO_EQ) { 10287 TrueFlag = EQ; 10288 FalseFlag = NE; 10289 } else if (Op == BO_NE) { 10290 TrueFlag = NE; 10291 FalseFlag = EQ; 10292 } else { 10293 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 10294 TrueFlag = LT; 10295 FalseFlag = GE; 10296 } else { 10297 TrueFlag = GT; 10298 FalseFlag = LE; 10299 } 10300 if (Op == BO_GE || Op == BO_LE) 10301 std::swap(TrueFlag, FalseFlag); 10302 } 10303 if (R & TrueFlag) 10304 return StringRef("true"); 10305 if (R & FalseFlag) 10306 return StringRef("false"); 10307 return llvm::None; 10308 } 10309 }; 10310 } 10311 10312 static bool HasEnumType(Expr *E) { 10313 // Strip off implicit integral promotions. 
10314 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
10315 if (ICE->getCastKind() != CK_IntegralCast &&
10316 ICE->getCastKind() != CK_NoOp)
10317 break;
10318 E = ICE->getSubExpr();
10319 }
10320
10321 return E->getType()->isEnumeralType();
10322 }
10323
10324 static int classifyConstantValue(Expr *Constant) {
10325 // The values of this enumeration are used in the diagnostics
10326 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
10327 enum ConstantValueKind {
10328 Miscellaneous = 0,
10329 LiteralTrue,
10330 LiteralFalse
10331 };
10332 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
10333 return BL->getValue() ? ConstantValueKind::LiteralTrue
10334 : ConstantValueKind::LiteralFalse;
10335 return ConstantValueKind::Miscellaneous;
10336 }
10337
10338 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
10339 Expr *Constant, Expr *Other,
10340 const llvm::APSInt &Value,
10341 bool RhsConstant) {
10342 if (S.inTemplateInstantiation())
10343 return false;
10344
10345 Expr *OriginalOther = Other;
10346
10347 Constant = Constant->IgnoreParenImpCasts();
10348 Other = Other->IgnoreParenImpCasts();
10349
10350 // Suppress warnings on tautological comparisons between values of the same
10351 // enumeration type. There are only two ways we could warn on this:
10352 // - If the constant is outside the range of representable values of
10353 // the enumeration. In such a case, we should warn about the cast
10354 // to enumeration type, not about the comparison.
10355 // - If the constant is the maximum / minimum in-range value. For an
10356 // enumeration type, such comparisons can be meaningful and useful.
10357 if (Constant->getType()->isEnumeralType() &&
10358 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
10359 return false;
10360
10361 // TODO: Investigate using GetExprRange() to get tighter bounds
10362 // on the bit ranges.
10363 QualType OtherT = Other->getType();
10364 if (const auto *AT = OtherT->getAs<AtomicType>())
10365 OtherT = AT->getValueType();
10366 IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);
10367
10368 // Whether we're treating Other as being a bool because of the form of
10369 // expression despite it having another type (typically 'int' in C).
10370 bool OtherIsBooleanDespiteType =
10371 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
10372 if (OtherIsBooleanDespiteType)
10373 OtherRange = IntRange::forBoolType();
10374
10375 // Determine the promoted range of the other type and see if a comparison of
10376 // the constant against that range is tautological.
10377 PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(),
10378 Value.isUnsigned());
10379 auto Cmp = OtherPromotedRange.compare(Value);
10380 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
10381 if (!Result)
10382 return false;
10383
10384 // Suppress the diagnostic for an in-range comparison if the constant comes
10385 // from a macro or enumerator. We don't want to diagnose
10386 //
10387 // some_long_value <= INT_MAX
10388 //
10389 // when sizeof(int) == sizeof(long).
10390 bool InRange = Cmp & PromotedRange::InRangeFlag;
10391 if (InRange && IsEnumConstOrFromMacro(S, Constant))
10392 return false;
10393
10394 // If this is a comparison to an enum constant, include that
10395 // constant in the diagnostic.
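// (A few comparisons this routine is intended to flag, illustrative only and
//  assuming 16-bit 'short':
//
//    unsigned u = ...;
//    if (u >= 0)      {}   // always true (-Wtautological-unsigned-zero-compare)
//
//    short s = ...;
//    if (s == 100000) {}   // always false: 100000 is out of range for 'short'
//                          // (-Wtautological-constant-out-of-range-compare)
//
//    bool b = ...;
//    if (b > 1)       {}   // always false for a boolean value.)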
10396 const EnumConstantDecl *ED = nullptr; 10397 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 10398 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 10399 10400 // Should be enough for uint128 (39 decimal digits) 10401 SmallString<64> PrettySourceValue; 10402 llvm::raw_svector_ostream OS(PrettySourceValue); 10403 if (ED) 10404 OS << '\'' << *ED << "' (" << Value << ")"; 10405 else 10406 OS << Value; 10407 10408 // FIXME: We use a somewhat different formatting for the in-range cases and 10409 // cases involving boolean values for historical reasons. We should pick a 10410 // consistent way of presenting these diagnostics. 10411 if (!InRange || Other->isKnownToHaveBooleanValue()) { 10412 S.DiagRuntimeBehavior( 10413 E->getOperatorLoc(), E, 10414 S.PDiag(!InRange ? diag::warn_out_of_range_compare 10415 : diag::warn_tautological_bool_compare) 10416 << OS.str() << classifyConstantValue(Constant) 10417 << OtherT << OtherIsBooleanDespiteType << *Result 10418 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 10419 } else { 10420 unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 10421 ? (HasEnumType(OriginalOther) 10422 ? diag::warn_unsigned_enum_always_true_comparison 10423 : diag::warn_unsigned_always_true_comparison) 10424 : diag::warn_tautological_constant_compare; 10425 10426 S.Diag(E->getOperatorLoc(), Diag) 10427 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 10428 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 10429 } 10430 10431 return true; 10432 } 10433 10434 /// Analyze the operands of the given comparison. Implements the 10435 /// fallback case from AnalyzeComparison. 10436 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 10437 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10438 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10439 } 10440 10441 /// Implements -Wsign-compare. 10442 /// 10443 /// \param E the binary operator to check for warnings 10444 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 10445 // The type the comparison is being performed in. 10446 QualType T = E->getLHS()->getType(); 10447 10448 // Only analyze comparison operators where both sides have been converted to 10449 // the same type. 10450 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 10451 return AnalyzeImpConvsInComparison(S, E); 10452 10453 // Don't analyze value-dependent comparisons directly. 10454 if (E->isValueDependent()) 10455 return AnalyzeImpConvsInComparison(S, E); 10456 10457 Expr *LHS = E->getLHS(); 10458 Expr *RHS = E->getRHS(); 10459 10460 if (T->isIntegralType(S.Context)) { 10461 llvm::APSInt RHSValue; 10462 llvm::APSInt LHSValue; 10463 10464 bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context); 10465 bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context); 10466 10467 // We don't care about expressions whose result is a constant. 10468 if (IsRHSIntegralLiteral && IsLHSIntegralLiteral) 10469 return AnalyzeImpConvsInComparison(S, E); 10470 10471 // We only care about expressions where just one side is literal 10472 if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) { 10473 // Is the constant on the RHS or LHS? 10474 const bool RhsConstant = IsRHSIntegralLiteral; 10475 Expr *Const = RhsConstant ? RHS : LHS; 10476 Expr *Other = RhsConstant ? LHS : RHS; 10477 const llvm::APSInt &Value = RhsConstant ? 
RHSValue : LHSValue;
10478
10479 // Check whether an integer constant comparison results in a value
10480 // of 'true' or 'false'.
10481 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
10482 return AnalyzeImpConvsInComparison(S, E);
10483 }
10484 }
10485
10486 if (!T->hasUnsignedIntegerRepresentation()) {
10487 // We don't do anything special if this isn't an unsigned integral
10488 // comparison: we're only interested in integral comparisons, and
10489 // signed comparisons only happen in cases we don't care to warn about.
10490 return AnalyzeImpConvsInComparison(S, E);
10491 }
10492
10493 LHS = LHS->IgnoreParenImpCasts();
10494 RHS = RHS->IgnoreParenImpCasts();
10495
10496 if (!S.getLangOpts().CPlusPlus) {
10497 // Avoid warning about comparison of integers with different signs when
10498 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
10499 // the type of `E`.
10500 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
10501 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
10502 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
10503 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
10504 }
10505
10506 // Check to see if one of the (unmodified) operands is of different
10507 // signedness.
10508 Expr *signedOperand, *unsignedOperand;
10509 if (LHS->getType()->hasSignedIntegerRepresentation()) {
10510 assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
10511 "unsigned comparison between two signed integer expressions?");
10512 signedOperand = LHS;
10513 unsignedOperand = RHS;
10514 } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
10515 signedOperand = RHS;
10516 unsignedOperand = LHS;
10517 } else {
10518 return AnalyzeImpConvsInComparison(S, E);
10519 }
10520
10521 // Otherwise, calculate the effective range of the signed operand.
10522 IntRange signedRange = GetExprRange(S.Context, signedOperand);
10523
10524 // Go ahead and analyze implicit conversions in the operands. Note
10525 // that we skip the implicit conversions on both sides.
10526 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
10527 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
10528
10529 // If the signed range is non-negative, -Wsign-compare won't fire.
10530 if (signedRange.NonNegative)
10531 return;
10532
10533 // For (in)equality comparisons, if the unsigned operand is a
10534 // constant which cannot collide with an overflowed signed operand,
10535 // then reinterpreting the signed operand as unsigned will not
10536 // change the result of the comparison.
10537 if (E->isEqualityOp()) {
10538 unsigned comparisonWidth = S.Context.getIntWidth(T);
10539 IntRange unsignedRange = GetExprRange(S.Context, unsignedOperand);
10540
10541 // We should never be unable to prove that the unsigned operand is
10542 // non-negative.
10543 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
10544
10545 if (unsignedRange.Width < comparisonWidth)
10546 return;
10547 }
10548
10549 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
10550 S.PDiag(diag::warn_mixed_sign_comparison)
10551 << LHS->getType() << RHS->getType()
10552 << LHS->getSourceRange() << RHS->getSourceRange());
10553 }
10554
10555 /// Analyzes an attempt to assign the given value to a bitfield.
10556 ///
10557 /// Returns true if there was something fishy about the attempt.
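// A minimal sketch of the kind of code the following analysis is aimed at
// (identifiers are hypothetical, purely illustrative):
//
//   struct S { int x : 3; };              // holds -4 .. 3
//   void f(S &s) { s.x = 7; }             // warns: value changes from 7 to -1
//
//   enum E { A, B, C, D, E4 };            // needs 3 bits
//   struct T { unsigned e : 2; };
//   void g(T &t, E v) { t.e = v; }        // warns: bit-field too narrow for 'E'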
10558 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
10559 SourceLocation InitLoc) {
10560 assert(Bitfield->isBitField());
10561 if (Bitfield->isInvalidDecl())
10562 return false;
10563
10564 // White-list bool bitfields.
10565 QualType BitfieldType = Bitfield->getType();
10566 if (BitfieldType->isBooleanType())
10567 return false;
10568
10569 if (BitfieldType->isEnumeralType()) {
10570 EnumDecl *BitfieldEnumDecl = BitfieldType->getAs<EnumType>()->getDecl();
10571 // If the underlying enum type was not explicitly specified as an unsigned
10572 // type and the enum contains only positive values, MSVC++ will cause an
10573 // inconsistency by storing this as a signed type.
10574 if (S.getLangOpts().CPlusPlus11 &&
10575 !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
10576 BitfieldEnumDecl->getNumPositiveBits() > 0 &&
10577 BitfieldEnumDecl->getNumNegativeBits() == 0) {
10578 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
10579 << BitfieldEnumDecl->getNameAsString();
10580 }
10581 }
10582
10583 if (Bitfield->getType()->isBooleanType())
10584 return false;
10585
10586 // Ignore value- or type-dependent expressions.
10587 if (Bitfield->getBitWidth()->isValueDependent() ||
10588 Bitfield->getBitWidth()->isTypeDependent() ||
10589 Init->isValueDependent() ||
10590 Init->isTypeDependent())
10591 return false;
10592
10593 Expr *OriginalInit = Init->IgnoreParenImpCasts();
10594 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
10595
10596 Expr::EvalResult Result;
10597 if (!OriginalInit->EvaluateAsInt(Result, S.Context,
10598 Expr::SE_AllowSideEffects)) {
10599 // The RHS is not constant. If the RHS has an enum type, make sure the
10600 // bitfield is wide enough to hold all the values of the enum without
10601 // truncation.
10602 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
10603 EnumDecl *ED = EnumTy->getDecl();
10604 bool SignedBitfield = BitfieldType->isSignedIntegerType();
10605
10606 // Enum types are implicitly signed on Windows, so check if there are any
10607 // negative enumerators to see if the enum was intended to be signed or
10608 // not.
10609 bool SignedEnum = ED->getNumNegativeBits() > 0;
10610
10611 // Check for surprising sign changes when assigning enum values to a
10612 // bitfield of different signedness. If the bitfield is signed and we
10613 // have exactly the right number of bits to store this unsigned enum,
10614 // suggest changing the enum to an unsigned type. This typically happens
10615 // on Windows where unfixed enums always use an underlying type of 'int'.
10616 unsigned DiagID = 0;
10617 if (SignedEnum && !SignedBitfield) {
10618 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
10619 } else if (SignedBitfield && !SignedEnum &&
10620 ED->getNumPositiveBits() == FieldWidth) {
10621 DiagID = diag::warn_signed_bitfield_enum_conversion;
10622 }
10623
10624 if (DiagID) {
10625 S.Diag(InitLoc, DiagID) << Bitfield << ED;
10626 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
10627 SourceRange TypeRange =
10628 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
10629 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
10630 << SignedEnum << TypeRange;
10631 }
10632
10633 // Compute the required bitwidth. If the enum has negative values, we need
10634 // one more bit than the normal number of positive bits to represent the
10635 // sign bit.
10636 unsigned BitsNeeded = SignedEnum ?
std::max(ED->getNumPositiveBits() + 1, 10637 ED->getNumNegativeBits()) 10638 : ED->getNumPositiveBits(); 10639 10640 // Check the bitwidth. 10641 if (BitsNeeded > FieldWidth) { 10642 Expr *WidthExpr = Bitfield->getBitWidth(); 10643 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 10644 << Bitfield << ED; 10645 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 10646 << BitsNeeded << ED << WidthExpr->getSourceRange(); 10647 } 10648 } 10649 10650 return false; 10651 } 10652 10653 llvm::APSInt Value = Result.Val.getInt(); 10654 10655 unsigned OriginalWidth = Value.getBitWidth(); 10656 10657 if (!Value.isSigned() || Value.isNegative()) 10658 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 10659 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 10660 OriginalWidth = Value.getMinSignedBits(); 10661 10662 if (OriginalWidth <= FieldWidth) 10663 return false; 10664 10665 // Compute the value which the bitfield will contain. 10666 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 10667 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 10668 10669 // Check whether the stored value is equal to the original value. 10670 TruncatedValue = TruncatedValue.extend(OriginalWidth); 10671 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 10672 return false; 10673 10674 // Special-case bitfields of width 1: booleans are naturally 0/1, and 10675 // therefore don't strictly fit into a signed bitfield of width 1. 10676 if (FieldWidth == 1 && Value == 1) 10677 return false; 10678 10679 std::string PrettyValue = Value.toString(10); 10680 std::string PrettyTrunc = TruncatedValue.toString(10); 10681 10682 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 10683 << PrettyValue << PrettyTrunc << OriginalInit->getType() 10684 << Init->getSourceRange(); 10685 10686 return true; 10687 } 10688 10689 /// Analyze the given simple or compound assignment for warning-worthy 10690 /// operations. 10691 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 10692 // Just recurse on the LHS. 10693 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10694 10695 // We want to recurse on the RHS as normal unless we're assigning to 10696 // a bitfield. 10697 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 10698 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 10699 E->getOperatorLoc())) { 10700 // Recurse, ignoring any implicit conversions on the RHS. 10701 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 10702 E->getOperatorLoc()); 10703 } 10704 } 10705 10706 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10707 10708 // Diagnose implicitly sequentially-consistent atomic assignment. 10709 if (E->getLHS()->getType()->isAtomicType()) 10710 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 10711 } 10712 10713 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 10714 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 10715 SourceLocation CContext, unsigned diag, 10716 bool pruneControlFlow = false) { 10717 if (pruneControlFlow) { 10718 S.DiagRuntimeBehavior(E->getExprLoc(), E, 10719 S.PDiag(diag) 10720 << SourceType << T << E->getSourceRange() 10721 << SourceRange(CContext)); 10722 return; 10723 } 10724 S.Diag(E->getExprLoc(), diag) 10725 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 10726 } 10727 10728 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 
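// Conversions of the kind the floating-point checks below diagnose
// (illustrative only; 'g' is a hypothetical non-constant 'float' variable):
//
//   int a = 1.5;       // literal changes value from 1.5 to 1
//   int b = 2.0;       // converts exactly: no warning
//   char c = 1.0e10;   // out of range for 'char': undefined, diagnosed
//   int d = g;         // non-constant operand (-Wfloat-conversion)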
10729 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 10730 SourceLocation CContext, 10731 unsigned diag, bool pruneControlFlow = false) { 10732 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 10733 } 10734 10735 /// Diagnose an implicit cast from a floating point value to an integer value. 10736 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 10737 SourceLocation CContext) { 10738 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 10739 const bool PruneWarnings = S.inTemplateInstantiation(); 10740 10741 Expr *InnerE = E->IgnoreParenImpCasts(); 10742 // We also want to warn on, e.g., "int i = -1.234" 10743 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 10744 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 10745 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 10746 10747 const bool IsLiteral = 10748 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 10749 10750 llvm::APFloat Value(0.0); 10751 bool IsConstant = 10752 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 10753 if (!IsConstant) { 10754 return DiagnoseImpCast(S, E, T, CContext, 10755 diag::warn_impcast_float_integer, PruneWarnings); 10756 } 10757 10758 bool isExact = false; 10759 10760 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 10761 T->hasUnsignedIntegerRepresentation()); 10762 llvm::APFloat::opStatus Result = Value.convertToInteger( 10763 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 10764 10765 if (Result == llvm::APFloat::opOK && isExact) { 10766 if (IsLiteral) return; 10767 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 10768 PruneWarnings); 10769 } 10770 10771 // Conversion of a floating-point value to a non-bool integer where the 10772 // integral part cannot be represented by the integer type is undefined. 10773 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 10774 return DiagnoseImpCast( 10775 S, E, T, CContext, 10776 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 10777 : diag::warn_impcast_float_to_integer_out_of_range, 10778 PruneWarnings); 10779 10780 unsigned DiagID = 0; 10781 if (IsLiteral) { 10782 // Warn on floating point literal to integer. 10783 DiagID = diag::warn_impcast_literal_float_to_integer; 10784 } else if (IntegerValue == 0) { 10785 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 10786 return DiagnoseImpCast(S, E, T, CContext, 10787 diag::warn_impcast_float_integer, PruneWarnings); 10788 } 10789 // Warn on non-zero to zero conversion. 10790 DiagID = diag::warn_impcast_float_to_integer_zero; 10791 } else { 10792 if (IntegerValue.isUnsigned()) { 10793 if (!IntegerValue.isMaxValue()) { 10794 return DiagnoseImpCast(S, E, T, CContext, 10795 diag::warn_impcast_float_integer, PruneWarnings); 10796 } 10797 } else { // IntegerValue.isSigned() 10798 if (!IntegerValue.isMaxSignedValue() && 10799 !IntegerValue.isMinSignedValue()) { 10800 return DiagnoseImpCast(S, E, T, CContext, 10801 diag::warn_impcast_float_integer, PruneWarnings); 10802 } 10803 } 10804 // Warn on evaluatable floating point expression to integer conversion. 10805 DiagID = diag::warn_impcast_float_to_integer; 10806 } 10807 10808 // FIXME: Force the precision of the source value down so we don't print 10809 // digits which are usually useless (we don't really care here if we 10810 // truncate a digit by accident in edge cases). 
Ideally, APFloat::toString 10811 // would automatically print the shortest representation, but it's a bit 10812 // tricky to implement. 10813 SmallString<16> PrettySourceValue; 10814 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 10815 precision = (precision * 59 + 195) / 196; 10816 Value.toString(PrettySourceValue, precision); 10817 10818 SmallString<16> PrettyTargetValue; 10819 if (IsBool) 10820 PrettyTargetValue = Value.isZero() ? "false" : "true"; 10821 else 10822 IntegerValue.toString(PrettyTargetValue); 10823 10824 if (PruneWarnings) { 10825 S.DiagRuntimeBehavior(E->getExprLoc(), E, 10826 S.PDiag(DiagID) 10827 << E->getType() << T.getUnqualifiedType() 10828 << PrettySourceValue << PrettyTargetValue 10829 << E->getSourceRange() << SourceRange(CContext)); 10830 } else { 10831 S.Diag(E->getExprLoc(), DiagID) 10832 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 10833 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 10834 } 10835 } 10836 10837 /// Analyze the given compound assignment for the possible losing of 10838 /// floating-point precision. 10839 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 10840 assert(isa<CompoundAssignOperator>(E) && 10841 "Must be compound assignment operation"); 10842 // Recurse on the LHS and RHS in here 10843 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10844 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10845 10846 if (E->getLHS()->getType()->isAtomicType()) 10847 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 10848 10849 // Now check the outermost expression 10850 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 10851 const auto *RBT = cast<CompoundAssignOperator>(E) 10852 ->getComputationResultType() 10853 ->getAs<BuiltinType>(); 10854 10855 // The below checks assume source is floating point. 10856 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 10857 10858 // If source is floating point but target is an integer. 10859 if (ResultBT->isInteger()) 10860 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 10861 E->getExprLoc(), diag::warn_impcast_float_integer); 10862 10863 if (!ResultBT->isFloatingPoint()) 10864 return; 10865 10866 // If both source and target are floating points, warn about losing precision. 10867 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 10868 QualType(ResultBT, 0), QualType(RBT, 0)); 10869 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 10870 // warn about dropping FP rank. 
10871 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 10872 diag::warn_impcast_float_result_precision); 10873 } 10874 10875 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 10876 IntRange Range) { 10877 if (!Range.Width) return "0"; 10878 10879 llvm::APSInt ValueInRange = Value; 10880 ValueInRange.setIsSigned(!Range.NonNegative); 10881 ValueInRange = ValueInRange.trunc(Range.Width); 10882 return ValueInRange.toString(10); 10883 } 10884 10885 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 10886 if (!isa<ImplicitCastExpr>(Ex)) 10887 return false; 10888 10889 Expr *InnerE = Ex->IgnoreParenImpCasts(); 10890 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 10891 const Type *Source = 10892 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 10893 if (Target->isDependentType()) 10894 return false; 10895 10896 const BuiltinType *FloatCandidateBT = 10897 dyn_cast<BuiltinType>(ToBool ? Source : Target); 10898 const Type *BoolCandidateType = ToBool ? Target : Source; 10899 10900 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 10901 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 10902 } 10903 10904 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 10905 SourceLocation CC) { 10906 unsigned NumArgs = TheCall->getNumArgs(); 10907 for (unsigned i = 0; i < NumArgs; ++i) { 10908 Expr *CurrA = TheCall->getArg(i); 10909 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 10910 continue; 10911 10912 bool IsSwapped = ((i > 0) && 10913 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 10914 IsSwapped |= ((i < (NumArgs - 1)) && 10915 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 10916 if (IsSwapped) { 10917 // Warn on this floating-point to bool conversion. 10918 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 10919 CurrA->getType(), CC, 10920 diag::warn_impcast_floating_point_to_bool); 10921 } 10922 } 10923 } 10924 10925 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 10926 SourceLocation CC) { 10927 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 10928 E->getExprLoc())) 10929 return; 10930 10931 // Don't warn on functions which have return type nullptr_t. 10932 if (isa<CallExpr>(E)) 10933 return; 10934 10935 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 10936 const Expr::NullPointerConstantKind NullKind = 10937 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 10938 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 10939 return; 10940 10941 // Return if target type is a safe conversion. 10942 if (T->isAnyPointerType() || T->isBlockPointerType() || 10943 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 10944 return; 10945 10946 SourceLocation Loc = E->getSourceRange().getBegin(); 10947 10948 // Venture through the macro stacks to get to the source of macro arguments. 10949 // The new location is a better location than the complete location that was 10950 // passed in. 10951 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 10952 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 10953 10954 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
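// (Typical C++ code flagged by this check, illustrative only: 'int fd = NULL;'
//  and 'bool ok = nullptr;' both warn under -Wnull-conversion, with a fix-it
//  replacing the null constant by a plain zero literal for the target type.)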
10955 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 10956 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 10957 Loc, S.SourceMgr, S.getLangOpts()); 10958 if (MacroName == "NULL") 10959 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 10960 } 10961 10962 // Only warn if the null and context location are in the same macro expansion. 10963 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 10964 return; 10965 10966 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 10967 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 10968 << FixItHint::CreateReplacement(Loc, 10969 S.getFixItZeroLiteralForType(T, Loc)); 10970 } 10971 10972 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 10973 ObjCArrayLiteral *ArrayLiteral); 10974 10975 static void 10976 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 10977 ObjCDictionaryLiteral *DictionaryLiteral); 10978 10979 /// Check a single element within a collection literal against the 10980 /// target element type. 10981 static void checkObjCCollectionLiteralElement(Sema &S, 10982 QualType TargetElementType, 10983 Expr *Element, 10984 unsigned ElementKind) { 10985 // Skip a bitcast to 'id' or qualified 'id'. 10986 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 10987 if (ICE->getCastKind() == CK_BitCast && 10988 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 10989 Element = ICE->getSubExpr(); 10990 } 10991 10992 QualType ElementType = Element->getType(); 10993 ExprResult ElementResult(Element); 10994 if (ElementType->getAs<ObjCObjectPointerType>() && 10995 S.CheckSingleAssignmentConstraints(TargetElementType, 10996 ElementResult, 10997 false, false) 10998 != Sema::Compatible) { 10999 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 11000 << ElementType << ElementKind << TargetElementType 11001 << Element->getSourceRange(); 11002 } 11003 11004 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 11005 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 11006 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 11007 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 11008 } 11009 11010 /// Check an Objective-C array literal being converted to the given 11011 /// target type. 11012 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11013 ObjCArrayLiteral *ArrayLiteral) { 11014 if (!S.NSArrayDecl) 11015 return; 11016 11017 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11018 if (!TargetObjCPtr) 11019 return; 11020 11021 if (TargetObjCPtr->isUnspecialized() || 11022 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11023 != S.NSArrayDecl->getCanonicalDecl()) 11024 return; 11025 11026 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11027 if (TypeArgs.size() != 1) 11028 return; 11029 11030 QualType TargetElementType = TypeArgs[0]; 11031 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 11032 checkObjCCollectionLiteralElement(S, TargetElementType, 11033 ArrayLiteral->getElement(I), 11034 0); 11035 } 11036 } 11037 11038 /// Check an Objective-C dictionary literal being converted to the given 11039 /// target type. 
11040 static void
11041 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
11042 ObjCDictionaryLiteral *DictionaryLiteral) {
11043 if (!S.NSDictionaryDecl)
11044 return;
11045
11046 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
11047 if (!TargetObjCPtr)
11048 return;
11049
11050 if (TargetObjCPtr->isUnspecialized() ||
11051 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
11052 != S.NSDictionaryDecl->getCanonicalDecl())
11053 return;
11054
11055 auto TypeArgs = TargetObjCPtr->getTypeArgs();
11056 if (TypeArgs.size() != 2)
11057 return;
11058
11059 QualType TargetKeyType = TypeArgs[0];
11060 QualType TargetObjectType = TypeArgs[1];
11061 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
11062 auto Element = DictionaryLiteral->getKeyValueElement(I);
11063 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
11064 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
11065 }
11066 }
11067
11068 // Helper function to filter out cases for constant width constant conversion.
11069 // Don't warn on char array initialization or for non-decimal values.
11070 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
11071 SourceLocation CC) {
11072 // If initializing from a constant, and the constant starts with '0',
11073 // then it is a binary, octal, or hexadecimal constant. Allow these constants
11074 // to fill all the bits, even if there is a sign change.
11075 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
11076 const char FirstLiteralCharacter =
11077 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
11078 if (FirstLiteralCharacter == '0')
11079 return false;
11080 }
11081
11082 // If the CC location points to a '{', and the type is char, then assume
11083 // it is an array initialization.
11084 if (CC.isValid() && T->isCharType()) {
11085 const char FirstContextCharacter =
11086 S.getSourceManager().getCharacterData(CC)[0];
11087 if (FirstContextCharacter == '{')
11088 return false;
11089 }
11090
11091 return true;
11092 }
11093
11094 static void
11095 CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
11096 bool *ICContext = nullptr) {
11097 if (E->isTypeDependent() || E->isValueDependent()) return;
11098
11099 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
11100 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
11101 if (Source == Target) return;
11102 if (Target->isDependentType()) return;
11103
11104 // If the conversion context location is invalid don't complain. We also
11105 // don't want to emit a warning if the issue occurs from the expansion of
11106 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
11107 // delay this check as long as possible. Once we detect we are in that
11108 // scenario, we just return.
11109 if (CC.isInvalid())
11110 return;
11111
11112 if (Source->isAtomicType())
11113 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
11114
11115 // Diagnose implicit casts to bool.
11116 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
11117 if (isa<StringLiteral>(E))
11118 // Warn on string literal to bool. Checks for string literals in logical-and
11119 // expressions, for instance, assert(0 && "error here"), are
11120 // prevented by a check in AnalyzeImplicitConversions().
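// (For instance, illustrative only: 'if ("verbose") {}' warns under
//  -Wstring-conversion because the literal always converts to true, while
//  'assert(ptr && "must not be null")' stays silent thanks to the special
//  case mentioned above.)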
11121 return DiagnoseImpCast(S, E, T, CC, 11122 diag::warn_impcast_string_literal_to_bool); 11123 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 11124 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 11125 // This covers the literal expressions that evaluate to Objective-C 11126 // objects. 11127 return DiagnoseImpCast(S, E, T, CC, 11128 diag::warn_impcast_objective_c_literal_to_bool); 11129 } 11130 if (Source->isPointerType() || Source->canDecayToPointerType()) { 11131 // Warn on pointer to bool conversion that is always true. 11132 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 11133 SourceRange(CC)); 11134 } 11135 } 11136 11137 // Check implicit casts from Objective-C collection literals to specialized 11138 // collection types, e.g., NSArray<NSString *> *. 11139 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 11140 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 11141 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 11142 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 11143 11144 // Strip vector types. 11145 if (isa<VectorType>(Source)) { 11146 if (!isa<VectorType>(Target)) { 11147 if (S.SourceMgr.isInSystemMacro(CC)) 11148 return; 11149 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 11150 } 11151 11152 // If the vector cast is cast between two vectors of the same size, it is 11153 // a bitcast, not a conversion. 11154 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 11155 return; 11156 11157 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 11158 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 11159 } 11160 if (auto VecTy = dyn_cast<VectorType>(Target)) 11161 Target = VecTy->getElementType().getTypePtr(); 11162 11163 // Strip complex types. 11164 if (isa<ComplexType>(Source)) { 11165 if (!isa<ComplexType>(Target)) { 11166 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 11167 return; 11168 11169 return DiagnoseImpCast(S, E, T, CC, 11170 S.getLangOpts().CPlusPlus 11171 ? diag::err_impcast_complex_scalar 11172 : diag::warn_impcast_complex_scalar); 11173 } 11174 11175 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 11176 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 11177 } 11178 11179 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 11180 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 11181 11182 // If the source is floating point... 11183 if (SourceBT && SourceBT->isFloatingPoint()) { 11184 // ...and the target is floating point... 11185 if (TargetBT && TargetBT->isFloatingPoint()) { 11186 // ...then warn if we're dropping FP rank. 11187 11188 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11189 QualType(SourceBT, 0), QualType(TargetBT, 0)); 11190 if (Order > 0) { 11191 // Don't warn about float constants that are precisely 11192 // representable in the target type. 11193 Expr::EvalResult result; 11194 if (E->EvaluateAsRValue(result, S.Context)) { 11195 // Value might be a float, a float vector, or a float complex. 11196 if (IsSameFloatAfterCast(result.Val, 11197 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 11198 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 11199 return; 11200 } 11201 11202 if (S.SourceMgr.isInSystemMacro(CC)) 11203 return; 11204 11205 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 11206 } 11207 // ... 
or possibly if we're increasing rank, too 11208 else if (Order < 0) { 11209 if (S.SourceMgr.isInSystemMacro(CC)) 11210 return; 11211 11212 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 11213 } 11214 return; 11215 } 11216 11217 // If the target is integral, always warn. 11218 if (TargetBT && TargetBT->isInteger()) { 11219 if (S.SourceMgr.isInSystemMacro(CC)) 11220 return; 11221 11222 DiagnoseFloatingImpCast(S, E, T, CC); 11223 } 11224 11225 // Detect the case where a call result is converted from floating-point to 11226 // to bool, and the final argument to the call is converted from bool, to 11227 // discover this typo: 11228 // 11229 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 11230 // 11231 // FIXME: This is an incredibly special case; is there some more general 11232 // way to detect this class of misplaced-parentheses bug? 11233 if (Target->isBooleanType() && isa<CallExpr>(E)) { 11234 // Check last argument of function call to see if it is an 11235 // implicit cast from a type matching the type the result 11236 // is being cast to. 11237 CallExpr *CEx = cast<CallExpr>(E); 11238 if (unsigned NumArgs = CEx->getNumArgs()) { 11239 Expr *LastA = CEx->getArg(NumArgs - 1); 11240 Expr *InnerE = LastA->IgnoreParenImpCasts(); 11241 if (isa<ImplicitCastExpr>(LastA) && 11242 InnerE->getType()->isBooleanType()) { 11243 // Warn on this floating-point to bool conversion 11244 DiagnoseImpCast(S, E, T, CC, 11245 diag::warn_impcast_floating_point_to_bool); 11246 } 11247 } 11248 } 11249 return; 11250 } 11251 11252 // Valid casts involving fixed point types should be accounted for here. 11253 if (Source->isFixedPointType()) { 11254 if (Target->isUnsaturatedFixedPointType()) { 11255 Expr::EvalResult Result; 11256 if (E->EvaluateAsFixedPoint(Result, S.Context, 11257 Expr::SE_AllowSideEffects)) { 11258 APFixedPoint Value = Result.Val.getFixedPoint(); 11259 APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 11260 APFixedPoint MinVal = S.Context.getFixedPointMin(T); 11261 if (Value > MaxVal || Value < MinVal) { 11262 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11263 S.PDiag(diag::warn_impcast_fixed_point_range) 11264 << Value.toString() << T 11265 << E->getSourceRange() 11266 << clang::SourceRange(CC)); 11267 return; 11268 } 11269 } 11270 } else if (Target->isIntegerType()) { 11271 Expr::EvalResult Result; 11272 if (E->EvaluateAsFixedPoint(Result, S.Context, 11273 Expr::SE_AllowSideEffects)) { 11274 APFixedPoint FXResult = Result.Val.getFixedPoint(); 11275 11276 bool Overflowed; 11277 llvm::APSInt IntResult = FXResult.convertToInt( 11278 S.Context.getIntWidth(T), 11279 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 11280 11281 if (Overflowed) { 11282 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11283 S.PDiag(diag::warn_impcast_fixed_point_range) 11284 << FXResult.toString() << T 11285 << E->getSourceRange() 11286 << clang::SourceRange(CC)); 11287 return; 11288 } 11289 } 11290 } 11291 } else if (Target->isUnsaturatedFixedPointType()) { 11292 if (Source->isIntegerType()) { 11293 Expr::EvalResult Result; 11294 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 11295 llvm::APSInt Value = Result.Val.getInt(); 11296 11297 bool Overflowed; 11298 APFixedPoint IntResult = APFixedPoint::getFromIntValue( 11299 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 11300 11301 if (Overflowed) { 11302 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11303 S.PDiag(diag::warn_impcast_fixed_point_range) 11304 << Value.toString(/*radix=*/10) << T 11305 << 
E->getSourceRange() 11306 << clang::SourceRange(CC)); 11307 return; 11308 } 11309 } 11310 } 11311 } 11312 11313 DiagnoseNullConversion(S, E, T, CC); 11314 11315 S.DiscardMisalignedMemberAddress(Target, E); 11316 11317 if (!Source->isIntegerType() || !Target->isIntegerType()) 11318 return; 11319 11320 // TODO: remove this early return once the false positives for constant->bool 11321 // in templates, macros, etc, are reduced or removed. 11322 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 11323 return; 11324 11325 IntRange SourceRange = GetExprRange(S.Context, E); 11326 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 11327 11328 if (SourceRange.Width > TargetRange.Width) { 11329 // If the source is a constant, use a default-on diagnostic. 11330 // TODO: this should happen for bitfield stores, too. 11331 Expr::EvalResult Result; 11332 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 11333 llvm::APSInt Value(32); 11334 Value = Result.Val.getInt(); 11335 11336 if (S.SourceMgr.isInSystemMacro(CC)) 11337 return; 11338 11339 std::string PrettySourceValue = Value.toString(10); 11340 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 11341 11342 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11343 S.PDiag(diag::warn_impcast_integer_precision_constant) 11344 << PrettySourceValue << PrettyTargetValue 11345 << E->getType() << T << E->getSourceRange() 11346 << clang::SourceRange(CC)); 11347 return; 11348 } 11349 11350 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 11351 if (S.SourceMgr.isInSystemMacro(CC)) 11352 return; 11353 11354 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 11355 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 11356 /* pruneControlFlow */ true); 11357 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 11358 } 11359 11360 if (TargetRange.Width > SourceRange.Width) { 11361 if (auto *UO = dyn_cast<UnaryOperator>(E)) 11362 if (UO->getOpcode() == UO_Minus) 11363 if (Source->isUnsignedIntegerType()) { 11364 if (Target->isUnsignedIntegerType()) 11365 return DiagnoseImpCast(S, E, T, CC, 11366 diag::warn_impcast_high_order_zero_bits); 11367 if (Target->isSignedIntegerType()) 11368 return DiagnoseImpCast(S, E, T, CC, 11369 diag::warn_impcast_nonnegative_result); 11370 } 11371 } 11372 11373 if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative && 11374 SourceRange.NonNegative && Source->isSignedIntegerType()) { 11375 // Warn when doing a signed to signed conversion, warn if the positive 11376 // source value is exactly the width of the target type, which will 11377 // cause a negative value to be stored. 11378 11379 Expr::EvalResult Result; 11380 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 11381 !S.SourceMgr.isInSystemMacro(CC)) { 11382 llvm::APSInt Value = Result.Val.getInt(); 11383 if (isSameWidthConstantConversion(S, E, T, CC)) { 11384 std::string PrettySourceValue = Value.toString(10); 11385 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 11386 11387 S.DiagRuntimeBehavior( 11388 E->getExprLoc(), E, 11389 S.PDiag(diag::warn_impcast_integer_precision_constant) 11390 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11391 << E->getSourceRange() << clang::SourceRange(CC)); 11392 return; 11393 } 11394 } 11395 11396 // Fall through for non-constants to give a sign conversion warning. 
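// (For example, illustrative only: 'signed char c = 128;' is diagnosed just
//  above, since 128 fills exactly the 8 value bits of the target and is
//  stored as -128; a non-constant operand of the same shape instead reaches
//  the signedness check that follows.)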
11397   }
11398
11399   if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
11400       (!TargetRange.NonNegative && SourceRange.NonNegative &&
11401        SourceRange.Width == TargetRange.Width)) {
11402     if (S.SourceMgr.isInSystemMacro(CC))
11403       return;
11404
11405     unsigned DiagID = diag::warn_impcast_integer_sign;
11406
11407     // Traditionally, gcc has warned about this under -Wsign-compare.
11408     // We also want to warn about it in -Wconversion.
11409     // So if -Wconversion is off, use a completely identical diagnostic
11410     // in the sign-compare group.
11411     // The conditional-checking code below uses the flag set here to decide whether the conditional as a whole should be diagnosed.
11412     if (ICContext) {
11413       DiagID = diag::warn_impcast_integer_sign_conditional;
11414       *ICContext = true;
11415     }
11416
11417     return DiagnoseImpCast(S, E, T, CC, DiagID);
11418   }
11419
11420   // Diagnose conversions between different enumeration types.
11421   // In C, we pretend that the type of an EnumConstantDecl is its enumeration
11422   // type, to give us better diagnostics.
11423   QualType SourceType = E->getType();
11424   if (!S.getLangOpts().CPlusPlus) {
11425     if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
11426       if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
11427         EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
11428         SourceType = S.Context.getTypeDeclType(Enum);
11429         Source = S.Context.getCanonicalType(SourceType).getTypePtr();
11430       }
11431   }
11432
11433   if (const EnumType *SourceEnum = Source->getAs<EnumType>())
11434     if (const EnumType *TargetEnum = Target->getAs<EnumType>())
11435       if (SourceEnum->getDecl()->hasNameForLinkage() &&
11436           TargetEnum->getDecl()->hasNameForLinkage() &&
11437           SourceEnum != TargetEnum) {
11438         if (S.SourceMgr.isInSystemMacro(CC))
11439           return;
11440
11441         return DiagnoseImpCast(S, E, SourceType, T, CC,
11442                                diag::warn_impcast_different_enum_types);
11443       }
11444 }
11445
11446 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
11447                                      SourceLocation CC, QualType T);
11448
11449 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
11450                                     SourceLocation CC, bool &ICContext) {
11451   E = E->IgnoreParenImpCasts();
11452
11453   if (isa<ConditionalOperator>(E))
11454     return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T);
11455
11456   AnalyzeImplicitConversions(S, E, CC);
11457   if (E->getType() != T)
11458     return CheckImplicitConversion(S, E, T, CC, &ICContext);
11459 }
11460
11461 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
11462                                      SourceLocation CC, QualType T) {
11463   AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
11464
11465   bool Suspicious = false;
11466   CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
11467   CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
11468
11469   // If -Wconversion would have warned about either of the candidates
11470   // for a signedness conversion to the context type...
11471   if (!Suspicious) return;
11472
11473   // ...but it's currently ignored...
11474   if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
11475     return;
11476
11477   // ...then check whether it would have warned about either of the
11478   // candidates for a signedness conversion to the condition type.
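  // For illustration only (hypothetical user code, not part of this file):
  //   unsigned u; int n; bool c;
  //   unsigned r = c ? u : n;   // 'n' changes signedness when the conditional
  //                             // is converted to 'unsigned'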
11479 if (E->getType() == T) return; 11480 11481 Suspicious = false; 11482 CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(), 11483 E->getType(), CC, &Suspicious); 11484 if (!Suspicious) 11485 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 11486 E->getType(), CC, &Suspicious); 11487 } 11488 11489 /// Check conversion of given expression to boolean. 11490 /// Input argument E is a logical expression. 11491 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 11492 if (S.getLangOpts().Bool) 11493 return; 11494 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 11495 return; 11496 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 11497 } 11498 11499 /// AnalyzeImplicitConversions - Find and report any interesting 11500 /// implicit conversions in the given expression. There are a couple 11501 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 11502 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, 11503 SourceLocation CC) { 11504 QualType T = OrigE->getType(); 11505 Expr *E = OrigE->IgnoreParenImpCasts(); 11506 11507 if (E->isTypeDependent() || E->isValueDependent()) 11508 return; 11509 11510 // For conditional operators, we analyze the arguments as if they 11511 // were being fed directly into the output. 11512 if (isa<ConditionalOperator>(E)) { 11513 ConditionalOperator *CO = cast<ConditionalOperator>(E); 11514 CheckConditionalOperator(S, CO, CC, T); 11515 return; 11516 } 11517 11518 // Check implicit argument conversions for function calls. 11519 if (CallExpr *Call = dyn_cast<CallExpr>(E)) 11520 CheckImplicitArgumentConversions(S, Call, CC); 11521 11522 // Go ahead and check any implicit conversions we might have skipped. 11523 // The non-canonical typecheck is just an optimization; 11524 // CheckImplicitConversion will filter out dead implicit conversions. 11525 if (E->getType() != T) 11526 CheckImplicitConversion(S, E, T, CC); 11527 11528 // Now continue drilling into this expression. 11529 11530 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 11531 // The bound subexpressions in a PseudoObjectExpr are not reachable 11532 // as transitive children. 11533 // FIXME: Use a more uniform representation for this. 11534 for (auto *SE : POE->semantics()) 11535 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 11536 AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC); 11537 } 11538 11539 // Skip past explicit casts. 11540 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 11541 E = CE->getSubExpr()->IgnoreParenImpCasts(); 11542 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 11543 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11544 return AnalyzeImplicitConversions(S, E, CC); 11545 } 11546 11547 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 11548 // Do a somewhat different check with comparison operators. 11549 if (BO->isComparisonOp()) 11550 return AnalyzeComparison(S, BO); 11551 11552 // And with simple assignments. 11553 if (BO->getOpcode() == BO_Assign) 11554 return AnalyzeAssignment(S, BO); 11555 // And with compound assignments. 11556 if (BO->isAssignmentOp()) 11557 return AnalyzeCompoundAssignment(S, BO); 11558 } 11559 11560 // These break the otherwise-useful invariant below. Fortunately, 11561 // we don't really need to recurse into them, because any internal 11562 // expressions should have been analyzed already when they were 11563 // built into statements. 
11564 if (isa<StmtExpr>(E)) return; 11565 11566 // Don't descend into unevaluated contexts. 11567 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 11568 11569 // Now just recurse over the expression's children. 11570 CC = E->getExprLoc(); 11571 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 11572 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 11573 for (Stmt *SubStmt : E->children()) { 11574 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 11575 if (!ChildExpr) 11576 continue; 11577 11578 if (IsLogicalAndOperator && 11579 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 11580 // Ignore checking string literals that are in logical and operators. 11581 // This is a common pattern for asserts. 11582 continue; 11583 AnalyzeImplicitConversions(S, ChildExpr, CC); 11584 } 11585 11586 if (BO && BO->isLogicalOp()) { 11587 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 11588 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 11589 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 11590 11591 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 11592 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 11593 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 11594 } 11595 11596 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 11597 if (U->getOpcode() == UO_LNot) { 11598 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 11599 } else if (U->getOpcode() != UO_AddrOf) { 11600 if (U->getSubExpr()->getType()->isAtomicType()) 11601 S.Diag(U->getSubExpr()->getBeginLoc(), 11602 diag::warn_atomic_implicit_seq_cst); 11603 } 11604 } 11605 } 11606 11607 /// Diagnose integer type and any valid implicit conversion to it. 11608 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) { 11609 // Taking into account implicit conversions, 11610 // allow any integer. 11611 if (!E->getType()->isIntegerType()) { 11612 S.Diag(E->getBeginLoc(), 11613 diag::err_opencl_enqueue_kernel_invalid_local_size_type); 11614 return true; 11615 } 11616 // Potentially emit standard warnings for implicit conversions if enabled 11617 // using -Wconversion. 11618 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 11619 return false; 11620 } 11621 11622 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 11623 // Returns true when emitting a warning about taking the address of a reference. 11624 static bool CheckForReference(Sema &SemaRef, const Expr *E, 11625 const PartialDiagnostic &PD) { 11626 E = E->IgnoreParenImpCasts(); 11627 11628 const FunctionDecl *FD = nullptr; 11629 11630 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 11631 if (!DRE->getDecl()->getType()->isReferenceType()) 11632 return false; 11633 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 11634 if (!M->getMemberDecl()->getType()->isReferenceType()) 11635 return false; 11636 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 11637 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 11638 return false; 11639 FD = Call->getDirectCallee(); 11640 } else { 11641 return false; 11642 } 11643 11644 SemaRef.Diag(E->getExprLoc(), PD); 11645 11646 // If possible, point to location of function. 11647 if (FD) { 11648 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 11649 } 11650 11651 return true; 11652 } 11653 11654 // Returns true if the SourceLocation is expanded from any macro body. 
// Returns false if the SourceLocation is invalid, is not in a macro
11656 // expansion, or is expanded from a top-level macro argument.
11657 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
11658   if (Loc.isInvalid())
11659     return false;
11660
11661   while (Loc.isMacroID()) {
11662     if (SM.isMacroBodyExpansion(Loc))
11663       return true;
11664     Loc = SM.getImmediateMacroCallerLoc(Loc);
11665   }
11666
11667   return false;
11668 }
11669
11670 /// Diagnose pointers that are always non-null.
11671 /// \param E the expression containing the pointer
11672 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
11673 /// compared to a null pointer
11674 /// \param IsEqual True when the comparison is equal to a null pointer
11675 /// \param Range Extra SourceRange to highlight in the diagnostic
11676 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
11677                                         Expr::NullPointerConstantKind NullKind,
11678                                         bool IsEqual, SourceRange Range) {
11679   if (!E)
11680     return;
11681
11682   // Don't warn inside macros.
11683   if (E->getExprLoc().isMacroID()) {
11684     const SourceManager &SM = getSourceManager();
11685     if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
11686         IsInAnyMacroBody(SM, Range.getBegin()))
11687       return;
11688   }
11689   E = E->IgnoreImpCasts();
11690
11691   const bool IsCompare = NullKind != Expr::NPCK_NotNull;
11692
11693   if (isa<CXXThisExpr>(E)) {
11694     unsigned DiagID = IsCompare ? diag::warn_this_null_compare
11695                                 : diag::warn_this_bool_conversion;
11696     Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
11697     return;
11698   }
11699
11700   bool IsAddressOf = false;
11701
11702   if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
11703     if (UO->getOpcode() != UO_AddrOf)
11704       return;
11705     IsAddressOf = true;
11706     E = UO->getSubExpr();
11707   }
11708
11709   if (IsAddressOf) {
11710     unsigned DiagID = IsCompare
11711                           ? diag::warn_address_of_reference_null_compare
11712                           : diag::warn_address_of_reference_bool_conversion;
11713     PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
11714                                          << IsEqual;
11715     if (CheckForReference(*this, E, PD)) {
11716       return;
11717     }
11718   }
11719
11720   auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
11721     bool IsParam = isa<NonNullAttr>(NonnullAttr);
11722     std::string Str;
11723     llvm::raw_string_ostream S(Str);
11724     E->printPretty(S, nullptr, getPrintingPolicy());
11725     unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
11726                                 : diag::warn_cast_nonnull_to_bool;
11727     Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
11728                                   << E->getSourceRange() << Range << IsEqual;
11729     Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
11730   };
11731
11732   // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
11733   if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
11734     if (auto *Callee = Call->getDirectCallee()) {
11735       if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
11736         ComplainAboutNonnullParamOrCall(A);
11737         return;
11738       }
11739     }
11740   }
11741
11742   // Expect to find a single Decl. Skip anything more complicated.
11743   ValueDecl *D = nullptr;
11744   if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
11745     D = R->getDecl();
11746   } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
11747     D = M->getMemberDecl();
11748   }
11749
11750   // Weak Decls can be null.
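  // For illustration only (hypothetical user code): a weak declaration such as
  //   extern int Sym __attribute__((weak));
  // may resolve to a null address, so '&Sym' is not treated as always non-null.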
11751   if (!D || D->isWeak())
11752     return;
11753
11754   // Check for parameter decl with nonnull attribute
11755   if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
11756     if (getCurFunction() &&
11757         !getCurFunction()->ModifiedNonNullParams.count(PV)) {
11758       if (const Attr *A = PV->getAttr<NonNullAttr>()) {
11759         ComplainAboutNonnullParamOrCall(A);
11760         return;
11761       }
11762
11763       if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
11764         // Skip function templates that have not been specialized yet.
11765         if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
11766           return;
11767         auto ParamIter = llvm::find(FD->parameters(), PV);
11768         assert(ParamIter != FD->param_end());
11769         unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
11770
11771         for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
11772           if (!NonNull->args_size()) {
11773             ComplainAboutNonnullParamOrCall(NonNull);
11774             return;
11775           }
11776
11777           for (const ParamIdx &ArgNo : NonNull->args()) {
11778             if (ArgNo.getASTIndex() == ParamNo) {
11779               ComplainAboutNonnullParamOrCall(NonNull);
11780               return;
11781             }
11782           }
11783         }
11784       }
11785     }
11786   }
11787
11788   QualType T = D->getType();
11789   const bool IsArray = T->isArrayType();
11790   const bool IsFunction = T->isFunctionType();
11791
11792   // Taking the address of a function silences the function warning.
11793   if (IsAddressOf && IsFunction) {
11794     return;
11795   }
11796
11797   // Found nothing.
11798   if (!IsAddressOf && !IsFunction && !IsArray)
11799     return;
11800
11801   // Pretty print the expression for the diagnostic.
11802   std::string Str;
11803   llvm::raw_string_ostream S(Str);
11804   E->printPretty(S, nullptr, getPrintingPolicy());
11805
11806   unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
11807                               : diag::warn_impcast_pointer_to_bool;
11808   enum {
11809     AddressOf,
11810     FunctionPointer,
11811     ArrayPointer
11812   } DiagType;
11813   if (IsAddressOf)
11814     DiagType = AddressOf;
11815   else if (IsFunction)
11816     DiagType = FunctionPointer;
11817   else if (IsArray)
11818     DiagType = ArrayPointer;
11819   else
11820     llvm_unreachable("Could not determine diagnostic.");
11821   Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
11822                                 << Range << IsEqual;
11823
11824   if (!IsFunction)
11825     return;
11826
11827   // Suggest '&' to silence the function warning.
11828   Diag(E->getExprLoc(), diag::note_function_warning_silence)
11829       << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
11830
11831   // Check to see if '()' fixit should be emitted.
11832   QualType ReturnType;
11833   UnresolvedSet<4> NonTemplateOverloads;
11834   tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
11835   if (ReturnType.isNull())
11836     return;
11837
11838   if (IsCompare) {
11839     // There are two cases here. If the null is a genuine null pointer
11840     // constant, only suggest the call for a pointer return type. If the null
11841     // is 0, suggest it if the return type is a pointer or an integer type.
11842     if (!ReturnType->isPointerType()) {
11843       if (NullKind == Expr::NPCK_ZeroExpression ||
11844           NullKind == Expr::NPCK_ZeroLiteral) {
11845         if (!ReturnType->isIntegerType())
11846           return;
11847       } else {
11848         return;
11849       }
11850     }
11851   } else { // !IsCompare
11852     // For function to bool, only suggest if the function pointer has bool
11853     // return type.
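  // For illustration only (hypothetical user code):
  //   bool enabled(void);
  //   bool b = enabled;   // warns; the '()' fix-it below is offered because
  //                       // 'enabled' itself returns bool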
11854     if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
11855       return;
11856   }
11857   Diag(E->getExprLoc(), diag::note_function_to_function_call)
11858       << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
11859 }
11860
11861 /// Diagnoses "dangerous" implicit conversions within the given
11862 /// expression (which is a full expression). Implements -Wconversion
11863 /// and -Wsign-compare.
11864 ///
11865 /// \param CC the "context" location of the implicit conversion, i.e.
11866 /// the location of the syntactic entity requiring the implicit
11867 /// conversion
11868 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
11869   // Don't diagnose in unevaluated contexts.
11870   if (isUnevaluatedContext())
11871     return;
11872
11873   // Don't diagnose for value- or type-dependent expressions.
11874   if (E->isTypeDependent() || E->isValueDependent())
11875     return;
11876
11877   // Check for array bounds violations in cases where the check isn't triggered
11878   // elsewhere for other Expr types (like BinaryOperators), e.g. when an
11879   // ArraySubscriptExpr is on the RHS of a variable initialization.
11880   CheckArrayAccess(E);
11881
11882   // This is not the right CC for (e.g.) a variable initialization.
11883   AnalyzeImplicitConversions(*this, E, CC);
11884 }
11885
11886 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
11887 /// Input argument E is a logical expression.
11888 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
11889   ::CheckBoolLikeConversion(*this, E, CC);
11890 }
11891
11892 /// Diagnose when an expression is an integer constant expression whose
11893 /// evaluation results in integer overflow.
11894 void Sema::CheckForIntOverflow(Expr *E) {
11895   // Use a work list to deal with nested struct initializers.
11896   SmallVector<Expr *, 2> Exprs(1, E);
11897
11898   do {
11899     Expr *OriginalE = Exprs.pop_back_val();
11900     Expr *E = OriginalE->IgnoreParenCasts();
11901
11902     if (isa<BinaryOperator>(E)) {
11903       E->EvaluateForOverflow(Context);
11904       continue;
11905     }
11906
11907     if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
11908       Exprs.append(InitList->inits().begin(), InitList->inits().end());
11909     else if (isa<ObjCBoxedExpr>(OriginalE))
11910       E->EvaluateForOverflow(Context);
11911     else if (auto Call = dyn_cast<CallExpr>(E))
11912       Exprs.append(Call->arg_begin(), Call->arg_end());
11913     else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
11914       Exprs.append(Message->arg_begin(), Message->arg_end());
11915   } while (!Exprs.empty());
11916 }
11917
11918 namespace {
11919
11920 /// Visitor for expressions which looks for unsequenced operations on the
11921 /// same object.
11922 class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
11923   using Base = EvaluatedExprVisitor<SequenceChecker>;
11924
11925   /// A tree of sequenced regions within an expression. Two regions are
11926   /// unsequenced if one is an ancestor or a descendant of the other. When we
11927   /// finish processing an expression with sequencing, such as a comma
11928   /// expression, we fold its tree nodes into its parent, since they are
11929   /// unsequenced with respect to nodes we will visit later.
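  /// For illustration only (hypothetical user code): the two arguments of
  /// "f(i = 1, i = 2)" are visited in the same region and therefore report as
  /// unsequenced, while the operands of the comma in "(i = 1, i = 2)" get
  /// sibling regions and are treated as sequenced.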
11930 class SequenceTree { 11931 struct Value { 11932 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 11933 unsigned Parent : 31; 11934 unsigned Merged : 1; 11935 }; 11936 SmallVector<Value, 8> Values; 11937 11938 public: 11939 /// A region within an expression which may be sequenced with respect 11940 /// to some other region. 11941 class Seq { 11942 friend class SequenceTree; 11943 11944 unsigned Index; 11945 11946 explicit Seq(unsigned N) : Index(N) {} 11947 11948 public: 11949 Seq() : Index(0) {} 11950 }; 11951 11952 SequenceTree() { Values.push_back(Value(0)); } 11953 Seq root() const { return Seq(0); } 11954 11955 /// Create a new sequence of operations, which is an unsequenced 11956 /// subset of \p Parent. This sequence of operations is sequenced with 11957 /// respect to other children of \p Parent. 11958 Seq allocate(Seq Parent) { 11959 Values.push_back(Value(Parent.Index)); 11960 return Seq(Values.size() - 1); 11961 } 11962 11963 /// Merge a sequence of operations into its parent. 11964 void merge(Seq S) { 11965 Values[S.Index].Merged = true; 11966 } 11967 11968 /// Determine whether two operations are unsequenced. This operation 11969 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 11970 /// should have been merged into its parent as appropriate. 11971 bool isUnsequenced(Seq Cur, Seq Old) { 11972 unsigned C = representative(Cur.Index); 11973 unsigned Target = representative(Old.Index); 11974 while (C >= Target) { 11975 if (C == Target) 11976 return true; 11977 C = Values[C].Parent; 11978 } 11979 return false; 11980 } 11981 11982 private: 11983 /// Pick a representative for a sequence. 11984 unsigned representative(unsigned K) { 11985 if (Values[K].Merged) 11986 // Perform path compression as we go. 11987 return Values[K].Parent = representative(Values[K].Parent); 11988 return K; 11989 } 11990 }; 11991 11992 /// An object for which we can track unsequenced uses. 11993 using Object = NamedDecl *; 11994 11995 /// Different flavors of object usage which we track. We only track the 11996 /// least-sequenced usage of each kind. 11997 enum UsageKind { 11998 /// A read of an object. Multiple unsequenced reads are OK. 11999 UK_Use, 12000 12001 /// A modification of an object which is sequenced before the value 12002 /// computation of the expression, such as ++n in C++. 12003 UK_ModAsValue, 12004 12005 /// A modification of an object which is not sequenced before the value 12006 /// computation of the expression, such as n++. 12007 UK_ModAsSideEffect, 12008 12009 UK_Count = UK_ModAsSideEffect + 1 12010 }; 12011 12012 struct Usage { 12013 Expr *Use; 12014 SequenceTree::Seq Seq; 12015 12016 Usage() : Use(nullptr), Seq() {} 12017 }; 12018 12019 struct UsageInfo { 12020 Usage Uses[UK_Count]; 12021 12022 /// Have we issued a diagnostic for this variable already? 12023 bool Diagnosed; 12024 12025 UsageInfo() : Uses(), Diagnosed(false) {} 12026 }; 12027 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 12028 12029 Sema &SemaRef; 12030 12031 /// Sequenced regions within the expression. 12032 SequenceTree Tree; 12033 12034 /// Declaration modifications and references which we have seen. 12035 UsageInfoMap UsageMap; 12036 12037 /// The region we are currently within. 12038 SequenceTree::Seq Region; 12039 12040 /// Filled in with declarations which were modified as a side-effect 12041 /// (that is, post-increment operations). 
12042 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 12043 12044 /// Expressions to check later. We defer checking these to reduce 12045 /// stack usage. 12046 SmallVectorImpl<Expr *> &WorkList; 12047 12048 /// RAII object wrapping the visitation of a sequenced subexpression of an 12049 /// expression. At the end of this process, the side-effects of the evaluation 12050 /// become sequenced with respect to the value computation of the result, so 12051 /// we downgrade any UK_ModAsSideEffect within the evaluation to 12052 /// UK_ModAsValue. 12053 struct SequencedSubexpression { 12054 SequencedSubexpression(SequenceChecker &Self) 12055 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 12056 Self.ModAsSideEffect = &ModAsSideEffect; 12057 } 12058 12059 ~SequencedSubexpression() { 12060 for (auto &M : llvm::reverse(ModAsSideEffect)) { 12061 UsageInfo &U = Self.UsageMap[M.first]; 12062 auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect]; 12063 Self.addUsage(U, M.first, SideEffectUsage.Use, UK_ModAsValue); 12064 SideEffectUsage = M.second; 12065 } 12066 Self.ModAsSideEffect = OldModAsSideEffect; 12067 } 12068 12069 SequenceChecker &Self; 12070 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 12071 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 12072 }; 12073 12074 /// RAII object wrapping the visitation of a subexpression which we might 12075 /// choose to evaluate as a constant. If any subexpression is evaluated and 12076 /// found to be non-constant, this allows us to suppress the evaluation of 12077 /// the outer expression. 12078 class EvaluationTracker { 12079 public: 12080 EvaluationTracker(SequenceChecker &Self) 12081 : Self(Self), Prev(Self.EvalTracker) { 12082 Self.EvalTracker = this; 12083 } 12084 12085 ~EvaluationTracker() { 12086 Self.EvalTracker = Prev; 12087 if (Prev) 12088 Prev->EvalOK &= EvalOK; 12089 } 12090 12091 bool evaluate(const Expr *E, bool &Result) { 12092 if (!EvalOK || E->isValueDependent()) 12093 return false; 12094 EvalOK = E->EvaluateAsBooleanCondition(Result, Self.SemaRef.Context); 12095 return EvalOK; 12096 } 12097 12098 private: 12099 SequenceChecker &Self; 12100 EvaluationTracker *Prev; 12101 bool EvalOK = true; 12102 } *EvalTracker = nullptr; 12103 12104 /// Find the object which is produced by the specified expression, 12105 /// if any. 12106 Object getObject(Expr *E, bool Mod) const { 12107 E = E->IgnoreParenCasts(); 12108 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12109 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 12110 return getObject(UO->getSubExpr(), Mod); 12111 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12112 if (BO->getOpcode() == BO_Comma) 12113 return getObject(BO->getRHS(), Mod); 12114 if (Mod && BO->isAssignmentOp()) 12115 return getObject(BO->getLHS(), Mod); 12116 } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 12117 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 12118 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 12119 return ME->getMemberDecl(); 12120 } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12121 // FIXME: If this is a reference, map through to its value. 12122 return DRE->getDecl(); 12123 return nullptr; 12124 } 12125 12126 /// Note that an object was modified or used by an expression. 
12127 void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) { 12128 Usage &U = UI.Uses[UK]; 12129 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) { 12130 if (UK == UK_ModAsSideEffect && ModAsSideEffect) 12131 ModAsSideEffect->push_back(std::make_pair(O, U)); 12132 U.Use = Ref; 12133 U.Seq = Region; 12134 } 12135 } 12136 12137 /// Check whether a modification or use conflicts with a prior usage. 12138 void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind, 12139 bool IsModMod) { 12140 if (UI.Diagnosed) 12141 return; 12142 12143 const Usage &U = UI.Uses[OtherKind]; 12144 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) 12145 return; 12146 12147 Expr *Mod = U.Use; 12148 Expr *ModOrUse = Ref; 12149 if (OtherKind == UK_Use) 12150 std::swap(Mod, ModOrUse); 12151 12152 SemaRef.DiagRuntimeBehavior( 12153 Mod->getExprLoc(), {Mod, ModOrUse}, 12154 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod 12155 : diag::warn_unsequenced_mod_use) 12156 << O << SourceRange(ModOrUse->getExprLoc())); 12157 UI.Diagnosed = true; 12158 } 12159 12160 void notePreUse(Object O, Expr *Use) { 12161 UsageInfo &U = UsageMap[O]; 12162 // Uses conflict with other modifications. 12163 checkUsage(O, U, Use, UK_ModAsValue, false); 12164 } 12165 12166 void notePostUse(Object O, Expr *Use) { 12167 UsageInfo &U = UsageMap[O]; 12168 checkUsage(O, U, Use, UK_ModAsSideEffect, false); 12169 addUsage(U, O, Use, UK_Use); 12170 } 12171 12172 void notePreMod(Object O, Expr *Mod) { 12173 UsageInfo &U = UsageMap[O]; 12174 // Modifications conflict with other modifications and with uses. 12175 checkUsage(O, U, Mod, UK_ModAsValue, true); 12176 checkUsage(O, U, Mod, UK_Use, false); 12177 } 12178 12179 void notePostMod(Object O, Expr *Use, UsageKind UK) { 12180 UsageInfo &U = UsageMap[O]; 12181 checkUsage(O, U, Use, UK_ModAsSideEffect, true); 12182 addUsage(U, O, Use, UK); 12183 } 12184 12185 public: 12186 SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList) 12187 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 12188 Visit(E); 12189 } 12190 12191 void VisitStmt(Stmt *S) { 12192 // Skip all statements which aren't expressions for now. 12193 } 12194 12195 void VisitExpr(Expr *E) { 12196 // By default, just recurse to evaluated subexpressions. 12197 Base::VisitStmt(E); 12198 } 12199 12200 void VisitCastExpr(CastExpr *E) { 12201 Object O = Object(); 12202 if (E->getCastKind() == CK_LValueToRValue) 12203 O = getObject(E->getSubExpr(), false); 12204 12205 if (O) 12206 notePreUse(O, E); 12207 VisitExpr(E); 12208 if (O) 12209 notePostUse(O, E); 12210 } 12211 12212 void VisitSequencedExpressions(Expr *SequencedBefore, Expr *SequencedAfter) { 12213 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 12214 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 12215 SequenceTree::Seq OldRegion = Region; 12216 12217 { 12218 SequencedSubexpression SeqBefore(*this); 12219 Region = BeforeRegion; 12220 Visit(SequencedBefore); 12221 } 12222 12223 Region = AfterRegion; 12224 Visit(SequencedAfter); 12225 12226 Region = OldRegion; 12227 12228 Tree.merge(BeforeRegion); 12229 Tree.merge(AfterRegion); 12230 } 12231 12232 void VisitArraySubscriptExpr(ArraySubscriptExpr *ASE) { 12233 // C++17 [expr.sub]p1: 12234 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 12235 // expression E1 is sequenced before the expression E2. 
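    // For illustration only (hypothetical user code): in "p[i][i++]" the read
    // of 'i' in the base and its modification in the index are unsequenced
    // before C++17; from C++17 on they are visited as sequenced regions below.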
12236 if (SemaRef.getLangOpts().CPlusPlus17) 12237 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 12238 else 12239 Base::VisitStmt(ASE); 12240 } 12241 12242 void VisitBinComma(BinaryOperator *BO) { 12243 // C++11 [expr.comma]p1: 12244 // Every value computation and side effect associated with the left 12245 // expression is sequenced before every value computation and side 12246 // effect associated with the right expression. 12247 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12248 } 12249 12250 void VisitBinAssign(BinaryOperator *BO) { 12251 // The modification is sequenced after the value computation of the LHS 12252 // and RHS, so check it before inspecting the operands and update the 12253 // map afterwards. 12254 Object O = getObject(BO->getLHS(), true); 12255 if (!O) 12256 return VisitExpr(BO); 12257 12258 notePreMod(O, BO); 12259 12260 // C++11 [expr.ass]p7: 12261 // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated 12262 // only once. 12263 // 12264 // Therefore, for a compound assignment operator, O is considered used 12265 // everywhere except within the evaluation of E1 itself. 12266 if (isa<CompoundAssignOperator>(BO)) 12267 notePreUse(O, BO); 12268 12269 Visit(BO->getLHS()); 12270 12271 if (isa<CompoundAssignOperator>(BO)) 12272 notePostUse(O, BO); 12273 12274 Visit(BO->getRHS()); 12275 12276 // C++11 [expr.ass]p1: 12277 // the assignment is sequenced [...] before the value computation of the 12278 // assignment expression. 12279 // C11 6.5.16/3 has no such rule. 12280 notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12281 : UK_ModAsSideEffect); 12282 } 12283 12284 void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) { 12285 VisitBinAssign(CAO); 12286 } 12287 12288 void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12289 void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12290 void VisitUnaryPreIncDec(UnaryOperator *UO) { 12291 Object O = getObject(UO->getSubExpr(), true); 12292 if (!O) 12293 return VisitExpr(UO); 12294 12295 notePreMod(O, UO); 12296 Visit(UO->getSubExpr()); 12297 // C++11 [expr.pre.incr]p1: 12298 // the expression ++x is equivalent to x+=1 12299 notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12300 : UK_ModAsSideEffect); 12301 } 12302 12303 void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12304 void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12305 void VisitUnaryPostIncDec(UnaryOperator *UO) { 12306 Object O = getObject(UO->getSubExpr(), true); 12307 if (!O) 12308 return VisitExpr(UO); 12309 12310 notePreMod(O, UO); 12311 Visit(UO->getSubExpr()); 12312 notePostMod(O, UO, UK_ModAsSideEffect); 12313 } 12314 12315 /// Don't visit the RHS of '&&' or '||' if it might not be evaluated. 12316 void VisitBinLOr(BinaryOperator *BO) { 12317 // The side-effects of the LHS of an '&&' are sequenced before the 12318 // value computation of the RHS, and hence before the value computation 12319 // of the '&&' itself, unless the LHS evaluates to zero. We treat them 12320 // as if they were unconditionally sequenced. 12321 EvaluationTracker Eval(*this); 12322 { 12323 SequencedSubexpression Sequenced(*this); 12324 Visit(BO->getLHS()); 12325 } 12326 12327 bool Result; 12328 if (Eval.evaluate(BO->getLHS(), Result)) { 12329 if (!Result) 12330 Visit(BO->getRHS()); 12331 } else { 12332 // Check for unsequenced operations in the RHS, treating it as an 12333 // entirely separate evaluation. 
12334 // 12335 // FIXME: If there are operations in the RHS which are unsequenced 12336 // with respect to operations outside the RHS, and those operations 12337 // are unconditionally evaluated, diagnose them. 12338 WorkList.push_back(BO->getRHS()); 12339 } 12340 } 12341 void VisitBinLAnd(BinaryOperator *BO) { 12342 EvaluationTracker Eval(*this); 12343 { 12344 SequencedSubexpression Sequenced(*this); 12345 Visit(BO->getLHS()); 12346 } 12347 12348 bool Result; 12349 if (Eval.evaluate(BO->getLHS(), Result)) { 12350 if (Result) 12351 Visit(BO->getRHS()); 12352 } else { 12353 WorkList.push_back(BO->getRHS()); 12354 } 12355 } 12356 12357 // Only visit the condition, unless we can be sure which subexpression will 12358 // be chosen. 12359 void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) { 12360 EvaluationTracker Eval(*this); 12361 { 12362 SequencedSubexpression Sequenced(*this); 12363 Visit(CO->getCond()); 12364 } 12365 12366 bool Result; 12367 if (Eval.evaluate(CO->getCond(), Result)) 12368 Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr()); 12369 else { 12370 WorkList.push_back(CO->getTrueExpr()); 12371 WorkList.push_back(CO->getFalseExpr()); 12372 } 12373 } 12374 12375 void VisitCallExpr(CallExpr *CE) { 12376 // C++11 [intro.execution]p15: 12377 // When calling a function [...], every value computation and side effect 12378 // associated with any argument expression, or with the postfix expression 12379 // designating the called function, is sequenced before execution of every 12380 // expression or statement in the body of the function [and thus before 12381 // the value computation of its result]. 12382 SequencedSubexpression Sequenced(*this); 12383 Base::VisitCallExpr(CE); 12384 12385 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 12386 } 12387 12388 void VisitCXXConstructExpr(CXXConstructExpr *CCE) { 12389 // This is a call, so all subexpressions are sequenced before the result. 12390 SequencedSubexpression Sequenced(*this); 12391 12392 if (!CCE->isListInitialization()) 12393 return VisitExpr(CCE); 12394 12395 // In C++11, list initializations are sequenced. 12396 SmallVector<SequenceTree::Seq, 32> Elts; 12397 SequenceTree::Seq Parent = Region; 12398 for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(), 12399 E = CCE->arg_end(); 12400 I != E; ++I) { 12401 Region = Tree.allocate(Parent); 12402 Elts.push_back(Region); 12403 Visit(*I); 12404 } 12405 12406 // Forget that the initializers are sequenced. 12407 Region = Parent; 12408 for (unsigned I = 0; I < Elts.size(); ++I) 12409 Tree.merge(Elts[I]); 12410 } 12411 12412 void VisitInitListExpr(InitListExpr *ILE) { 12413 if (!SemaRef.getLangOpts().CPlusPlus11) 12414 return VisitExpr(ILE); 12415 12416 // In C++11, list initializations are sequenced. 12417 SmallVector<SequenceTree::Seq, 32> Elts; 12418 SequenceTree::Seq Parent = Region; 12419 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 12420 Expr *E = ILE->getInit(I); 12421 if (!E) continue; 12422 Region = Tree.allocate(Parent); 12423 Elts.push_back(Region); 12424 Visit(E); 12425 } 12426 12427 // Forget that the initializers are sequenced. 
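    // For illustration only (hypothetical user code): "int a[2] = { i++, i++ };"
    // is well-defined in C++11 because the element initializers are sequenced
    // with one another, which is why each was visited in its own region above.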
12428 Region = Parent; 12429 for (unsigned I = 0; I < Elts.size(); ++I) 12430 Tree.merge(Elts[I]); 12431 } 12432 }; 12433 12434 } // namespace 12435 12436 void Sema::CheckUnsequencedOperations(Expr *E) { 12437 SmallVector<Expr *, 8> WorkList; 12438 WorkList.push_back(E); 12439 while (!WorkList.empty()) { 12440 Expr *Item = WorkList.pop_back_val(); 12441 SequenceChecker(*this, Item, WorkList); 12442 } 12443 } 12444 12445 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 12446 bool IsConstexpr) { 12447 CheckImplicitConversions(E, CheckLoc); 12448 if (!E->isInstantiationDependent()) 12449 CheckUnsequencedOperations(E); 12450 if (!IsConstexpr && !E->isValueDependent()) 12451 CheckForIntOverflow(E); 12452 DiagnoseMisalignedMembers(); 12453 } 12454 12455 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 12456 FieldDecl *BitField, 12457 Expr *Init) { 12458 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 12459 } 12460 12461 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 12462 SourceLocation Loc) { 12463 if (!PType->isVariablyModifiedType()) 12464 return; 12465 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 12466 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 12467 return; 12468 } 12469 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 12470 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 12471 return; 12472 } 12473 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 12474 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 12475 return; 12476 } 12477 12478 const ArrayType *AT = S.Context.getAsArrayType(PType); 12479 if (!AT) 12480 return; 12481 12482 if (AT->getSizeModifier() != ArrayType::Star) { 12483 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 12484 return; 12485 } 12486 12487 S.Diag(Loc, diag::err_array_star_in_function_definition); 12488 } 12489 12490 /// CheckParmsForFunctionDef - Check that the parameters of the given 12491 /// function are appropriate for the definition of a function. This 12492 /// takes care of any checks that cannot be performed on the 12493 /// declaration itself, e.g., that the types of each of the function 12494 /// parameters are complete. 12495 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 12496 bool CheckParameterNames) { 12497 bool HasInvalidParm = false; 12498 for (ParmVarDecl *Param : Parameters) { 12499 // C99 6.7.5.3p4: the parameters in a parameter type list in a 12500 // function declarator that is part of a function definition of 12501 // that function shall not have incomplete type. 12502 // 12503 // This is also C++ [dcl.fct]p6. 12504 if (!Param->isInvalidDecl() && 12505 RequireCompleteType(Param->getLocation(), Param->getType(), 12506 diag::err_typecheck_decl_incomplete_type)) { 12507 Param->setInvalidDecl(); 12508 HasInvalidParm = true; 12509 } 12510 12511 // C99 6.9.1p5: If the declarator includes a parameter type list, the 12512 // declaration of each parameter shall include an identifier. 
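    // For illustration only (hypothetical user code, C99):
    //   void f(int) { }    // error: parameter name omitted in a definition
    //   void f(int);       // fine: a declaration may omit the name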
12513 if (CheckParameterNames && 12514 Param->getIdentifier() == nullptr && 12515 !Param->isImplicit() && 12516 !getLangOpts().CPlusPlus) 12517 Diag(Param->getLocation(), diag::err_parameter_name_omitted); 12518 12519 // C99 6.7.5.3p12: 12520 // If the function declarator is not part of a definition of that 12521 // function, parameters may have incomplete type and may use the [*] 12522 // notation in their sequences of declarator specifiers to specify 12523 // variable length array types. 12524 QualType PType = Param->getOriginalType(); 12525 // FIXME: This diagnostic should point the '[*]' if source-location 12526 // information is added for it. 12527 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 12528 12529 // If the parameter is a c++ class type and it has to be destructed in the 12530 // callee function, declare the destructor so that it can be called by the 12531 // callee function. Do not perform any direct access check on the dtor here. 12532 if (!Param->isInvalidDecl()) { 12533 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 12534 if (!ClassDecl->isInvalidDecl() && 12535 !ClassDecl->hasIrrelevantDestructor() && 12536 !ClassDecl->isDependentContext() && 12537 ClassDecl->isParamDestroyedInCallee()) { 12538 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 12539 MarkFunctionReferenced(Param->getLocation(), Destructor); 12540 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 12541 } 12542 } 12543 } 12544 12545 // Parameters with the pass_object_size attribute only need to be marked 12546 // constant at function definitions. Because we lack information about 12547 // whether we're on a declaration or definition when we're instantiating the 12548 // attribute, we need to check for constness here. 12549 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 12550 if (!Param->getType().isConstQualified()) 12551 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 12552 << Attr->getSpelling() << 1; 12553 12554 // Check for parameter names shadowing fields from the class. 12555 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 12556 // The owning context for the parameter should be the function, but we 12557 // want to see if this function's declaration context is a record. 12558 DeclContext *DC = Param->getDeclContext(); 12559 if (DC && DC->isFunctionOrMethod()) { 12560 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 12561 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 12562 RD, /*DeclIsField*/ false); 12563 } 12564 } 12565 } 12566 12567 return HasInvalidParm; 12568 } 12569 12570 /// A helper function to get the alignment of a Decl referred to by DeclRefExpr 12571 /// or MemberExpr. 12572 static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign, 12573 ASTContext &Context) { 12574 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) 12575 return Context.getDeclAlign(DRE->getDecl()); 12576 12577 if (const auto *ME = dyn_cast<MemberExpr>(E)) 12578 return Context.getDeclAlign(ME->getMemberDecl()); 12579 12580 return TypeAlign; 12581 } 12582 12583 /// CheckCastAlign - Implements -Wcast-align, which warns when a 12584 /// pointer cast increases the alignment requirements. 12585 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 12586 // This is actually a lot of work to potentially be doing on every 12587 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 
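  // For illustration only (hypothetical user code, on a typical target where
  // 'int' requires 4-byte alignment):
  //   char buf[16];
  //   int *p = (int *)buf;   // -Wcast-align: required alignment grows 1 -> 4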
12588 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 12589 return; 12590 12591 // Ignore dependent types. 12592 if (T->isDependentType() || Op->getType()->isDependentType()) 12593 return; 12594 12595 // Require that the destination be a pointer type. 12596 const PointerType *DestPtr = T->getAs<PointerType>(); 12597 if (!DestPtr) return; 12598 12599 // If the destination has alignment 1, we're done. 12600 QualType DestPointee = DestPtr->getPointeeType(); 12601 if (DestPointee->isIncompleteType()) return; 12602 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 12603 if (DestAlign.isOne()) return; 12604 12605 // Require that the source be a pointer type. 12606 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 12607 if (!SrcPtr) return; 12608 QualType SrcPointee = SrcPtr->getPointeeType(); 12609 12610 // Whitelist casts from cv void*. We already implicitly 12611 // whitelisted casts to cv void*, since they have alignment 1. 12612 // Also whitelist casts involving incomplete types, which implicitly 12613 // includes 'void'. 12614 if (SrcPointee->isIncompleteType()) return; 12615 12616 CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee); 12617 12618 if (auto *CE = dyn_cast<CastExpr>(Op)) { 12619 if (CE->getCastKind() == CK_ArrayToPointerDecay) 12620 SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context); 12621 } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) { 12622 if (UO->getOpcode() == UO_AddrOf) 12623 SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context); 12624 } 12625 12626 if (SrcAlign >= DestAlign) return; 12627 12628 Diag(TRange.getBegin(), diag::warn_cast_align) 12629 << Op->getType() << T 12630 << static_cast<unsigned>(SrcAlign.getQuantity()) 12631 << static_cast<unsigned>(DestAlign.getQuantity()) 12632 << TRange << Op->getSourceRange(); 12633 } 12634 12635 /// Check whether this array fits the idiom of a size-one tail padded 12636 /// array member of a struct. 12637 /// 12638 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 12639 /// commonly used to emulate flexible arrays in C89 code. 12640 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 12641 const NamedDecl *ND) { 12642 if (Size != 1 || !ND) return false; 12643 12644 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 12645 if (!FD) return false; 12646 12647 // Don't consider sizes resulting from macro expansions or template argument 12648 // substitution to form C89 tail-padded arrays. 12649 12650 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 12651 while (TInfo) { 12652 TypeLoc TL = TInfo->getTypeLoc(); 12653 // Look through typedefs. 12654 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 12655 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 12656 TInfo = TDL->getTypeSourceInfo(); 12657 continue; 12658 } 12659 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 12660 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 12661 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 12662 return false; 12663 } 12664 break; 12665 } 12666 12667 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 12668 if (!RD) return false; 12669 if (RD->isUnion()) return false; 12670 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 12671 if (!CRD->isStandardLayout()) return false; 12672 } 12673 12674 // See if this is the last field decl in the record. 
12675 const Decl *D = FD; 12676 while ((D = D->getNextDeclInContext())) 12677 if (isa<FieldDecl>(D)) 12678 return false; 12679 return true; 12680 } 12681 12682 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 12683 const ArraySubscriptExpr *ASE, 12684 bool AllowOnePastEnd, bool IndexNegated) { 12685 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 12686 if (IndexExpr->isValueDependent()) 12687 return; 12688 12689 const Type *EffectiveType = 12690 BaseExpr->getType()->getPointeeOrArrayElementType(); 12691 BaseExpr = BaseExpr->IgnoreParenCasts(); 12692 const ConstantArrayType *ArrayTy = 12693 Context.getAsConstantArrayType(BaseExpr->getType()); 12694 12695 if (!ArrayTy) 12696 return; 12697 12698 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 12699 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 12700 return; 12701 12702 Expr::EvalResult Result; 12703 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 12704 return; 12705 12706 llvm::APSInt index = Result.Val.getInt(); 12707 if (IndexNegated) 12708 index = -index; 12709 12710 const NamedDecl *ND = nullptr; 12711 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 12712 ND = DRE->getDecl(); 12713 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 12714 ND = ME->getMemberDecl(); 12715 12716 if (index.isUnsigned() || !index.isNegative()) { 12717 // It is possible that the type of the base expression after 12718 // IgnoreParenCasts is incomplete, even though the type of the base 12719 // expression before IgnoreParenCasts is complete (see PR39746 for an 12720 // example). In this case we have no information about whether the array 12721 // access exceeds the array bounds. However we can still diagnose an array 12722 // access which precedes the array bounds. 12723 if (BaseType->isIncompleteType()) 12724 return; 12725 12726 llvm::APInt size = ArrayTy->getSize(); 12727 if (!size.isStrictlyPositive()) 12728 return; 12729 12730 if (BaseType != EffectiveType) { 12731 // Make sure we're comparing apples to apples when comparing index to size 12732 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 12733 uint64_t array_typesize = Context.getTypeSize(BaseType); 12734 // Handle ptrarith_typesize being zero, such as when casting to void* 12735 if (!ptrarith_typesize) ptrarith_typesize = 1; 12736 if (ptrarith_typesize != array_typesize) { 12737 // There's a cast to a different size type involved 12738 uint64_t ratio = array_typesize / ptrarith_typesize; 12739 // TODO: Be smarter about handling cases where array_typesize is not a 12740 // multiple of ptrarith_typesize 12741 if (ptrarith_typesize * ratio == array_typesize) 12742 size *= llvm::APInt(size.getBitWidth(), ratio); 12743 } 12744 } 12745 12746 if (size.getBitWidth() > index.getBitWidth()) 12747 index = index.zext(size.getBitWidth()); 12748 else if (size.getBitWidth() < index.getBitWidth()) 12749 size = size.zext(index.getBitWidth()); 12750 12751 // For array subscripting the index must be less than size, but for pointer 12752 // arithmetic also allow the index (offset) to be equal to size since 12753 // computing the next address after the end of the array is legal and 12754 // commonly done e.g. in C++ iterators and range-based for loops. 12755 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 12756 return; 12757 12758 // Also don't warn for arrays of size 1 which are members of some 12759 // structure. These are often used to approximate flexible arrays in C89 12760 // code. 
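    // For illustration only (hypothetical user code), the C89 idiom referred to
    // above:
    //   struct Packet { unsigned Len; char Data[1]; };   // 'Data' is indexed
    //                                                    // past element 0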
12761 if (IsTailPaddedMemberArray(*this, size, ND)) 12762 return; 12763 12764 // Suppress the warning if the subscript expression (as identified by the 12765 // ']' location) and the index expression are both from macro expansions 12766 // within a system header. 12767 if (ASE) { 12768 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 12769 ASE->getRBracketLoc()); 12770 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 12771 SourceLocation IndexLoc = 12772 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 12773 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 12774 return; 12775 } 12776 } 12777 12778 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 12779 if (ASE) 12780 DiagID = diag::warn_array_index_exceeds_bounds; 12781 12782 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 12783 PDiag(DiagID) << index.toString(10, true) 12784 << size.toString(10, true) 12785 << (unsigned)size.getLimitedValue(~0U) 12786 << IndexExpr->getSourceRange()); 12787 } else { 12788 unsigned DiagID = diag::warn_array_index_precedes_bounds; 12789 if (!ASE) { 12790 DiagID = diag::warn_ptr_arith_precedes_bounds; 12791 if (index.isNegative()) index = -index; 12792 } 12793 12794 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 12795 PDiag(DiagID) << index.toString(10, true) 12796 << IndexExpr->getSourceRange()); 12797 } 12798 12799 if (!ND) { 12800 // Try harder to find a NamedDecl to point at in the note. 12801 while (const ArraySubscriptExpr *ASE = 12802 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 12803 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 12804 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 12805 ND = DRE->getDecl(); 12806 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 12807 ND = ME->getMemberDecl(); 12808 } 12809 12810 if (ND) 12811 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 12812 PDiag(diag::note_array_index_out_of_bounds) 12813 << ND->getDeclName()); 12814 } 12815 12816 void Sema::CheckArrayAccess(const Expr *expr) { 12817 int AllowOnePastEnd = 0; 12818 while (expr) { 12819 expr = expr->IgnoreParenImpCasts(); 12820 switch (expr->getStmtClass()) { 12821 case Stmt::ArraySubscriptExprClass: { 12822 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 12823 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 12824 AllowOnePastEnd > 0); 12825 expr = ASE->getBase(); 12826 break; 12827 } 12828 case Stmt::MemberExprClass: { 12829 expr = cast<MemberExpr>(expr)->getBase(); 12830 break; 12831 } 12832 case Stmt::OMPArraySectionExprClass: { 12833 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 12834 if (ASE->getLowerBound()) 12835 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 12836 /*ASE=*/nullptr, AllowOnePastEnd > 0); 12837 return; 12838 } 12839 case Stmt::UnaryOperatorClass: { 12840 // Only unwrap the * and & unary operators 12841 const UnaryOperator *UO = cast<UnaryOperator>(expr); 12842 expr = UO->getSubExpr(); 12843 switch (UO->getOpcode()) { 12844 case UO_AddrOf: 12845 AllowOnePastEnd++; 12846 break; 12847 case UO_Deref: 12848 AllowOnePastEnd--; 12849 break; 12850 default: 12851 return; 12852 } 12853 break; 12854 } 12855 case Stmt::ConditionalOperatorClass: { 12856 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 12857 if (const Expr *lhs = cond->getLHS()) 12858 CheckArrayAccess(lhs); 12859 if (const Expr *rhs = cond->getRHS()) 12860 CheckArrayAccess(rhs); 12861 return; 12862 } 12863 case Stmt::CXXOperatorCallExprClass: { 12864 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 12865 for (const 
auto *Arg : OCE->arguments()) 12866 CheckArrayAccess(Arg); 12867 return; 12868 } 12869 default: 12870 return; 12871 } 12872 } 12873 } 12874 12875 //===--- CHECK: Objective-C retain cycles ----------------------------------// 12876 12877 namespace { 12878 12879 struct RetainCycleOwner { 12880 VarDecl *Variable = nullptr; 12881 SourceRange Range; 12882 SourceLocation Loc; 12883 bool Indirect = false; 12884 12885 RetainCycleOwner() = default; 12886 12887 void setLocsFrom(Expr *e) { 12888 Loc = e->getExprLoc(); 12889 Range = e->getSourceRange(); 12890 } 12891 }; 12892 12893 } // namespace 12894 12895 /// Consider whether capturing the given variable can possibly lead to 12896 /// a retain cycle. 12897 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 12898 // In ARC, it's captured strongly iff the variable has __strong 12899 // lifetime. In MRR, it's captured strongly if the variable is 12900 // __block and has an appropriate type. 12901 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 12902 return false; 12903 12904 owner.Variable = var; 12905 if (ref) 12906 owner.setLocsFrom(ref); 12907 return true; 12908 } 12909 12910 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 12911 while (true) { 12912 e = e->IgnoreParens(); 12913 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 12914 switch (cast->getCastKind()) { 12915 case CK_BitCast: 12916 case CK_LValueBitCast: 12917 case CK_LValueToRValue: 12918 case CK_ARCReclaimReturnedObject: 12919 e = cast->getSubExpr(); 12920 continue; 12921 12922 default: 12923 return false; 12924 } 12925 } 12926 12927 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 12928 ObjCIvarDecl *ivar = ref->getDecl(); 12929 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 12930 return false; 12931 12932 // Try to find a retain cycle in the base. 12933 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 12934 return false; 12935 12936 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 12937 owner.Indirect = true; 12938 return true; 12939 } 12940 12941 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 12942 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 12943 if (!var) return false; 12944 return considerVariable(var, ref, owner); 12945 } 12946 12947 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 12948 if (member->isArrow()) return false; 12949 12950 // Don't count this as an indirect ownership. 12951 e = member->getBase(); 12952 continue; 12953 } 12954 12955 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 12956 // Only pay attention to pseudo-objects on property references. 12957 ObjCPropertyRefExpr *pre 12958 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 12959 ->IgnoreParens()); 12960 if (!pre) return false; 12961 if (pre->isImplicitProperty()) return false; 12962 ObjCPropertyDecl *property = pre->getExplicitProperty(); 12963 if (!property->isRetaining() && 12964 !(property->getPropertyIvarDecl() && 12965 property->getPropertyIvarDecl()->getType() 12966 .getObjCLifetime() == Qualifiers::OCL_Strong)) 12967 return false; 12968 12969 owner.Indirect = true; 12970 if (pre->isSuperReceiver()) { 12971 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 12972 if (!owner.Variable) 12973 return false; 12974 owner.Loc = pre->getLocation(); 12975 owner.Range = pre->getSourceRange(); 12976 return true; 12977 } 12978 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 12979 ->getSourceExpr()); 12980 continue; 12981 } 12982 12983 // Array ivars? 
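    // For illustration only (hypothetical user code), the kind of cycle this
    // owner search ultimately diagnoses:
    //   self.handler = ^{ [self fire]; };   // block strongly captures 'self'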
12984 12985 return false; 12986 } 12987 } 12988 12989 namespace { 12990 12991 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 12992 ASTContext &Context; 12993 VarDecl *Variable; 12994 Expr *Capturer = nullptr; 12995 bool VarWillBeReased = false; 12996 12997 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 12998 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 12999 Context(Context), Variable(variable) {} 13000 13001 void VisitDeclRefExpr(DeclRefExpr *ref) { 13002 if (ref->getDecl() == Variable && !Capturer) 13003 Capturer = ref; 13004 } 13005 13006 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 13007 if (Capturer) return; 13008 Visit(ref->getBase()); 13009 if (Capturer && ref->isFreeIvar()) 13010 Capturer = ref; 13011 } 13012 13013 void VisitBlockExpr(BlockExpr *block) { 13014 // Look inside nested blocks 13015 if (block->getBlockDecl()->capturesVariable(Variable)) 13016 Visit(block->getBlockDecl()->getBody()); 13017 } 13018 13019 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 13020 if (Capturer) return; 13021 if (OVE->getSourceExpr()) 13022 Visit(OVE->getSourceExpr()); 13023 } 13024 13025 void VisitBinaryOperator(BinaryOperator *BinOp) { 13026 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 13027 return; 13028 Expr *LHS = BinOp->getLHS(); 13029 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 13030 if (DRE->getDecl() != Variable) 13031 return; 13032 if (Expr *RHS = BinOp->getRHS()) { 13033 RHS = RHS->IgnoreParenCasts(); 13034 llvm::APSInt Value; 13035 VarWillBeReased = 13036 (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0); 13037 } 13038 } 13039 } 13040 }; 13041 13042 } // namespace 13043 13044 /// Check whether the given argument is a block which captures a 13045 /// variable. 13046 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 13047 assert(owner.Variable && owner.Loc.isValid()); 13048 13049 e = e->IgnoreParenCasts(); 13050 13051 // Look through [^{...} copy] and Block_copy(^{...}). 13052 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 13053 Selector Cmd = ME->getSelector(); 13054 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 13055 e = ME->getInstanceReceiver(); 13056 if (!e) 13057 return nullptr; 13058 e = e->IgnoreParenCasts(); 13059 } 13060 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 13061 if (CE->getNumArgs() == 1) { 13062 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 13063 if (Fn) { 13064 const IdentifierInfo *FnI = Fn->getIdentifier(); 13065 if (FnI && FnI->isStr("_Block_copy")) { 13066 e = CE->getArg(0)->IgnoreParenCasts(); 13067 } 13068 } 13069 } 13070 } 13071 13072 BlockExpr *block = dyn_cast<BlockExpr>(e); 13073 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 13074 return nullptr; 13075 13076 FindCaptureVisitor visitor(S.Context, owner.Variable); 13077 visitor.Visit(block->getBlockDecl()->getBody()); 13078 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 13079 } 13080 13081 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 13082 RetainCycleOwner &owner) { 13083 assert(capturer); 13084 assert(owner.Variable && owner.Loc.isValid()); 13085 13086 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 13087 << owner.Variable << capturer->getSourceRange(); 13088 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 13089 << owner.Indirect << owner.Range; 13090 } 13091 13092 /// Check for a keyword selector that starts with the word 'add' or 13093 /// 'set'. 13094 static bool isSetterLikeSelector(Selector sel) { 13095 if (sel.isUnarySelector()) return false; 13096 13097 StringRef str = sel.getNameForSlot(0); 13098 while (!str.empty() && str.front() == '_') str = str.substr(1); 13099 if (str.startswith("set")) 13100 str = str.substr(3); 13101 else if (str.startswith("add")) { 13102 // Specially whitelist 'addOperationWithBlock:'. 13103 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 13104 return false; 13105 str = str.substr(3); 13106 } 13107 else 13108 return false; 13109 13110 if (str.empty()) return true; 13111 return !isLowercase(str.front()); 13112 } 13113 13114 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 13115 ObjCMessageExpr *Message) { 13116 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 13117 Message->getReceiverInterface(), 13118 NSAPI::ClassId_NSMutableArray); 13119 if (!IsMutableArray) { 13120 return None; 13121 } 13122 13123 Selector Sel = Message->getSelector(); 13124 13125 Optional<NSAPI::NSArrayMethodKind> MKOpt = 13126 S.NSAPIObj->getNSArrayMethodKind(Sel); 13127 if (!MKOpt) { 13128 return None; 13129 } 13130 13131 NSAPI::NSArrayMethodKind MK = *MKOpt; 13132 13133 switch (MK) { 13134 case NSAPI::NSMutableArr_addObject: 13135 case NSAPI::NSMutableArr_insertObjectAtIndex: 13136 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 13137 return 0; 13138 case NSAPI::NSMutableArr_replaceObjectAtIndex: 13139 return 1; 13140 13141 default: 13142 return None; 13143 } 13144 13145 return None; 13146 } 13147 13148 static 13149 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 13150 ObjCMessageExpr *Message) { 13151 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 13152 Message->getReceiverInterface(), 13153 NSAPI::ClassId_NSMutableDictionary); 13154 if (!IsMutableDictionary) { 13155 return None; 13156 } 13157 13158 Selector Sel = Message->getSelector(); 13159 13160 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 13161 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 13162 if (!MKOpt) { 13163 return None; 13164 } 13165 13166 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 13167 13168 switch (MK) { 13169 case NSAPI::NSMutableDict_setObjectForKey: 13170 case NSAPI::NSMutableDict_setValueForKey: 13171 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 13172 return 0; 13173 13174 default: 13175 return None; 13176 } 13177 13178 return None; 13179 } 13180 13181 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 13182 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 13183 Message->getReceiverInterface(), 13184 NSAPI::ClassId_NSMutableSet); 13185 13186 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 13187 Message->getReceiverInterface(), 13188 NSAPI::ClassId_NSMutableOrderedSet); 13189 if (!IsMutableSet && !IsMutableOrderedSet) { 13190 return None; 13191 } 13192 13193 Selector Sel = Message->getSelector(); 13194 13195 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 13196 if 
(!MKOpt) { 13197 return None; 13198 } 13199 13200 NSAPI::NSSetMethodKind MK = *MKOpt; 13201 13202 switch (MK) { 13203 case NSAPI::NSMutableSet_addObject: 13204 case NSAPI::NSOrderedSet_setObjectAtIndex: 13205 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 13206 case NSAPI::NSOrderedSet_insertObjectAtIndex: 13207 return 0; 13208 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 13209 return 1; 13210 } 13211 13212 return None; 13213 } 13214 13215 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 13216 if (!Message->isInstanceMessage()) { 13217 return; 13218 } 13219 13220 Optional<int> ArgOpt; 13221 13222 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 13223 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 13224 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 13225 return; 13226 } 13227 13228 int ArgIndex = *ArgOpt; 13229 13230 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 13231 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 13232 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 13233 } 13234 13235 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 13236 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 13237 if (ArgRE->isObjCSelfExpr()) { 13238 Diag(Message->getSourceRange().getBegin(), 13239 diag::warn_objc_circular_container) 13240 << ArgRE->getDecl() << StringRef("'super'"); 13241 } 13242 } 13243 } else { 13244 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 13245 13246 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 13247 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 13248 } 13249 13250 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 13251 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 13252 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 13253 ValueDecl *Decl = ReceiverRE->getDecl(); 13254 Diag(Message->getSourceRange().getBegin(), 13255 diag::warn_objc_circular_container) 13256 << Decl << Decl; 13257 if (!ArgRE->isObjCSelfExpr()) { 13258 Diag(Decl->getLocation(), 13259 diag::note_objc_circular_container_declared_here) 13260 << Decl; 13261 } 13262 } 13263 } 13264 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 13265 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 13266 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 13267 ObjCIvarDecl *Decl = IvarRE->getDecl(); 13268 Diag(Message->getSourceRange().getBegin(), 13269 diag::warn_objc_circular_container) 13270 << Decl << Decl; 13271 Diag(Decl->getLocation(), 13272 diag::note_objc_circular_container_declared_here) 13273 << Decl; 13274 } 13275 } 13276 } 13277 } 13278 } 13279 13280 /// Check a message send to see if it's likely to cause a retain cycle. 13281 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 13282 // Only check instance methods whose selector looks like a setter. 13283 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 13284 return; 13285 13286 // Try to find a variable that the receiver is strongly owned by. 
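  // For example (an illustrative sketch, not code from this translation unit):
  //
  //   [self setCompletionHandler:^{ [self doWork]; }];
  //
  // Here the receiver 'self' is the strongly-owned variable and the block
  // argument strongly captures it, which is the shape of cycle this check
  // looks for.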
13287 RetainCycleOwner owner; 13288 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 13289 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 13290 return; 13291 } else { 13292 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 13293 owner.Variable = getCurMethodDecl()->getSelfDecl(); 13294 owner.Loc = msg->getSuperLoc(); 13295 owner.Range = msg->getSuperLoc(); 13296 } 13297 13298 // Check whether the receiver is captured by any of the arguments. 13299 const ObjCMethodDecl *MD = msg->getMethodDecl(); 13300 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 13301 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 13302 // noescape blocks should not be retained by the method. 13303 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 13304 continue; 13305 return diagnoseRetainCycle(*this, capturer, owner); 13306 } 13307 } 13308 } 13309 13310 /// Check a property assign to see if it's likely to cause a retain cycle. 13311 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 13312 RetainCycleOwner owner; 13313 if (!findRetainCycleOwner(*this, receiver, owner)) 13314 return; 13315 13316 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 13317 diagnoseRetainCycle(*this, capturer, owner); 13318 } 13319 13320 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 13321 RetainCycleOwner Owner; 13322 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 13323 return; 13324 13325 // Because we don't have an expression for the variable, we have to set the 13326 // location explicitly here. 13327 Owner.Loc = Var->getLocation(); 13328 Owner.Range = Var->getSourceRange(); 13329 13330 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 13331 diagnoseRetainCycle(*this, Capturer, Owner); 13332 } 13333 13334 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 13335 Expr *RHS, bool isProperty) { 13336 // Check if RHS is an Objective-C object literal, which also can get 13337 // immediately zapped in a weak reference. Note that we explicitly 13338 // allow ObjCStringLiterals, since those are designed to never really die. 13339 RHS = RHS->IgnoreParenImpCasts(); 13340 13341 // This enum needs to match with the 'select' in 13342 // warn_objc_arc_literal_assign (off-by-1). 13343 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 13344 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 13345 return false; 13346 13347 S.Diag(Loc, diag::warn_arc_literal_assign) 13348 << (unsigned) Kind 13349 << (isProperty ? 0 : 1) 13350 << RHS->getSourceRange(); 13351 13352 return true; 13353 } 13354 13355 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 13356 Qualifiers::ObjCLifetime LT, 13357 Expr *RHS, bool isProperty) { 13358 // Strip off any implicit cast added to get to the one ARC-specific. 13359 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 13360 if (cast->getCastKind() == CK_ARCConsumeObject) { 13361 S.Diag(Loc, diag::warn_arc_retained_assign) 13362 << (LT == Qualifiers::OCL_ExplicitNone) 13363 << (isProperty ? 
0 : 1) 13364 << RHS->getSourceRange(); 13365 return true; 13366 } 13367 RHS = cast->getSubExpr(); 13368 } 13369 13370 if (LT == Qualifiers::OCL_Weak && 13371 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 13372 return true; 13373 13374 return false; 13375 } 13376 13377 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 13378 QualType LHS, Expr *RHS) { 13379 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 13380 13381 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 13382 return false; 13383 13384 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 13385 return true; 13386 13387 return false; 13388 } 13389 13390 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 13391 Expr *LHS, Expr *RHS) { 13392 QualType LHSType; 13393 // PropertyRef on LHS type need be directly obtained from 13394 // its declaration as it has a PseudoType. 13395 ObjCPropertyRefExpr *PRE 13396 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 13397 if (PRE && !PRE->isImplicitProperty()) { 13398 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 13399 if (PD) 13400 LHSType = PD->getType(); 13401 } 13402 13403 if (LHSType.isNull()) 13404 LHSType = LHS->getType(); 13405 13406 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 13407 13408 if (LT == Qualifiers::OCL_Weak) { 13409 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 13410 getCurFunction()->markSafeWeakUse(LHS); 13411 } 13412 13413 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 13414 return; 13415 13416 // FIXME. Check for other life times. 13417 if (LT != Qualifiers::OCL_None) 13418 return; 13419 13420 if (PRE) { 13421 if (PRE->isImplicitProperty()) 13422 return; 13423 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 13424 if (!PD) 13425 return; 13426 13427 unsigned Attributes = PD->getPropertyAttributes(); 13428 if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) { 13429 // when 'assign' attribute was not explicitly specified 13430 // by user, ignore it and rely on property type itself 13431 // for lifetime info. 13432 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 13433 if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) && 13434 LHSType->isObjCRetainableType()) 13435 return; 13436 13437 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 13438 if (cast->getCastKind() == CK_ARCConsumeObject) { 13439 Diag(Loc, diag::warn_arc_retained_property_assign) 13440 << RHS->getSourceRange(); 13441 return; 13442 } 13443 RHS = cast->getSubExpr(); 13444 } 13445 } 13446 else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) { 13447 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 13448 return; 13449 } 13450 } 13451 } 13452 13453 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 13454 13455 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 13456 SourceLocation StmtLoc, 13457 const NullStmt *Body) { 13458 // Do not warn if the body is a macro that expands to nothing, e.g: 13459 // 13460 // #define CALL(x) 13461 // if (condition) 13462 // CALL(0); 13463 if (Body->hasLeadingEmptyMacro()) 13464 return false; 13465 13466 // Get line numbers of statement and body. 
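  // They are compared below: the warning only fires when the null statement
  // ends up on the same line as the statement it belongs to, e.g.
  // (illustrative):
  //
  //   if (cond);   // ';' on the same line -> diagnosed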
13467 bool StmtLineInvalid; 13468 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 13469 &StmtLineInvalid); 13470 if (StmtLineInvalid) 13471 return false; 13472 13473 bool BodyLineInvalid; 13474 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 13475 &BodyLineInvalid); 13476 if (BodyLineInvalid) 13477 return false; 13478 13479 // Warn if null statement and body are on the same line. 13480 if (StmtLine != BodyLine) 13481 return false; 13482 13483 return true; 13484 } 13485 13486 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 13487 const Stmt *Body, 13488 unsigned DiagID) { 13489 // Since this is a syntactic check, don't emit diagnostic for template 13490 // instantiations, this just adds noise. 13491 if (CurrentInstantiationScope) 13492 return; 13493 13494 // The body should be a null statement. 13495 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 13496 if (!NBody) 13497 return; 13498 13499 // Do the usual checks. 13500 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 13501 return; 13502 13503 Diag(NBody->getSemiLoc(), DiagID); 13504 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 13505 } 13506 13507 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 13508 const Stmt *PossibleBody) { 13509 assert(!CurrentInstantiationScope); // Ensured by caller 13510 13511 SourceLocation StmtLoc; 13512 const Stmt *Body; 13513 unsigned DiagID; 13514 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 13515 StmtLoc = FS->getRParenLoc(); 13516 Body = FS->getBody(); 13517 DiagID = diag::warn_empty_for_body; 13518 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 13519 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 13520 Body = WS->getBody(); 13521 DiagID = diag::warn_empty_while_body; 13522 } else 13523 return; // Neither `for' nor `while'. 13524 13525 // The body should be a null statement. 13526 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 13527 if (!NBody) 13528 return; 13529 13530 // Skip expensive checks if diagnostic is disabled. 13531 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 13532 return; 13533 13534 // Do the usual checks. 13535 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 13536 return; 13537 13538 // `for(...);' and `while(...);' are popular idioms, so in order to keep 13539 // noise level low, emit diagnostics only if for/while is followed by a 13540 // CompoundStmt, e.g.: 13541 // for (int i = 0; i < n; i++); 13542 // { 13543 // a(i); 13544 // } 13545 // or if for/while is followed by a statement with more indentation 13546 // than for/while itself: 13547 // for (int i = 0; i < n; i++); 13548 // a(i); 13549 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 13550 if (!ProbableTypo) { 13551 bool BodyColInvalid; 13552 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 13553 PossibleBody->getBeginLoc(), &BodyColInvalid); 13554 if (BodyColInvalid) 13555 return; 13556 13557 bool StmtColInvalid; 13558 unsigned StmtCol = 13559 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 13560 if (StmtColInvalid) 13561 return; 13562 13563 if (BodyCol > StmtCol) 13564 ProbableTypo = true; 13565 } 13566 13567 if (ProbableTypo) { 13568 Diag(NBody->getSemiLoc(), DiagID); 13569 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 13570 } 13571 } 13572 13573 //===--- CHECK: Warn on self move with std::move. -------------------------===// 13574 13575 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
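/// Minimal illustrative cases (hypothetical user code) of what is flagged:
///
///   v = std::move(v);              // DeclRefExpr moved onto itself
///   this->x = std::move(this->x);  // MemberExpr self-move through 'this'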
13576 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 13577 SourceLocation OpLoc) { 13578 if (Diags.isIgnored(diag::warn_self_move, OpLoc)) 13579 return; 13580 13581 if (inTemplateInstantiation()) 13582 return; 13583 13584 // Strip parens and casts away. 13585 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 13586 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 13587 13588 // Check for a call expression 13589 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 13590 if (!CE || CE->getNumArgs() != 1) 13591 return; 13592 13593 // Check for a call to std::move 13594 if (!CE->isCallToStdMove()) 13595 return; 13596 13597 // Get argument from std::move 13598 RHSExpr = CE->getArg(0); 13599 13600 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 13601 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 13602 13603 // Two DeclRefExpr's, check that the decls are the same. 13604 if (LHSDeclRef && RHSDeclRef) { 13605 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 13606 return; 13607 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 13608 RHSDeclRef->getDecl()->getCanonicalDecl()) 13609 return; 13610 13611 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 13612 << LHSExpr->getSourceRange() 13613 << RHSExpr->getSourceRange(); 13614 return; 13615 } 13616 13617 // Member variables require a different approach to check for self moves. 13618 // MemberExpr's are the same if every nested MemberExpr refers to the same 13619 // Decl and the base Expr's are DeclRefExpr's with the same Decl or 13620 // the base Expr's are CXXThisExpr's. 13621 const Expr *LHSBase = LHSExpr; 13622 const Expr *RHSBase = RHSExpr; 13623 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 13624 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 13625 if (!LHSME || !RHSME) 13626 return; 13627 13628 while (LHSME && RHSME) { 13629 if (LHSME->getMemberDecl()->getCanonicalDecl() != 13630 RHSME->getMemberDecl()->getCanonicalDecl()) 13631 return; 13632 13633 LHSBase = LHSME->getBase(); 13634 RHSBase = RHSME->getBase(); 13635 LHSME = dyn_cast<MemberExpr>(LHSBase); 13636 RHSME = dyn_cast<MemberExpr>(RHSBase); 13637 } 13638 13639 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 13640 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 13641 if (LHSDeclRef && RHSDeclRef) { 13642 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 13643 return; 13644 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 13645 RHSDeclRef->getDecl()->getCanonicalDecl()) 13646 return; 13647 13648 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 13649 << LHSExpr->getSourceRange() 13650 << RHSExpr->getSourceRange(); 13651 return; 13652 } 13653 13654 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 13655 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 13656 << LHSExpr->getSourceRange() 13657 << RHSExpr->getSourceRange(); 13658 } 13659 13660 //===--- Layout compatibility ----------------------------------------------// 13661 13662 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 13663 13664 /// Check if two enumeration types are layout-compatible. 13665 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 13666 // C++11 [dcl.enum] p8: 13667 // Two enumeration types are layout-compatible if they have the same 13668 // underlying type. 13669 return ED1->isComplete() && ED2->isComplete() && 13670 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 13671 } 13672 13673 /// Check if two fields are layout-compatible.
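/// For illustration (hypothetical fields): 'int a : 3;' and 'int b : 3;' are
/// layout-compatible, while 'int a : 3;' and 'int b : 4;' are not, since
/// bit-field widths must match.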
13674 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 13675 FieldDecl *Field2) { 13676 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 13677 return false; 13678 13679 if (Field1->isBitField() != Field2->isBitField()) 13680 return false; 13681 13682 if (Field1->isBitField()) { 13683 // Make sure that the bit-fields are the same length. 13684 unsigned Bits1 = Field1->getBitWidthValue(C); 13685 unsigned Bits2 = Field2->getBitWidthValue(C); 13686 13687 if (Bits1 != Bits2) 13688 return false; 13689 } 13690 13691 return true; 13692 } 13693 13694 /// Check if two standard-layout structs are layout-compatible. 13695 /// (C++11 [class.mem] p17) 13696 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 13697 RecordDecl *RD2) { 13698 // If both records are C++ classes, check that base classes match. 13699 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 13700 // If one of records is a CXXRecordDecl we are in C++ mode, 13701 // thus the other one is a CXXRecordDecl, too. 13702 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 13703 // Check number of base classes. 13704 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 13705 return false; 13706 13707 // Check the base classes. 13708 for (CXXRecordDecl::base_class_const_iterator 13709 Base1 = D1CXX->bases_begin(), 13710 BaseEnd1 = D1CXX->bases_end(), 13711 Base2 = D2CXX->bases_begin(); 13712 Base1 != BaseEnd1; 13713 ++Base1, ++Base2) { 13714 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 13715 return false; 13716 } 13717 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 13718 // If only RD2 is a C++ class, it should have zero base classes. 13719 if (D2CXX->getNumBases() > 0) 13720 return false; 13721 } 13722 13723 // Check the fields. 13724 RecordDecl::field_iterator Field2 = RD2->field_begin(), 13725 Field2End = RD2->field_end(), 13726 Field1 = RD1->field_begin(), 13727 Field1End = RD1->field_end(); 13728 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 13729 if (!isLayoutCompatible(C, *Field1, *Field2)) 13730 return false; 13731 } 13732 if (Field1 != Field1End || Field2 != Field2End) 13733 return false; 13734 13735 return true; 13736 } 13737 13738 /// Check if two standard-layout unions are layout-compatible. 13739 /// (C++11 [class.mem] p18) 13740 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 13741 RecordDecl *RD2) { 13742 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 13743 for (auto *Field2 : RD2->fields()) 13744 UnmatchedFields.insert(Field2); 13745 13746 for (auto *Field1 : RD1->fields()) { 13747 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 13748 I = UnmatchedFields.begin(), 13749 E = UnmatchedFields.end(); 13750 13751 for ( ; I != E; ++I) { 13752 if (isLayoutCompatible(C, Field1, *I)) { 13753 bool Result = UnmatchedFields.erase(*I); 13754 (void) Result; 13755 assert(Result); 13756 break; 13757 } 13758 } 13759 if (I == E) 13760 return false; 13761 } 13762 13763 return UnmatchedFields.empty(); 13764 } 13765 13766 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 13767 RecordDecl *RD2) { 13768 if (RD1->isUnion() != RD2->isUnion()) 13769 return false; 13770 13771 if (RD1->isUnion()) 13772 return isLayoutCompatibleUnion(C, RD1, RD2); 13773 else 13774 return isLayoutCompatibleStruct(C, RD1, RD2); 13775 } 13776 13777 /// Check if two types are layout-compatible in C++11 sense. 
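/// Illustrative (hypothetical) example: 'struct A { int i; };' and
/// 'struct B { int j; };' are layout-compatible standard-layout structs,
/// while 'struct C { int i; char c; };' is layout-compatible with neither.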
13778 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 13779 if (T1.isNull() || T2.isNull()) 13780 return false; 13781 13782 // C++11 [basic.types] p11: 13783 // If two types T1 and T2 are the same type, then T1 and T2 are 13784 // layout-compatible types. 13785 if (C.hasSameType(T1, T2)) 13786 return true; 13787 13788 T1 = T1.getCanonicalType().getUnqualifiedType(); 13789 T2 = T2.getCanonicalType().getUnqualifiedType(); 13790 13791 const Type::TypeClass TC1 = T1->getTypeClass(); 13792 const Type::TypeClass TC2 = T2->getTypeClass(); 13793 13794 if (TC1 != TC2) 13795 return false; 13796 13797 if (TC1 == Type::Enum) { 13798 return isLayoutCompatible(C, 13799 cast<EnumType>(T1)->getDecl(), 13800 cast<EnumType>(T2)->getDecl()); 13801 } else if (TC1 == Type::Record) { 13802 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 13803 return false; 13804 13805 return isLayoutCompatible(C, 13806 cast<RecordType>(T1)->getDecl(), 13807 cast<RecordType>(T2)->getDecl()); 13808 } 13809 13810 return false; 13811 } 13812 13813 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 13814 13815 /// Given a type tag expression find the type tag itself. 13816 /// 13817 /// \param TypeExpr Type tag expression, as it appears in user's code. 13818 /// 13819 /// \param VD Declaration of an identifier that appears in a type tag. 13820 /// 13821 /// \param MagicValue Type tag magic value. 13822 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 13823 const ValueDecl **VD, uint64_t *MagicValue) { 13824 while(true) { 13825 if (!TypeExpr) 13826 return false; 13827 13828 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 13829 13830 switch (TypeExpr->getStmtClass()) { 13831 case Stmt::UnaryOperatorClass: { 13832 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 13833 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 13834 TypeExpr = UO->getSubExpr(); 13835 continue; 13836 } 13837 return false; 13838 } 13839 13840 case Stmt::DeclRefExprClass: { 13841 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 13842 *VD = DRE->getDecl(); 13843 return true; 13844 } 13845 13846 case Stmt::IntegerLiteralClass: { 13847 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 13848 llvm::APInt MagicValueAPInt = IL->getValue(); 13849 if (MagicValueAPInt.getActiveBits() <= 64) { 13850 *MagicValue = MagicValueAPInt.getZExtValue(); 13851 return true; 13852 } else 13853 return false; 13854 } 13855 13856 case Stmt::BinaryConditionalOperatorClass: 13857 case Stmt::ConditionalOperatorClass: { 13858 const AbstractConditionalOperator *ACO = 13859 cast<AbstractConditionalOperator>(TypeExpr); 13860 bool Result; 13861 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx)) { 13862 if (Result) 13863 TypeExpr = ACO->getTrueExpr(); 13864 else 13865 TypeExpr = ACO->getFalseExpr(); 13866 continue; 13867 } 13868 return false; 13869 } 13870 13871 case Stmt::BinaryOperatorClass: { 13872 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 13873 if (BO->getOpcode() == BO_Comma) { 13874 TypeExpr = BO->getRHS(); 13875 continue; 13876 } 13877 return false; 13878 } 13879 13880 default: 13881 return false; 13882 } 13883 } 13884 } 13885 13886 /// Retrieve the C type corresponding to type tag TypeExpr. 13887 /// 13888 /// \param TypeExpr Expression that specifies a type tag. 13889 /// 13890 /// \param MagicValues Registered magic values. 
13891 /// 13892 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 13893 /// kind. 13894 /// 13895 /// \param TypeInfo Information about the corresponding C type. 13896 /// 13897 /// \returns true if the corresponding C type was found. 13898 static bool GetMatchingCType( 13899 const IdentifierInfo *ArgumentKind, 13900 const Expr *TypeExpr, const ASTContext &Ctx, 13901 const llvm::DenseMap<Sema::TypeTagMagicValue, 13902 Sema::TypeTagData> *MagicValues, 13903 bool &FoundWrongKind, 13904 Sema::TypeTagData &TypeInfo) { 13905 FoundWrongKind = false; 13906 13907 // Variable declaration that has type_tag_for_datatype attribute. 13908 const ValueDecl *VD = nullptr; 13909 13910 uint64_t MagicValue; 13911 13912 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue)) 13913 return false; 13914 13915 if (VD) { 13916 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 13917 if (I->getArgumentKind() != ArgumentKind) { 13918 FoundWrongKind = true; 13919 return false; 13920 } 13921 TypeInfo.Type = I->getMatchingCType(); 13922 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 13923 TypeInfo.MustBeNull = I->getMustBeNull(); 13924 return true; 13925 } 13926 return false; 13927 } 13928 13929 if (!MagicValues) 13930 return false; 13931 13932 llvm::DenseMap<Sema::TypeTagMagicValue, 13933 Sema::TypeTagData>::const_iterator I = 13934 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 13935 if (I == MagicValues->end()) 13936 return false; 13937 13938 TypeInfo = I->second; 13939 return true; 13940 } 13941 13942 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 13943 uint64_t MagicValue, QualType Type, 13944 bool LayoutCompatible, 13945 bool MustBeNull) { 13946 if (!TypeTagForDatatypeMagicValues) 13947 TypeTagForDatatypeMagicValues.reset( 13948 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 13949 13950 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 13951 (*TypeTagForDatatypeMagicValues)[Magic] = 13952 TypeTagData(Type, LayoutCompatible, MustBeNull); 13953 } 13954 13955 static bool IsSameCharType(QualType T1, QualType T2) { 13956 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 13957 if (!BT1) 13958 return false; 13959 13960 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 13961 if (!BT2) 13962 return false; 13963 13964 BuiltinType::Kind T1Kind = BT1->getKind(); 13965 BuiltinType::Kind T2Kind = BT2->getKind(); 13966 13967 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 13968 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 13969 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 13970 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 13971 } 13972 13973 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 13974 const ArrayRef<const Expr *> ExprArgs, 13975 SourceLocation CallSiteLoc) { 13976 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 13977 bool IsPointerAttr = Attr->getIsPointer(); 13978 13979 // Retrieve the argument representing the 'type_tag'. 
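  // With an attribute of the form (illustrative, following the documented
  // syntax rather than any particular header):
  //
  //   int MPI_Send(void *buf, int count, MPI_Datatype type)
  //       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
  //
  // the third source argument carries the type tag and the first is the
  // pointer argument it describes.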
13980 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 13981 if (TypeTagIdxAST >= ExprArgs.size()) { 13982 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 13983 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 13984 return; 13985 } 13986 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 13987 bool FoundWrongKind; 13988 TypeTagData TypeInfo; 13989 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 13990 TypeTagForDatatypeMagicValues.get(), 13991 FoundWrongKind, TypeInfo)) { 13992 if (FoundWrongKind) 13993 Diag(TypeTagExpr->getExprLoc(), 13994 diag::warn_type_tag_for_datatype_wrong_kind) 13995 << TypeTagExpr->getSourceRange(); 13996 return; 13997 } 13998 13999 // Retrieve the argument representing the 'arg_idx'. 14000 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 14001 if (ArgumentIdxAST >= ExprArgs.size()) { 14002 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 14003 << 1 << Attr->getArgumentIdx().getSourceIndex(); 14004 return; 14005 } 14006 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 14007 if (IsPointerAttr) { 14008 // Skip implicit cast of pointer to `void *' (as a function argument). 14009 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 14010 if (ICE->getType()->isVoidPointerType() && 14011 ICE->getCastKind() == CK_BitCast) 14012 ArgumentExpr = ICE->getSubExpr(); 14013 } 14014 QualType ArgumentType = ArgumentExpr->getType(); 14015 14016 // Passing a `void*' pointer shouldn't trigger a warning. 14017 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 14018 return; 14019 14020 if (TypeInfo.MustBeNull) { 14021 // Type tag with matching void type requires a null pointer. 14022 if (!ArgumentExpr->isNullPointerConstant(Context, 14023 Expr::NPC_ValueDependentIsNotNull)) { 14024 Diag(ArgumentExpr->getExprLoc(), 14025 diag::warn_type_safety_null_pointer_required) 14026 << ArgumentKind->getName() 14027 << ArgumentExpr->getSourceRange() 14028 << TypeTagExpr->getSourceRange(); 14029 } 14030 return; 14031 } 14032 14033 QualType RequiredType = TypeInfo.Type; 14034 if (IsPointerAttr) 14035 RequiredType = Context.getPointerType(RequiredType); 14036 14037 bool mismatch = false; 14038 if (!TypeInfo.LayoutCompatible) { 14039 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 14040 14041 // C++11 [basic.fundamental] p1: 14042 // Plain char, signed char, and unsigned char are three distinct types. 14043 // 14044 // But we treat plain `char' as equivalent to `signed char' or `unsigned 14045 // char' depending on the current char signedness mode. 
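  // For example (illustrative): when plain 'char' is signed, passing a
  // 'char *' argument where the registered type is 'signed char *' is not
  // reported as a mismatch.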
14046 if (mismatch) 14047 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 14048 RequiredType->getPointeeType())) || 14049 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 14050 mismatch = false; 14051 } else 14052 if (IsPointerAttr) 14053 mismatch = !isLayoutCompatible(Context, 14054 ArgumentType->getPointeeType(), 14055 RequiredType->getPointeeType()); 14056 else 14057 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 14058 14059 if (mismatch) 14060 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 14061 << ArgumentType << ArgumentKind 14062 << TypeInfo.LayoutCompatible << RequiredType 14063 << ArgumentExpr->getSourceRange() 14064 << TypeTagExpr->getSourceRange(); 14065 } 14066 14067 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 14068 CharUnits Alignment) { 14069 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 14070 } 14071 14072 void Sema::DiagnoseMisalignedMembers() { 14073 for (MisalignedMember &m : MisalignedMembers) { 14074 const NamedDecl *ND = m.RD; 14075 if (ND->getName().empty()) { 14076 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 14077 ND = TD; 14078 } 14079 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 14080 << m.MD << ND << m.E->getSourceRange(); 14081 } 14082 MisalignedMembers.clear(); 14083 } 14084 14085 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 14086 E = E->IgnoreParens(); 14087 if (!T->isPointerType() && !T->isIntegerType()) 14088 return; 14089 if (isa<UnaryOperator>(E) && 14090 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 14091 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 14092 if (isa<MemberExpr>(Op)) { 14093 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 14094 if (MA != MisalignedMembers.end() && 14095 (T->isIntegerType() || 14096 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 14097 Context.getTypeAlignInChars( 14098 T->getPointeeType()) <= MA->Alignment)))) 14099 MisalignedMembers.erase(MA); 14100 } 14101 } 14102 } 14103 14104 void Sema::RefersToMemberWithReducedAlignment( 14105 Expr *E, 14106 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 14107 Action) { 14108 const auto *ME = dyn_cast<MemberExpr>(E); 14109 if (!ME) 14110 return; 14111 14112 // No need to check expressions with an __unaligned-qualified type. 14113 if (E->getType().getQualifiers().hasUnaligned()) 14114 return; 14115 14116 // For a chain of MemberExpr like "a.b.c.d" this list 14117 // will keep FieldDecl's like [d, c, b]. 14118 SmallVector<FieldDecl *, 4> ReverseMemberChain; 14119 const MemberExpr *TopME = nullptr; 14120 bool AnyIsPacked = false; 14121 do { 14122 QualType BaseType = ME->getBase()->getType(); 14123 if (ME->isArrow()) 14124 BaseType = BaseType->getPointeeType(); 14125 RecordDecl *RD = BaseType->getAs<RecordType>()->getDecl(); 14126 if (RD->isInvalidDecl()) 14127 return; 14128 14129 ValueDecl *MD = ME->getMemberDecl(); 14130 auto *FD = dyn_cast<FieldDecl>(MD); 14131 // We do not care about non-data members. 
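  // (Static data members and member functions, for instance, are skipped
  // here; the test is simply "not a FieldDecl".)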
14132 if (!FD || FD->isInvalidDecl()) 14133 return; 14134 14135 AnyIsPacked = 14136 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 14137 ReverseMemberChain.push_back(FD); 14138 14139 TopME = ME; 14140 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 14141 } while (ME); 14142 assert(TopME && "We did not compute a topmost MemberExpr!"); 14143 14144 // Not the scope of this diagnostic. 14145 if (!AnyIsPacked) 14146 return; 14147 14148 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 14149 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 14150 // TODO: The innermost base of the member expression may be too complicated. 14151 // For now, just disregard these cases. This is left for future 14152 // improvement. 14153 if (!DRE && !isa<CXXThisExpr>(TopBase)) 14154 return; 14155 14156 // Alignment expected by the whole expression. 14157 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 14158 14159 // No need to do anything else with this case. 14160 if (ExpectedAlignment.isOne()) 14161 return; 14162 14163 // Synthesize offset of the whole access. 14164 CharUnits Offset; 14165 for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend(); 14166 I++) { 14167 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I)); 14168 } 14169 14170 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 14171 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 14172 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 14173 14174 // The base expression of the innermost MemberExpr may give 14175 // stronger guarantees than the class containing the member. 14176 if (DRE && !TopME->isArrow()) { 14177 const ValueDecl *VD = DRE->getDecl(); 14178 if (!VD->getType()->isReferenceType()) 14179 CompleteObjectAlignment = 14180 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 14181 } 14182 14183 // Check if the synthesized offset fulfills the alignment. 14184 if (Offset % ExpectedAlignment != 0 || 14185 // It may fulfill the offset it but the effective alignment may still be 14186 // lower than the expected expression alignment. 14187 CompleteObjectAlignment < ExpectedAlignment) { 14188 // If this happens, we want to determine a sensible culprit of this. 14189 // Intuitively, watching the chain of member expressions from right to 14190 // left, we start with the required alignment (as required by the field 14191 // type) but some packed attribute in that chain has reduced the alignment. 14192 // It may happen that another packed structure increases it again. But if 14193 // we are here such increase has not been enough. So pointing the first 14194 // FieldDecl that either is packed or else its RecordDecl is, 14195 // seems reasonable. 
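    // A minimal illustrative case (hypothetical):
    //
    //   struct __attribute__((packed)) Inner { char c; int i; };
    //   struct Outer { Inner in; } o;
    //   int *p = &o.in.i;   // 'i' sits at a misaligned offset
    //
    // Walking the chain from right to left, 'i' is the first field whose
    // parent record is packed, so it is reported as the culprit.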
14196 FieldDecl *FD = nullptr; 14197 CharUnits Alignment; 14198 for (FieldDecl *FDI : ReverseMemberChain) { 14199 if (FDI->hasAttr<PackedAttr>() || 14200 FDI->getParent()->hasAttr<PackedAttr>()) { 14201 FD = FDI; 14202 Alignment = std::min( 14203 Context.getTypeAlignInChars(FD->getType()), 14204 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 14205 break; 14206 } 14207 } 14208 assert(FD && "We did not find a packed FieldDecl!"); 14209 Action(E, FD->getParent(), FD, Alignment); 14210 } 14211 } 14212 14213 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 14214 using namespace std::placeholders; 14215 14216 RefersToMemberWithReducedAlignment( 14217 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 14218 _2, _3, _4)); 14219 } 14220