//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file implements extra semantic analysis beyond what is enforced
//  by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
56 #include "clang/Sema/Initialization.h" 57 #include "clang/Sema/Lookup.h" 58 #include "clang/Sema/Ownership.h" 59 #include "clang/Sema/Scope.h" 60 #include "clang/Sema/ScopeInfo.h" 61 #include "clang/Sema/Sema.h" 62 #include "clang/Sema/SemaInternal.h" 63 #include "llvm/ADT/APFloat.h" 64 #include "llvm/ADT/APInt.h" 65 #include "llvm/ADT/APSInt.h" 66 #include "llvm/ADT/ArrayRef.h" 67 #include "llvm/ADT/DenseMap.h" 68 #include "llvm/ADT/FoldingSet.h" 69 #include "llvm/ADT/None.h" 70 #include "llvm/ADT/Optional.h" 71 #include "llvm/ADT/STLExtras.h" 72 #include "llvm/ADT/SmallBitVector.h" 73 #include "llvm/ADT/SmallPtrSet.h" 74 #include "llvm/ADT/SmallString.h" 75 #include "llvm/ADT/SmallVector.h" 76 #include "llvm/ADT/StringRef.h" 77 #include "llvm/ADT/StringSwitch.h" 78 #include "llvm/ADT/Triple.h" 79 #include "llvm/Support/AtomicOrdering.h" 80 #include "llvm/Support/Casting.h" 81 #include "llvm/Support/Compiler.h" 82 #include "llvm/Support/ConvertUTF.h" 83 #include "llvm/Support/ErrorHandling.h" 84 #include "llvm/Support/Format.h" 85 #include "llvm/Support/Locale.h" 86 #include "llvm/Support/MathExtras.h" 87 #include "llvm/Support/SaveAndRestore.h" 88 #include "llvm/Support/raw_ostream.h" 89 #include <algorithm> 90 #include <cassert> 91 #include <cstddef> 92 #include <cstdint> 93 #include <functional> 94 #include <limits> 95 #include <string> 96 #include <tuple> 97 #include <utility> 98 99 using namespace clang; 100 using namespace sema; 101 102 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 103 unsigned ByteNo) const { 104 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 105 Context.getTargetInfo()); 106 } 107 108 /// Checks that a call expression's argument count is the desired number. 109 /// This is useful when doing custom type-checking. Returns true on error. 110 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { 111 unsigned argCount = call->getNumArgs(); 112 if (argCount == desiredArgCount) return false; 113 114 if (argCount < desiredArgCount) 115 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args) 116 << 0 /*function call*/ << desiredArgCount << argCount 117 << call->getSourceRange(); 118 119 // Highlight all the excess arguments. 120 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(), 121 call->getArg(argCount - 1)->getEndLoc()); 122 123 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) 124 << 0 /*function call*/ << desiredArgCount << argCount 125 << call->getArg(1)->getSourceRange(); 126 } 127 128 /// Check that the first argument to __builtin_annotation is an integer 129 /// and the second argument is a non-wide string literal. 130 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 131 if (checkArgCount(S, TheCall, 2)) 132 return true; 133 134 // First argument should be an integer. 135 Expr *ValArg = TheCall->getArg(0); 136 QualType Ty = ValArg->getType(); 137 if (!Ty->isIntegerType()) { 138 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 139 << ValArg->getSourceRange(); 140 return true; 141 } 142 143 // Second argument should be a constant string. 
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = TheCall->getArg(I);
    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
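  // Illustrative example of what the block below accepts: for
  //   int res; __builtin_add_overflow(a, b, &res);
  // the third argument is a pointer to a non-const integer, as required.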
  {
    ExprResult Arg = TheCall->getArg(2);
    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
          !PtrTy->getPointeeType().isConstQualified())) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(2, Arg.get());
  }
  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in the
/// source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Analyze the format string of sprintf to see how much of buffer is used.
  //  - Evaluate strlen of strcpy arguments, use as object size.
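
  // Illustrative case this check is meant to flag (assuming a 4-byte buffer):
  //   char buf[4];
  //   memcpy(buf, src, 8); // warns: 8 bytes written into a 4-byte object.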

  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the caller
  // (usually using __builtin_object_size). Use that value to check this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    const TargetInfo &TI = getASTContext().getTargetInfo();
    unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  Expr::EvalResult Result;
  Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
  if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
    return;
  llvm::APSInt UsedSize = Result.Val.getInt();

  if (UsedSize.ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.toString(/*Radix=*/10));
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size and
/// get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
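  // For example (illustrative), a block of type void (^)(local void *, local
  // void *) enqueued with trailing arguments (16, 32) needs both 16 and 32 to
  // be integer sizes.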
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args);
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases was detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe type or its access
/// qualifier does not permit the operation performed by the builtin.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
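  // For example (illustrative), a pipe declared read_only may be passed to
  // read_pipe but not to write_pipe; the switch below enforces this.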
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
//        /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
//        /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
//        Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (Call->getNumArgs() != 1) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0; // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
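  // (Illustrative: when compiling for x86, _interlockedbittestandset_acq is
  // rejected via err_builtin_target_unsupported by the check below.)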
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI_setjmp:
  case Builtin::BI_setjmpex:
    if (checkArgCount(*this, TheCall, 1))
      return true;
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct: {
    // We first want to ensure we are called with 2 arguments
    if (checkArgCount(*this, TheCall, 2))
      return ExprError();
    // Ensure that the first argument is of type 'struct XX *'
    const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
    const QualType PtrArgType = PtrArg->getType();
    if (!PtrArgType->isPointerType() ||
        !PtrArgType->getPointeeType()->isRecordType()) {
      Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
          << "structure pointer";
      return ExprError();
    }

    // Ensure that the second argument is of type 'FunctionType'
    const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
    const QualType FnPtrArgType = FnPtrArg->getType();
    if (!FnPtrArgType->isPointerType()) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    const auto *FuncType =
        FnPtrArgType->getPointeeType()->getAs<FunctionType>();

    if (!FuncType) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
      if (!FT->getNumParams()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
      QualType PT = FT->getParamType(0);
      if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
          !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
          !PT->getPointeeType().isConstQualified()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
    }

    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
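  // (Illustrative: an ARM-specific builtin is only range-checked below when
  // the target triple is one of the ARM variants.)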
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::arm:
    case llvm::Triple::armeb:
    case llvm::Triple::thumb:
    case llvm::Triple::thumbeb:
      if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::aarch64:
    case llvm::Triple::aarch64_be:
      if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::hexagon:
      if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
      if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::systemz:
      if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::ppc:
    case llvm::Triple::ppc64:
    case llvm::Triple::ppc64le:
      if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    default:
      break;
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
Context.UnsignedLongLongTy 1605 : Context.LongLongTy; 1606 case NeonTypeFlags::Poly8: 1607 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 1608 case NeonTypeFlags::Poly16: 1609 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 1610 case NeonTypeFlags::Poly64: 1611 if (IsInt64Long) 1612 return Context.UnsignedLongTy; 1613 else 1614 return Context.UnsignedLongLongTy; 1615 case NeonTypeFlags::Poly128: 1616 break; 1617 case NeonTypeFlags::Float16: 1618 return Context.HalfTy; 1619 case NeonTypeFlags::Float32: 1620 return Context.FloatTy; 1621 case NeonTypeFlags::Float64: 1622 return Context.DoubleTy; 1623 } 1624 llvm_unreachable("Invalid NeonTypeFlag!"); 1625 } 1626 1627 bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 1628 llvm::APSInt Result; 1629 uint64_t mask = 0; 1630 unsigned TV = 0; 1631 int PtrArgNum = -1; 1632 bool HasConstPtr = false; 1633 switch (BuiltinID) { 1634 #define GET_NEON_OVERLOAD_CHECK 1635 #include "clang/Basic/arm_neon.inc" 1636 #include "clang/Basic/arm_fp16.inc" 1637 #undef GET_NEON_OVERLOAD_CHECK 1638 } 1639 1640 // For NEON intrinsics which are overloaded on vector element type, validate 1641 // the immediate which specifies which variant to emit. 1642 unsigned ImmArg = TheCall->getNumArgs()-1; 1643 if (mask) { 1644 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 1645 return true; 1646 1647 TV = Result.getLimitedValue(64); 1648 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 1649 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 1650 << TheCall->getArg(ImmArg)->getSourceRange(); 1651 } 1652 1653 if (PtrArgNum >= 0) { 1654 // Check that pointer arguments have the specified type. 1655 Expr *Arg = TheCall->getArg(PtrArgNum); 1656 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 1657 Arg = ICE->getSubExpr(); 1658 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 1659 QualType RHSTy = RHS.get()->getType(); 1660 1661 llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch(); 1662 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 1663 Arch == llvm::Triple::aarch64_be; 1664 bool IsInt64Long = 1665 Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong; 1666 QualType EltTy = 1667 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 1668 if (HasConstPtr) 1669 EltTy = EltTy.withConst(); 1670 QualType LHSTy = Context.getPointerType(EltTy); 1671 AssignConvertType ConvTy; 1672 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 1673 if (RHS.isInvalid()) 1674 return true; 1675 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 1676 RHS.get(), AA_Assigning)) 1677 return true; 1678 } 1679 1680 // For NEON intrinsics which take an immediate value as part of the 1681 // instruction, range check them here. 
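// As an illustration, a lane-indexed intrinsic such as vget_lane_s32(v, 1),
// where v is an int32x2_t and the lane immediate must lie in [0, 1], is
// validated through this path: a non-constant or out-of-range lane argument
// is rejected by SemaBuiltinConstantArgRange. The concrete (i, l, u) triples
// are generated into arm_neon.inc, so this example is illustrative rather
// than exhaustive.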
1682 unsigned i = 0, l = 0, u = 0; 1683 switch (BuiltinID) { 1684 default: 1685 return false; 1686 #define GET_NEON_IMMEDIATE_CHECK 1687 #include "clang/Basic/arm_neon.inc" 1688 #include "clang/Basic/arm_fp16.inc" 1689 #undef GET_NEON_IMMEDIATE_CHECK 1690 } 1691 1692 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 1693 } 1694 1695 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 1696 unsigned MaxWidth) { 1697 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 1698 BuiltinID == ARM::BI__builtin_arm_ldaex || 1699 BuiltinID == ARM::BI__builtin_arm_strex || 1700 BuiltinID == ARM::BI__builtin_arm_stlex || 1701 BuiltinID == AArch64::BI__builtin_arm_ldrex || 1702 BuiltinID == AArch64::BI__builtin_arm_ldaex || 1703 BuiltinID == AArch64::BI__builtin_arm_strex || 1704 BuiltinID == AArch64::BI__builtin_arm_stlex) && 1705 "unexpected ARM builtin"); 1706 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 1707 BuiltinID == ARM::BI__builtin_arm_ldaex || 1708 BuiltinID == AArch64::BI__builtin_arm_ldrex || 1709 BuiltinID == AArch64::BI__builtin_arm_ldaex; 1710 1711 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 1712 1713 // Ensure that we have the proper number of arguments. 1714 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 1715 return true; 1716 1717 // Inspect the pointer argument of the atomic builtin. This should always be 1718 // a pointer type, whose element is an integral scalar or pointer type. 1719 // Because it is a pointer type, we don't have to worry about any implicit 1720 // casts here. 1721 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 1722 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 1723 if (PointerArgRes.isInvalid()) 1724 return true; 1725 PointerArg = PointerArgRes.get(); 1726 1727 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 1728 if (!pointerType) { 1729 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 1730 << PointerArg->getType() << PointerArg->getSourceRange(); 1731 return true; 1732 } 1733 1734 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 1735 // task is to insert the appropriate casts into the AST. First work out just 1736 // what the appropriate type is. 1737 QualType ValType = pointerType->getPointeeType(); 1738 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 1739 if (IsLdrex) 1740 AddrType.addConst(); 1741 1742 // Issue a warning if the cast is dodgy. 1743 CastKind CastNeeded = CK_NoOp; 1744 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 1745 CastNeeded = CK_BitCast; 1746 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 1747 << PointerArg->getType() << Context.getPointerType(AddrType) 1748 << AA_Passing << PointerArg->getSourceRange(); 1749 } 1750 1751 // Finally, do the cast and replace the argument with the corrected version. 1752 AddrType = Context.getPointerType(AddrType); 1753 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 1754 if (PointerArgRes.isInvalid()) 1755 return true; 1756 PointerArg = PointerArgRes.get(); 1757 1758 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); 1759 1760 // In general, we allow ints, floats and pointers to be loaded and stored. 
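// For example, on 32-bit ARM (MaxWidth == 64):
//   int       a = __builtin_arm_ldrex((int *)p);        // accepted: integer
//   long long b = __builtin_arm_ldrex((long long *)p);  // accepted: 64 bits is the limit
//   // __builtin_arm_ldrex((struct S *)p) is rejected: not an int, float or pointer
// These lines are an illustrative sketch (p and S are placeholder names),
// not part of the checking logic itself.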
1761 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 1762 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 1763 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 1764 << PointerArg->getType() << PointerArg->getSourceRange(); 1765 return true; 1766 } 1767 1768 // But ARM doesn't have instructions to deal with 128-bit versions. 1769 if (Context.getTypeSize(ValType) > MaxWidth) { 1770 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 1771 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 1772 << PointerArg->getType() << PointerArg->getSourceRange(); 1773 return true; 1774 } 1775 1776 switch (ValType.getObjCLifetime()) { 1777 case Qualifiers::OCL_None: 1778 case Qualifiers::OCL_ExplicitNone: 1779 // okay 1780 break; 1781 1782 case Qualifiers::OCL_Weak: 1783 case Qualifiers::OCL_Strong: 1784 case Qualifiers::OCL_Autoreleasing: 1785 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 1786 << ValType << PointerArg->getSourceRange(); 1787 return true; 1788 } 1789 1790 if (IsLdrex) { 1791 TheCall->setType(ValType); 1792 return false; 1793 } 1794 1795 // Initialize the argument to be stored. 1796 ExprResult ValArg = TheCall->getArg(0); 1797 InitializedEntity Entity = InitializedEntity::InitializeParameter( 1798 Context, ValType, /*consume*/ false); 1799 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 1800 if (ValArg.isInvalid()) 1801 return true; 1802 TheCall->setArg(0, ValArg.get()); 1803 1804 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 1805 // but the custom checker bypasses all default analysis. 1806 TheCall->setType(Context.IntTy); 1807 return false; 1808 } 1809 1810 bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 1811 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 1812 BuiltinID == ARM::BI__builtin_arm_ldaex || 1813 BuiltinID == ARM::BI__builtin_arm_strex || 1814 BuiltinID == ARM::BI__builtin_arm_stlex) { 1815 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 1816 } 1817 1818 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 1819 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 1820 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 1821 } 1822 1823 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 1824 BuiltinID == ARM::BI__builtin_arm_wsr64) 1825 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 1826 1827 if (BuiltinID == ARM::BI__builtin_arm_rsr || 1828 BuiltinID == ARM::BI__builtin_arm_rsrp || 1829 BuiltinID == ARM::BI__builtin_arm_wsr || 1830 BuiltinID == ARM::BI__builtin_arm_wsrp) 1831 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1832 1833 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) 1834 return true; 1835 1836 // For intrinsics which take an immediate value as part of the instruction, 1837 // range check them here. 1838 // FIXME: VFP Intrinsics should error if VFP not present. 
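// For example, __builtin_arm_ssat(x, 1) through __builtin_arm_ssat(x, 32)
// are accepted (the saturate-to bit position is an immediate in [1, 32]),
// while __builtin_arm_ssat(x, 0) is rejected as out of range. Similarly,
// __builtin_arm_dmb(15) selects the full-system (SY) barrier, the top of its
// [0, 15] immediate range. (Illustrative; the authoritative ranges are the
// ones in the switch below.)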
1839 switch (BuiltinID) { 1840 default: return false; 1841 case ARM::BI__builtin_arm_ssat: 1842 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 1843 case ARM::BI__builtin_arm_usat: 1844 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 1845 case ARM::BI__builtin_arm_ssat16: 1846 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 1847 case ARM::BI__builtin_arm_usat16: 1848 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 1849 case ARM::BI__builtin_arm_vcvtr_f: 1850 case ARM::BI__builtin_arm_vcvtr_d: 1851 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 1852 case ARM::BI__builtin_arm_dmb: 1853 case ARM::BI__builtin_arm_dsb: 1854 case ARM::BI__builtin_arm_isb: 1855 case ARM::BI__builtin_arm_dbg: 1856 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 1857 } 1858 } 1859 1860 bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, 1861 CallExpr *TheCall) { 1862 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 1863 BuiltinID == AArch64::BI__builtin_arm_ldaex || 1864 BuiltinID == AArch64::BI__builtin_arm_strex || 1865 BuiltinID == AArch64::BI__builtin_arm_stlex) { 1866 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 1867 } 1868 1869 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 1870 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 1871 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 1872 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 1873 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 1874 } 1875 1876 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 1877 BuiltinID == AArch64::BI__builtin_arm_wsr64) 1878 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1879 1880 // Memory Tagging Extensions (MTE) Intrinsics 1881 if (BuiltinID == AArch64::BI__builtin_arm_irg || 1882 BuiltinID == AArch64::BI__builtin_arm_addg || 1883 BuiltinID == AArch64::BI__builtin_arm_gmi || 1884 BuiltinID == AArch64::BI__builtin_arm_ldg || 1885 BuiltinID == AArch64::BI__builtin_arm_stg || 1886 BuiltinID == AArch64::BI__builtin_arm_subp) { 1887 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 1888 } 1889 1890 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 1891 BuiltinID == AArch64::BI__builtin_arm_rsrp || 1892 BuiltinID == AArch64::BI__builtin_arm_wsr || 1893 BuiltinID == AArch64::BI__builtin_arm_wsrp) 1894 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1895 1896 // Only check the valid encoding range. Any constant in this range would be 1897 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 1898 // an exception for incorrect registers. This matches MSVC behavior. 1899 if (BuiltinID == AArch64::BI_ReadStatusReg || 1900 BuiltinID == AArch64::BI_WriteStatusReg) 1901 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 1902 1903 if (BuiltinID == AArch64::BI__getReg) 1904 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 1905 1906 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) 1907 return true; 1908 1909 // For intrinsics which take an immediate value as part of the instruction, 1910 // range check them here. 
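// For example, __builtin_arm_isb(15), the full-system SY option, is the
// largest barrier immediate accepted here on AArch64; a constant outside
// [0, 15], or a non-constant argument, is diagnosed by
// SemaBuiltinConstantArgRange below. (Illustrative example only.)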
1911 unsigned i = 0, l = 0, u = 0; 1912 switch (BuiltinID) { 1913 default: return false; 1914 case AArch64::BI__builtin_arm_dmb: 1915 case AArch64::BI__builtin_arm_dsb: 1916 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 1917 } 1918 1919 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 1920 } 1921 1922 bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) { 1923 struct BuiltinAndString { 1924 unsigned BuiltinID; 1925 const char *Str; 1926 }; 1927 1928 static BuiltinAndString ValidCPU[] = { 1929 { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" }, 1930 { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" }, 1931 { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" }, 1932 { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" }, 1933 { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" }, 1934 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" }, 1935 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" }, 1936 { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" }, 1937 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" }, 1938 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" }, 1939 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" }, 1940 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" }, 1941 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" }, 1942 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" }, 1943 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" }, 1944 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" }, 1945 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" }, 1946 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" }, 1947 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" }, 1948 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" }, 1949 { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" }, 1950 { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" }, 1951 { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" }, 1952 }; 1953 1954 static BuiltinAndString ValidHVX[] = { 1955 { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" }, 1956 { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" }, 1957 { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" }, 1958 { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" }, 1959 { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" }, 1960 { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" }, 1961 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" }, 1962 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" }, 1963 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" }, 1964 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" }, 1965 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" }, 1966 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" }, 1967 { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" }, 1968 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" }, 1969 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" }, 1970 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" }, 1971 { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" }, 1972 { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" }, 1973 { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" }, 1974 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, 
"v60,v62,v65,v66" }, 1975 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" }, 1976 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" }, 1977 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" }, 1978 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" }, 1979 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" }, 1980 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" }, 1981 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" }, 1982 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" }, 1983 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" }, 1984 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" }, 1985 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" }, 1986 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" }, 1987 { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" }, 1988 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" }, 1989 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" }, 1990 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" }, 1991 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" }, 1992 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" }, 1993 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" }, 1994 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" }, 1995 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" }, 1996 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" }, 1997 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" }, 1998 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" }, 1999 { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" }, 2000 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" }, 2001 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" }, 2002 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" }, 2003 { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" }, 2004 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" }, 2005 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" }, 2006 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" }, 2007 { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" }, 2008 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" }, 2009 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" }, 2010 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" }, 2011 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" }, 2012 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" }, 2013 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" }, 2014 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" }, 2015 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" }, 2016 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" }, 2017 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" }, 2018 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" }, 2019 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" }, 2020 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" }, 2021 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" }, 2022 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" }, 2023 { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" }, 2024 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" }, 
2025 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" }, 2026 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" }, 2027 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" }, 2028 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" }, 2029 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" }, 2030 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" }, 2031 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" }, 2032 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" }, 2033 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" }, 2034 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" }, 2035 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" }, 2036 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" }, 2037 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" }, 2038 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" }, 2039 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" }, 2040 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" }, 2041 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" }, 2042 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" }, 2043 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" }, 2044 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" }, 2045 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" }, 2046 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" }, 2047 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" }, 2048 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" }, 2049 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" }, 2050 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" }, 2051 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" }, 2052 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" }, 2053 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" }, 2054 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" }, 2055 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" }, 2056 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" }, 2057 { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" }, 2058 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" }, 2059 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" }, 2060 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" }, 2061 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" }, 2062 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" }, 2063 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" }, 2064 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" }, 2065 { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" }, 2066 { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" }, 2067 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" }, 2068 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" }, 2069 { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" }, 2070 { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" }, 2071 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" }, 2072 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" }, 2073 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" }, 2074 { 
Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" }, 2075 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" }, 2076 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" }, 2077 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" }, 2078 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" }, 2079 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" }, 2080 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" }, 2081 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" }, 2082 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" }, 2083 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" }, 2084 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" }, 2085 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" }, 2086 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" }, 2087 { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" }, 2088 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" }, 2089 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" }, 2090 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" }, 2091 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" }, 2092 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" }, 2093 { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" }, 2094 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" }, 2095 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" }, 2096 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" }, 2097 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" }, 2098 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" }, 2099 { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" }, 2100 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" }, 2101 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" }, 2102 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" }, 2103 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" }, 2104 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" }, 2105 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" }, 2106 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" }, 2107 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" }, 2108 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" }, 2109 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" }, 2110 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" }, 2111 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" }, 2112 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" }, 2113 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" }, 2114 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" }, 2115 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" }, 2116 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" }, 2117 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" }, 2118 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" }, 2119 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" }, 2120 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" }, 2121 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" }, 2122 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" }, 2123 { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" }, 2124 { 
Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" }, 2125 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" }, 2126 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" }, 2127 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" }, 2128 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" }, 2129 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" }, 2130 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" }, 2131 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" }, 2132 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" }, 2133 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" }, 2134 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" }, 2135 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" }, 2136 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" }, 2137 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" }, 2138 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" }, 2139 { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" }, 2140 { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" }, 2141 { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" }, 2142 { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" }, 2143 { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" }, 2144 { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" }, 2145 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" }, 2146 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" }, 2147 { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" }, 2148 { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" }, 2149 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" }, 2150 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" }, 2151 { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" }, 2152 { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" }, 2153 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" }, 2154 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" }, 2155 { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" }, 2156 { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" }, 2157 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" }, 2158 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" }, 2159 { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" }, 2160 { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" }, 2161 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" }, 2162 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" }, 2163 { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" }, 2164 { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" }, 2165 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" }, 2166 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" }, 2167 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" }, 2168 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" }, 2169 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" }, 2170 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" }, 2171 { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" }, 2172 { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" }, 2173 { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" }, 2174 { 
Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" }, 2175 { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" }, 2176 { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" }, 2177 { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" }, 2178 { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" }, 2179 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" }, 2180 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" }, 2181 { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" }, 2182 { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" }, 2183 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" }, 2184 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" }, 2185 { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" }, 2186 { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" }, 2187 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" }, 2188 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" }, 2189 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" }, 2190 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" }, 2191 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" }, 2192 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" }, 2193 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" }, 2194 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" }, 2195 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" }, 2196 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" }, 2197 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" }, 2198 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" }, 2199 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" }, 2200 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" }, 2201 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" }, 2202 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" }, 2203 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" }, 2204 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" }, 2205 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" }, 2206 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" }, 2207 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" }, 2208 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" }, 2209 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" }, 2210 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" }, 2211 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" }, 2212 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" }, 2213 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" }, 2214 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" }, 2215 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" }, 2216 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" }, 2217 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" }, 2218 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" }, 2219 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" }, 2220 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" }, 2221 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, 
"v60,v62,v65,v66" }, 2222 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" }, 2223 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" }, 2224 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" }, 2225 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" }, 2226 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" }, 2227 { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" }, 2228 { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" }, 2229 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" }, 2230 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" }, 2231 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" }, 2232 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" }, 2233 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" }, 2234 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" }, 2235 { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" }, 2236 { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" }, 2237 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" }, 2238 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" }, 2239 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" }, 2240 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" }, 2241 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" }, 2242 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" }, 2243 { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" }, 2244 { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" }, 2245 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" }, 2246 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" }, 2247 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" }, 2248 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" }, 2249 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" }, 2250 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" }, 2251 { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" }, 2252 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" }, 2253 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" }, 2254 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" }, 2255 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" }, 2256 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" }, 2257 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" }, 2258 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" }, 2259 { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" }, 2260 { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" }, 2261 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" }, 2262 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" }, 2263 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" }, 2264 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" }, 2265 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" }, 2266 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" }, 2267 { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" }, 2268 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" }, 2269 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" }, 2270 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" }, 2271 { 
Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" }, 2272 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" }, 2273 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" }, 2274 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" }, 2275 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" }, 2276 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" }, 2277 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" }, 2278 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" }, 2279 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" }, 2280 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" }, 2281 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" }, 2282 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" }, 2283 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" }, 2284 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" }, 2285 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" }, 2286 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" }, 2287 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" }, 2288 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" }, 2289 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" }, 2290 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" }, 2291 { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" }, 2292 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" }, 2293 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" }, 2294 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" }, 2295 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" }, 2296 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" }, 2297 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" }, 2298 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" }, 2299 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" }, 2300 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" }, 2301 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" }, 2302 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" }, 2303 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" }, 2304 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" }, 2305 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" }, 2306 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" }, 2307 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" }, 2308 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" }, 2309 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" }, 2310 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" }, 2311 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" }, 2312 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" }, 2313 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" }, 2314 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" }, 2315 { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" }, 2316 { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" }, 2317 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" }, 2318 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" }, 2319 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" }, 2320 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" }, 2321 { 
Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" }, 2322 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" }, 2323 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" }, 2324 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" }, 2325 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" }, 2326 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" }, 2327 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" }, 2328 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" }, 2329 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" }, 2330 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" }, 2331 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" }, 2332 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" }, 2333 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" }, 2334 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" }, 2335 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" }, 2336 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" }, 2337 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" }, 2338 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" }, 2339 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" }, 2340 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" }, 2341 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" }, 2342 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" }, 2343 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" }, 2344 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" }, 2345 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" }, 2346 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" }, 2347 { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" }, 2348 { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" }, 2349 { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" }, 2350 { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" }, 2351 { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" }, 2352 { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" }, 2353 { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" }, 2354 { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" }, 2355 { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" }, 2356 { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" }, 2357 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" }, 2358 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" }, 2359 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" }, 2360 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" }, 2361 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" }, 2362 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" }, 2363 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" }, 2364 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" }, 2365 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" }, 2366 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" }, 2367 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" }, 2368 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" }, 2369 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" }, 2370 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" }, 2371 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" }, 2372 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" }, 2373 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" }, 2374 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" }, 2375 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" }, 2376 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" }, 2377 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" }, 2378 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" }, 2379 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" }, 2380 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" }, 2381 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" }, 2382 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" }, 2383 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" }, 2384 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" }, 2385 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" }, 2386 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" }, 2387 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" }, 2388 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" }, 2389 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" }, 2390 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" }, 2391 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" }, 2392 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" }, 2393 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" }, 2394 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" }, 2395 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" }, 2396 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" }, 2397 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" }, 2398 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" }, 2399 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" }, 2400 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" }, 2401 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" }, 2402 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" }, 2403 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" }, 2404 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" }, 2405 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" }, 2406 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" }, 2407 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" }, 2408 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" }, 2409 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" }, 2410 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" }, 2411 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" }, 2412 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" }, 2413 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" }, 2414 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" }, 2415 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" }, 2416 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" }, 2417 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" }, 2418 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" }, 2419 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" }, 2420 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" }, 2421 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" }, 2422 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" }, 2423 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" }, 2424 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" }, 2425 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" }, 2426 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" }, 2427 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" }, 2428 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" }, 2429 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" }, 2430 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" }, 2431 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" }, 2432 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" }, 2433 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" }, 2434 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" }, 2435 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" }, 2436 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" }, 2437 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" }, 2438 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" }, 2439 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" }, 2440 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" }, 2441 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" }, 2442 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" }, 2443 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" }, 2444 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" }, 2445 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" }, 2446 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" }, 2447 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" }, 2448 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" }, 2449 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" }, 2450 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65,v66" }, 2451 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" }, 2452 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" }, 2453 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" }, 2454 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" }, 2455 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" }, 2456 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" }, 2457 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" }, 2458 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" }, 2459 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" }, 2460 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" }, 2461 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" }, 2462 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" }, 2463 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" }, 2464 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" }, 2465 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" }, 2466 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" }, 2467 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" }, 2468 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" }, 2469 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" }, 2470 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" }, 2471 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" }, 2472 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" }, 2473 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" }, 2474 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" }, 2475 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" }, 2476 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" }, 2477 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" }, 2478 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" }, 2479 { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" }, 2480 { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" }, 2481 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" }, 2482 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" }, 2483 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" }, 2484 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" }, 2485 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" }, 2486 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" }, 2487 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" }, 2488 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" }, 2489 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" }, 2490 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" }, 2491 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" }, 2492 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" }, 2493 { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" }, 2494 { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" }, 2495 { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" }, 2496 { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" }, 2497 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" }, 2498 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" }, 2499 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" }, 2500 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" }, 2501 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" }, 2502 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" }, 2503 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" }, 2504 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" }, 2505 { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" }, 2506 { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" }, 2507 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" }, 2508 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" }, 2509 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" }, 2510 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" }, 2511 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" }, 2512 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" }, 2513 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" }, 2514 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" }, 2515 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" }, 2516 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" }, 2517 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" }, 2518 { 
Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" }, 2519 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" }, 2520 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" }, 2521 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" }, 2522 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" }, 2523 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" }, 2524 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" }, 2525 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" }, 2526 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" }, 2527 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" }, 2528 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" }, 2529 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" }, 2530 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" }, 2531 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" }, 2532 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" }, 2533 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" }, 2534 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" }, 2535 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" }, 2536 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" }, 2537 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" }, 2538 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" }, 2539 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" }, 2540 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" }, 2541 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" }, 2542 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" }, 2543 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" }, 2544 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" }, 2545 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" }, 2546 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" }, 2547 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" }, 2548 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" }, 2549 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" }, 2550 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" }, 2551 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" }, 2552 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" }, 2553 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" }, 2554 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" }, 2555 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" }, 2556 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" }, 2557 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" }, 2558 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" }, 2559 { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" }, 2560 { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" }, 2561 { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" }, 2562 { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" }, 2563 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" }, 2564 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" }, 2565 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" }, 2566 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" }, 2567 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, 
"v62,v65,v66" }, 2568 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" }, 2569 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" }, 2570 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" }, 2571 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" }, 2572 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" }, 2573 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" }, 2574 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" }, 2575 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" }, 2576 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" }, 2577 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" }, 2578 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" }, 2579 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" }, 2580 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" }, 2581 { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" }, 2582 { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" }, 2583 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" }, 2584 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" }, 2585 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" }, 2586 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" }, 2587 { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" }, 2588 { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" }, 2589 { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" }, 2590 { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" }, 2591 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" }, 2592 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" }, 2593 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" }, 2594 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" }, 2595 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" }, 2596 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" }, 2597 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" }, 2598 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" }, 2599 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65,v66" }, 2600 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" }, 2601 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" }, 2602 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" }, 2603 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" }, 2604 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" }, 2605 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" }, 2606 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" }, 2607 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" }, 2608 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" }, 2609 { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" }, 2610 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" }, 2611 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" }, 2612 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" }, 2613 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" }, 2614 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" }, 2615 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" }, 2616 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" }, 2617 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, 
"v62,v65,v66" }, 2618 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" }, 2619 { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" }, 2620 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" }, 2621 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" }, 2622 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" }, 2623 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" }, 2624 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" }, 2625 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" }, 2626 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" }, 2627 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" }, 2628 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" }, 2629 { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" }, 2630 { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" }, 2631 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" }, 2632 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" }, 2633 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" }, 2634 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" }, 2635 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" }, 2636 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" }, 2637 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" }, 2638 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" }, 2639 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" }, 2640 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" }, 2641 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" }, 2642 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" }, 2643 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" }, 2644 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" }, 2645 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" }, 2646 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" }, 2647 { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" }, 2648 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" }, 2649 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" }, 2650 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" }, 2651 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" }, 2652 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" }, 2653 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" }, 2654 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" }, 2655 { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" }, 2656 { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" }, 2657 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" }, 2658 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" }, 2659 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" }, 2660 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" }, 2661 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" }, 2662 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" }, 2663 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" }, 2664 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" }, 2665 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" }, 2666 { 
Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" }, 2667 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" }, 2668 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" }, 2669 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" }, 2670 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" }, 2671 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" }, 2672 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" }, 2673 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" }, 2674 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" }, 2675 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" }, 2676 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" }, 2677 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" }, 2678 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" }, 2679 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" }, 2680 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" }, 2681 { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" }, 2682 { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" }, 2683 { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" }, 2684 { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" }, 2685 { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" }, 2686 { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" }, 2687 }; 2688 2689 // Sort the tables on first execution so we can binary search them. 2690 auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) { 2691 return LHS.BuiltinID < RHS.BuiltinID; 2692 }; 2693 static const bool SortOnce = 2694 (llvm::sort(ValidCPU, SortCmp), 2695 llvm::sort(ValidHVX, SortCmp), true); 2696 (void)SortOnce; 2697 auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) { 2698 return BI.BuiltinID < BuiltinID; 2699 }; 2700 2701 const TargetInfo &TI = Context.getTargetInfo(); 2702 2703 const BuiltinAndString *FC = 2704 llvm::lower_bound(ValidCPU, BuiltinID, LowerBoundCmp); 2705 if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) { 2706 const TargetOptions &Opts = TI.getTargetOpts(); 2707 StringRef CPU = Opts.CPU; 2708 if (!CPU.empty()) { 2709 assert(CPU.startswith("hexagon") && "Unexpected CPU name"); 2710 CPU.consume_front("hexagon"); 2711 SmallVector<StringRef, 3> CPUs; 2712 StringRef(FC->Str).split(CPUs, ','); 2713 if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; })) 2714 return Diag(TheCall->getBeginLoc(), 2715 diag::err_hexagon_builtin_unsupported_cpu); 2716 } 2717 } 2718 2719 const BuiltinAndString *FH = 2720 llvm::lower_bound(ValidHVX, BuiltinID, LowerBoundCmp); 2721 if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) { 2722 if (!TI.hasFeature("hvx")) 2723 return Diag(TheCall->getBeginLoc(), 2724 diag::err_hexagon_builtin_requires_hvx); 2725 2726 SmallVector<StringRef, 3> HVXs; 2727 StringRef(FH->Str).split(HVXs, ','); 2728 bool IsValid = llvm::any_of(HVXs, 2729 [&TI] (StringRef V) { 2730 std::string F = "hvx" + V.str(); 2731 return TI.hasFeature(F); 2732 }); 2733 if (!IsValid) 2734 return Diag(TheCall->getBeginLoc(), 2735 diag::err_hexagon_builtin_unsupported_hvx); 2736 } 2737 2738 return false; 2739 } 2740 2741 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2742 struct ArgInfo { 2743 uint8_t OpNum; 2744 bool IsSigned; 2745 uint8_t BitWidth; 2746 uint8_t Align; 2747 }; 2748 struct 
BuiltinInfo { 2749 unsigned BuiltinID; 2750 ArgInfo Infos[2]; 2751 }; 2752 2753 static BuiltinInfo Infos[] = { 2754 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2755 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2756 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2757 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} }, 2758 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2759 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2760 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2761 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2762 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2763 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2764 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2765 2766 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2767 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2768 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2769 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2770 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2771 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2772 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2773 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2774 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2775 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2776 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2777 2778 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2779 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2780 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2781 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2782 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2783 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2784 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2785 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2786 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2787 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2788 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2791 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2792 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2793 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2797 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2798 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2799 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2800 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2803 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2804 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 
0, false, 10, 0 }} }, 2805 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2806 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2807 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2808 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2809 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2810 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2811 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2812 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2813 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2814 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2815 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2816 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2817 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2818 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2819 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2820 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2821 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2822 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2823 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2824 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2825 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2826 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2827 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2828 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2829 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2830 {{ 1, false, 6, 0 }} }, 2831 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2832 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2833 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2834 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2835 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2836 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2838 {{ 1, false, 5, 0 }} }, 2839 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2841 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2842 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2844 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2845 { 2, false, 5, 0 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2847 { 2, false, 6, 0 }} }, 2848 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2849 { 3, false, 5, 0 }} }, 2850 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2851 { 3, false, 6, 0 }} }, 2852 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2853 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2854 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2855 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2856 { 
Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2857 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2858 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2859 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2860 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2861 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2863 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2865 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2867 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2868 {{ 2, false, 4, 0 }, 2869 { 3, false, 5, 0 }} }, 2870 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2871 {{ 2, false, 4, 0 }, 2872 { 3, false, 5, 0 }} }, 2873 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2874 {{ 2, false, 4, 0 }, 2875 { 3, false, 5, 0 }} }, 2876 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2877 {{ 2, false, 4, 0 }, 2878 { 3, false, 5, 0 }} }, 2879 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2881 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2882 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2883 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2885 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2886 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2888 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2889 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2890 { 2, false, 5, 0 }} }, 2891 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2892 { 2, false, 6, 0 }} }, 2893 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2894 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2896 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2897 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2898 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2899 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2900 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2902 {{ 1, false, 4, 0 }} }, 2903 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2904 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2905 {{ 1, false, 4, 0 }} }, 2906 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2907 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2908 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2909 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2910 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2911 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2912 { 
Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2913 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2914 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2915 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2916 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2917 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2918 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2919 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2920 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2921 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2922 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2923 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2924 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2925 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2926 {{ 3, false, 1, 0 }} }, 2927 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2928 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2929 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2930 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2931 {{ 3, false, 1, 0 }} }, 2932 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2933 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2934 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2935 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2936 {{ 3, false, 1, 0 }} }, 2937 }; 2938 2939 // Use a dynamically initialized static to sort the table exactly once on 2940 // first run. 2941 static const bool SortOnce = 2942 (llvm::sort(Infos, 2943 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2944 return LHS.BuiltinID < RHS.BuiltinID; 2945 }), 2946 true); 2947 (void)SortOnce; 2948 2949 const BuiltinInfo *F = llvm::partition_point( 2950 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2951 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2952 return false; 2953 2954 bool Error = false; 2955 2956 for (const ArgInfo &A : F->Infos) { 2957 // Ignore empty ArgInfo elements. 2958 if (A.BitWidth == 0) 2959 continue; 2960 2961 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2962 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1; 2963 if (!A.Align) { 2964 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 2965 } else { 2966 unsigned M = 1 << A.Align; 2967 Min *= M; 2968 Max *= M; 2969 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) | 2970 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 2971 } 2972 } 2973 return Error; 2974 } 2975 2976 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 2977 CallExpr *TheCall) { 2978 return CheckHexagonBuiltinCpu(BuiltinID, TheCall) || 2979 CheckHexagonBuiltinArgument(BuiltinID, TheCall); 2980 } 2981 2982 2983 // CheckMipsBuiltinFunctionCall - Checks the constant value passed to the 2984 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 2985 // ordering for DSP is unspecified. MSA is ordered by the data format used 2986 // by the underlying instruction i.e., df/m, df/n and then by size. 2987 // 2988 // FIXME: The size tests here should instead be tablegen'd along with the 2989 // definitions from include/clang/Basic/BuiltinsMips.def. 
2990 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 2991 // be too. 2992 bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2993 unsigned i = 0, l = 0, u = 0, m = 0; 2994 switch (BuiltinID) { 2995 default: return false; 2996 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 2997 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 2998 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 2999 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3000 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3001 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3002 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3003 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 3004 // df/m field. 3005 // These intrinsics take an unsigned 3 bit immediate. 3006 case Mips::BI__builtin_msa_bclri_b: 3007 case Mips::BI__builtin_msa_bnegi_b: 3008 case Mips::BI__builtin_msa_bseti_b: 3009 case Mips::BI__builtin_msa_sat_s_b: 3010 case Mips::BI__builtin_msa_sat_u_b: 3011 case Mips::BI__builtin_msa_slli_b: 3012 case Mips::BI__builtin_msa_srai_b: 3013 case Mips::BI__builtin_msa_srari_b: 3014 case Mips::BI__builtin_msa_srli_b: 3015 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3016 case Mips::BI__builtin_msa_binsli_b: 3017 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3018 // These intrinsics take an unsigned 4 bit immediate. 3019 case Mips::BI__builtin_msa_bclri_h: 3020 case Mips::BI__builtin_msa_bnegi_h: 3021 case Mips::BI__builtin_msa_bseti_h: 3022 case Mips::BI__builtin_msa_sat_s_h: 3023 case Mips::BI__builtin_msa_sat_u_h: 3024 case Mips::BI__builtin_msa_slli_h: 3025 case Mips::BI__builtin_msa_srai_h: 3026 case Mips::BI__builtin_msa_srari_h: 3027 case Mips::BI__builtin_msa_srli_h: 3028 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3029 case Mips::BI__builtin_msa_binsli_h: 3030 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3031 // These intrinsics take an unsigned 5 bit immediate. 3032 // The first block of intrinsics actually have an unsigned 5 bit field, 3033 // not a df/n field. 
3034 case Mips::BI__builtin_msa_cfcmsa: 3035 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3036 case Mips::BI__builtin_msa_clei_u_b: 3037 case Mips::BI__builtin_msa_clei_u_h: 3038 case Mips::BI__builtin_msa_clei_u_w: 3039 case Mips::BI__builtin_msa_clei_u_d: 3040 case Mips::BI__builtin_msa_clti_u_b: 3041 case Mips::BI__builtin_msa_clti_u_h: 3042 case Mips::BI__builtin_msa_clti_u_w: 3043 case Mips::BI__builtin_msa_clti_u_d: 3044 case Mips::BI__builtin_msa_maxi_u_b: 3045 case Mips::BI__builtin_msa_maxi_u_h: 3046 case Mips::BI__builtin_msa_maxi_u_w: 3047 case Mips::BI__builtin_msa_maxi_u_d: 3048 case Mips::BI__builtin_msa_mini_u_b: 3049 case Mips::BI__builtin_msa_mini_u_h: 3050 case Mips::BI__builtin_msa_mini_u_w: 3051 case Mips::BI__builtin_msa_mini_u_d: 3052 case Mips::BI__builtin_msa_addvi_b: 3053 case Mips::BI__builtin_msa_addvi_h: 3054 case Mips::BI__builtin_msa_addvi_w: 3055 case Mips::BI__builtin_msa_addvi_d: 3056 case Mips::BI__builtin_msa_bclri_w: 3057 case Mips::BI__builtin_msa_bnegi_w: 3058 case Mips::BI__builtin_msa_bseti_w: 3059 case Mips::BI__builtin_msa_sat_s_w: 3060 case Mips::BI__builtin_msa_sat_u_w: 3061 case Mips::BI__builtin_msa_slli_w: 3062 case Mips::BI__builtin_msa_srai_w: 3063 case Mips::BI__builtin_msa_srari_w: 3064 case Mips::BI__builtin_msa_srli_w: 3065 case Mips::BI__builtin_msa_srlri_w: 3066 case Mips::BI__builtin_msa_subvi_b: 3067 case Mips::BI__builtin_msa_subvi_h: 3068 case Mips::BI__builtin_msa_subvi_w: 3069 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3070 case Mips::BI__builtin_msa_binsli_w: 3071 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3072 // These intrinsics take an unsigned 6 bit immediate. 3073 case Mips::BI__builtin_msa_bclri_d: 3074 case Mips::BI__builtin_msa_bnegi_d: 3075 case Mips::BI__builtin_msa_bseti_d: 3076 case Mips::BI__builtin_msa_sat_s_d: 3077 case Mips::BI__builtin_msa_sat_u_d: 3078 case Mips::BI__builtin_msa_slli_d: 3079 case Mips::BI__builtin_msa_srai_d: 3080 case Mips::BI__builtin_msa_srari_d: 3081 case Mips::BI__builtin_msa_srli_d: 3082 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3083 case Mips::BI__builtin_msa_binsli_d: 3084 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3085 // These intrinsics take a signed 5 bit immediate. 3086 case Mips::BI__builtin_msa_ceqi_b: 3087 case Mips::BI__builtin_msa_ceqi_h: 3088 case Mips::BI__builtin_msa_ceqi_w: 3089 case Mips::BI__builtin_msa_ceqi_d: 3090 case Mips::BI__builtin_msa_clti_s_b: 3091 case Mips::BI__builtin_msa_clti_s_h: 3092 case Mips::BI__builtin_msa_clti_s_w: 3093 case Mips::BI__builtin_msa_clti_s_d: 3094 case Mips::BI__builtin_msa_clei_s_b: 3095 case Mips::BI__builtin_msa_clei_s_h: 3096 case Mips::BI__builtin_msa_clei_s_w: 3097 case Mips::BI__builtin_msa_clei_s_d: 3098 case Mips::BI__builtin_msa_maxi_s_b: 3099 case Mips::BI__builtin_msa_maxi_s_h: 3100 case Mips::BI__builtin_msa_maxi_s_w: 3101 case Mips::BI__builtin_msa_maxi_s_d: 3102 case Mips::BI__builtin_msa_mini_s_b: 3103 case Mips::BI__builtin_msa_mini_s_h: 3104 case Mips::BI__builtin_msa_mini_s_w: 3105 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3106 // These intrinsics take an unsigned 8 bit immediate. 
3107 case Mips::BI__builtin_msa_andi_b: 3108 case Mips::BI__builtin_msa_nori_b: 3109 case Mips::BI__builtin_msa_ori_b: 3110 case Mips::BI__builtin_msa_shf_b: 3111 case Mips::BI__builtin_msa_shf_h: 3112 case Mips::BI__builtin_msa_shf_w: 3113 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3114 case Mips::BI__builtin_msa_bseli_b: 3115 case Mips::BI__builtin_msa_bmnzi_b: 3116 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3117 // df/n format 3118 // These intrinsics take an unsigned 4 bit immediate. 3119 case Mips::BI__builtin_msa_copy_s_b: 3120 case Mips::BI__builtin_msa_copy_u_b: 3121 case Mips::BI__builtin_msa_insve_b: 3122 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3123 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3124 // These intrinsics take an unsigned 3 bit immediate. 3125 case Mips::BI__builtin_msa_copy_s_h: 3126 case Mips::BI__builtin_msa_copy_u_h: 3127 case Mips::BI__builtin_msa_insve_h: 3128 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3129 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3130 // These intrinsics take an unsigned 2 bit immediate. 3131 case Mips::BI__builtin_msa_copy_s_w: 3132 case Mips::BI__builtin_msa_copy_u_w: 3133 case Mips::BI__builtin_msa_insve_w: 3134 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3135 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3136 // These intrinsics take an unsigned 1 bit immediate. 3137 case Mips::BI__builtin_msa_copy_s_d: 3138 case Mips::BI__builtin_msa_copy_u_d: 3139 case Mips::BI__builtin_msa_insve_d: 3140 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3141 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3142 // Memory offsets and immediate loads. 3143 // These intrinsics take a signed 10 bit immediate. 
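// Note on the ranges below: for the vector load/store intrinsics the checked
// operand is a byte offset, so the bounds are the signed 10-bit element offset
// scaled by the element size, and m makes SemaBuiltinConstantArgMultiple
// additionally require the offset to be a multiple of that size. The wider
// ldi_b range of [-128, 255] appears intended to accept both signed and
// unsigned byte immediates (an observation, not a documented guarantee).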
3144 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3145 case Mips::BI__builtin_msa_ldi_h: 3146 case Mips::BI__builtin_msa_ldi_w: 3147 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3148 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3149 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3150 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3151 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3152 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3153 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3154 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3155 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3156 } 3157 3158 if (!m) 3159 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3160 3161 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3162 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3163 } 3164 3165 bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3166 unsigned i = 0, l = 0, u = 0; 3167 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3168 BuiltinID == PPC::BI__builtin_divdeu || 3169 BuiltinID == PPC::BI__builtin_bpermd; 3170 bool IsTarget64Bit = Context.getTargetInfo() 3171 .getTypeWidth(Context 3172 .getTargetInfo() 3173 .getIntPtrType()) == 64; 3174 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3175 BuiltinID == PPC::BI__builtin_divweu || 3176 BuiltinID == PPC::BI__builtin_divde || 3177 BuiltinID == PPC::BI__builtin_divdeu; 3178 3179 if (Is64BitBltin && !IsTarget64Bit) 3180 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3181 << TheCall->getSourceRange(); 3182 3183 if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) || 3184 (BuiltinID == PPC::BI__builtin_bpermd && 3185 !Context.getTargetInfo().hasFeature("bpermd"))) 3186 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3187 << TheCall->getSourceRange(); 3188 3189 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3190 if (!Context.getTargetInfo().hasFeature("vsx")) 3191 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3192 << TheCall->getSourceRange(); 3193 return false; 3194 }; 3195 3196 switch (BuiltinID) { 3197 default: return false; 3198 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3199 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3200 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3201 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3202 case PPC::BI__builtin_tbegin: 3203 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3204 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3205 case PPC::BI__builtin_tabortwc: 3206 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3207 case PPC::BI__builtin_tabortwci: 3208 case PPC::BI__builtin_tabortdci: 3209 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3210 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3211 case PPC::BI__builtin_vsx_xxpermdi: 3212 case PPC::BI__builtin_vsx_xxsldwi: 3213 return SemaBuiltinVSX(TheCall); 3214 case PPC::BI__builtin_unpack_vector_int128: 3215 return SemaVSXCheck(TheCall) || 3216 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3217 case PPC::BI__builtin_pack_vector_int128: 3218 return SemaVSXCheck(TheCall); 3219 } 3220 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3221 } 3222 3223 bool 
Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3224 CallExpr *TheCall) { 3225 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3226 Expr *Arg = TheCall->getArg(0); 3227 llvm::APSInt AbortCode(32); 3228 if (Arg->isIntegerConstantExpr(AbortCode, Context) && 3229 AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256) 3230 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3231 << Arg->getSourceRange(); 3232 } 3233 3234 // For intrinsics which take an immediate value as part of the instruction, 3235 // range check them here. 3236 unsigned i = 0, l = 0, u = 0; 3237 switch (BuiltinID) { 3238 default: return false; 3239 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3240 case SystemZ::BI__builtin_s390_verimb: 3241 case SystemZ::BI__builtin_s390_verimh: 3242 case SystemZ::BI__builtin_s390_verimf: 3243 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3244 case SystemZ::BI__builtin_s390_vfaeb: 3245 case SystemZ::BI__builtin_s390_vfaeh: 3246 case SystemZ::BI__builtin_s390_vfaef: 3247 case SystemZ::BI__builtin_s390_vfaebs: 3248 case SystemZ::BI__builtin_s390_vfaehs: 3249 case SystemZ::BI__builtin_s390_vfaefs: 3250 case SystemZ::BI__builtin_s390_vfaezb: 3251 case SystemZ::BI__builtin_s390_vfaezh: 3252 case SystemZ::BI__builtin_s390_vfaezf: 3253 case SystemZ::BI__builtin_s390_vfaezbs: 3254 case SystemZ::BI__builtin_s390_vfaezhs: 3255 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3256 case SystemZ::BI__builtin_s390_vfisb: 3257 case SystemZ::BI__builtin_s390_vfidb: 3258 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3259 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3260 case SystemZ::BI__builtin_s390_vftcisb: 3261 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3262 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3263 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3264 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3265 case SystemZ::BI__builtin_s390_vstrcb: 3266 case SystemZ::BI__builtin_s390_vstrch: 3267 case SystemZ::BI__builtin_s390_vstrcf: 3268 case SystemZ::BI__builtin_s390_vstrczb: 3269 case SystemZ::BI__builtin_s390_vstrczh: 3270 case SystemZ::BI__builtin_s390_vstrczf: 3271 case SystemZ::BI__builtin_s390_vstrcbs: 3272 case SystemZ::BI__builtin_s390_vstrchs: 3273 case SystemZ::BI__builtin_s390_vstrcfs: 3274 case SystemZ::BI__builtin_s390_vstrczbs: 3275 case SystemZ::BI__builtin_s390_vstrczhs: 3276 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3277 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3278 case SystemZ::BI__builtin_s390_vfminsb: 3279 case SystemZ::BI__builtin_s390_vfmaxsb: 3280 case SystemZ::BI__builtin_s390_vfmindb: 3281 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3282 } 3283 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3284 } 3285 3286 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3287 /// This checks that the target supports __builtin_cpu_supports and 3288 /// that the string argument is constant and valid. 3289 static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) { 3290 Expr *Arg = TheCall->getArg(0); 3291 3292 // Check if the argument is a string literal. 3293 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3294 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3295 << Arg->getSourceRange(); 3296 3297 // Check the contents of the string. 
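// For example, on x86 a call such as __builtin_cpu_supports("sse4.2") passes
// this check, while an unrecognized feature string is diagnosed with
// err_invalid_cpu_supports below (illustrative; the accepted names are
// whatever the target's validateCpuSupports recognizes).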
3298 StringRef Feature = 3299 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3300 if (!S.Context.getTargetInfo().validateCpuSupports(Feature)) 3301 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3302 << Arg->getSourceRange(); 3303 return false; 3304 } 3305 3306 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3307 /// This checks that the target supports __builtin_cpu_is and 3308 /// that the string argument is constant and valid. 3309 static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) { 3310 Expr *Arg = TheCall->getArg(0); 3311 3312 // Check if the argument is a string literal. 3313 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3314 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3315 << Arg->getSourceRange(); 3316 3317 // Check the contents of the string. 3318 StringRef Feature = 3319 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3320 if (!S.Context.getTargetInfo().validateCpuIs(Feature)) 3321 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3322 << Arg->getSourceRange(); 3323 return false; 3324 } 3325 3326 // Check if the rounding mode is legal. 3327 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3328 // Indicates if this instruction has rounding control or just SAE. 3329 bool HasRC = false; 3330 3331 unsigned ArgNum = 0; 3332 switch (BuiltinID) { 3333 default: 3334 return false; 3335 case X86::BI__builtin_ia32_vcvttsd2si32: 3336 case X86::BI__builtin_ia32_vcvttsd2si64: 3337 case X86::BI__builtin_ia32_vcvttsd2usi32: 3338 case X86::BI__builtin_ia32_vcvttsd2usi64: 3339 case X86::BI__builtin_ia32_vcvttss2si32: 3340 case X86::BI__builtin_ia32_vcvttss2si64: 3341 case X86::BI__builtin_ia32_vcvttss2usi32: 3342 case X86::BI__builtin_ia32_vcvttss2usi64: 3343 ArgNum = 1; 3344 break; 3345 case X86::BI__builtin_ia32_maxpd512: 3346 case X86::BI__builtin_ia32_maxps512: 3347 case X86::BI__builtin_ia32_minpd512: 3348 case X86::BI__builtin_ia32_minps512: 3349 ArgNum = 2; 3350 break; 3351 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3352 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3353 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3354 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3355 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3356 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3357 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3358 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3359 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3360 case X86::BI__builtin_ia32_exp2pd_mask: 3361 case X86::BI__builtin_ia32_exp2ps_mask: 3362 case X86::BI__builtin_ia32_getexppd512_mask: 3363 case X86::BI__builtin_ia32_getexpps512_mask: 3364 case X86::BI__builtin_ia32_rcp28pd_mask: 3365 case X86::BI__builtin_ia32_rcp28ps_mask: 3366 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3367 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3368 case X86::BI__builtin_ia32_vcomisd: 3369 case X86::BI__builtin_ia32_vcomiss: 3370 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3371 ArgNum = 3; 3372 break; 3373 case X86::BI__builtin_ia32_cmppd512_mask: 3374 case X86::BI__builtin_ia32_cmpps512_mask: 3375 case X86::BI__builtin_ia32_cmpsd_mask: 3376 case X86::BI__builtin_ia32_cmpss_mask: 3377 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3378 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3379 case X86::BI__builtin_ia32_getexpss128_round_mask: 3380 case X86::BI__builtin_ia32_getmantpd512_mask: 3381 case X86::BI__builtin_ia32_getmantps512_mask: 3382 case X86::BI__builtin_ia32_maxsd_round_mask: 
3383 case X86::BI__builtin_ia32_maxss_round_mask: 3384 case X86::BI__builtin_ia32_minsd_round_mask: 3385 case X86::BI__builtin_ia32_minss_round_mask: 3386 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3387 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3388 case X86::BI__builtin_ia32_reducepd512_mask: 3389 case X86::BI__builtin_ia32_reduceps512_mask: 3390 case X86::BI__builtin_ia32_rndscalepd_mask: 3391 case X86::BI__builtin_ia32_rndscaleps_mask: 3392 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3393 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3394 ArgNum = 4; 3395 break; 3396 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3397 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3398 case X86::BI__builtin_ia32_fixupimmps512_mask: 3399 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3400 case X86::BI__builtin_ia32_fixupimmsd_mask: 3401 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3402 case X86::BI__builtin_ia32_fixupimmss_mask: 3403 case X86::BI__builtin_ia32_fixupimmss_maskz: 3404 case X86::BI__builtin_ia32_getmantsd_round_mask: 3405 case X86::BI__builtin_ia32_getmantss_round_mask: 3406 case X86::BI__builtin_ia32_rangepd512_mask: 3407 case X86::BI__builtin_ia32_rangeps512_mask: 3408 case X86::BI__builtin_ia32_rangesd128_round_mask: 3409 case X86::BI__builtin_ia32_rangess128_round_mask: 3410 case X86::BI__builtin_ia32_reducesd_mask: 3411 case X86::BI__builtin_ia32_reducess_mask: 3412 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3413 case X86::BI__builtin_ia32_rndscaless_round_mask: 3414 ArgNum = 5; 3415 break; 3416 case X86::BI__builtin_ia32_vcvtsd2si64: 3417 case X86::BI__builtin_ia32_vcvtsd2si32: 3418 case X86::BI__builtin_ia32_vcvtsd2usi32: 3419 case X86::BI__builtin_ia32_vcvtsd2usi64: 3420 case X86::BI__builtin_ia32_vcvtss2si32: 3421 case X86::BI__builtin_ia32_vcvtss2si64: 3422 case X86::BI__builtin_ia32_vcvtss2usi32: 3423 case X86::BI__builtin_ia32_vcvtss2usi64: 3424 case X86::BI__builtin_ia32_sqrtpd512: 3425 case X86::BI__builtin_ia32_sqrtps512: 3426 ArgNum = 1; 3427 HasRC = true; 3428 break; 3429 case X86::BI__builtin_ia32_addpd512: 3430 case X86::BI__builtin_ia32_addps512: 3431 case X86::BI__builtin_ia32_divpd512: 3432 case X86::BI__builtin_ia32_divps512: 3433 case X86::BI__builtin_ia32_mulpd512: 3434 case X86::BI__builtin_ia32_mulps512: 3435 case X86::BI__builtin_ia32_subpd512: 3436 case X86::BI__builtin_ia32_subps512: 3437 case X86::BI__builtin_ia32_cvtsi2sd64: 3438 case X86::BI__builtin_ia32_cvtsi2ss32: 3439 case X86::BI__builtin_ia32_cvtsi2ss64: 3440 case X86::BI__builtin_ia32_cvtusi2sd64: 3441 case X86::BI__builtin_ia32_cvtusi2ss32: 3442 case X86::BI__builtin_ia32_cvtusi2ss64: 3443 ArgNum = 2; 3444 HasRC = true; 3445 break; 3446 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3447 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3448 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3449 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3450 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3451 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3452 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3453 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3454 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3455 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3456 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3457 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3458 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3459 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3460 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3461 ArgNum = 3; 3462 HasRC = true; 3463 break; 3464 case X86::BI__builtin_ia32_addss_round_mask: 3465 
case X86::BI__builtin_ia32_addsd_round_mask: 3466 case X86::BI__builtin_ia32_divss_round_mask: 3467 case X86::BI__builtin_ia32_divsd_round_mask: 3468 case X86::BI__builtin_ia32_mulss_round_mask: 3469 case X86::BI__builtin_ia32_mulsd_round_mask: 3470 case X86::BI__builtin_ia32_subss_round_mask: 3471 case X86::BI__builtin_ia32_subsd_round_mask: 3472 case X86::BI__builtin_ia32_scalefpd512_mask: 3473 case X86::BI__builtin_ia32_scalefps512_mask: 3474 case X86::BI__builtin_ia32_scalefsd_round_mask: 3475 case X86::BI__builtin_ia32_scalefss_round_mask: 3476 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 3477 case X86::BI__builtin_ia32_sqrtsd_round_mask: 3478 case X86::BI__builtin_ia32_sqrtss_round_mask: 3479 case X86::BI__builtin_ia32_vfmaddsd3_mask: 3480 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 3481 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 3482 case X86::BI__builtin_ia32_vfmaddss3_mask: 3483 case X86::BI__builtin_ia32_vfmaddss3_maskz: 3484 case X86::BI__builtin_ia32_vfmaddss3_mask3: 3485 case X86::BI__builtin_ia32_vfmaddpd512_mask: 3486 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 3487 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 3488 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 3489 case X86::BI__builtin_ia32_vfmaddps512_mask: 3490 case X86::BI__builtin_ia32_vfmaddps512_maskz: 3491 case X86::BI__builtin_ia32_vfmaddps512_mask3: 3492 case X86::BI__builtin_ia32_vfmsubps512_mask3: 3493 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 3494 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 3495 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 3496 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 3497 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 3498 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 3499 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 3500 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 3501 ArgNum = 4; 3502 HasRC = true; 3503 break; 3504 } 3505 3506 llvm::APSInt Result; 3507 3508 // We can't check the value of a dependent argument. 3509 Expr *Arg = TheCall->getArg(ArgNum); 3510 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3511 return false; 3512 3513 // Check constant-ness first. 3514 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3515 return true; 3516 3517 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 3518 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 3519 // combined with ROUND_NO_EXC. 3520 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 3521 Result == 8/*ROUND_NO_EXC*/ || 3522 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 3523 return false; 3524 3525 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 3526 << Arg->getSourceRange(); 3527 } 3528 3529 // Check if the gather/scatter scale is legal. 
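// The scale immediate is the byte stride applied to each index element and
// must be exactly 1, 2, 4, or 8; any other constant is diagnosed with
// err_x86_builtin_invalid_scale. A rough user-level illustration (hypothetical
// driver code, assuming the usual Intel intrinsic operand order):
//
//   __m512 Ok  = _mm512_i32gather_ps(Index, Base, 4); // scale 4: accepted
//   __m512 Bad = _mm512_i32gather_ps(Index, Base, 3); // scale 3: rejected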
3530 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3531 CallExpr *TheCall) { 3532 unsigned ArgNum = 0; 3533 switch (BuiltinID) { 3534 default: 3535 return false; 3536 case X86::BI__builtin_ia32_gatherpfdpd: 3537 case X86::BI__builtin_ia32_gatherpfdps: 3538 case X86::BI__builtin_ia32_gatherpfqpd: 3539 case X86::BI__builtin_ia32_gatherpfqps: 3540 case X86::BI__builtin_ia32_scatterpfdpd: 3541 case X86::BI__builtin_ia32_scatterpfdps: 3542 case X86::BI__builtin_ia32_scatterpfqpd: 3543 case X86::BI__builtin_ia32_scatterpfqps: 3544 ArgNum = 3; 3545 break; 3546 case X86::BI__builtin_ia32_gatherd_pd: 3547 case X86::BI__builtin_ia32_gatherd_pd256: 3548 case X86::BI__builtin_ia32_gatherq_pd: 3549 case X86::BI__builtin_ia32_gatherq_pd256: 3550 case X86::BI__builtin_ia32_gatherd_ps: 3551 case X86::BI__builtin_ia32_gatherd_ps256: 3552 case X86::BI__builtin_ia32_gatherq_ps: 3553 case X86::BI__builtin_ia32_gatherq_ps256: 3554 case X86::BI__builtin_ia32_gatherd_q: 3555 case X86::BI__builtin_ia32_gatherd_q256: 3556 case X86::BI__builtin_ia32_gatherq_q: 3557 case X86::BI__builtin_ia32_gatherq_q256: 3558 case X86::BI__builtin_ia32_gatherd_d: 3559 case X86::BI__builtin_ia32_gatherd_d256: 3560 case X86::BI__builtin_ia32_gatherq_d: 3561 case X86::BI__builtin_ia32_gatherq_d256: 3562 case X86::BI__builtin_ia32_gather3div2df: 3563 case X86::BI__builtin_ia32_gather3div2di: 3564 case X86::BI__builtin_ia32_gather3div4df: 3565 case X86::BI__builtin_ia32_gather3div4di: 3566 case X86::BI__builtin_ia32_gather3div4sf: 3567 case X86::BI__builtin_ia32_gather3div4si: 3568 case X86::BI__builtin_ia32_gather3div8sf: 3569 case X86::BI__builtin_ia32_gather3div8si: 3570 case X86::BI__builtin_ia32_gather3siv2df: 3571 case X86::BI__builtin_ia32_gather3siv2di: 3572 case X86::BI__builtin_ia32_gather3siv4df: 3573 case X86::BI__builtin_ia32_gather3siv4di: 3574 case X86::BI__builtin_ia32_gather3siv4sf: 3575 case X86::BI__builtin_ia32_gather3siv4si: 3576 case X86::BI__builtin_ia32_gather3siv8sf: 3577 case X86::BI__builtin_ia32_gather3siv8si: 3578 case X86::BI__builtin_ia32_gathersiv8df: 3579 case X86::BI__builtin_ia32_gathersiv16sf: 3580 case X86::BI__builtin_ia32_gatherdiv8df: 3581 case X86::BI__builtin_ia32_gatherdiv16sf: 3582 case X86::BI__builtin_ia32_gathersiv8di: 3583 case X86::BI__builtin_ia32_gathersiv16si: 3584 case X86::BI__builtin_ia32_gatherdiv8di: 3585 case X86::BI__builtin_ia32_gatherdiv16si: 3586 case X86::BI__builtin_ia32_scatterdiv2df: 3587 case X86::BI__builtin_ia32_scatterdiv2di: 3588 case X86::BI__builtin_ia32_scatterdiv4df: 3589 case X86::BI__builtin_ia32_scatterdiv4di: 3590 case X86::BI__builtin_ia32_scatterdiv4sf: 3591 case X86::BI__builtin_ia32_scatterdiv4si: 3592 case X86::BI__builtin_ia32_scatterdiv8sf: 3593 case X86::BI__builtin_ia32_scatterdiv8si: 3594 case X86::BI__builtin_ia32_scattersiv2df: 3595 case X86::BI__builtin_ia32_scattersiv2di: 3596 case X86::BI__builtin_ia32_scattersiv4df: 3597 case X86::BI__builtin_ia32_scattersiv4di: 3598 case X86::BI__builtin_ia32_scattersiv4sf: 3599 case X86::BI__builtin_ia32_scattersiv4si: 3600 case X86::BI__builtin_ia32_scattersiv8sf: 3601 case X86::BI__builtin_ia32_scattersiv8si: 3602 case X86::BI__builtin_ia32_scattersiv8df: 3603 case X86::BI__builtin_ia32_scattersiv16sf: 3604 case X86::BI__builtin_ia32_scatterdiv8df: 3605 case X86::BI__builtin_ia32_scatterdiv16sf: 3606 case X86::BI__builtin_ia32_scattersiv8di: 3607 case X86::BI__builtin_ia32_scattersiv16si: 3608 case X86::BI__builtin_ia32_scatterdiv8di: 3609 case X86::BI__builtin_ia32_scatterdiv16si: 3610 
ArgNum = 4; 3611 break; 3612 } 3613 3614 llvm::APSInt Result; 3615 3616 // We can't check the value of a dependent argument. 3617 Expr *Arg = TheCall->getArg(ArgNum); 3618 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3619 return false; 3620 3621 // Check constant-ness first. 3622 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3623 return true; 3624 3625 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 3626 return false; 3627 3628 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 3629 << Arg->getSourceRange(); 3630 } 3631 3632 static bool isX86_32Builtin(unsigned BuiltinID) { 3633 // These builtins only work on x86-32 targets. 3634 switch (BuiltinID) { 3635 case X86::BI__builtin_ia32_readeflags_u32: 3636 case X86::BI__builtin_ia32_writeeflags_u32: 3637 return true; 3638 } 3639 3640 return false; 3641 } 3642 3643 bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3644 if (BuiltinID == X86::BI__builtin_cpu_supports) 3645 return SemaBuiltinCpuSupports(*this, TheCall); 3646 3647 if (BuiltinID == X86::BI__builtin_cpu_is) 3648 return SemaBuiltinCpuIs(*this, TheCall); 3649 3650 // Check for 32-bit only builtins on a 64-bit target. 3651 const llvm::Triple &TT = Context.getTargetInfo().getTriple(); 3652 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 3653 return Diag(TheCall->getCallee()->getBeginLoc(), 3654 diag::err_32_bit_builtin_64_bit_tgt); 3655 3656 // If the intrinsic has rounding or SAE, make sure it's valid. 3657 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 3658 return true; 3659 3660 // If the intrinsic has a gather/scatter scale immediate, make sure it's valid. 3661 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 3662 return true; 3663 3664 // For intrinsics which take an immediate value as part of the instruction, 3665 // range check them here.
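// In the switch below, i is the zero-based index of the immediate operand and
// [l, u] is the inclusive range enforced via SemaBuiltinConstantArgRange, as
// in the other per-target checkers above. For example, __builtin_ia32_blendpd
// sets i = 2, l = 0, u = 3, so its third argument must be a constant in
// [0, 3].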
3666 int i = 0, l = 0, u = 0; 3667 switch (BuiltinID) { 3668 default: 3669 return false; 3670 case X86::BI__builtin_ia32_vec_ext_v2si: 3671 case X86::BI__builtin_ia32_vec_ext_v2di: 3672 case X86::BI__builtin_ia32_vextractf128_pd256: 3673 case X86::BI__builtin_ia32_vextractf128_ps256: 3674 case X86::BI__builtin_ia32_vextractf128_si256: 3675 case X86::BI__builtin_ia32_extract128i256: 3676 case X86::BI__builtin_ia32_extractf64x4_mask: 3677 case X86::BI__builtin_ia32_extracti64x4_mask: 3678 case X86::BI__builtin_ia32_extractf32x8_mask: 3679 case X86::BI__builtin_ia32_extracti32x8_mask: 3680 case X86::BI__builtin_ia32_extractf64x2_256_mask: 3681 case X86::BI__builtin_ia32_extracti64x2_256_mask: 3682 case X86::BI__builtin_ia32_extractf32x4_256_mask: 3683 case X86::BI__builtin_ia32_extracti32x4_256_mask: 3684 i = 1; l = 0; u = 1; 3685 break; 3686 case X86::BI__builtin_ia32_vec_set_v2di: 3687 case X86::BI__builtin_ia32_vinsertf128_pd256: 3688 case X86::BI__builtin_ia32_vinsertf128_ps256: 3689 case X86::BI__builtin_ia32_vinsertf128_si256: 3690 case X86::BI__builtin_ia32_insert128i256: 3691 case X86::BI__builtin_ia32_insertf32x8: 3692 case X86::BI__builtin_ia32_inserti32x8: 3693 case X86::BI__builtin_ia32_insertf64x4: 3694 case X86::BI__builtin_ia32_inserti64x4: 3695 case X86::BI__builtin_ia32_insertf64x2_256: 3696 case X86::BI__builtin_ia32_inserti64x2_256: 3697 case X86::BI__builtin_ia32_insertf32x4_256: 3698 case X86::BI__builtin_ia32_inserti32x4_256: 3699 i = 2; l = 0; u = 1; 3700 break; 3701 case X86::BI__builtin_ia32_vpermilpd: 3702 case X86::BI__builtin_ia32_vec_ext_v4hi: 3703 case X86::BI__builtin_ia32_vec_ext_v4si: 3704 case X86::BI__builtin_ia32_vec_ext_v4sf: 3705 case X86::BI__builtin_ia32_vec_ext_v4di: 3706 case X86::BI__builtin_ia32_extractf32x4_mask: 3707 case X86::BI__builtin_ia32_extracti32x4_mask: 3708 case X86::BI__builtin_ia32_extractf64x2_512_mask: 3709 case X86::BI__builtin_ia32_extracti64x2_512_mask: 3710 i = 1; l = 0; u = 3; 3711 break; 3712 case X86::BI_mm_prefetch: 3713 case X86::BI__builtin_ia32_vec_ext_v8hi: 3714 case X86::BI__builtin_ia32_vec_ext_v8si: 3715 i = 1; l = 0; u = 7; 3716 break; 3717 case X86::BI__builtin_ia32_sha1rnds4: 3718 case X86::BI__builtin_ia32_blendpd: 3719 case X86::BI__builtin_ia32_shufpd: 3720 case X86::BI__builtin_ia32_vec_set_v4hi: 3721 case X86::BI__builtin_ia32_vec_set_v4si: 3722 case X86::BI__builtin_ia32_vec_set_v4di: 3723 case X86::BI__builtin_ia32_shuf_f32x4_256: 3724 case X86::BI__builtin_ia32_shuf_f64x2_256: 3725 case X86::BI__builtin_ia32_shuf_i32x4_256: 3726 case X86::BI__builtin_ia32_shuf_i64x2_256: 3727 case X86::BI__builtin_ia32_insertf64x2_512: 3728 case X86::BI__builtin_ia32_inserti64x2_512: 3729 case X86::BI__builtin_ia32_insertf32x4: 3730 case X86::BI__builtin_ia32_inserti32x4: 3731 i = 2; l = 0; u = 3; 3732 break; 3733 case X86::BI__builtin_ia32_vpermil2pd: 3734 case X86::BI__builtin_ia32_vpermil2pd256: 3735 case X86::BI__builtin_ia32_vpermil2ps: 3736 case X86::BI__builtin_ia32_vpermil2ps256: 3737 i = 3; l = 0; u = 3; 3738 break; 3739 case X86::BI__builtin_ia32_cmpb128_mask: 3740 case X86::BI__builtin_ia32_cmpw128_mask: 3741 case X86::BI__builtin_ia32_cmpd128_mask: 3742 case X86::BI__builtin_ia32_cmpq128_mask: 3743 case X86::BI__builtin_ia32_cmpb256_mask: 3744 case X86::BI__builtin_ia32_cmpw256_mask: 3745 case X86::BI__builtin_ia32_cmpd256_mask: 3746 case X86::BI__builtin_ia32_cmpq256_mask: 3747 case X86::BI__builtin_ia32_cmpb512_mask: 3748 case X86::BI__builtin_ia32_cmpw512_mask: 3749 case X86::BI__builtin_ia32_cmpd512_mask: 
3750 case X86::BI__builtin_ia32_cmpq512_mask: 3751 case X86::BI__builtin_ia32_ucmpb128_mask: 3752 case X86::BI__builtin_ia32_ucmpw128_mask: 3753 case X86::BI__builtin_ia32_ucmpd128_mask: 3754 case X86::BI__builtin_ia32_ucmpq128_mask: 3755 case X86::BI__builtin_ia32_ucmpb256_mask: 3756 case X86::BI__builtin_ia32_ucmpw256_mask: 3757 case X86::BI__builtin_ia32_ucmpd256_mask: 3758 case X86::BI__builtin_ia32_ucmpq256_mask: 3759 case X86::BI__builtin_ia32_ucmpb512_mask: 3760 case X86::BI__builtin_ia32_ucmpw512_mask: 3761 case X86::BI__builtin_ia32_ucmpd512_mask: 3762 case X86::BI__builtin_ia32_ucmpq512_mask: 3763 case X86::BI__builtin_ia32_vpcomub: 3764 case X86::BI__builtin_ia32_vpcomuw: 3765 case X86::BI__builtin_ia32_vpcomud: 3766 case X86::BI__builtin_ia32_vpcomuq: 3767 case X86::BI__builtin_ia32_vpcomb: 3768 case X86::BI__builtin_ia32_vpcomw: 3769 case X86::BI__builtin_ia32_vpcomd: 3770 case X86::BI__builtin_ia32_vpcomq: 3771 case X86::BI__builtin_ia32_vec_set_v8hi: 3772 case X86::BI__builtin_ia32_vec_set_v8si: 3773 i = 2; l = 0; u = 7; 3774 break; 3775 case X86::BI__builtin_ia32_vpermilpd256: 3776 case X86::BI__builtin_ia32_roundps: 3777 case X86::BI__builtin_ia32_roundpd: 3778 case X86::BI__builtin_ia32_roundps256: 3779 case X86::BI__builtin_ia32_roundpd256: 3780 case X86::BI__builtin_ia32_getmantpd128_mask: 3781 case X86::BI__builtin_ia32_getmantpd256_mask: 3782 case X86::BI__builtin_ia32_getmantps128_mask: 3783 case X86::BI__builtin_ia32_getmantps256_mask: 3784 case X86::BI__builtin_ia32_getmantpd512_mask: 3785 case X86::BI__builtin_ia32_getmantps512_mask: 3786 case X86::BI__builtin_ia32_vec_ext_v16qi: 3787 case X86::BI__builtin_ia32_vec_ext_v16hi: 3788 i = 1; l = 0; u = 15; 3789 break; 3790 case X86::BI__builtin_ia32_pblendd128: 3791 case X86::BI__builtin_ia32_blendps: 3792 case X86::BI__builtin_ia32_blendpd256: 3793 case X86::BI__builtin_ia32_shufpd256: 3794 case X86::BI__builtin_ia32_roundss: 3795 case X86::BI__builtin_ia32_roundsd: 3796 case X86::BI__builtin_ia32_rangepd128_mask: 3797 case X86::BI__builtin_ia32_rangepd256_mask: 3798 case X86::BI__builtin_ia32_rangepd512_mask: 3799 case X86::BI__builtin_ia32_rangeps128_mask: 3800 case X86::BI__builtin_ia32_rangeps256_mask: 3801 case X86::BI__builtin_ia32_rangeps512_mask: 3802 case X86::BI__builtin_ia32_getmantsd_round_mask: 3803 case X86::BI__builtin_ia32_getmantss_round_mask: 3804 case X86::BI__builtin_ia32_vec_set_v16qi: 3805 case X86::BI__builtin_ia32_vec_set_v16hi: 3806 i = 2; l = 0; u = 15; 3807 break; 3808 case X86::BI__builtin_ia32_vec_ext_v32qi: 3809 i = 1; l = 0; u = 31; 3810 break; 3811 case X86::BI__builtin_ia32_cmpps: 3812 case X86::BI__builtin_ia32_cmpss: 3813 case X86::BI__builtin_ia32_cmppd: 3814 case X86::BI__builtin_ia32_cmpsd: 3815 case X86::BI__builtin_ia32_cmpps256: 3816 case X86::BI__builtin_ia32_cmppd256: 3817 case X86::BI__builtin_ia32_cmpps128_mask: 3818 case X86::BI__builtin_ia32_cmppd128_mask: 3819 case X86::BI__builtin_ia32_cmpps256_mask: 3820 case X86::BI__builtin_ia32_cmppd256_mask: 3821 case X86::BI__builtin_ia32_cmpps512_mask: 3822 case X86::BI__builtin_ia32_cmppd512_mask: 3823 case X86::BI__builtin_ia32_cmpsd_mask: 3824 case X86::BI__builtin_ia32_cmpss_mask: 3825 case X86::BI__builtin_ia32_vec_set_v32qi: 3826 i = 2; l = 0; u = 31; 3827 break; 3828 case X86::BI__builtin_ia32_permdf256: 3829 case X86::BI__builtin_ia32_permdi256: 3830 case X86::BI__builtin_ia32_permdf512: 3831 case X86::BI__builtin_ia32_permdi512: 3832 case X86::BI__builtin_ia32_vpermilps: 3833 case X86::BI__builtin_ia32_vpermilps256: 
3834 case X86::BI__builtin_ia32_vpermilpd512: 3835 case X86::BI__builtin_ia32_vpermilps512: 3836 case X86::BI__builtin_ia32_pshufd: 3837 case X86::BI__builtin_ia32_pshufd256: 3838 case X86::BI__builtin_ia32_pshufd512: 3839 case X86::BI__builtin_ia32_pshufhw: 3840 case X86::BI__builtin_ia32_pshufhw256: 3841 case X86::BI__builtin_ia32_pshufhw512: 3842 case X86::BI__builtin_ia32_pshuflw: 3843 case X86::BI__builtin_ia32_pshuflw256: 3844 case X86::BI__builtin_ia32_pshuflw512: 3845 case X86::BI__builtin_ia32_vcvtps2ph: 3846 case X86::BI__builtin_ia32_vcvtps2ph_mask: 3847 case X86::BI__builtin_ia32_vcvtps2ph256: 3848 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 3849 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 3850 case X86::BI__builtin_ia32_rndscaleps_128_mask: 3851 case X86::BI__builtin_ia32_rndscalepd_128_mask: 3852 case X86::BI__builtin_ia32_rndscaleps_256_mask: 3853 case X86::BI__builtin_ia32_rndscalepd_256_mask: 3854 case X86::BI__builtin_ia32_rndscaleps_mask: 3855 case X86::BI__builtin_ia32_rndscalepd_mask: 3856 case X86::BI__builtin_ia32_reducepd128_mask: 3857 case X86::BI__builtin_ia32_reducepd256_mask: 3858 case X86::BI__builtin_ia32_reducepd512_mask: 3859 case X86::BI__builtin_ia32_reduceps128_mask: 3860 case X86::BI__builtin_ia32_reduceps256_mask: 3861 case X86::BI__builtin_ia32_reduceps512_mask: 3862 case X86::BI__builtin_ia32_prold512: 3863 case X86::BI__builtin_ia32_prolq512: 3864 case X86::BI__builtin_ia32_prold128: 3865 case X86::BI__builtin_ia32_prold256: 3866 case X86::BI__builtin_ia32_prolq128: 3867 case X86::BI__builtin_ia32_prolq256: 3868 case X86::BI__builtin_ia32_prord512: 3869 case X86::BI__builtin_ia32_prorq512: 3870 case X86::BI__builtin_ia32_prord128: 3871 case X86::BI__builtin_ia32_prord256: 3872 case X86::BI__builtin_ia32_prorq128: 3873 case X86::BI__builtin_ia32_prorq256: 3874 case X86::BI__builtin_ia32_fpclasspd128_mask: 3875 case X86::BI__builtin_ia32_fpclasspd256_mask: 3876 case X86::BI__builtin_ia32_fpclassps128_mask: 3877 case X86::BI__builtin_ia32_fpclassps256_mask: 3878 case X86::BI__builtin_ia32_fpclassps512_mask: 3879 case X86::BI__builtin_ia32_fpclasspd512_mask: 3880 case X86::BI__builtin_ia32_fpclasssd_mask: 3881 case X86::BI__builtin_ia32_fpclassss_mask: 3882 case X86::BI__builtin_ia32_pslldqi128_byteshift: 3883 case X86::BI__builtin_ia32_pslldqi256_byteshift: 3884 case X86::BI__builtin_ia32_pslldqi512_byteshift: 3885 case X86::BI__builtin_ia32_psrldqi128_byteshift: 3886 case X86::BI__builtin_ia32_psrldqi256_byteshift: 3887 case X86::BI__builtin_ia32_psrldqi512_byteshift: 3888 case X86::BI__builtin_ia32_kshiftliqi: 3889 case X86::BI__builtin_ia32_kshiftlihi: 3890 case X86::BI__builtin_ia32_kshiftlisi: 3891 case X86::BI__builtin_ia32_kshiftlidi: 3892 case X86::BI__builtin_ia32_kshiftriqi: 3893 case X86::BI__builtin_ia32_kshiftrihi: 3894 case X86::BI__builtin_ia32_kshiftrisi: 3895 case X86::BI__builtin_ia32_kshiftridi: 3896 i = 1; l = 0; u = 255; 3897 break; 3898 case X86::BI__builtin_ia32_vperm2f128_pd256: 3899 case X86::BI__builtin_ia32_vperm2f128_ps256: 3900 case X86::BI__builtin_ia32_vperm2f128_si256: 3901 case X86::BI__builtin_ia32_permti256: 3902 case X86::BI__builtin_ia32_pblendw128: 3903 case X86::BI__builtin_ia32_pblendw256: 3904 case X86::BI__builtin_ia32_blendps256: 3905 case X86::BI__builtin_ia32_pblendd256: 3906 case X86::BI__builtin_ia32_palignr128: 3907 case X86::BI__builtin_ia32_palignr256: 3908 case X86::BI__builtin_ia32_palignr512: 3909 case X86::BI__builtin_ia32_alignq512: 3910 case X86::BI__builtin_ia32_alignd512: 3911 case 
X86::BI__builtin_ia32_alignd128: 3912 case X86::BI__builtin_ia32_alignd256: 3913 case X86::BI__builtin_ia32_alignq128: 3914 case X86::BI__builtin_ia32_alignq256: 3915 case X86::BI__builtin_ia32_vcomisd: 3916 case X86::BI__builtin_ia32_vcomiss: 3917 case X86::BI__builtin_ia32_shuf_f32x4: 3918 case X86::BI__builtin_ia32_shuf_f64x2: 3919 case X86::BI__builtin_ia32_shuf_i32x4: 3920 case X86::BI__builtin_ia32_shuf_i64x2: 3921 case X86::BI__builtin_ia32_shufpd512: 3922 case X86::BI__builtin_ia32_shufps: 3923 case X86::BI__builtin_ia32_shufps256: 3924 case X86::BI__builtin_ia32_shufps512: 3925 case X86::BI__builtin_ia32_dbpsadbw128: 3926 case X86::BI__builtin_ia32_dbpsadbw256: 3927 case X86::BI__builtin_ia32_dbpsadbw512: 3928 case X86::BI__builtin_ia32_vpshldd128: 3929 case X86::BI__builtin_ia32_vpshldd256: 3930 case X86::BI__builtin_ia32_vpshldd512: 3931 case X86::BI__builtin_ia32_vpshldq128: 3932 case X86::BI__builtin_ia32_vpshldq256: 3933 case X86::BI__builtin_ia32_vpshldq512: 3934 case X86::BI__builtin_ia32_vpshldw128: 3935 case X86::BI__builtin_ia32_vpshldw256: 3936 case X86::BI__builtin_ia32_vpshldw512: 3937 case X86::BI__builtin_ia32_vpshrdd128: 3938 case X86::BI__builtin_ia32_vpshrdd256: 3939 case X86::BI__builtin_ia32_vpshrdd512: 3940 case X86::BI__builtin_ia32_vpshrdq128: 3941 case X86::BI__builtin_ia32_vpshrdq256: 3942 case X86::BI__builtin_ia32_vpshrdq512: 3943 case X86::BI__builtin_ia32_vpshrdw128: 3944 case X86::BI__builtin_ia32_vpshrdw256: 3945 case X86::BI__builtin_ia32_vpshrdw512: 3946 i = 2; l = 0; u = 255; 3947 break; 3948 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3949 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3950 case X86::BI__builtin_ia32_fixupimmps512_mask: 3951 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3952 case X86::BI__builtin_ia32_fixupimmsd_mask: 3953 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3954 case X86::BI__builtin_ia32_fixupimmss_mask: 3955 case X86::BI__builtin_ia32_fixupimmss_maskz: 3956 case X86::BI__builtin_ia32_fixupimmpd128_mask: 3957 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 3958 case X86::BI__builtin_ia32_fixupimmpd256_mask: 3959 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 3960 case X86::BI__builtin_ia32_fixupimmps128_mask: 3961 case X86::BI__builtin_ia32_fixupimmps128_maskz: 3962 case X86::BI__builtin_ia32_fixupimmps256_mask: 3963 case X86::BI__builtin_ia32_fixupimmps256_maskz: 3964 case X86::BI__builtin_ia32_pternlogd512_mask: 3965 case X86::BI__builtin_ia32_pternlogd512_maskz: 3966 case X86::BI__builtin_ia32_pternlogq512_mask: 3967 case X86::BI__builtin_ia32_pternlogq512_maskz: 3968 case X86::BI__builtin_ia32_pternlogd128_mask: 3969 case X86::BI__builtin_ia32_pternlogd128_maskz: 3970 case X86::BI__builtin_ia32_pternlogd256_mask: 3971 case X86::BI__builtin_ia32_pternlogd256_maskz: 3972 case X86::BI__builtin_ia32_pternlogq128_mask: 3973 case X86::BI__builtin_ia32_pternlogq128_maskz: 3974 case X86::BI__builtin_ia32_pternlogq256_mask: 3975 case X86::BI__builtin_ia32_pternlogq256_maskz: 3976 i = 3; l = 0; u = 255; 3977 break; 3978 case X86::BI__builtin_ia32_gatherpfdpd: 3979 case X86::BI__builtin_ia32_gatherpfdps: 3980 case X86::BI__builtin_ia32_gatherpfqpd: 3981 case X86::BI__builtin_ia32_gatherpfqps: 3982 case X86::BI__builtin_ia32_scatterpfdpd: 3983 case X86::BI__builtin_ia32_scatterpfdps: 3984 case X86::BI__builtin_ia32_scatterpfqpd: 3985 case X86::BI__builtin_ia32_scatterpfqps: 3986 i = 4; l = 2; u = 3; 3987 break; 3988 case X86::BI__builtin_ia32_reducesd_mask: 3989 case X86::BI__builtin_ia32_reducess_mask: 3990 case 
X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These still need to code-generate, but they don't
  // necessarily need to make sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability
        = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
              dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
                dyn_cast<InitListExpr>(CLE->getInitializer()))
          Expr = ILE->getInit(0);
  }

  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
}

bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
4075 static void 4076 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 4077 const NamedDecl *FDecl, 4078 Expr **Args, 4079 unsigned NumArgs) { 4080 unsigned Idx = 0; 4081 bool Format = false; 4082 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 4083 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 4084 Idx = 2; 4085 Format = true; 4086 } 4087 else 4088 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4089 if (S.GetFormatNSStringIdx(I, Idx)) { 4090 Format = true; 4091 break; 4092 } 4093 } 4094 if (!Format || NumArgs <= Idx) 4095 return; 4096 const Expr *FormatExpr = Args[Idx]; 4097 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 4098 FormatExpr = CSCE->getSubExpr(); 4099 const StringLiteral *FormatString; 4100 if (const ObjCStringLiteral *OSL = 4101 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 4102 FormatString = OSL->getString(); 4103 else 4104 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 4105 if (!FormatString) 4106 return; 4107 if (S.FormatStringHasSArg(FormatString)) { 4108 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 4109 << "%s" << 1 << 1; 4110 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 4111 << FDecl->getDeclName(); 4112 } 4113 } 4114 4115 /// Determine whether the given type has a non-null nullability annotation. 4116 static bool isNonNullType(ASTContext &ctx, QualType type) { 4117 if (auto nullability = type->getNullability(ctx)) 4118 return *nullability == NullabilityKind::NonNull; 4119 4120 return false; 4121 } 4122 4123 static void CheckNonNullArguments(Sema &S, 4124 const NamedDecl *FDecl, 4125 const FunctionProtoType *Proto, 4126 ArrayRef<const Expr *> Args, 4127 SourceLocation CallSiteLoc) { 4128 assert((FDecl || Proto) && "Need a function declaration or prototype"); 4129 4130 // Already checked by by constant evaluator. 4131 if (S.isConstantEvaluated()) 4132 return; 4133 // Check the attributes attached to the method/function itself. 4134 llvm::SmallBitVector NonNullArgs; 4135 if (FDecl) { 4136 // Handle the nonnull attribute on the function/method declaration itself. 4137 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 4138 if (!NonNull->args_size()) { 4139 // Easy case: all pointer arguments are nonnull. 4140 for (const auto *Arg : Args) 4141 if (S.isValidPointerAttrType(Arg->getType())) 4142 CheckNonNullArgument(S, Arg, CallSiteLoc); 4143 return; 4144 } 4145 4146 for (const ParamIdx &Idx : NonNull->args()) { 4147 unsigned IdxAST = Idx.getASTIndex(); 4148 if (IdxAST >= Args.size()) 4149 continue; 4150 if (NonNullArgs.empty()) 4151 NonNullArgs.resize(Args.size()); 4152 NonNullArgs.set(IdxAST); 4153 } 4154 } 4155 } 4156 4157 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4158 // Handle the nonnull attribute on the parameters of the 4159 // function/method. 
4160 ArrayRef<ParmVarDecl*> parms; 4161 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4162 parms = FD->parameters(); 4163 else 4164 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4165 4166 unsigned ParamIndex = 0; 4167 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4168 I != E; ++I, ++ParamIndex) { 4169 const ParmVarDecl *PVD = *I; 4170 if (PVD->hasAttr<NonNullAttr>() || 4171 isNonNullType(S.Context, PVD->getType())) { 4172 if (NonNullArgs.empty()) 4173 NonNullArgs.resize(Args.size()); 4174 4175 NonNullArgs.set(ParamIndex); 4176 } 4177 } 4178 } else { 4179 // If we have a non-function, non-method declaration but no 4180 // function prototype, try to dig out the function prototype. 4181 if (!Proto) { 4182 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4183 QualType type = VD->getType().getNonReferenceType(); 4184 if (auto pointerType = type->getAs<PointerType>()) 4185 type = pointerType->getPointeeType(); 4186 else if (auto blockType = type->getAs<BlockPointerType>()) 4187 type = blockType->getPointeeType(); 4188 // FIXME: data member pointers? 4189 4190 // Dig out the function prototype, if there is one. 4191 Proto = type->getAs<FunctionProtoType>(); 4192 } 4193 } 4194 4195 // Fill in non-null argument information from the nullability 4196 // information on the parameter types (if we have them). 4197 if (Proto) { 4198 unsigned Index = 0; 4199 for (auto paramType : Proto->getParamTypes()) { 4200 if (isNonNullType(S.Context, paramType)) { 4201 if (NonNullArgs.empty()) 4202 NonNullArgs.resize(Args.size()); 4203 4204 NonNullArgs.set(Index); 4205 } 4206 4207 ++Index; 4208 } 4209 } 4210 } 4211 4212 // Check for non-null arguments. 4213 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4214 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4215 if (NonNullArgs[ArgIndex]) 4216 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4217 } 4218 } 4219 4220 /// Handles the checks for format strings, non-POD arguments to vararg 4221 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4222 /// attributes. 4223 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4224 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4225 bool IsMemberFunction, SourceLocation Loc, 4226 SourceRange Range, VariadicCallType CallType) { 4227 // FIXME: We should check as much as we can in the template definition. 4228 if (CurContext->isDependentContext()) 4229 return; 4230 4231 // Printf and scanf checking. 4232 llvm::SmallBitVector CheckedVarArgs; 4233 if (FDecl) { 4234 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4235 // Only create vector if there are format attributes. 4236 CheckedVarArgs.resize(Args.size()); 4237 4238 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4239 CheckedVarArgs); 4240 } 4241 } 4242 4243 // Refuse POD arguments that weren't caught by the format string 4244 // checks above. 4245 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4246 if (CallType != VariadicDoesNotApply && 4247 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4248 unsigned NumParams = Proto ? Proto->getNumParams() 4249 : FDecl && isa<FunctionDecl>(FDecl) 4250 ? cast<FunctionDecl>(FDecl)->getNumParams() 4251 : FDecl && isa<ObjCMethodDecl>(FDecl) 4252 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4253 : 0; 4254 4255 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4256 // Args[ArgIdx] can be null in malformed code. 
4257 if (const Expr *Arg = Args[ArgIdx]) { 4258 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4259 checkVariadicArgument(Arg, CallType); 4260 } 4261 } 4262 } 4263 4264 if (FDecl || Proto) { 4265 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4266 4267 // Type safety checking. 4268 if (FDecl) { 4269 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4270 CheckArgumentWithTypeTag(I, Args, Loc); 4271 } 4272 } 4273 4274 if (FD) 4275 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4276 } 4277 4278 /// CheckConstructorCall - Check a constructor call for correctness and safety 4279 /// properties not enforced by the C type system. 4280 void Sema::CheckConstructorCall(FunctionDecl *FDecl, 4281 ArrayRef<const Expr *> Args, 4282 const FunctionProtoType *Proto, 4283 SourceLocation Loc) { 4284 VariadicCallType CallType = 4285 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4286 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4287 Loc, SourceRange(), CallType); 4288 } 4289 4290 /// CheckFunctionCall - Check a direct function call for various correctness 4291 /// and safety properties not strictly enforced by the C type system. 4292 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4293 const FunctionProtoType *Proto) { 4294 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4295 isa<CXXMethodDecl>(FDecl); 4296 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4297 IsMemberOperatorCall; 4298 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4299 TheCall->getCallee()); 4300 Expr** Args = TheCall->getArgs(); 4301 unsigned NumArgs = TheCall->getNumArgs(); 4302 4303 Expr *ImplicitThis = nullptr; 4304 if (IsMemberOperatorCall) { 4305 // If this is a call to a member operator, hide the first argument 4306 // from checkCall. 4307 // FIXME: Our choice of AST representation here is less than ideal. 4308 ImplicitThis = Args[0]; 4309 ++Args; 4310 --NumArgs; 4311 } else if (IsMemberFunction) 4312 ImplicitThis = 4313 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4314 4315 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4316 IsMemberFunction, TheCall->getRParenLoc(), 4317 TheCall->getCallee()->getSourceRange(), CallType); 4318 4319 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4320 // None of the checks below are needed for functions that don't have 4321 // simple names (e.g., C++ conversion functions). 4322 if (!FnInfo) 4323 return false; 4324 4325 CheckAbsoluteValueFunction(TheCall, FDecl); 4326 CheckMaxUnsignedZero(TheCall, FDecl); 4327 4328 if (getLangOpts().ObjC) 4329 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4330 4331 unsigned CMId = FDecl->getMemoryFunctionKind(); 4332 if (CMId == 0) 4333 return false; 4334 4335 // Handle memory setting and copying functions. 4336 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) 4337 CheckStrlcpycatArguments(TheCall, FnInfo); 4338 else if (CMId == Builtin::BIstrncat) 4339 CheckStrncatArguments(TheCall, FnInfo); 4340 else 4341 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4342 4343 return false; 4344 } 4345 4346 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4347 ArrayRef<const Expr *> Args) { 4348 VariadicCallType CallType = 4349 Method->isVariadic() ? 
VariadicMethod : VariadicDoesNotApply; 4350 4351 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4352 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4353 CallType); 4354 4355 return false; 4356 } 4357 4358 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4359 const FunctionProtoType *Proto) { 4360 QualType Ty; 4361 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4362 Ty = V->getType().getNonReferenceType(); 4363 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4364 Ty = F->getType().getNonReferenceType(); 4365 else 4366 return false; 4367 4368 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4369 !Ty->isFunctionProtoType()) 4370 return false; 4371 4372 VariadicCallType CallType; 4373 if (!Proto || !Proto->isVariadic()) { 4374 CallType = VariadicDoesNotApply; 4375 } else if (Ty->isBlockPointerType()) { 4376 CallType = VariadicBlock; 4377 } else { // Ty->isFunctionPointerType() 4378 CallType = VariadicFunction; 4379 } 4380 4381 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4382 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4383 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4384 TheCall->getCallee()->getSourceRange(), CallType); 4385 4386 return false; 4387 } 4388 4389 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4390 /// such as function pointers returned from functions. 4391 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4392 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4393 TheCall->getCallee()); 4394 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4395 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4396 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4397 TheCall->getCallee()->getSourceRange(), CallType); 4398 4399 return false; 4400 } 4401 4402 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4403 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4404 return false; 4405 4406 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4407 switch (Op) { 4408 case AtomicExpr::AO__c11_atomic_init: 4409 case AtomicExpr::AO__opencl_atomic_init: 4410 llvm_unreachable("There is no ordering argument for an init"); 4411 4412 case AtomicExpr::AO__c11_atomic_load: 4413 case AtomicExpr::AO__opencl_atomic_load: 4414 case AtomicExpr::AO__atomic_load_n: 4415 case AtomicExpr::AO__atomic_load: 4416 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4417 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4418 4419 case AtomicExpr::AO__c11_atomic_store: 4420 case AtomicExpr::AO__opencl_atomic_store: 4421 case AtomicExpr::AO__atomic_store: 4422 case AtomicExpr::AO__atomic_store_n: 4423 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4424 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4425 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4426 4427 default: 4428 return true; 4429 } 4430 } 4431 4432 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4433 AtomicExpr::AtomicOp Op) { 4434 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4435 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4436 4437 // All the non-OpenCL operations take one of the following forms. 4438 // The OpenCL operations take the __c11 forms with one extra argument for 4439 // synchronization scope. 
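  // For example, a C11-style load such as __c11_atomic_load(APtr, Order)
  // takes two arguments, whereas the GNU library form
  // __atomic_load(Ptr, RetPtr, Order) returns its result through a second
  // pointer and takes three; the __opencl_atomic_* variants additionally
  // append a synchronization-scope argument.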
4440 enum { 4441 // C __c11_atomic_init(A *, C) 4442 Init, 4443 4444 // C __c11_atomic_load(A *, int) 4445 Load, 4446 4447 // void __atomic_load(A *, CP, int) 4448 LoadCopy, 4449 4450 // void __atomic_store(A *, CP, int) 4451 Copy, 4452 4453 // C __c11_atomic_add(A *, M, int) 4454 Arithmetic, 4455 4456 // C __atomic_exchange_n(A *, CP, int) 4457 Xchg, 4458 4459 // void __atomic_exchange(A *, C *, CP, int) 4460 GNUXchg, 4461 4462 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4463 C11CmpXchg, 4464 4465 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4466 GNUCmpXchg 4467 } Form = Init; 4468 4469 const unsigned NumForm = GNUCmpXchg + 1; 4470 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4471 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4472 // where: 4473 // C is an appropriate type, 4474 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4475 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4476 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4477 // the int parameters are for orderings. 4478 4479 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4480 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4481 "need to update code for modified forms"); 4482 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4483 AtomicExpr::AO__c11_atomic_fetch_xor + 1 == 4484 AtomicExpr::AO__atomic_load, 4485 "need to update code for modified C11 atomics"); 4486 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4487 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4488 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4489 Op <= AtomicExpr::AO__c11_atomic_fetch_xor) || 4490 IsOpenCL; 4491 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4492 Op == AtomicExpr::AO__atomic_store_n || 4493 Op == AtomicExpr::AO__atomic_exchange_n || 4494 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4495 bool IsAddSub = false; 4496 bool IsMinMax = false; 4497 4498 switch (Op) { 4499 case AtomicExpr::AO__c11_atomic_init: 4500 case AtomicExpr::AO__opencl_atomic_init: 4501 Form = Init; 4502 break; 4503 4504 case AtomicExpr::AO__c11_atomic_load: 4505 case AtomicExpr::AO__opencl_atomic_load: 4506 case AtomicExpr::AO__atomic_load_n: 4507 Form = Load; 4508 break; 4509 4510 case AtomicExpr::AO__atomic_load: 4511 Form = LoadCopy; 4512 break; 4513 4514 case AtomicExpr::AO__c11_atomic_store: 4515 case AtomicExpr::AO__opencl_atomic_store: 4516 case AtomicExpr::AO__atomic_store: 4517 case AtomicExpr::AO__atomic_store_n: 4518 Form = Copy; 4519 break; 4520 4521 case AtomicExpr::AO__c11_atomic_fetch_add: 4522 case AtomicExpr::AO__c11_atomic_fetch_sub: 4523 case AtomicExpr::AO__opencl_atomic_fetch_add: 4524 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4525 case AtomicExpr::AO__opencl_atomic_fetch_min: 4526 case AtomicExpr::AO__opencl_atomic_fetch_max: 4527 case AtomicExpr::AO__atomic_fetch_add: 4528 case AtomicExpr::AO__atomic_fetch_sub: 4529 case AtomicExpr::AO__atomic_add_fetch: 4530 case AtomicExpr::AO__atomic_sub_fetch: 4531 IsAddSub = true; 4532 LLVM_FALLTHROUGH; 4533 case AtomicExpr::AO__c11_atomic_fetch_and: 4534 case AtomicExpr::AO__c11_atomic_fetch_or: 4535 case AtomicExpr::AO__c11_atomic_fetch_xor: 4536 case AtomicExpr::AO__opencl_atomic_fetch_and: 4537 case AtomicExpr::AO__opencl_atomic_fetch_or: 4538 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4539 case AtomicExpr::AO__atomic_fetch_and: 4540 case AtomicExpr::AO__atomic_fetch_or: 4541 case AtomicExpr::AO__atomic_fetch_xor: 4542 
case AtomicExpr::AO__atomic_fetch_nand: 4543 case AtomicExpr::AO__atomic_and_fetch: 4544 case AtomicExpr::AO__atomic_or_fetch: 4545 case AtomicExpr::AO__atomic_xor_fetch: 4546 case AtomicExpr::AO__atomic_nand_fetch: 4547 Form = Arithmetic; 4548 break; 4549 4550 case AtomicExpr::AO__atomic_fetch_min: 4551 case AtomicExpr::AO__atomic_fetch_max: 4552 IsMinMax = true; 4553 Form = Arithmetic; 4554 break; 4555 4556 case AtomicExpr::AO__c11_atomic_exchange: 4557 case AtomicExpr::AO__opencl_atomic_exchange: 4558 case AtomicExpr::AO__atomic_exchange_n: 4559 Form = Xchg; 4560 break; 4561 4562 case AtomicExpr::AO__atomic_exchange: 4563 Form = GNUXchg; 4564 break; 4565 4566 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4567 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4568 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4569 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4570 Form = C11CmpXchg; 4571 break; 4572 4573 case AtomicExpr::AO__atomic_compare_exchange: 4574 case AtomicExpr::AO__atomic_compare_exchange_n: 4575 Form = GNUCmpXchg; 4576 break; 4577 } 4578 4579 unsigned AdjustedNumArgs = NumArgs[Form]; 4580 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4581 ++AdjustedNumArgs; 4582 // Check we have the right number of arguments. 4583 if (TheCall->getNumArgs() < AdjustedNumArgs) { 4584 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 4585 << 0 << AdjustedNumArgs << TheCall->getNumArgs() 4586 << TheCall->getCallee()->getSourceRange(); 4587 return ExprError(); 4588 } else if (TheCall->getNumArgs() > AdjustedNumArgs) { 4589 Diag(TheCall->getArg(AdjustedNumArgs)->getBeginLoc(), 4590 diag::err_typecheck_call_too_many_args) 4591 << 0 << AdjustedNumArgs << TheCall->getNumArgs() 4592 << TheCall->getCallee()->getSourceRange(); 4593 return ExprError(); 4594 } 4595 4596 // Inspect the first argument of the atomic operation. 4597 Expr *Ptr = TheCall->getArg(0); 4598 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4599 if (ConvertedPtr.isInvalid()) 4600 return ExprError(); 4601 4602 Ptr = ConvertedPtr.get(); 4603 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4604 if (!pointerType) { 4605 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 4606 << Ptr->getType() << Ptr->getSourceRange(); 4607 return ExprError(); 4608 } 4609 4610 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4611 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4612 QualType ValType = AtomTy; // 'C' 4613 if (IsC11) { 4614 if (!AtomTy->isAtomicType()) { 4615 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic) 4616 << Ptr->getType() << Ptr->getSourceRange(); 4617 return ExprError(); 4618 } 4619 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4620 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4621 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_atomic) 4622 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 4623 << Ptr->getSourceRange(); 4624 return ExprError(); 4625 } 4626 ValType = AtomTy->getAs<AtomicType>()->getValueType(); 4627 } else if (Form != Load && Form != LoadCopy) { 4628 if (ValType.isConstQualified()) { 4629 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_pointer) 4630 << Ptr->getType() << Ptr->getSourceRange(); 4631 return ExprError(); 4632 } 4633 } 4634 4635 // For an arithmetic operation, the implied arithmetic must be well-formed. 
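  // For example, __c11_atomic_fetch_add is accepted on _Atomic(int) and on
  // _Atomic(T *) (where the addend is a ptrdiff_t), the bitwise fetch-and-op
  // forms require an integer value type, and the __atomic_fetch_min/max forms
  // are limited to plain 'int' and 'unsigned int' here.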
4636 if (Form == Arithmetic) { 4637 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4638 if (IsAddSub && !ValType->isIntegerType() 4639 && !ValType->isPointerType()) { 4640 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4641 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4642 return ExprError(); 4643 } 4644 if (IsMinMax) { 4645 const BuiltinType *BT = ValType->getAs<BuiltinType>(); 4646 if (!BT || (BT->getKind() != BuiltinType::Int && 4647 BT->getKind() != BuiltinType::UInt)) { 4648 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_int32_or_ptr); 4649 return ExprError(); 4650 } 4651 } 4652 if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) { 4653 Diag(DRE->getBeginLoc(), diag::err_atomic_op_bitwise_needs_atomic_int) 4654 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4655 return ExprError(); 4656 } 4657 if (IsC11 && ValType->isPointerType() && 4658 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4659 diag::err_incomplete_type)) { 4660 return ExprError(); 4661 } 4662 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4663 // For __atomic_*_n operations, the value type must be a scalar integral or 4664 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 4665 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4666 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4667 return ExprError(); 4668 } 4669 4670 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4671 !AtomTy->isScalarType()) { 4672 // For GNU atomics, require a trivially-copyable type. This is not part of 4673 // the GNU atomics specification, but we enforce it for sanity. 4674 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_trivial_copy) 4675 << Ptr->getType() << Ptr->getSourceRange(); 4676 return ExprError(); 4677 } 4678 4679 switch (ValType.getObjCLifetime()) { 4680 case Qualifiers::OCL_None: 4681 case Qualifiers::OCL_ExplicitNone: 4682 // okay 4683 break; 4684 4685 case Qualifiers::OCL_Weak: 4686 case Qualifiers::OCL_Strong: 4687 case Qualifiers::OCL_Autoreleasing: 4688 // FIXME: Can this happen? By this point, ValType should be known 4689 // to be trivially copyable. 4690 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 4691 << ValType << Ptr->getSourceRange(); 4692 return ExprError(); 4693 } 4694 4695 // All atomic operations have an overload which takes a pointer to a volatile 4696 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4697 // into the result or the other operands. Similarly atomic_load takes a 4698 // pointer to a const 'A'. 4699 ValType.removeLocalVolatile(); 4700 ValType.removeLocalConst(); 4701 QualType ResultType = ValType; 4702 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4703 Form == Init) 4704 ResultType = Context.VoidTy; 4705 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4706 ResultType = Context.BoolTy; 4707 4708 // The type of a parameter passed 'by value'. In the GNU atomics, such 4709 // arguments are actually passed as pointers. 
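  // For example, __c11_atomic_store(APtr, Val, Order) takes 'Val' by value,
  // while the GNU __atomic_store(Ptr, ValPtr, Order) form receives it through
  // a pointer; in the latter case ByValType below is the pointer type of the
  // first argument rather than the value type itself.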
4710 QualType ByValType = ValType; // 'CP' 4711 bool IsPassedByAddress = false; 4712 if (!IsC11 && !IsN) { 4713 ByValType = Ptr->getType(); 4714 IsPassedByAddress = true; 4715 } 4716 4717 // The first argument's non-CV pointer type is used to deduce the type of 4718 // subsequent arguments, except for: 4719 // - weak flag (always converted to bool) 4720 // - memory order (always converted to int) 4721 // - scope (always converted to int) 4722 for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) { 4723 QualType Ty; 4724 if (i < NumVals[Form] + 1) { 4725 switch (i) { 4726 case 0: 4727 // The first argument is always a pointer. It has a fixed type. 4728 // It is always dereferenced, a nullptr is undefined. 4729 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4730 // Nothing else to do: we already know all we want about this pointer. 4731 continue; 4732 case 1: 4733 // The second argument is the non-atomic operand. For arithmetic, this 4734 // is always passed by value, and for a compare_exchange it is always 4735 // passed by address. For the rest, GNU uses by-address and C11 uses 4736 // by-value. 4737 assert(Form != Load); 4738 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 4739 Ty = ValType; 4740 else if (Form == Copy || Form == Xchg) { 4741 if (IsPassedByAddress) 4742 // The value pointer is always dereferenced, a nullptr is undefined. 4743 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4744 Ty = ByValType; 4745 } else if (Form == Arithmetic) 4746 Ty = Context.getPointerDiffType(); 4747 else { 4748 Expr *ValArg = TheCall->getArg(i); 4749 // The value pointer is always dereferenced, a nullptr is undefined. 4750 CheckNonNullArgument(*this, ValArg, DRE->getBeginLoc()); 4751 LangAS AS = LangAS::Default; 4752 // Keep address space of non-atomic pointer type. 4753 if (const PointerType *PtrTy = 4754 ValArg->getType()->getAs<PointerType>()) { 4755 AS = PtrTy->getPointeeType().getAddressSpace(); 4756 } 4757 Ty = Context.getPointerType( 4758 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 4759 } 4760 break; 4761 case 2: 4762 // The third argument to compare_exchange / GNU exchange is the desired 4763 // value, either by-value (for the C11 and *_n variant) or as a pointer. 4764 if (IsPassedByAddress) 4765 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4766 Ty = ByValType; 4767 break; 4768 case 3: 4769 // The fourth argument to GNU compare_exchange is a 'weak' flag. 4770 Ty = Context.BoolTy; 4771 break; 4772 } 4773 } else { 4774 // The order(s) and scope are always converted to int. 4775 Ty = Context.IntTy; 4776 } 4777 4778 InitializedEntity Entity = 4779 InitializedEntity::InitializeParameter(Context, Ty, false); 4780 ExprResult Arg = TheCall->getArg(i); 4781 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 4782 if (Arg.isInvalid()) 4783 return true; 4784 TheCall->setArg(i, Arg.get()); 4785 } 4786 4787 // Permute the arguments into a 'consistent' order. 4788 SmallVector<Expr*, 5> SubExprs; 4789 SubExprs.push_back(Ptr); 4790 switch (Form) { 4791 case Init: 4792 // Note, AtomicExpr::getVal1() has a special case for this atomic. 
4793 SubExprs.push_back(TheCall->getArg(1)); // Val1 4794 break; 4795 case Load: 4796 SubExprs.push_back(TheCall->getArg(1)); // Order 4797 break; 4798 case LoadCopy: 4799 case Copy: 4800 case Arithmetic: 4801 case Xchg: 4802 SubExprs.push_back(TheCall->getArg(2)); // Order 4803 SubExprs.push_back(TheCall->getArg(1)); // Val1 4804 break; 4805 case GNUXchg: 4806 // Note, AtomicExpr::getVal2() has a special case for this atomic. 4807 SubExprs.push_back(TheCall->getArg(3)); // Order 4808 SubExprs.push_back(TheCall->getArg(1)); // Val1 4809 SubExprs.push_back(TheCall->getArg(2)); // Val2 4810 break; 4811 case C11CmpXchg: 4812 SubExprs.push_back(TheCall->getArg(3)); // Order 4813 SubExprs.push_back(TheCall->getArg(1)); // Val1 4814 SubExprs.push_back(TheCall->getArg(4)); // OrderFail 4815 SubExprs.push_back(TheCall->getArg(2)); // Val2 4816 break; 4817 case GNUCmpXchg: 4818 SubExprs.push_back(TheCall->getArg(4)); // Order 4819 SubExprs.push_back(TheCall->getArg(1)); // Val1 4820 SubExprs.push_back(TheCall->getArg(5)); // OrderFail 4821 SubExprs.push_back(TheCall->getArg(2)); // Val2 4822 SubExprs.push_back(TheCall->getArg(3)); // Weak 4823 break; 4824 } 4825 4826 if (SubExprs.size() >= 2 && Form != Init) { 4827 llvm::APSInt Result(32); 4828 if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && 4829 !isValidOrderingForOp(Result.getSExtValue(), Op)) 4830 Diag(SubExprs[1]->getBeginLoc(), 4831 diag::warn_atomic_op_has_invalid_memory_order) 4832 << SubExprs[1]->getSourceRange(); 4833 } 4834 4835 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 4836 auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1); 4837 llvm::APSInt Result(32); 4838 if (Scope->isIntegerConstantExpr(Result, Context) && 4839 !ScopeModel->isValid(Result.getZExtValue())) { 4840 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 4841 << Scope->getSourceRange(); 4842 } 4843 SubExprs.push_back(Scope); 4844 } 4845 4846 AtomicExpr *AE = 4847 new (Context) AtomicExpr(TheCall->getCallee()->getBeginLoc(), SubExprs, 4848 ResultType, Op, TheCall->getRParenLoc()); 4849 4850 if ((Op == AtomicExpr::AO__c11_atomic_load || 4851 Op == AtomicExpr::AO__c11_atomic_store || 4852 Op == AtomicExpr::AO__opencl_atomic_load || 4853 Op == AtomicExpr::AO__opencl_atomic_store ) && 4854 Context.AtomicUsesUnsupportedLibcall(AE)) 4855 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 4856 << ((Op == AtomicExpr::AO__c11_atomic_load || 4857 Op == AtomicExpr::AO__opencl_atomic_load) 4858 ? 0 4859 : 1); 4860 4861 return AE; 4862 } 4863 4864 /// checkBuiltinArgument - Given a call to a builtin function, perform 4865 /// normal type-checking on the given argument, updating the call in 4866 /// place. This is useful when a builtin function requires custom 4867 /// type-checking for some of its arguments but not necessarily all of 4868 /// them. 4869 /// 4870 /// Returns true on error. 
4871 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 4872 FunctionDecl *Fn = E->getDirectCallee(); 4873 assert(Fn && "builtin call without direct callee!"); 4874 4875 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 4876 InitializedEntity Entity = 4877 InitializedEntity::InitializeParameter(S.Context, Param); 4878 4879 ExprResult Arg = E->getArg(0); 4880 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 4881 if (Arg.isInvalid()) 4882 return true; 4883 4884 E->setArg(ArgIndex, Arg.get()); 4885 return false; 4886 } 4887 4888 /// We have a call to a function like __sync_fetch_and_add, which is an 4889 /// overloaded function based on the pointer type of its first argument. 4890 /// The main BuildCallExpr routines have already promoted the types of 4891 /// arguments because all of these calls are prototyped as void(...). 4892 /// 4893 /// This function goes through and does final semantic checking for these 4894 /// builtins, as well as generating any warnings. 4895 ExprResult 4896 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 4897 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 4898 Expr *Callee = TheCall->getCallee(); 4899 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 4900 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 4901 4902 // Ensure that we have at least one argument to do type inference from. 4903 if (TheCall->getNumArgs() < 1) { 4904 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 4905 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 4906 return ExprError(); 4907 } 4908 4909 // Inspect the first argument of the atomic builtin. This should always be 4910 // a pointer type, whose element is an integral scalar or pointer type. 4911 // Because it is a pointer type, we don't have to worry about any implicit 4912 // casts here. 4913 // FIXME: We don't allow floating point scalars as input. 4914 Expr *FirstArg = TheCall->getArg(0); 4915 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 4916 if (FirstArgResult.isInvalid()) 4917 return ExprError(); 4918 FirstArg = FirstArgResult.get(); 4919 TheCall->setArg(0, FirstArg); 4920 4921 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 4922 if (!pointerType) { 4923 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 4924 << FirstArg->getType() << FirstArg->getSourceRange(); 4925 return ExprError(); 4926 } 4927 4928 QualType ValType = pointerType->getPointeeType(); 4929 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 4930 !ValType->isBlockPointerType()) { 4931 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 4932 << FirstArg->getType() << FirstArg->getSourceRange(); 4933 return ExprError(); 4934 } 4935 4936 if (ValType.isConstQualified()) { 4937 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 4938 << FirstArg->getType() << FirstArg->getSourceRange(); 4939 return ExprError(); 4940 } 4941 4942 switch (ValType.getObjCLifetime()) { 4943 case Qualifiers::OCL_None: 4944 case Qualifiers::OCL_ExplicitNone: 4945 // okay 4946 break; 4947 4948 case Qualifiers::OCL_Weak: 4949 case Qualifiers::OCL_Strong: 4950 case Qualifiers::OCL_Autoreleasing: 4951 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 4952 << ValType << FirstArg->getSourceRange(); 4953 return ExprError(); 4954 } 4955 4956 // Strip any qualifiers off ValType. 
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto. For example,
  // __sync_fetch_and_add with a 2-byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number
  // of values (0, 1 or 2), followed by a potentially empty varargs list of
  // stuff that we ignore. Find out which row of BuiltinIndices to read from
  // as well as the number of fixed args.
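  // For example, given
  //   long L;
  //   __sync_val_compare_and_swap(&L, 0L, 1L);
  // there are two fixed value arguments (NumFixed == 2) and, on a typical
  // LP64 target, the call is retargeted to __sync_val_compare_and_swap_8;
  // by contrast, __sync_lock_release(&L) has no fixed value arguments and
  // produces a void result.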
5011 unsigned BuiltinID = FDecl->getBuiltinID(); 5012 unsigned BuiltinIndex, NumFixed = 1; 5013 bool WarnAboutSemanticsChange = false; 5014 switch (BuiltinID) { 5015 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5016 case Builtin::BI__sync_fetch_and_add: 5017 case Builtin::BI__sync_fetch_and_add_1: 5018 case Builtin::BI__sync_fetch_and_add_2: 5019 case Builtin::BI__sync_fetch_and_add_4: 5020 case Builtin::BI__sync_fetch_and_add_8: 5021 case Builtin::BI__sync_fetch_and_add_16: 5022 BuiltinIndex = 0; 5023 break; 5024 5025 case Builtin::BI__sync_fetch_and_sub: 5026 case Builtin::BI__sync_fetch_and_sub_1: 5027 case Builtin::BI__sync_fetch_and_sub_2: 5028 case Builtin::BI__sync_fetch_and_sub_4: 5029 case Builtin::BI__sync_fetch_and_sub_8: 5030 case Builtin::BI__sync_fetch_and_sub_16: 5031 BuiltinIndex = 1; 5032 break; 5033 5034 case Builtin::BI__sync_fetch_and_or: 5035 case Builtin::BI__sync_fetch_and_or_1: 5036 case Builtin::BI__sync_fetch_and_or_2: 5037 case Builtin::BI__sync_fetch_and_or_4: 5038 case Builtin::BI__sync_fetch_and_or_8: 5039 case Builtin::BI__sync_fetch_and_or_16: 5040 BuiltinIndex = 2; 5041 break; 5042 5043 case Builtin::BI__sync_fetch_and_and: 5044 case Builtin::BI__sync_fetch_and_and_1: 5045 case Builtin::BI__sync_fetch_and_and_2: 5046 case Builtin::BI__sync_fetch_and_and_4: 5047 case Builtin::BI__sync_fetch_and_and_8: 5048 case Builtin::BI__sync_fetch_and_and_16: 5049 BuiltinIndex = 3; 5050 break; 5051 5052 case Builtin::BI__sync_fetch_and_xor: 5053 case Builtin::BI__sync_fetch_and_xor_1: 5054 case Builtin::BI__sync_fetch_and_xor_2: 5055 case Builtin::BI__sync_fetch_and_xor_4: 5056 case Builtin::BI__sync_fetch_and_xor_8: 5057 case Builtin::BI__sync_fetch_and_xor_16: 5058 BuiltinIndex = 4; 5059 break; 5060 5061 case Builtin::BI__sync_fetch_and_nand: 5062 case Builtin::BI__sync_fetch_and_nand_1: 5063 case Builtin::BI__sync_fetch_and_nand_2: 5064 case Builtin::BI__sync_fetch_and_nand_4: 5065 case Builtin::BI__sync_fetch_and_nand_8: 5066 case Builtin::BI__sync_fetch_and_nand_16: 5067 BuiltinIndex = 5; 5068 WarnAboutSemanticsChange = true; 5069 break; 5070 5071 case Builtin::BI__sync_add_and_fetch: 5072 case Builtin::BI__sync_add_and_fetch_1: 5073 case Builtin::BI__sync_add_and_fetch_2: 5074 case Builtin::BI__sync_add_and_fetch_4: 5075 case Builtin::BI__sync_add_and_fetch_8: 5076 case Builtin::BI__sync_add_and_fetch_16: 5077 BuiltinIndex = 6; 5078 break; 5079 5080 case Builtin::BI__sync_sub_and_fetch: 5081 case Builtin::BI__sync_sub_and_fetch_1: 5082 case Builtin::BI__sync_sub_and_fetch_2: 5083 case Builtin::BI__sync_sub_and_fetch_4: 5084 case Builtin::BI__sync_sub_and_fetch_8: 5085 case Builtin::BI__sync_sub_and_fetch_16: 5086 BuiltinIndex = 7; 5087 break; 5088 5089 case Builtin::BI__sync_and_and_fetch: 5090 case Builtin::BI__sync_and_and_fetch_1: 5091 case Builtin::BI__sync_and_and_fetch_2: 5092 case Builtin::BI__sync_and_and_fetch_4: 5093 case Builtin::BI__sync_and_and_fetch_8: 5094 case Builtin::BI__sync_and_and_fetch_16: 5095 BuiltinIndex = 8; 5096 break; 5097 5098 case Builtin::BI__sync_or_and_fetch: 5099 case Builtin::BI__sync_or_and_fetch_1: 5100 case Builtin::BI__sync_or_and_fetch_2: 5101 case Builtin::BI__sync_or_and_fetch_4: 5102 case Builtin::BI__sync_or_and_fetch_8: 5103 case Builtin::BI__sync_or_and_fetch_16: 5104 BuiltinIndex = 9; 5105 break; 5106 5107 case Builtin::BI__sync_xor_and_fetch: 5108 case Builtin::BI__sync_xor_and_fetch_1: 5109 case Builtin::BI__sync_xor_and_fetch_2: 5110 case Builtin::BI__sync_xor_and_fetch_4: 5111 case 
Builtin::BI__sync_xor_and_fetch_8: 5112 case Builtin::BI__sync_xor_and_fetch_16: 5113 BuiltinIndex = 10; 5114 break; 5115 5116 case Builtin::BI__sync_nand_and_fetch: 5117 case Builtin::BI__sync_nand_and_fetch_1: 5118 case Builtin::BI__sync_nand_and_fetch_2: 5119 case Builtin::BI__sync_nand_and_fetch_4: 5120 case Builtin::BI__sync_nand_and_fetch_8: 5121 case Builtin::BI__sync_nand_and_fetch_16: 5122 BuiltinIndex = 11; 5123 WarnAboutSemanticsChange = true; 5124 break; 5125 5126 case Builtin::BI__sync_val_compare_and_swap: 5127 case Builtin::BI__sync_val_compare_and_swap_1: 5128 case Builtin::BI__sync_val_compare_and_swap_2: 5129 case Builtin::BI__sync_val_compare_and_swap_4: 5130 case Builtin::BI__sync_val_compare_and_swap_8: 5131 case Builtin::BI__sync_val_compare_and_swap_16: 5132 BuiltinIndex = 12; 5133 NumFixed = 2; 5134 break; 5135 5136 case Builtin::BI__sync_bool_compare_and_swap: 5137 case Builtin::BI__sync_bool_compare_and_swap_1: 5138 case Builtin::BI__sync_bool_compare_and_swap_2: 5139 case Builtin::BI__sync_bool_compare_and_swap_4: 5140 case Builtin::BI__sync_bool_compare_and_swap_8: 5141 case Builtin::BI__sync_bool_compare_and_swap_16: 5142 BuiltinIndex = 13; 5143 NumFixed = 2; 5144 ResultType = Context.BoolTy; 5145 break; 5146 5147 case Builtin::BI__sync_lock_test_and_set: 5148 case Builtin::BI__sync_lock_test_and_set_1: 5149 case Builtin::BI__sync_lock_test_and_set_2: 5150 case Builtin::BI__sync_lock_test_and_set_4: 5151 case Builtin::BI__sync_lock_test_and_set_8: 5152 case Builtin::BI__sync_lock_test_and_set_16: 5153 BuiltinIndex = 14; 5154 break; 5155 5156 case Builtin::BI__sync_lock_release: 5157 case Builtin::BI__sync_lock_release_1: 5158 case Builtin::BI__sync_lock_release_2: 5159 case Builtin::BI__sync_lock_release_4: 5160 case Builtin::BI__sync_lock_release_8: 5161 case Builtin::BI__sync_lock_release_16: 5162 BuiltinIndex = 15; 5163 NumFixed = 0; 5164 ResultType = Context.VoidTy; 5165 break; 5166 5167 case Builtin::BI__sync_swap: 5168 case Builtin::BI__sync_swap_1: 5169 case Builtin::BI__sync_swap_2: 5170 case Builtin::BI__sync_swap_4: 5171 case Builtin::BI__sync_swap_8: 5172 case Builtin::BI__sync_swap_16: 5173 BuiltinIndex = 16; 5174 break; 5175 } 5176 5177 // Now that we know how many fixed arguments we expect, first check that we 5178 // have at least that many. 5179 if (TheCall->getNumArgs() < 1+NumFixed) { 5180 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5181 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5182 << Callee->getSourceRange(); 5183 return ExprError(); 5184 } 5185 5186 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5187 << Callee->getSourceRange(); 5188 5189 if (WarnAboutSemanticsChange) { 5190 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5191 << Callee->getSourceRange(); 5192 } 5193 5194 // Get the decl for the concrete builtin from this, we can tell what the 5195 // concrete integer type we should convert to is. 5196 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5197 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5198 FunctionDecl *NewBuiltinDecl; 5199 if (NewBuiltinID == BuiltinID) 5200 NewBuiltinDecl = FDecl; 5201 else { 5202 // Perform builtin lookup to avoid redeclaring it. 
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly. Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), so check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
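  // For example, __builtin_nontemporal_store(Val, Ptr) must be passed exactly
  // two arguments, while __builtin_nontemporal_load(Ptr) takes just the
  // pointer.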
5276   if (checkArgCount(*this, TheCall, numArgs))
5277     return ExprError();
5278
5279   // Inspect the last argument of the nontemporal builtin. This should always
5280   // be a pointer type, from which we infer the type of the memory access.
5281   // Because it is a pointer type, we don't have to worry about any implicit
5282   // casts here.
5283   Expr *PointerArg = TheCall->getArg(numArgs - 1);
5284   ExprResult PointerArgResult =
5285       DefaultFunctionArrayLvalueConversion(PointerArg);
5286
5287   if (PointerArgResult.isInvalid())
5288     return ExprError();
5289   PointerArg = PointerArgResult.get();
5290   TheCall->setArg(numArgs - 1, PointerArg);
5291
5292   const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
5293   if (!pointerType) {
5294     Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
5295         << PointerArg->getType() << PointerArg->getSourceRange();
5296     return ExprError();
5297   }
5298
5299   QualType ValType = pointerType->getPointeeType();
5300
5301   // Strip any qualifiers off ValType.
5302   ValType = ValType.getUnqualifiedType();
5303   if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5304       !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
5305       !ValType->isVectorType()) {
5306     Diag(DRE->getBeginLoc(),
5307          diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
5308         << PointerArg->getType() << PointerArg->getSourceRange();
5309     return ExprError();
5310   }
5311
5312   if (!isStore) {
5313     TheCall->setType(ValType);
5314     return TheCallResult;
5315   }
5316
5317   ExprResult ValArg = TheCall->getArg(0);
5318   InitializedEntity Entity = InitializedEntity::InitializeParameter(
5319       Context, ValType, /*consume*/ false);
5320   ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
5321   if (ValArg.isInvalid())
5322     return ExprError();
5323
5324   TheCall->setArg(0, ValArg.get());
5325   TheCall->setType(Context.VoidTy);
5326   return TheCallResult;
5327 }
5328
5329 /// CheckObjCString - Checks that the argument to the builtin
5330 /// CFString constructor is correct.
5331 /// Note: It might also make sense to do the UTF-16 conversion here (would
5332 /// simplify the backend).
5333 bool Sema::CheckObjCString(Expr *Arg) {
5334   Arg = Arg->IgnoreParenCasts();
5335   StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
5336
5337   if (!Literal || !Literal->isAscii()) {
5338     Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
5339         << Arg->getSourceRange();
5340     return true;
5341   }
5342
5343   if (Literal->containsNonAsciiOrNull()) {
5344     StringRef String = Literal->getString();
5345     unsigned NumBytes = String.size();
5346     SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
5347     const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5348     llvm::UTF16 *ToPtr = &ToBuf[0];
5349
5350     llvm::ConversionResult Result =
5351         llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5352                                  ToPtr + NumBytes, llvm::strictConversion);
5353     // Check for conversion failure.
5354     if (Result != llvm::conversionOK)
5355       Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
5356           << Arg->getSourceRange();
5357   }
5358   return false;
5359 }
5360
5361 /// CheckOSLogFormatStringArg - Checks that the format string argument to the
5362 /// os_log() and os_trace() functions is correct, and converts it to const char *.
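/// For illustration only (hypothetical call sites, not taken from this file;
/// the variable names are made up):
///   os_log(OS_LOG_DEFAULT, "%d", value);   // plain C string literal is accepted
///   os_log(OS_LOG_DEFAULT, @"%d", value);  // an ObjCStringLiteral is unwrapped to
///                                          // its underlying string before checking
/// A wide or non-UTF-8 literal, or a non-literal expression, is rejected with
/// err_os_log_format_not_string_constant below.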
5363 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 5364 Arg = Arg->IgnoreParenCasts(); 5365 auto *Literal = dyn_cast<StringLiteral>(Arg); 5366 if (!Literal) { 5367 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 5368 Literal = ObjcLiteral->getString(); 5369 } 5370 } 5371 5372 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 5373 return ExprError( 5374 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 5375 << Arg->getSourceRange()); 5376 } 5377 5378 ExprResult Result(Literal); 5379 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 5380 InitializedEntity Entity = 5381 InitializedEntity::InitializeParameter(Context, ResultTy, false); 5382 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 5383 return Result; 5384 } 5385 5386 /// Check that the user is calling the appropriate va_start builtin for the 5387 /// target and calling convention. 5388 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 5389 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 5390 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 5391 bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64; 5392 bool IsWindows = TT.isOSWindows(); 5393 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 5394 if (IsX64 || IsAArch64) { 5395 CallingConv CC = CC_C; 5396 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 5397 CC = FD->getType()->getAs<FunctionType>()->getCallConv(); 5398 if (IsMSVAStart) { 5399 // Don't allow this in System V ABI functions. 5400 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 5401 return S.Diag(Fn->getBeginLoc(), 5402 diag::err_ms_va_start_used_in_sysv_function); 5403 } else { 5404 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 5405 // On x64 Windows, don't allow this in System V ABI functions. 5406 // (Yes, that means there's no corresponding way to support variadic 5407 // System V ABI functions on Windows.) 5408 if ((IsWindows && CC == CC_X86_64SysV) || 5409 (!IsWindows && CC == CC_Win64)) 5410 return S.Diag(Fn->getBeginLoc(), 5411 diag::err_va_start_used_in_wrong_abi_function) 5412 << !IsWindows; 5413 } 5414 return false; 5415 } 5416 5417 if (IsMSVAStart) 5418 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5419 return false; 5420 } 5421 5422 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5423 ParmVarDecl **LastParam = nullptr) { 5424 // Determine whether the current function, block, or obj-c method is variadic 5425 // and get its parameter list. 5426 bool IsVariadic = false; 5427 ArrayRef<ParmVarDecl *> Params; 5428 DeclContext *Caller = S.CurContext; 5429 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5430 IsVariadic = Block->isVariadic(); 5431 Params = Block->parameters(); 5432 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5433 IsVariadic = FD->isVariadic(); 5434 Params = FD->parameters(); 5435 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5436 IsVariadic = MD->isVariadic(); 5437 // FIXME: This isn't correct for methods (results in bogus warning). 5438 Params = MD->parameters(); 5439 } else if (isa<CapturedDecl>(Caller)) { 5440 // We don't support va_start in a CapturedDecl. 5441 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5442 return true; 5443 } else { 5444 // This must be some other declcontext that parses exprs. 
5445 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5446 return true; 5447 } 5448 5449 if (!IsVariadic) { 5450 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5451 return true; 5452 } 5453 5454 if (LastParam) 5455 *LastParam = Params.empty() ? nullptr : Params.back(); 5456 5457 return false; 5458 } 5459 5460 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5461 /// for validity. Emit an error and return true on failure; return false 5462 /// on success. 5463 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5464 Expr *Fn = TheCall->getCallee(); 5465 5466 if (checkVAStartABI(*this, BuiltinID, Fn)) 5467 return true; 5468 5469 if (TheCall->getNumArgs() > 2) { 5470 Diag(TheCall->getArg(2)->getBeginLoc(), 5471 diag::err_typecheck_call_too_many_args) 5472 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5473 << Fn->getSourceRange() 5474 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5475 (*(TheCall->arg_end() - 1))->getEndLoc()); 5476 return true; 5477 } 5478 5479 if (TheCall->getNumArgs() < 2) { 5480 return Diag(TheCall->getEndLoc(), 5481 diag::err_typecheck_call_too_few_args_at_least) 5482 << 0 /*function call*/ << 2 << TheCall->getNumArgs(); 5483 } 5484 5485 // Type-check the first argument normally. 5486 if (checkBuiltinArgument(*this, TheCall, 0)) 5487 return true; 5488 5489 // Check that the current function is variadic, and get its last parameter. 5490 ParmVarDecl *LastParam; 5491 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5492 return true; 5493 5494 // Verify that the second argument to the builtin is the last argument of the 5495 // current function or method. 5496 bool SecondArgIsLastNamedArgument = false; 5497 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5498 5499 // These are valid if SecondArgIsLastNamedArgument is false after the next 5500 // block. 5501 QualType Type; 5502 SourceLocation ParamLoc; 5503 bool IsCRegister = false; 5504 5505 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 5506 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 5507 SecondArgIsLastNamedArgument = PV == LastParam; 5508 5509 Type = PV->getType(); 5510 ParamLoc = PV->getLocation(); 5511 IsCRegister = 5512 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 5513 } 5514 } 5515 5516 if (!SecondArgIsLastNamedArgument) 5517 Diag(TheCall->getArg(1)->getBeginLoc(), 5518 diag::warn_second_arg_of_va_start_not_last_named_param); 5519 else if (IsCRegister || Type->isReferenceType() || 5520 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 5521 // Promotable integers are UB, but enumerations need a bit of 5522 // extra checking to see what their promotable type actually is. 
5523 if (!Type->isPromotableIntegerType()) 5524 return false; 5525 if (!Type->isEnumeralType()) 5526 return true; 5527 const EnumDecl *ED = Type->getAs<EnumType>()->getDecl(); 5528 return !(ED && 5529 Context.typesAreCompatible(ED->getPromotionType(), Type)); 5530 }()) { 5531 unsigned Reason = 0; 5532 if (Type->isReferenceType()) Reason = 1; 5533 else if (IsCRegister) Reason = 2; 5534 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 5535 Diag(ParamLoc, diag::note_parameter_type) << Type; 5536 } 5537 5538 TheCall->setType(Context.VoidTy); 5539 return false; 5540 } 5541 5542 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 5543 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 5544 // const char *named_addr); 5545 5546 Expr *Func = Call->getCallee(); 5547 5548 if (Call->getNumArgs() < 3) 5549 return Diag(Call->getEndLoc(), 5550 diag::err_typecheck_call_too_few_args_at_least) 5551 << 0 /*function call*/ << 3 << Call->getNumArgs(); 5552 5553 // Type-check the first argument normally. 5554 if (checkBuiltinArgument(*this, Call, 0)) 5555 return true; 5556 5557 // Check that the current function is variadic. 5558 if (checkVAStartIsInVariadicFunction(*this, Func)) 5559 return true; 5560 5561 // __va_start on Windows does not validate the parameter qualifiers 5562 5563 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 5564 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 5565 5566 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 5567 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 5568 5569 const QualType &ConstCharPtrTy = 5570 Context.getPointerType(Context.CharTy.withConst()); 5571 if (!Arg1Ty->isPointerType() || 5572 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 5573 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5574 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 5575 << 0 /* qualifier difference */ 5576 << 3 /* parameter mismatch */ 5577 << 2 << Arg1->getType() << ConstCharPtrTy; 5578 5579 const QualType SizeTy = Context.getSizeType(); 5580 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 5581 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5582 << Arg2->getType() << SizeTy << 1 /* different class */ 5583 << 0 /* qualifier difference */ 5584 << 3 /* parameter mismatch */ 5585 << 3 << Arg2->getType() << SizeTy; 5586 5587 return false; 5588 } 5589 5590 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 5591 /// friends. This is declared to take (...), so we have to check everything. 5592 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 5593 if (TheCall->getNumArgs() < 2) 5594 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5595 << 0 << 2 << TheCall->getNumArgs() /*function call*/; 5596 if (TheCall->getNumArgs() > 2) 5597 return Diag(TheCall->getArg(2)->getBeginLoc(), 5598 diag::err_typecheck_call_too_many_args) 5599 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5600 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5601 (*(TheCall->arg_end() - 1))->getEndLoc()); 5602 5603 ExprResult OrigArg0 = TheCall->getArg(0); 5604 ExprResult OrigArg1 = TheCall->getArg(1); 5605 5606 // Do standard promotions between the two arguments, returning their common 5607 // type. 
5608   QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
5609   if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
5610     return true;
5611
5612   // Make sure any conversions are pushed back into the call; this is
5613   // type safe since unordered compare builtins are declared as "_Bool
5614   // foo(...)".
5615   TheCall->setArg(0, OrigArg0.get());
5616   TheCall->setArg(1, OrigArg1.get());
5617
5618   if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
5619     return false;
5620
5621   // If the common type isn't a real floating type, then the arguments were
5622   // invalid for this operation.
5623   if (Res.isNull() || !Res->isRealFloatingType())
5624     return Diag(OrigArg0.get()->getBeginLoc(),
5625                 diag::err_typecheck_call_invalid_ordered_compare)
5626            << OrigArg0.get()->getType() << OrigArg1.get()->getType()
5627            << SourceRange(OrigArg0.get()->getBeginLoc(),
5628                           OrigArg1.get()->getEndLoc());
5629
5630   return false;
5631 }
5632
5633 /// SemaBuiltinFPClassification - Handle functions like
5634 /// __builtin_isnan and friends. This is declared to take (...), so we have
5635 /// to check everything. We expect the last argument to be a floating point
5636 /// value.
5637 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
5638   if (TheCall->getNumArgs() < NumArgs)
5639     return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
5640            << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
5641   if (TheCall->getNumArgs() > NumArgs)
5642     return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
5643                 diag::err_typecheck_call_too_many_args)
5644            << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
5645            << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
5646                           (*(TheCall->arg_end() - 1))->getEndLoc());
5647
5648   Expr *OrigArg = TheCall->getArg(NumArgs-1);
5649
5650   if (OrigArg->isTypeDependent())
5651     return false;
5652
5653   // This operation requires a non-_Complex floating-point number.
5654   if (!OrigArg->getType()->isRealFloatingType())
5655     return Diag(OrigArg->getBeginLoc(),
5656                 diag::err_typecheck_call_invalid_unary_fp)
5657            << OrigArg->getType() << OrigArg->getSourceRange();
5658
5659   // If this is an implicit conversion from float -> float, double, or
5660   // long double, remove it.
5661   if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
5662     // Only remove standard FloatCasts, leaving other casts in place.
5663     if (Cast->getCastKind() == CK_FloatingCast) {
5664       Expr *CastArg = Cast->getSubExpr();
5665       if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
5666         assert(
5667             (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
5668              Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) ||
5669              Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) &&
5670             "promotion from float to either float, double, or long double is "
5671             "the only expected cast here");
5672         Cast->setSubExpr(nullptr);
5673         TheCall->setArg(NumArgs-1, CastArg);
5674       }
5675     }
5676   }
5677
5678   return false;
5679 }
5680
5681 // Customized Sema checking for VSX builtins that have the following signature:
5682 //   vector [...] builtinName(vector [...], vector [...], const int);
5683 // These take two vectors of the same type (any legal vector type) as the first
5684 // two arguments and a compile-time constant as the third argument.
5685 // Example builtins are:
5686 //   vector double vec_xxpermdi(vector double, vector double, int);
5687 //   vector short vec_xxsldwi(vector short, vector short, int);
5688 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
5689   unsigned ExpectedNumArgs = 3;
5690   if (TheCall->getNumArgs() < ExpectedNumArgs)
5691     return Diag(TheCall->getEndLoc(),
5692                 diag::err_typecheck_call_too_few_args_at_least)
5693            << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
5694            << TheCall->getSourceRange();
5695
5696   if (TheCall->getNumArgs() > ExpectedNumArgs)
5697     return Diag(TheCall->getEndLoc(),
5698                 diag::err_typecheck_call_too_many_args_at_most)
5699            << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
5700            << TheCall->getSourceRange();
5701
5702   // Check that the third argument is a compile-time constant.
5703   llvm::APSInt Value;
5704   if (!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context))
5705     return Diag(TheCall->getBeginLoc(),
5706                 diag::err_vsx_builtin_nonconstant_argument)
5707            << 3 /* argument index */ << TheCall->getDirectCallee()
5708            << SourceRange(TheCall->getArg(2)->getBeginLoc(),
5709                           TheCall->getArg(2)->getEndLoc());
5710
5711   QualType Arg1Ty = TheCall->getArg(0)->getType();
5712   QualType Arg2Ty = TheCall->getArg(1)->getType();
5713
5714   // Check that the types of argument 1 and argument 2 are vectors.
5715   SourceLocation BuiltinLoc = TheCall->getBeginLoc();
5716   if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
5717       (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
5718     return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
5719            << TheCall->getDirectCallee()
5720            << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5721                           TheCall->getArg(1)->getEndLoc());
5722   }
5723
5724   // Check that the first two arguments are the same type.
5725   if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
5726     return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
5727            << TheCall->getDirectCallee()
5728            << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5729                           TheCall->getArg(1)->getEndLoc());
5730   }
5731
5732   // When default clang type checking is turned off and the customized type
5733   // checking is used, the return type of the function must be set explicitly.
5734   // Otherwise it is _Bool by default.
5735   TheCall->setType(Arg1Ty);
5736
5737   return false;
5738 }
5739
5740 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
5741 // This is declared to take (...), so we have to check everything.
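// For illustration only (hypothetical call sites with made-up names; the two
// forms are the same ones handled in the body below):
//   __builtin_shufflevector(a, b, 0, 4, 1, 5)  // scalar constant indices pick
//                                              // elements from 'a' then 'b'
//   __builtin_shufflevector(a, mask)           // 'mask' is an integer vector with
//                                              // the same number of elements as 'a'
// A constant index of -1 selects an undefined element in the result.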
5742 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 5743 if (TheCall->getNumArgs() < 2) 5744 return ExprError(Diag(TheCall->getEndLoc(), 5745 diag::err_typecheck_call_too_few_args_at_least) 5746 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5747 << TheCall->getSourceRange()); 5748 5749 // Determine which of the following types of shufflevector we're checking: 5750 // 1) unary, vector mask: (lhs, mask) 5751 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 5752 QualType resType = TheCall->getArg(0)->getType(); 5753 unsigned numElements = 0; 5754 5755 if (!TheCall->getArg(0)->isTypeDependent() && 5756 !TheCall->getArg(1)->isTypeDependent()) { 5757 QualType LHSType = TheCall->getArg(0)->getType(); 5758 QualType RHSType = TheCall->getArg(1)->getType(); 5759 5760 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 5761 return ExprError( 5762 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 5763 << TheCall->getDirectCallee() 5764 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5765 TheCall->getArg(1)->getEndLoc())); 5766 5767 numElements = LHSType->getAs<VectorType>()->getNumElements(); 5768 unsigned numResElements = TheCall->getNumArgs() - 2; 5769 5770 // Check to see if we have a call with 2 vector arguments, the unary shuffle 5771 // with mask. If so, verify that RHS is an integer vector type with the 5772 // same number of elts as lhs. 5773 if (TheCall->getNumArgs() == 2) { 5774 if (!RHSType->hasIntegerRepresentation() || 5775 RHSType->getAs<VectorType>()->getNumElements() != numElements) 5776 return ExprError(Diag(TheCall->getBeginLoc(), 5777 diag::err_vec_builtin_incompatible_vector) 5778 << TheCall->getDirectCallee() 5779 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 5780 TheCall->getArg(1)->getEndLoc())); 5781 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 5782 return ExprError(Diag(TheCall->getBeginLoc(), 5783 diag::err_vec_builtin_incompatible_vector) 5784 << TheCall->getDirectCallee() 5785 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5786 TheCall->getArg(1)->getEndLoc())); 5787 } else if (numElements != numResElements) { 5788 QualType eltType = LHSType->getAs<VectorType>()->getElementType(); 5789 resType = Context.getVectorType(eltType, numResElements, 5790 VectorType::GenericVector); 5791 } 5792 } 5793 5794 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 5795 if (TheCall->getArg(i)->isTypeDependent() || 5796 TheCall->getArg(i)->isValueDependent()) 5797 continue; 5798 5799 llvm::APSInt Result(32); 5800 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) 5801 return ExprError(Diag(TheCall->getBeginLoc(), 5802 diag::err_shufflevector_nonconstant_argument) 5803 << TheCall->getArg(i)->getSourceRange()); 5804 5805 // Allow -1 which will be translated to undef in the IR. 
5806 if (Result.isSigned() && Result.isAllOnesValue()) 5807 continue; 5808 5809 if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2) 5810 return ExprError(Diag(TheCall->getBeginLoc(), 5811 diag::err_shufflevector_argument_too_large) 5812 << TheCall->getArg(i)->getSourceRange()); 5813 } 5814 5815 SmallVector<Expr*, 32> exprs; 5816 5817 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 5818 exprs.push_back(TheCall->getArg(i)); 5819 TheCall->setArg(i, nullptr); 5820 } 5821 5822 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 5823 TheCall->getCallee()->getBeginLoc(), 5824 TheCall->getRParenLoc()); 5825 } 5826 5827 /// SemaConvertVectorExpr - Handle __builtin_convertvector 5828 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 5829 SourceLocation BuiltinLoc, 5830 SourceLocation RParenLoc) { 5831 ExprValueKind VK = VK_RValue; 5832 ExprObjectKind OK = OK_Ordinary; 5833 QualType DstTy = TInfo->getType(); 5834 QualType SrcTy = E->getType(); 5835 5836 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 5837 return ExprError(Diag(BuiltinLoc, 5838 diag::err_convertvector_non_vector) 5839 << E->getSourceRange()); 5840 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 5841 return ExprError(Diag(BuiltinLoc, 5842 diag::err_convertvector_non_vector_type)); 5843 5844 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 5845 unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements(); 5846 unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements(); 5847 if (SrcElts != DstElts) 5848 return ExprError(Diag(BuiltinLoc, 5849 diag::err_convertvector_incompatible_vector) 5850 << E->getSourceRange()); 5851 } 5852 5853 return new (Context) 5854 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 5855 } 5856 5857 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 5858 // This is declared to take (const void*, ...) and can take two 5859 // optional constant int args. 5860 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 5861 unsigned NumArgs = TheCall->getNumArgs(); 5862 5863 if (NumArgs > 3) 5864 return Diag(TheCall->getEndLoc(), 5865 diag::err_typecheck_call_too_many_args_at_most) 5866 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5867 5868 // Argument 0 is checked for us and the remaining arguments must be 5869 // constant integers. 5870 for (unsigned i = 1; i != NumArgs; ++i) 5871 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 5872 return true; 5873 5874 return false; 5875 } 5876 5877 /// SemaBuiltinAssume - Handle __assume (MS Extension). 5878 // __assume does not evaluate its arguments, and should warn if its argument 5879 // has side effects. 5880 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 5881 Expr *Arg = TheCall->getArg(0); 5882 if (Arg->isInstantiationDependent()) return false; 5883 5884 if (Arg->HasSideEffects(Context)) 5885 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 5886 << Arg->getSourceRange() 5887 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 5888 5889 return false; 5890 } 5891 5892 /// Handle __builtin_alloca_with_align. This is declared 5893 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 5894 /// than 8. 5895 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 5896 // The alignment must be a constant integer. 5897 Expr *Arg = TheCall->getArg(1); 5898 5899 // We can't check the value of a dependent argument. 
5900 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 5901 if (const auto *UE = 5902 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 5903 if (UE->getKind() == UETT_AlignOf || 5904 UE->getKind() == UETT_PreferredAlignOf) 5905 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 5906 << Arg->getSourceRange(); 5907 5908 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 5909 5910 if (!Result.isPowerOf2()) 5911 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 5912 << Arg->getSourceRange(); 5913 5914 if (Result < Context.getCharWidth()) 5915 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 5916 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 5917 5918 if (Result > std::numeric_limits<int32_t>::max()) 5919 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 5920 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 5921 } 5922 5923 return false; 5924 } 5925 5926 /// Handle __builtin_assume_aligned. This is declared 5927 /// as (const void*, size_t, ...) and can take one optional constant int arg. 5928 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 5929 unsigned NumArgs = TheCall->getNumArgs(); 5930 5931 if (NumArgs > 3) 5932 return Diag(TheCall->getEndLoc(), 5933 diag::err_typecheck_call_too_many_args_at_most) 5934 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5935 5936 // The alignment must be a constant integer. 5937 Expr *Arg = TheCall->getArg(1); 5938 5939 // We can't check the value of a dependent argument. 5940 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 5941 llvm::APSInt Result; 5942 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 5943 return true; 5944 5945 if (!Result.isPowerOf2()) 5946 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 5947 << Arg->getSourceRange(); 5948 } 5949 5950 if (NumArgs > 2) { 5951 ExprResult Arg(TheCall->getArg(2)); 5952 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 5953 Context.getSizeType(), false); 5954 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5955 if (Arg.isInvalid()) return true; 5956 TheCall->setArg(2, Arg.get()); 5957 } 5958 5959 return false; 5960 } 5961 5962 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 5963 unsigned BuiltinID = 5964 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 5965 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 5966 5967 unsigned NumArgs = TheCall->getNumArgs(); 5968 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 5969 if (NumArgs < NumRequiredArgs) { 5970 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5971 << 0 /* function call */ << NumRequiredArgs << NumArgs 5972 << TheCall->getSourceRange(); 5973 } 5974 if (NumArgs >= NumRequiredArgs + 0x100) { 5975 return Diag(TheCall->getEndLoc(), 5976 diag::err_typecheck_call_too_many_args_at_most) 5977 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 5978 << TheCall->getSourceRange(); 5979 } 5980 unsigned i = 0; 5981 5982 // For formatting call, check buffer arg. 5983 if (!IsSizeCall) { 5984 ExprResult Arg(TheCall->getArg(i)); 5985 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5986 Context, Context.VoidPtrTy, false); 5987 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5988 if (Arg.isInvalid()) 5989 return true; 5990 TheCall->setArg(i, Arg.get()); 5991 i++; 5992 } 5993 5994 // Check string literal arg. 
5995 unsigned FormatIdx = i; 5996 { 5997 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 5998 if (Arg.isInvalid()) 5999 return true; 6000 TheCall->setArg(i, Arg.get()); 6001 i++; 6002 } 6003 6004 // Make sure variadic args are scalar. 6005 unsigned FirstDataArg = i; 6006 while (i < NumArgs) { 6007 ExprResult Arg = DefaultVariadicArgumentPromotion( 6008 TheCall->getArg(i), VariadicFunction, nullptr); 6009 if (Arg.isInvalid()) 6010 return true; 6011 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6012 if (ArgSize.getQuantity() >= 0x100) { 6013 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6014 << i << (int)ArgSize.getQuantity() << 0xff 6015 << TheCall->getSourceRange(); 6016 } 6017 TheCall->setArg(i, Arg.get()); 6018 i++; 6019 } 6020 6021 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6022 // call to avoid duplicate diagnostics. 6023 if (!IsSizeCall) { 6024 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6025 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6026 bool Success = CheckFormatArguments( 6027 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6028 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6029 CheckedVarArgs); 6030 if (!Success) 6031 return true; 6032 } 6033 6034 if (IsSizeCall) { 6035 TheCall->setType(Context.getSizeType()); 6036 } else { 6037 TheCall->setType(Context.VoidPtrTy); 6038 } 6039 return false; 6040 } 6041 6042 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6043 /// TheCall is a constant expression. 6044 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6045 llvm::APSInt &Result) { 6046 Expr *Arg = TheCall->getArg(ArgNum); 6047 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6048 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6049 6050 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6051 6052 if (!Arg->isIntegerConstantExpr(Result, Context)) 6053 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6054 << FDecl->getDeclName() << Arg->getSourceRange(); 6055 6056 return false; 6057 } 6058 6059 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6060 /// TheCall is a constant expression in the range [Low, High]. 6061 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6062 int Low, int High, bool RangeIsError) { 6063 if (isConstantEvaluated()) 6064 return false; 6065 llvm::APSInt Result; 6066 6067 // We can't check the value of a dependent argument. 6068 Expr *Arg = TheCall->getArg(ArgNum); 6069 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6070 return false; 6071 6072 // Check constant-ness first. 6073 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6074 return true; 6075 6076 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 6077 if (RangeIsError) 6078 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 6079 << Result.toString(10) << Low << High << Arg->getSourceRange(); 6080 else 6081 // Defer the warning until we know if the code will be emitted so that 6082 // dead code can ignore this. 
6083       DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
6084                           PDiag(diag::warn_argument_invalid_range)
6085                               << Result.toString(10) << Low << High
6086                               << Arg->getSourceRange());
6087   }
6088
6089   return false;
6090 }
6091
6092 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
6093 /// TheCall is a constant expression that is a multiple of Num.
6094 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
6095                                           unsigned Num) {
6096   llvm::APSInt Result;
6097
6098   // We can't check the value of a dependent argument.
6099   Expr *Arg = TheCall->getArg(ArgNum);
6100   if (Arg->isTypeDependent() || Arg->isValueDependent())
6101     return false;
6102
6103   // Check constant-ness first.
6104   if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6105     return true;
6106
6107   if (Result.getSExtValue() % Num != 0)
6108     return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
6109            << Num << Arg->getSourceRange();
6110
6111   return false;
6112 }
6113
6114 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions.
6115 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
6116   if (BuiltinID == AArch64::BI__builtin_arm_irg) {
6117     if (checkArgCount(*this, TheCall, 2))
6118       return true;
6119     Expr *Arg0 = TheCall->getArg(0);
6120     Expr *Arg1 = TheCall->getArg(1);
6121
6122     ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6123     if (FirstArg.isInvalid())
6124       return true;
6125     QualType FirstArgType = FirstArg.get()->getType();
6126     if (!FirstArgType->isAnyPointerType())
6127       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6128              << "first" << FirstArgType << Arg0->getSourceRange();
6129     TheCall->setArg(0, FirstArg.get());
6130
6131     ExprResult SecArg = DefaultLvalueConversion(Arg1);
6132     if (SecArg.isInvalid())
6133       return true;
6134     QualType SecArgType = SecArg.get()->getType();
6135     if (!SecArgType->isIntegerType())
6136       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
6137              << "second" << SecArgType << Arg1->getSourceRange();
6138
6139     // Derive the return type from the pointer argument.
6140     TheCall->setType(FirstArgType);
6141     return false;
6142   }
6143
6144   if (BuiltinID == AArch64::BI__builtin_arm_addg) {
6145     if (checkArgCount(*this, TheCall, 2))
6146       return true;
6147
6148     Expr *Arg0 = TheCall->getArg(0);
6149     ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6150     if (FirstArg.isInvalid())
6151       return true;
6152     QualType FirstArgType = FirstArg.get()->getType();
6153     if (!FirstArgType->isAnyPointerType())
6154       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6155              << "first" << FirstArgType << Arg0->getSourceRange();
6156     TheCall->setArg(0, FirstArg.get());
6157
6158     // Derive the return type from the pointer argument.
6159 TheCall->setType(FirstArgType); 6160 6161 // Second arg must be an constant in range [0,15] 6162 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6163 } 6164 6165 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 6166 if (checkArgCount(*this, TheCall, 2)) 6167 return true; 6168 Expr *Arg0 = TheCall->getArg(0); 6169 Expr *Arg1 = TheCall->getArg(1); 6170 6171 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6172 if (FirstArg.isInvalid()) 6173 return true; 6174 QualType FirstArgType = FirstArg.get()->getType(); 6175 if (!FirstArgType->isAnyPointerType()) 6176 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6177 << "first" << FirstArgType << Arg0->getSourceRange(); 6178 6179 QualType SecArgType = Arg1->getType(); 6180 if (!SecArgType->isIntegerType()) 6181 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6182 << "second" << SecArgType << Arg1->getSourceRange(); 6183 TheCall->setType(Context.IntTy); 6184 return false; 6185 } 6186 6187 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 6188 BuiltinID == AArch64::BI__builtin_arm_stg) { 6189 if (checkArgCount(*this, TheCall, 1)) 6190 return true; 6191 Expr *Arg0 = TheCall->getArg(0); 6192 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6193 if (FirstArg.isInvalid()) 6194 return true; 6195 6196 QualType FirstArgType = FirstArg.get()->getType(); 6197 if (!FirstArgType->isAnyPointerType()) 6198 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6199 << "first" << FirstArgType << Arg0->getSourceRange(); 6200 TheCall->setArg(0, FirstArg.get()); 6201 6202 // Derive the return type from the pointer argument. 6203 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6204 TheCall->setType(FirstArgType); 6205 return false; 6206 } 6207 6208 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6209 Expr *ArgA = TheCall->getArg(0); 6210 Expr *ArgB = TheCall->getArg(1); 6211 6212 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6213 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6214 6215 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6216 return true; 6217 6218 QualType ArgTypeA = ArgExprA.get()->getType(); 6219 QualType ArgTypeB = ArgExprB.get()->getType(); 6220 6221 auto isNull = [&] (Expr *E) -> bool { 6222 return E->isNullPointerConstant( 6223 Context, Expr::NPC_ValueDependentIsNotNull); }; 6224 6225 // argument should be either a pointer or null 6226 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6227 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6228 << "first" << ArgTypeA << ArgA->getSourceRange(); 6229 6230 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6231 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6232 << "second" << ArgTypeB << ArgB->getSourceRange(); 6233 6234 // Ensure Pointee types are compatible 6235 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6236 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6237 QualType pointeeA = ArgTypeA->getPointeeType(); 6238 QualType pointeeB = ArgTypeB->getPointeeType(); 6239 if (!Context.typesAreCompatible( 6240 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6241 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6242 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6243 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6244 << ArgB->getSourceRange(); 6245 } 6246 } 6247 6248 // at least one argument should be pointer type 6249 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6250 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6251 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6252 6253 if (isNull(ArgA)) // adopt type of the other pointer 6254 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6255 6256 if (isNull(ArgB)) 6257 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6258 6259 TheCall->setArg(0, ArgExprA.get()); 6260 TheCall->setArg(1, ArgExprB.get()); 6261 TheCall->setType(Context.LongLongTy); 6262 return false; 6263 } 6264 assert(false && "Unhandled ARM MTE intrinsic"); 6265 return true; 6266 } 6267 6268 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6269 /// TheCall is an ARM/AArch64 special register string literal. 6270 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 6271 int ArgNum, unsigned ExpectedFieldNum, 6272 bool AllowName) { 6273 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 6274 BuiltinID == ARM::BI__builtin_arm_wsr64 || 6275 BuiltinID == ARM::BI__builtin_arm_rsr || 6276 BuiltinID == ARM::BI__builtin_arm_rsrp || 6277 BuiltinID == ARM::BI__builtin_arm_wsr || 6278 BuiltinID == ARM::BI__builtin_arm_wsrp; 6279 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 6280 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 6281 BuiltinID == AArch64::BI__builtin_arm_rsr || 6282 BuiltinID == AArch64::BI__builtin_arm_rsrp || 6283 BuiltinID == AArch64::BI__builtin_arm_wsr || 6284 BuiltinID == AArch64::BI__builtin_arm_wsrp; 6285 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 6286 6287 // We can't check the value of a dependent argument. 6288 Expr *Arg = TheCall->getArg(ArgNum); 6289 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6290 return false; 6291 6292 // Check if the argument is a string literal. 6293 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 6294 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 6295 << Arg->getSourceRange(); 6296 6297 // Check the type of special register given. 6298 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 6299 SmallVector<StringRef, 6> Fields; 6300 Reg.split(Fields, ":"); 6301 6302 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 6303 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6304 << Arg->getSourceRange(); 6305 6306 // If the string is the name of a register then we cannot check that it is 6307 // valid here but if the string is of one the forms described in ACLE then we 6308 // can check that the supplied fields are integers and within the valid 6309 // ranges. 6310 if (Fields.size() > 1) { 6311 bool FiveFields = Fields.size() == 5; 6312 6313 bool ValidString = true; 6314 if (IsARMBuiltin) { 6315 ValidString &= Fields[0].startswith_lower("cp") || 6316 Fields[0].startswith_lower("p"); 6317 if (ValidString) 6318 Fields[0] = 6319 Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1); 6320 6321 ValidString &= Fields[2].startswith_lower("c"); 6322 if (ValidString) 6323 Fields[2] = Fields[2].drop_front(1); 6324 6325 if (FiveFields) { 6326 ValidString &= Fields[3].startswith_lower("c"); 6327 if (ValidString) 6328 Fields[3] = Fields[3].drop_front(1); 6329 } 6330 } 6331 6332 SmallVector<int, 5> Ranges; 6333 if (FiveFields) 6334 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 6335 else 6336 Ranges.append({15, 7, 15}); 6337 6338 for (unsigned i=0; i<Fields.size(); ++i) { 6339 int IntField; 6340 ValidString &= !Fields[i].getAsInteger(10, IntField); 6341 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 6342 } 6343 6344 if (!ValidString) 6345 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6346 << Arg->getSourceRange(); 6347 } else if (IsAArch64Builtin && Fields.size() == 1) { 6348 // If the register name is one of those that appear in the condition below 6349 // and the special register builtin being used is one of the write builtins, 6350 // then we require that the argument provided for writing to the register 6351 // is an integer constant expression. This is because it will be lowered to 6352 // an MSR (immediate) instruction, so we need to know the immediate at 6353 // compile time. 6354 if (TheCall->getNumArgs() != 2) 6355 return false; 6356 6357 std::string RegLower = Reg.lower(); 6358 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 6359 RegLower != "pan" && RegLower != "uao") 6360 return false; 6361 6362 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6363 } 6364 6365 return false; 6366 } 6367 6368 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 6369 /// This checks that the target supports __builtin_longjmp and 6370 /// that val is a constant 1. 6371 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 6372 if (!Context.getTargetInfo().hasSjLjLowering()) 6373 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 6374 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6375 6376 Expr *Arg = TheCall->getArg(1); 6377 llvm::APSInt Result; 6378 6379 // TODO: This is less than ideal. Overload this to take a value. 6380 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6381 return true; 6382 6383 if (Result != 1) 6384 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 6385 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 6386 6387 return false; 6388 } 6389 6390 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 6391 /// This checks that the target supports __builtin_setjmp. 6392 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 6393 if (!Context.getTargetInfo().hasSjLjLowering()) 6394 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 6395 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6396 return false; 6397 } 6398 6399 namespace { 6400 6401 class UncoveredArgHandler { 6402 enum { Unknown = -1, AllCovered = -2 }; 6403 6404 signed FirstUncoveredArg = Unknown; 6405 SmallVector<const Expr *, 4> DiagnosticExprs; 6406 6407 public: 6408 UncoveredArgHandler() = default; 6409 6410 bool hasUncoveredArg() const { 6411 return (FirstUncoveredArg >= 0); 6412 } 6413 6414 unsigned getUncoveredArg() const { 6415 assert(hasUncoveredArg() && "no uncovered argument"); 6416 return FirstUncoveredArg; 6417 } 6418 6419 void setAllCovered() { 6420 // A string has been found with all arguments covered, so clear out 6421 // the diagnostics. 6422 DiagnosticExprs.clear(); 6423 FirstUncoveredArg = AllCovered; 6424 } 6425 6426 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 6427 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 6428 6429 // Don't update if a previous string covers all arguments. 
6430 if (FirstUncoveredArg == AllCovered) 6431 return; 6432 6433 // UncoveredArgHandler tracks the highest uncovered argument index 6434 // and with it all the strings that match this index. 6435 if (NewFirstUncoveredArg == FirstUncoveredArg) 6436 DiagnosticExprs.push_back(StrExpr); 6437 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 6438 DiagnosticExprs.clear(); 6439 DiagnosticExprs.push_back(StrExpr); 6440 FirstUncoveredArg = NewFirstUncoveredArg; 6441 } 6442 } 6443 6444 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 6445 }; 6446 6447 enum StringLiteralCheckType { 6448 SLCT_NotALiteral, 6449 SLCT_UncheckedLiteral, 6450 SLCT_CheckedLiteral 6451 }; 6452 6453 } // namespace 6454 6455 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 6456 BinaryOperatorKind BinOpKind, 6457 bool AddendIsRight) { 6458 unsigned BitWidth = Offset.getBitWidth(); 6459 unsigned AddendBitWidth = Addend.getBitWidth(); 6460 // There might be negative interim results. 6461 if (Addend.isUnsigned()) { 6462 Addend = Addend.zext(++AddendBitWidth); 6463 Addend.setIsSigned(true); 6464 } 6465 // Adjust the bit width of the APSInts. 6466 if (AddendBitWidth > BitWidth) { 6467 Offset = Offset.sext(AddendBitWidth); 6468 BitWidth = AddendBitWidth; 6469 } else if (BitWidth > AddendBitWidth) { 6470 Addend = Addend.sext(BitWidth); 6471 } 6472 6473 bool Ov = false; 6474 llvm::APSInt ResOffset = Offset; 6475 if (BinOpKind == BO_Add) 6476 ResOffset = Offset.sadd_ov(Addend, Ov); 6477 else { 6478 assert(AddendIsRight && BinOpKind == BO_Sub && 6479 "operator must be add or sub with addend on the right"); 6480 ResOffset = Offset.ssub_ov(Addend, Ov); 6481 } 6482 6483 // We add an offset to a pointer here so we should support an offset as big as 6484 // possible. 6485 if (Ov) { 6486 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 6487 "index (intermediate) result too big"); 6488 Offset = Offset.sext(2 * BitWidth); 6489 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 6490 return; 6491 } 6492 6493 Offset = ResOffset; 6494 } 6495 6496 namespace { 6497 6498 // This is a wrapper class around StringLiteral to support offsetted string 6499 // literals as format strings. It takes the offset into account when returning 6500 // the string and its length or the source locations to display notes correctly. 
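// For illustration only (a hypothetical call, not from this file): in
//   printf("ignored: %d" + 9, i);
// the literal is analyzed at offset 9, so the effective format string seen by
// the checker is "%d", and notes still point at the correct bytes inside the
// original literal.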
6501 class FormatStringLiteral { 6502 const StringLiteral *FExpr; 6503 int64_t Offset; 6504 6505 public: 6506 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 6507 : FExpr(fexpr), Offset(Offset) {} 6508 6509 StringRef getString() const { 6510 return FExpr->getString().drop_front(Offset); 6511 } 6512 6513 unsigned getByteLength() const { 6514 return FExpr->getByteLength() - getCharByteWidth() * Offset; 6515 } 6516 6517 unsigned getLength() const { return FExpr->getLength() - Offset; } 6518 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 6519 6520 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 6521 6522 QualType getType() const { return FExpr->getType(); } 6523 6524 bool isAscii() const { return FExpr->isAscii(); } 6525 bool isWide() const { return FExpr->isWide(); } 6526 bool isUTF8() const { return FExpr->isUTF8(); } 6527 bool isUTF16() const { return FExpr->isUTF16(); } 6528 bool isUTF32() const { return FExpr->isUTF32(); } 6529 bool isPascal() const { return FExpr->isPascal(); } 6530 6531 SourceLocation getLocationOfByte( 6532 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 6533 const TargetInfo &Target, unsigned *StartToken = nullptr, 6534 unsigned *StartTokenByteOffset = nullptr) const { 6535 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 6536 StartToken, StartTokenByteOffset); 6537 } 6538 6539 SourceLocation getBeginLoc() const LLVM_READONLY { 6540 return FExpr->getBeginLoc().getLocWithOffset(Offset); 6541 } 6542 6543 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 6544 }; 6545 6546 } // namespace 6547 6548 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 6549 const Expr *OrigFormatExpr, 6550 ArrayRef<const Expr *> Args, 6551 bool HasVAListArg, unsigned format_idx, 6552 unsigned firstDataArg, 6553 Sema::FormatStringType Type, 6554 bool inFunctionCall, 6555 Sema::VariadicCallType CallType, 6556 llvm::SmallBitVector &CheckedVarArgs, 6557 UncoveredArgHandler &UncoveredArg); 6558 6559 // Determine if an expression is a string literal or constant string. 6560 // If this function returns false on the arguments to a function expecting a 6561 // format string, we will usually need to emit a warning. 6562 // True string literals are then checked by CheckFormatString. 6563 static StringLiteralCheckType 6564 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 6565 bool HasVAListArg, unsigned format_idx, 6566 unsigned firstDataArg, Sema::FormatStringType Type, 6567 Sema::VariadicCallType CallType, bool InFunctionCall, 6568 llvm::SmallBitVector &CheckedVarArgs, 6569 UncoveredArgHandler &UncoveredArg, 6570 llvm::APSInt Offset) { 6571 if (S.isConstantEvaluated()) 6572 return SLCT_NotALiteral; 6573 tryAgain: 6574 assert(Offset.isSigned() && "invalid offset"); 6575 6576 if (E->isTypeDependent() || E->isValueDependent()) 6577 return SLCT_NotALiteral; 6578 6579 E = E->IgnoreParenCasts(); 6580 6581 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 6582 // Technically -Wformat-nonliteral does not warn about this case. 6583 // The behavior of printf and friends in this case is implementation 6584 // dependent. Ideally if the format string cannot be null then 6585 // it should have a 'nonnull' attribute in the function prototype. 
6586 return SLCT_UncheckedLiteral; 6587 6588 switch (E->getStmtClass()) { 6589 case Stmt::BinaryConditionalOperatorClass: 6590 case Stmt::ConditionalOperatorClass: { 6591 // The expression is a literal if both sub-expressions were, and it was 6592 // completely checked only if both sub-expressions were checked. 6593 const AbstractConditionalOperator *C = 6594 cast<AbstractConditionalOperator>(E); 6595 6596 // Determine whether it is necessary to check both sub-expressions, for 6597 // example, because the condition expression is a constant that can be 6598 // evaluated at compile time. 6599 bool CheckLeft = true, CheckRight = true; 6600 6601 bool Cond; 6602 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 6603 S.isConstantEvaluated())) { 6604 if (Cond) 6605 CheckRight = false; 6606 else 6607 CheckLeft = false; 6608 } 6609 6610 // We need to maintain the offsets for the right and the left hand side 6611 // separately to check if every possible indexed expression is a valid 6612 // string literal. They might have different offsets for different string 6613 // literals in the end. 6614 StringLiteralCheckType Left; 6615 if (!CheckLeft) 6616 Left = SLCT_UncheckedLiteral; 6617 else { 6618 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 6619 HasVAListArg, format_idx, firstDataArg, 6620 Type, CallType, InFunctionCall, 6621 CheckedVarArgs, UncoveredArg, Offset); 6622 if (Left == SLCT_NotALiteral || !CheckRight) { 6623 return Left; 6624 } 6625 } 6626 6627 StringLiteralCheckType Right = 6628 checkFormatStringExpr(S, C->getFalseExpr(), Args, 6629 HasVAListArg, format_idx, firstDataArg, 6630 Type, CallType, InFunctionCall, CheckedVarArgs, 6631 UncoveredArg, Offset); 6632 6633 return (CheckLeft && Left < Right) ? Left : Right; 6634 } 6635 6636 case Stmt::ImplicitCastExprClass: 6637 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 6638 goto tryAgain; 6639 6640 case Stmt::OpaqueValueExprClass: 6641 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 6642 E = src; 6643 goto tryAgain; 6644 } 6645 return SLCT_NotALiteral; 6646 6647 case Stmt::PredefinedExprClass: 6648 // While __func__, etc., are technically not string literals, they 6649 // cannot contain format specifiers and thus are not a security 6650 // liability. 6651 return SLCT_UncheckedLiteral; 6652 6653 case Stmt::DeclRefExprClass: { 6654 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 6655 6656 // As an exception, do not flag errors for variables binding to 6657 // const string literals. 6658 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 6659 bool isConstant = false; 6660 QualType T = DR->getType(); 6661 6662 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 6663 isConstant = AT->getElementType().isConstant(S.Context); 6664 } else if (const PointerType *PT = T->getAs<PointerType>()) { 6665 isConstant = T.isConstant(S.Context) && 6666 PT->getPointeeType().isConstant(S.Context); 6667 } else if (T->isObjCObjectPointerType()) { 6668 // In ObjC, there is usually no "const ObjectPointer" type, 6669 // so don't check if the pointee type is constant. 
6670 isConstant = T.isConstant(S.Context); 6671 } 6672 6673 if (isConstant) { 6674 if (const Expr *Init = VD->getAnyInitializer()) { 6675 // Look through initializers like const char c[] = { "foo" } 6676 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 6677 if (InitList->isStringLiteralInit()) 6678 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 6679 } 6680 return checkFormatStringExpr(S, Init, Args, 6681 HasVAListArg, format_idx, 6682 firstDataArg, Type, CallType, 6683 /*InFunctionCall*/ false, CheckedVarArgs, 6684 UncoveredArg, Offset); 6685 } 6686 } 6687 6688 // For vprintf* functions (i.e., HasVAListArg==true), we add a 6689 // special check to see if the format string is a function parameter 6690 // of the function calling the printf function. If the function 6691 // has an attribute indicating it is a printf-like function, then we 6692 // should suppress warnings concerning non-literals being used in a call 6693 // to a vprintf function. For example: 6694 // 6695 // void 6696 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 6697 // va_list ap; 6698 // va_start(ap, fmt); 6699 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 6700 // ... 6701 // } 6702 if (HasVAListArg) { 6703 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 6704 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 6705 int PVIndex = PV->getFunctionScopeIndex() + 1; 6706 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 6707 // adjust for implicit parameter 6708 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 6709 if (MD->isInstance()) 6710 ++PVIndex; 6711 // We also check if the formats are compatible. 6712 // We can't pass a 'scanf' string to a 'printf' function. 6713 if (PVIndex == PVFormat->getFormatIdx() && 6714 Type == S.GetFormatStringType(PVFormat)) 6715 return SLCT_UncheckedLiteral; 6716 } 6717 } 6718 } 6719 } 6720 } 6721 6722 return SLCT_NotALiteral; 6723 } 6724 6725 case Stmt::CallExprClass: 6726 case Stmt::CXXMemberCallExprClass: { 6727 const CallExpr *CE = cast<CallExpr>(E); 6728 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 6729 bool IsFirst = true; 6730 StringLiteralCheckType CommonResult; 6731 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 6732 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 6733 StringLiteralCheckType Result = checkFormatStringExpr( 6734 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 6735 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset); 6736 if (IsFirst) { 6737 CommonResult = Result; 6738 IsFirst = false; 6739 } 6740 } 6741 if (!IsFirst) 6742 return CommonResult; 6743 6744 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 6745 unsigned BuiltinID = FD->getBuiltinID(); 6746 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 6747 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 6748 const Expr *Arg = CE->getArg(0); 6749 return checkFormatStringExpr(S, Arg, Args, 6750 HasVAListArg, format_idx, 6751 firstDataArg, Type, CallType, 6752 InFunctionCall, CheckedVarArgs, 6753 UncoveredArg, Offset); 6754 } 6755 } 6756 } 6757 6758 return SLCT_NotALiteral; 6759 } 6760 case Stmt::ObjCMessageExprClass: { 6761 const auto *ME = cast<ObjCMessageExpr>(E); 6762 if (const auto *ND = ME->getMethodDecl()) { 6763 if (const auto *FA = ND->getAttr<FormatArgAttr>()) { 6764 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 6765 return checkFormatStringExpr( 6766 S, 
Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 6767 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset); 6768 } 6769 } 6770 6771 return SLCT_NotALiteral; 6772 } 6773 case Stmt::ObjCStringLiteralClass: 6774 case Stmt::StringLiteralClass: { 6775 const StringLiteral *StrE = nullptr; 6776 6777 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 6778 StrE = ObjCFExpr->getString(); 6779 else 6780 StrE = cast<StringLiteral>(E); 6781 6782 if (StrE) { 6783 if (Offset.isNegative() || Offset > StrE->getLength()) { 6784 // TODO: It would be better to have an explicit warning for out of 6785 // bounds literals. 6786 return SLCT_NotALiteral; 6787 } 6788 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 6789 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 6790 firstDataArg, Type, InFunctionCall, CallType, 6791 CheckedVarArgs, UncoveredArg); 6792 return SLCT_CheckedLiteral; 6793 } 6794 6795 return SLCT_NotALiteral; 6796 } 6797 case Stmt::BinaryOperatorClass: { 6798 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 6799 6800 // A string literal + an int offset is still a string literal. 6801 if (BinOp->isAdditiveOp()) { 6802 Expr::EvalResult LResult, RResult; 6803 6804 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 6805 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 6806 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 6807 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 6808 6809 if (LIsInt != RIsInt) { 6810 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 6811 6812 if (LIsInt) { 6813 if (BinOpKind == BO_Add) { 6814 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 6815 E = BinOp->getRHS(); 6816 goto tryAgain; 6817 } 6818 } else { 6819 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 6820 E = BinOp->getLHS(); 6821 goto tryAgain; 6822 } 6823 } 6824 } 6825 6826 return SLCT_NotALiteral; 6827 } 6828 case Stmt::UnaryOperatorClass: { 6829 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 6830 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 6831 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 6832 Expr::EvalResult IndexResult; 6833 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 6834 Expr::SE_NoSideEffects, 6835 S.isConstantEvaluated())) { 6836 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 6837 /*RHS is int*/ true); 6838 E = ASE->getBase(); 6839 goto tryAgain; 6840 } 6841 } 6842 6843 return SLCT_NotALiteral; 6844 } 6845 6846 default: 6847 return SLCT_NotALiteral; 6848 } 6849 } 6850 6851 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 6852 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 6853 .Case("scanf", FST_Scanf) 6854 .Cases("printf", "printf0", FST_Printf) 6855 .Cases("NSString", "CFString", FST_NSString) 6856 .Case("strftime", FST_Strftime) 6857 .Case("strfmon", FST_Strfmon) 6858 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 6859 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 6860 .Case("os_trace", FST_OSLog) 6861 .Case("os_log", FST_OSLog) 6862 .Default(FST_Unknown); 6863 } 6864 6865 /// CheckFormatArguments - Check calls to printf and scanf (and similar 6866 /// functions) for correct use of format strings. 6867 /// Returns true if a format string has been fully checked. 
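///
/// For example, a declaration such as
///   int my_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
/// routes calls to it through this checking with the FST_Printf format string
/// kind. (The name 'my_printf' here is purely illustrative.)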
6868 bool Sema::CheckFormatArguments(const FormatAttr *Format, 6869 ArrayRef<const Expr *> Args, 6870 bool IsCXXMember, 6871 VariadicCallType CallType, 6872 SourceLocation Loc, SourceRange Range, 6873 llvm::SmallBitVector &CheckedVarArgs) { 6874 FormatStringInfo FSI; 6875 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 6876 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 6877 FSI.FirstDataArg, GetFormatStringType(Format), 6878 CallType, Loc, Range, CheckedVarArgs); 6879 return false; 6880 } 6881 6882 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 6883 bool HasVAListArg, unsigned format_idx, 6884 unsigned firstDataArg, FormatStringType Type, 6885 VariadicCallType CallType, 6886 SourceLocation Loc, SourceRange Range, 6887 llvm::SmallBitVector &CheckedVarArgs) { 6888 // CHECK: printf/scanf-like function is called with no format string. 6889 if (format_idx >= Args.size()) { 6890 Diag(Loc, diag::warn_missing_format_string) << Range; 6891 return false; 6892 } 6893 6894 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 6895 6896 // CHECK: format string is not a string literal. 6897 // 6898 // Dynamically generated format strings are difficult to 6899 // automatically vet at compile time. Requiring that format strings 6900 // are string literals: (1) permits the checking of format strings by 6901 // the compiler and thereby (2) can practically remove the source of 6902 // many format string exploits. 6903 6904 // Format string can be either ObjC string (e.g. @"%d") or 6905 // C string (e.g. "%d") 6906 // ObjC string uses the same format specifiers as C string, so we can use 6907 // the same format string checking logic for both ObjC and C strings. 6908 UncoveredArgHandler UncoveredArg; 6909 StringLiteralCheckType CT = 6910 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 6911 format_idx, firstDataArg, Type, CallType, 6912 /*IsFunctionCall*/ true, CheckedVarArgs, 6913 UncoveredArg, 6914 /*no string offset*/ llvm::APSInt(64, false) = 0); 6915 6916 // Generate a diagnostic where an uncovered argument is detected. 6917 if (UncoveredArg.hasUncoveredArg()) { 6918 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 6919 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 6920 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 6921 } 6922 6923 if (CT != SLCT_NotALiteral) 6924 // Literal format string found, check done! 6925 return CT == SLCT_CheckedLiteral; 6926 6927 // Strftime is particular as it always uses a single 'time' argument, 6928 // so it is safe to pass a non-literal string. 6929 if (Type == FST_Strftime) 6930 return false; 6931 6932 // Do not emit diag when the string param is a macro expansion and the 6933 // format is either NSString or CFString. This is a hack to prevent 6934 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 6935 // which are usually used in place of NS and CF string literals. 6936 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 6937 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 6938 return false; 6939 6940 // If there are no arguments specified, warn with -Wformat-security, otherwise 6941 // warn only with -Wformat-nonliteral. 
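// For example, given a non-literal 'fmt':
//   printf(fmt);      // no data arguments -> -Wformat-security
//   printf(fmt, x);   // has data arguments -> -Wformat-nonliteral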
6942 if (Args.size() == firstDataArg) { 6943 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 6944 << OrigFormatExpr->getSourceRange(); 6945 switch (Type) { 6946 default: 6947 break; 6948 case FST_Kprintf: 6949 case FST_FreeBSDKPrintf: 6950 case FST_Printf: 6951 Diag(FormatLoc, diag::note_format_security_fixit) 6952 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 6953 break; 6954 case FST_NSString: 6955 Diag(FormatLoc, diag::note_format_security_fixit) 6956 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 6957 break; 6958 } 6959 } else { 6960 Diag(FormatLoc, diag::warn_format_nonliteral) 6961 << OrigFormatExpr->getSourceRange(); 6962 } 6963 return false; 6964 } 6965 6966 namespace { 6967 6968 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 6969 protected: 6970 Sema &S; 6971 const FormatStringLiteral *FExpr; 6972 const Expr *OrigFormatExpr; 6973 const Sema::FormatStringType FSType; 6974 const unsigned FirstDataArg; 6975 const unsigned NumDataArgs; 6976 const char *Beg; // Start of format string. 6977 const bool HasVAListArg; 6978 ArrayRef<const Expr *> Args; 6979 unsigned FormatIdx; 6980 llvm::SmallBitVector CoveredArgs; 6981 bool usesPositionalArgs = false; 6982 bool atFirstArg = true; 6983 bool inFunctionCall; 6984 Sema::VariadicCallType CallType; 6985 llvm::SmallBitVector &CheckedVarArgs; 6986 UncoveredArgHandler &UncoveredArg; 6987 6988 public: 6989 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 6990 const Expr *origFormatExpr, 6991 const Sema::FormatStringType type, unsigned firstDataArg, 6992 unsigned numDataArgs, const char *beg, bool hasVAListArg, 6993 ArrayRef<const Expr *> Args, unsigned formatIdx, 6994 bool inFunctionCall, Sema::VariadicCallType callType, 6995 llvm::SmallBitVector &CheckedVarArgs, 6996 UncoveredArgHandler &UncoveredArg) 6997 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 6998 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 6999 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 7000 inFunctionCall(inFunctionCall), CallType(callType), 7001 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 7002 CoveredArgs.resize(numDataArgs); 7003 CoveredArgs.reset(); 7004 } 7005 7006 void DoneProcessing(); 7007 7008 void HandleIncompleteSpecifier(const char *startSpecifier, 7009 unsigned specifierLen) override; 7010 7011 void HandleInvalidLengthModifier( 7012 const analyze_format_string::FormatSpecifier &FS, 7013 const analyze_format_string::ConversionSpecifier &CS, 7014 const char *startSpecifier, unsigned specifierLen, 7015 unsigned DiagID); 7016 7017 void HandleNonStandardLengthModifier( 7018 const analyze_format_string::FormatSpecifier &FS, 7019 const char *startSpecifier, unsigned specifierLen); 7020 7021 void HandleNonStandardConversionSpecifier( 7022 const analyze_format_string::ConversionSpecifier &CS, 7023 const char *startSpecifier, unsigned specifierLen); 7024 7025 void HandlePosition(const char *startPos, unsigned posLen) override; 7026 7027 void HandleInvalidPosition(const char *startSpecifier, 7028 unsigned specifierLen, 7029 analyze_format_string::PositionContext p) override; 7030 7031 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7032 7033 void HandleNullChar(const char *nullCharacter) override; 7034 7035 template <typename Range> 7036 static void 7037 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7038 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7039 bool IsStringLocation, 
Range StringRange, 7040 ArrayRef<FixItHint> Fixit = None); 7041 7042 protected: 7043 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7044 const char *startSpec, 7045 unsigned specifierLen, 7046 const char *csStart, unsigned csLen); 7047 7048 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7049 const char *startSpec, 7050 unsigned specifierLen); 7051 7052 SourceRange getFormatStringRange(); 7053 CharSourceRange getSpecifierRange(const char *startSpecifier, 7054 unsigned specifierLen); 7055 SourceLocation getLocationOfByte(const char *x); 7056 7057 const Expr *getDataArg(unsigned i) const; 7058 7059 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7060 const analyze_format_string::ConversionSpecifier &CS, 7061 const char *startSpecifier, unsigned specifierLen, 7062 unsigned argIndex); 7063 7064 template <typename Range> 7065 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7066 bool IsStringLocation, Range StringRange, 7067 ArrayRef<FixItHint> Fixit = None); 7068 }; 7069 7070 } // namespace 7071 7072 SourceRange CheckFormatHandler::getFormatStringRange() { 7073 return OrigFormatExpr->getSourceRange(); 7074 } 7075 7076 CharSourceRange CheckFormatHandler:: 7077 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7078 SourceLocation Start = getLocationOfByte(startSpecifier); 7079 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7080 7081 // Advance the end SourceLocation by one due to half-open ranges. 7082 End = End.getLocWithOffset(1); 7083 7084 return CharSourceRange::getCharRange(Start, End); 7085 } 7086 7087 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7088 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7089 S.getLangOpts(), S.Context.getTargetInfo()); 7090 } 7091 7092 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7093 unsigned specifierLen){ 7094 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7095 getLocationOfByte(startSpecifier), 7096 /*IsStringLocation*/true, 7097 getSpecifierRange(startSpecifier, specifierLen)); 7098 } 7099 7100 void CheckFormatHandler::HandleInvalidLengthModifier( 7101 const analyze_format_string::FormatSpecifier &FS, 7102 const analyze_format_string::ConversionSpecifier &CS, 7103 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7104 using namespace analyze_format_string; 7105 7106 const LengthModifier &LM = FS.getLengthModifier(); 7107 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7108 7109 // See if we know how to fix this length modifier. 
7110 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7111 if (FixedLM) { 7112 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7113 getLocationOfByte(LM.getStart()), 7114 /*IsStringLocation*/true, 7115 getSpecifierRange(startSpecifier, specifierLen)); 7116 7117 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7118 << FixedLM->toString() 7119 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7120 7121 } else { 7122 FixItHint Hint; 7123 if (DiagID == diag::warn_format_nonsensical_length) 7124 Hint = FixItHint::CreateRemoval(LMRange); 7125 7126 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7127 getLocationOfByte(LM.getStart()), 7128 /*IsStringLocation*/true, 7129 getSpecifierRange(startSpecifier, specifierLen), 7130 Hint); 7131 } 7132 } 7133 7134 void CheckFormatHandler::HandleNonStandardLengthModifier( 7135 const analyze_format_string::FormatSpecifier &FS, 7136 const char *startSpecifier, unsigned specifierLen) { 7137 using namespace analyze_format_string; 7138 7139 const LengthModifier &LM = FS.getLengthModifier(); 7140 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7141 7142 // See if we know how to fix this length modifier. 7143 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7144 if (FixedLM) { 7145 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7146 << LM.toString() << 0, 7147 getLocationOfByte(LM.getStart()), 7148 /*IsStringLocation*/true, 7149 getSpecifierRange(startSpecifier, specifierLen)); 7150 7151 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7152 << FixedLM->toString() 7153 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7154 7155 } else { 7156 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7157 << LM.toString() << 0, 7158 getLocationOfByte(LM.getStart()), 7159 /*IsStringLocation*/true, 7160 getSpecifierRange(startSpecifier, specifierLen)); 7161 } 7162 } 7163 7164 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7165 const analyze_format_string::ConversionSpecifier &CS, 7166 const char *startSpecifier, unsigned specifierLen) { 7167 using namespace analyze_format_string; 7168 7169 // See if we know how to fix this conversion specifier. 
7170 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7171 if (FixedCS) { 7172 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7173 << CS.toString() << /*conversion specifier*/1, 7174 getLocationOfByte(CS.getStart()), 7175 /*IsStringLocation*/true, 7176 getSpecifierRange(startSpecifier, specifierLen)); 7177 7178 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7179 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7180 << FixedCS->toString() 7181 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7182 } else { 7183 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7184 << CS.toString() << /*conversion specifier*/1, 7185 getLocationOfByte(CS.getStart()), 7186 /*IsStringLocation*/true, 7187 getSpecifierRange(startSpecifier, specifierLen)); 7188 } 7189 } 7190 7191 void CheckFormatHandler::HandlePosition(const char *startPos, 7192 unsigned posLen) { 7193 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7194 getLocationOfByte(startPos), 7195 /*IsStringLocation*/true, 7196 getSpecifierRange(startPos, posLen)); 7197 } 7198 7199 void 7200 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7201 analyze_format_string::PositionContext p) { 7202 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7203 << (unsigned) p, 7204 getLocationOfByte(startPos), /*IsStringLocation*/true, 7205 getSpecifierRange(startPos, posLen)); 7206 } 7207 7208 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7209 unsigned posLen) { 7210 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7211 getLocationOfByte(startPos), 7212 /*IsStringLocation*/true, 7213 getSpecifierRange(startPos, posLen)); 7214 } 7215 7216 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7217 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7218 // The presence of a null character is likely an error. 7219 EmitFormatDiagnostic( 7220 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7221 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7222 getFormatStringRange()); 7223 } 7224 } 7225 7226 // Note that this may return NULL if there was an error parsing or building 7227 // one of the argument expressions. 7228 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 7229 return Args[FirstDataArg + i]; 7230 } 7231 7232 void CheckFormatHandler::DoneProcessing() { 7233 // Does the number of data arguments exceed the number of 7234 // format conversions in the format string? 7235 if (!HasVAListArg) { 7236 // Find any arguments that weren't covered. 
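// For example, in printf("%d\n", x, y) the argument 'y' is never consumed by
// a conversion specifier, so it is reported as an unused data argument.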
7237 CoveredArgs.flip();
7238 signed notCoveredArg = CoveredArgs.find_first();
7239 if (notCoveredArg >= 0) {
7240 assert((unsigned)notCoveredArg < NumDataArgs);
7241 UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
7242 } else {
7243 UncoveredArg.setAllCovered();
7244 }
7245 }
7246 }
7247
7248 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
7249 const Expr *ArgExpr) {
7250 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
7251 "Invalid state");
7252
7253 if (!ArgExpr)
7254 return;
7255
7256 SourceLocation Loc = ArgExpr->getBeginLoc();
7257
7258 if (S.getSourceManager().isInSystemMacro(Loc))
7259 return;
7260
7261 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
7262 for (auto E : DiagnosticExprs)
7263 PDiag << E->getSourceRange();
7264
7265 CheckFormatHandler::EmitFormatDiagnostic(
7266 S, IsFunctionCall, DiagnosticExprs[0],
7267 PDiag, Loc, /*IsStringLocation*/false,
7268 DiagnosticExprs[0]->getSourceRange());
7269 }
7270
7271 bool
7272 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
7273 SourceLocation Loc,
7274 const char *startSpec,
7275 unsigned specifierLen,
7276 const char *csStart,
7277 unsigned csLen) {
7278 bool keepGoing = true;
7279 if (argIndex < NumDataArgs) {
7280 // Consider the argument covered, even though the specifier doesn't
7281 // make sense.
7282 CoveredArgs.set(argIndex);
7283 }
7284 else {
7285 // If argIndex exceeds the number of data arguments we
7286 // don't issue a warning because that is just a cascade of warnings (and
7287 // they may have intended '%%' anyway). We don't want to continue processing
7288 // the format string after this point, however, as we will likely just get
7289 // gibberish when trying to match arguments.
7290 keepGoing = false;
7291 }
7292
7293 StringRef Specifier(csStart, csLen);
7294
7295 // If the specifier is non-printable, it could be the first byte of a UTF-8
7296 // sequence. In that case, print the UTF-8 code point. If not, print the byte
7297 // hex value.
7298 std::string CodePointStr; 7299 if (!llvm::sys::locale::isPrint(*csStart)) { 7300 llvm::UTF32 CodePoint; 7301 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 7302 const llvm::UTF8 *E = 7303 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 7304 llvm::ConversionResult Result = 7305 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 7306 7307 if (Result != llvm::conversionOK) { 7308 unsigned char FirstChar = *csStart; 7309 CodePoint = (llvm::UTF32)FirstChar; 7310 } 7311 7312 llvm::raw_string_ostream OS(CodePointStr); 7313 if (CodePoint < 256) 7314 OS << "\\x" << llvm::format("%02x", CodePoint); 7315 else if (CodePoint <= 0xFFFF) 7316 OS << "\\u" << llvm::format("%04x", CodePoint); 7317 else 7318 OS << "\\U" << llvm::format("%08x", CodePoint); 7319 OS.flush(); 7320 Specifier = CodePointStr; 7321 } 7322 7323 EmitFormatDiagnostic( 7324 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 7325 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 7326 7327 return keepGoing; 7328 } 7329 7330 void 7331 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 7332 const char *startSpec, 7333 unsigned specifierLen) { 7334 EmitFormatDiagnostic( 7335 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 7336 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 7337 } 7338 7339 bool 7340 CheckFormatHandler::CheckNumArgs( 7341 const analyze_format_string::FormatSpecifier &FS, 7342 const analyze_format_string::ConversionSpecifier &CS, 7343 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 7344 7345 if (argIndex >= NumDataArgs) { 7346 PartialDiagnostic PDiag = FS.usesPositionalArg() 7347 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 7348 << (argIndex+1) << NumDataArgs) 7349 : S.PDiag(diag::warn_printf_insufficient_data_args); 7350 EmitFormatDiagnostic( 7351 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 7352 getSpecifierRange(startSpecifier, specifierLen)); 7353 7354 // Since more arguments than conversion tokens are given, by extension 7355 // all arguments are covered, so mark this as so. 7356 UncoveredArg.setAllCovered(); 7357 return false; 7358 } 7359 return true; 7360 } 7361 7362 template<typename Range> 7363 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 7364 SourceLocation Loc, 7365 bool IsStringLocation, 7366 Range StringRange, 7367 ArrayRef<FixItHint> FixIt) { 7368 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 7369 Loc, IsStringLocation, StringRange, FixIt); 7370 } 7371 7372 /// If the format string is not within the function call, emit a note 7373 /// so that the function call and string are in diagnostic messages. 7374 /// 7375 /// \param InFunctionCall if true, the format string is within the function 7376 /// call and only one diagnostic message will be produced. Otherwise, an 7377 /// extra note will be emitted pointing to location of the format string. 7378 /// 7379 /// \param ArgumentExpr the expression that is passed as the format string 7380 /// argument in the function call. Used for getting locations when two 7381 /// diagnostics are emitted. 7382 /// 7383 /// \param PDiag the callee should already have provided any strings for the 7384 /// diagnostic message. This function only adds locations and fixits 7385 /// to diagnostics. 7386 /// 7387 /// \param Loc primary location for diagnostic. 
If two diagnostics are
7388 /// required, one will be at Loc and a new SourceLocation will be created for
7389 /// the other one.
7390 ///
7391 /// \param IsStringLocation if true, Loc points to the format string and should
7392 /// be used for the note. Otherwise, Loc points to the argument list and will
7393 /// be used with PDiag.
7394 ///
7395 /// \param StringRange some or all of the string to highlight. This is
7396 /// templated so it can accept either a CharSourceRange or a SourceRange.
7397 ///
7398 /// \param FixIt optional fix-it hint for the format string.
7399 template <typename Range>
7400 void CheckFormatHandler::EmitFormatDiagnostic(
7401 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
7402 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
7403 Range StringRange, ArrayRef<FixItHint> FixIt) {
7404 if (InFunctionCall) {
7405 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
7406 D << StringRange;
7407 D << FixIt;
7408 } else {
7409 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
7410 << ArgumentExpr->getSourceRange();
7411
7412 const Sema::SemaDiagnosticBuilder &Note =
7413 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
7414 diag::note_format_string_defined);
7415
7416 Note << StringRange;
7417 Note << FixIt;
7418 }
7419 }
7420
7421 //===--- CHECK: Printf format string checking ------------------------------===//
7422
7423 namespace {
7424
7425 class CheckPrintfHandler : public CheckFormatHandler {
7426 public:
7427 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
7428 const Expr *origFormatExpr,
7429 const Sema::FormatStringType type, unsigned firstDataArg,
7430 unsigned numDataArgs, bool isObjC, const char *beg,
7431 bool hasVAListArg, ArrayRef<const Expr *> Args,
7432 unsigned formatIdx, bool inFunctionCall,
7433 Sema::VariadicCallType CallType,
7434 llvm::SmallBitVector &CheckedVarArgs,
7435 UncoveredArgHandler &UncoveredArg)
7436 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
7437 numDataArgs, beg, hasVAListArg, Args, formatIdx,
7438 inFunctionCall, CallType, CheckedVarArgs,
7439 UncoveredArg) {}
7440
7441 bool isObjCContext() const { return FSType == Sema::FST_NSString; }
7442
7443 /// Returns true if '%@' specifiers are allowed in the format string.
7444 bool allowsObjCArg() const { 7445 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7446 FSType == Sema::FST_OSTrace; 7447 } 7448 7449 bool HandleInvalidPrintfConversionSpecifier( 7450 const analyze_printf::PrintfSpecifier &FS, 7451 const char *startSpecifier, 7452 unsigned specifierLen) override; 7453 7454 void handleInvalidMaskType(StringRef MaskType) override; 7455 7456 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7457 const char *startSpecifier, 7458 unsigned specifierLen) override; 7459 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7460 const char *StartSpecifier, 7461 unsigned SpecifierLen, 7462 const Expr *E); 7463 7464 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7465 const char *startSpecifier, unsigned specifierLen); 7466 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7467 const analyze_printf::OptionalAmount &Amt, 7468 unsigned type, 7469 const char *startSpecifier, unsigned specifierLen); 7470 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7471 const analyze_printf::OptionalFlag &flag, 7472 const char *startSpecifier, unsigned specifierLen); 7473 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 7474 const analyze_printf::OptionalFlag &ignoredFlag, 7475 const analyze_printf::OptionalFlag &flag, 7476 const char *startSpecifier, unsigned specifierLen); 7477 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 7478 const Expr *E); 7479 7480 void HandleEmptyObjCModifierFlag(const char *startFlag, 7481 unsigned flagLen) override; 7482 7483 void HandleInvalidObjCModifierFlag(const char *startFlag, 7484 unsigned flagLen) override; 7485 7486 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 7487 const char *flagsEnd, 7488 const char *conversionPosition) 7489 override; 7490 }; 7491 7492 } // namespace 7493 7494 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 7495 const analyze_printf::PrintfSpecifier &FS, 7496 const char *startSpecifier, 7497 unsigned specifierLen) { 7498 const analyze_printf::PrintfConversionSpecifier &CS = 7499 FS.getConversionSpecifier(); 7500 7501 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 7502 getLocationOfByte(CS.getStart()), 7503 startSpecifier, specifierLen, 7504 CS.getStart(), CS.getLength()); 7505 } 7506 7507 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 7508 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 7509 } 7510 7511 bool CheckPrintfHandler::HandleAmount( 7512 const analyze_format_string::OptionalAmount &Amt, 7513 unsigned k, const char *startSpecifier, 7514 unsigned specifierLen) { 7515 if (Amt.hasDataArgument()) { 7516 if (!HasVAListArg) { 7517 unsigned argIndex = Amt.getArgIndex(); 7518 if (argIndex >= NumDataArgs) { 7519 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 7520 << k, 7521 getLocationOfByte(Amt.getStart()), 7522 /*IsStringLocation*/true, 7523 getSpecifierRange(startSpecifier, specifierLen)); 7524 // Don't do any more checking. We will just emit 7525 // spurious errors. 7526 return false; 7527 } 7528 7529 // Type check the data argument. It should be an 'int'. 7530 // Although not in conformance with C99, we also allow the argument to be 7531 // an 'unsigned int' as that is a reasonably safe case. GCC also 7532 // doesn't emit a warning for that case. 
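// For example, in printf("%*d", width, value) the 'width' argument is the
// data argument checked here and is expected to be an 'int'.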
7533 CoveredArgs.set(argIndex); 7534 const Expr *Arg = getDataArg(argIndex); 7535 if (!Arg) 7536 return false; 7537 7538 QualType T = Arg->getType(); 7539 7540 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 7541 assert(AT.isValid()); 7542 7543 if (!AT.matchesType(S.Context, T)) { 7544 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 7545 << k << AT.getRepresentativeTypeName(S.Context) 7546 << T << Arg->getSourceRange(), 7547 getLocationOfByte(Amt.getStart()), 7548 /*IsStringLocation*/true, 7549 getSpecifierRange(startSpecifier, specifierLen)); 7550 // Don't do any more checking. We will just emit 7551 // spurious errors. 7552 return false; 7553 } 7554 } 7555 } 7556 return true; 7557 } 7558 7559 void CheckPrintfHandler::HandleInvalidAmount( 7560 const analyze_printf::PrintfSpecifier &FS, 7561 const analyze_printf::OptionalAmount &Amt, 7562 unsigned type, 7563 const char *startSpecifier, 7564 unsigned specifierLen) { 7565 const analyze_printf::PrintfConversionSpecifier &CS = 7566 FS.getConversionSpecifier(); 7567 7568 FixItHint fixit = 7569 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 7570 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 7571 Amt.getConstantLength())) 7572 : FixItHint(); 7573 7574 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 7575 << type << CS.toString(), 7576 getLocationOfByte(Amt.getStart()), 7577 /*IsStringLocation*/true, 7578 getSpecifierRange(startSpecifier, specifierLen), 7579 fixit); 7580 } 7581 7582 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7583 const analyze_printf::OptionalFlag &flag, 7584 const char *startSpecifier, 7585 unsigned specifierLen) { 7586 // Warn about pointless flag with a fixit removal. 7587 const analyze_printf::PrintfConversionSpecifier &CS = 7588 FS.getConversionSpecifier(); 7589 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 7590 << flag.toString() << CS.toString(), 7591 getLocationOfByte(flag.getPosition()), 7592 /*IsStringLocation*/true, 7593 getSpecifierRange(startSpecifier, specifierLen), 7594 FixItHint::CreateRemoval( 7595 getSpecifierRange(flag.getPosition(), 1))); 7596 } 7597 7598 void CheckPrintfHandler::HandleIgnoredFlag( 7599 const analyze_printf::PrintfSpecifier &FS, 7600 const analyze_printf::OptionalFlag &ignoredFlag, 7601 const analyze_printf::OptionalFlag &flag, 7602 const char *startSpecifier, 7603 unsigned specifierLen) { 7604 // Warn about ignored flag with a fixit removal. 7605 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 7606 << ignoredFlag.toString() << flag.toString(), 7607 getLocationOfByte(ignoredFlag.getPosition()), 7608 /*IsStringLocation*/true, 7609 getSpecifierRange(startSpecifier, specifierLen), 7610 FixItHint::CreateRemoval( 7611 getSpecifierRange(ignoredFlag.getPosition(), 1))); 7612 } 7613 7614 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 7615 unsigned flagLen) { 7616 // Warn about an empty flag. 7617 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 7618 getLocationOfByte(startFlag), 7619 /*IsStringLocation*/true, 7620 getSpecifierRange(startFlag, flagLen)); 7621 } 7622 7623 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 7624 unsigned flagLen) { 7625 // Warn about an invalid flag. 
7626 auto Range = getSpecifierRange(startFlag, flagLen);
7627 StringRef flag(startFlag, flagLen);
7628 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
7629 getLocationOfByte(startFlag),
7630 /*IsStringLocation*/true,
7631 Range, FixItHint::CreateRemoval(Range));
7632 }
7633
7634 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
7635 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
7636 // Warn about using '[...]' without a '@' conversion.
7637 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
7638 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
7639 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
7640 getLocationOfByte(conversionPosition),
7641 /*IsStringLocation*/true,
7642 Range, FixItHint::CreateRemoval(Range));
7643 }
7644
7645 // Determines if the specified type is a C++ class or struct containing
7646 // a member with the specified name and kind (e.g. a CXXMethodDecl named
7647 // "c_str()").
7648 template<typename MemberKind>
7649 static llvm::SmallPtrSet<MemberKind*, 1>
7650 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
7651 const RecordType *RT = Ty->getAs<RecordType>();
7652 llvm::SmallPtrSet<MemberKind*, 1> Results;
7653
7654 if (!RT)
7655 return Results;
7656 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
7657 if (!RD || !RD->getDefinition())
7658 return Results;
7659
7660 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
7661 Sema::LookupMemberName);
7662 R.suppressDiagnostics();
7663
7664 // We just need to include all members of the right kind turned up by the
7665 // filter, at this point.
7666 if (S.LookupQualifiedName(R, RT->getDecl()))
7667 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
7668 NamedDecl *decl = (*I)->getUnderlyingDecl();
7669 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
7670 Results.insert(FK);
7671 }
7672 return Results;
7673 }
7674
7675 /// Check if we could call '.c_str()' on an object.
7676 ///
7677 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
7678 /// allow the call, or if it would be ambiguous).
7679 bool Sema::hasCStrMethod(const Expr *E) {
7680 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7681
7682 MethodSet Results =
7683 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
7684 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7685 MI != ME; ++MI)
7686 if ((*MI)->getMinRequiredArguments() == 0)
7687 return true;
7688 return false;
7689 }
7690
7691 // Check if a (w)string was passed when a (w)char* was needed, and offer a
7692 // better diagnostic if so. AT is assumed to be valid.
7693 // Returns true when a c_str() conversion method is found.
7694 bool CheckPrintfHandler::checkForCStrMembers(
7695 const analyze_printf::ArgType &AT, const Expr *E) {
7696 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7697
7698 MethodSet Results =
7699 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
7700
7701 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7702 MI != ME; ++MI) {
7703 const CXXMethodDecl *Method = *MI;
7704 if (Method->getMinRequiredArguments() == 0 &&
7705 AT.matchesType(S.Context, Method->getReturnType())) {
7706 // FIXME: Suggest parens if the expression needs them.
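// For example, if a std::string is passed for a '%s' specifier, the note
// below suggests appending ".c_str()" to the argument.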
7707 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 7708 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 7709 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 7710 return true; 7711 } 7712 } 7713 7714 return false; 7715 } 7716 7717 bool 7718 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 7719 &FS, 7720 const char *startSpecifier, 7721 unsigned specifierLen) { 7722 using namespace analyze_format_string; 7723 using namespace analyze_printf; 7724 7725 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 7726 7727 if (FS.consumesDataArgument()) { 7728 if (atFirstArg) { 7729 atFirstArg = false; 7730 usesPositionalArgs = FS.usesPositionalArg(); 7731 } 7732 else if (usesPositionalArgs != FS.usesPositionalArg()) { 7733 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 7734 startSpecifier, specifierLen); 7735 return false; 7736 } 7737 } 7738 7739 // First check if the field width, precision, and conversion specifier 7740 // have matching data arguments. 7741 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 7742 startSpecifier, specifierLen)) { 7743 return false; 7744 } 7745 7746 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 7747 startSpecifier, specifierLen)) { 7748 return false; 7749 } 7750 7751 if (!CS.consumesDataArgument()) { 7752 // FIXME: Technically specifying a precision or field width here 7753 // makes no sense. Worth issuing a warning at some point. 7754 return true; 7755 } 7756 7757 // Consume the argument. 7758 unsigned argIndex = FS.getArgIndex(); 7759 if (argIndex < NumDataArgs) { 7760 // The check to see if the argIndex is valid will come later. 7761 // We set the bit here because we may exit early from this 7762 // function if we encounter some other error. 7763 CoveredArgs.set(argIndex); 7764 } 7765 7766 // FreeBSD kernel extensions. 7767 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 7768 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 7769 // We need at least two arguments. 7770 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 7771 return false; 7772 7773 // Claim the second argument. 7774 CoveredArgs.set(argIndex + 1); 7775 7776 // Type check the first argument (int for %b, pointer for %D) 7777 const Expr *Ex = getDataArg(argIndex); 7778 const analyze_printf::ArgType &AT = 7779 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 7780 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 7781 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 7782 EmitFormatDiagnostic( 7783 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7784 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 7785 << false << Ex->getSourceRange(), 7786 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7787 getSpecifierRange(startSpecifier, specifierLen)); 7788 7789 // Type check the second argument (char * for both %b and %D) 7790 Ex = getDataArg(argIndex + 1); 7791 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 7792 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 7793 EmitFormatDiagnostic( 7794 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7795 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 7796 << false << Ex->getSourceRange(), 7797 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7798 getSpecifierRange(startSpecifier, specifierLen)); 7799 7800 return true; 7801 } 7802 7803 // Check for using an Objective-C specific conversion specifier 7804 // in a non-ObjC literal. 
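// For example, "%@" is only meaningful for NSString/os_log/os_trace formats,
// so using it in a plain C printf format string is flagged here.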
7805 if (!allowsObjCArg() && CS.isObjCArg()) { 7806 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7807 specifierLen); 7808 } 7809 7810 // %P can only be used with os_log. 7811 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 7812 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7813 specifierLen); 7814 } 7815 7816 // %n is not allowed with os_log. 7817 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 7818 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 7819 getLocationOfByte(CS.getStart()), 7820 /*IsStringLocation*/ false, 7821 getSpecifierRange(startSpecifier, specifierLen)); 7822 7823 return true; 7824 } 7825 7826 // Only scalars are allowed for os_trace. 7827 if (FSType == Sema::FST_OSTrace && 7828 (CS.getKind() == ConversionSpecifier::PArg || 7829 CS.getKind() == ConversionSpecifier::sArg || 7830 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 7831 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7832 specifierLen); 7833 } 7834 7835 // Check for use of public/private annotation outside of os_log(). 7836 if (FSType != Sema::FST_OSLog) { 7837 if (FS.isPublic().isSet()) { 7838 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7839 << "public", 7840 getLocationOfByte(FS.isPublic().getPosition()), 7841 /*IsStringLocation*/ false, 7842 getSpecifierRange(startSpecifier, specifierLen)); 7843 } 7844 if (FS.isPrivate().isSet()) { 7845 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7846 << "private", 7847 getLocationOfByte(FS.isPrivate().getPosition()), 7848 /*IsStringLocation*/ false, 7849 getSpecifierRange(startSpecifier, specifierLen)); 7850 } 7851 } 7852 7853 // Check for invalid use of field width 7854 if (!FS.hasValidFieldWidth()) { 7855 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 7856 startSpecifier, specifierLen); 7857 } 7858 7859 // Check for invalid use of precision 7860 if (!FS.hasValidPrecision()) { 7861 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 7862 startSpecifier, specifierLen); 7863 } 7864 7865 // Precision is mandatory for %P specifier. 7866 if (CS.getKind() == ConversionSpecifier::PArg && 7867 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 7868 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 7869 getLocationOfByte(startSpecifier), 7870 /*IsStringLocation*/ false, 7871 getSpecifierRange(startSpecifier, specifierLen)); 7872 } 7873 7874 // Check each flag does not conflict with any other component. 
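// For example, the alternative-form flag in "%#d" has no defined effect with
// the 'd' conversion, so it is diagnosed with a removal fix-it below.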
7875 if (!FS.hasValidThousandsGroupingPrefix()) 7876 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 7877 if (!FS.hasValidLeadingZeros()) 7878 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 7879 if (!FS.hasValidPlusPrefix()) 7880 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 7881 if (!FS.hasValidSpacePrefix()) 7882 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 7883 if (!FS.hasValidAlternativeForm()) 7884 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 7885 if (!FS.hasValidLeftJustified()) 7886 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 7887 7888 // Check that flags are not ignored by another flag 7889 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 7890 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 7891 startSpecifier, specifierLen); 7892 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 7893 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 7894 startSpecifier, specifierLen); 7895 7896 // Check the length modifier is valid with the given conversion specifier. 7897 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 7898 S.getLangOpts())) 7899 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7900 diag::warn_format_nonsensical_length); 7901 else if (!FS.hasStandardLengthModifier()) 7902 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 7903 else if (!FS.hasStandardLengthConversionCombination()) 7904 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7905 diag::warn_format_non_standard_conversion_spec); 7906 7907 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 7908 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 7909 7910 // The remaining checks depend on the data arguments. 7911 if (HasVAListArg) 7912 return true; 7913 7914 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 7915 return false; 7916 7917 const Expr *Arg = getDataArg(argIndex); 7918 if (!Arg) 7919 return true; 7920 7921 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 7922 } 7923 7924 static bool requiresParensToAddCast(const Expr *E) { 7925 // FIXME: We should have a general way to reason about operator 7926 // precedence and whether parens are actually needed here. 7927 // Take care of a few common cases where they aren't. 
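// For example, a simple DeclRefExpr 'x' can become '(long)x' directly, but a
// binary expression like 'x + 1' would need parentheses: '(long)(x + 1)'.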
7928 const Expr *Inside = E->IgnoreImpCasts(); 7929 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 7930 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 7931 7932 switch (Inside->getStmtClass()) { 7933 case Stmt::ArraySubscriptExprClass: 7934 case Stmt::CallExprClass: 7935 case Stmt::CharacterLiteralClass: 7936 case Stmt::CXXBoolLiteralExprClass: 7937 case Stmt::DeclRefExprClass: 7938 case Stmt::FloatingLiteralClass: 7939 case Stmt::IntegerLiteralClass: 7940 case Stmt::MemberExprClass: 7941 case Stmt::ObjCArrayLiteralClass: 7942 case Stmt::ObjCBoolLiteralExprClass: 7943 case Stmt::ObjCBoxedExprClass: 7944 case Stmt::ObjCDictionaryLiteralClass: 7945 case Stmt::ObjCEncodeExprClass: 7946 case Stmt::ObjCIvarRefExprClass: 7947 case Stmt::ObjCMessageExprClass: 7948 case Stmt::ObjCPropertyRefExprClass: 7949 case Stmt::ObjCStringLiteralClass: 7950 case Stmt::ObjCSubscriptRefExprClass: 7951 case Stmt::ParenExprClass: 7952 case Stmt::StringLiteralClass: 7953 case Stmt::UnaryOperatorClass: 7954 return false; 7955 default: 7956 return true; 7957 } 7958 } 7959 7960 static std::pair<QualType, StringRef> 7961 shouldNotPrintDirectly(const ASTContext &Context, 7962 QualType IntendedTy, 7963 const Expr *E) { 7964 // Use a 'while' to peel off layers of typedefs. 7965 QualType TyTy = IntendedTy; 7966 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 7967 StringRef Name = UserTy->getDecl()->getName(); 7968 QualType CastTy = llvm::StringSwitch<QualType>(Name) 7969 .Case("CFIndex", Context.getNSIntegerType()) 7970 .Case("NSInteger", Context.getNSIntegerType()) 7971 .Case("NSUInteger", Context.getNSUIntegerType()) 7972 .Case("SInt32", Context.IntTy) 7973 .Case("UInt32", Context.UnsignedIntTy) 7974 .Default(QualType()); 7975 7976 if (!CastTy.isNull()) 7977 return std::make_pair(CastTy, Name); 7978 7979 TyTy = UserTy->desugar(); 7980 } 7981 7982 // Strip parens if necessary. 7983 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 7984 return shouldNotPrintDirectly(Context, 7985 PE->getSubExpr()->getType(), 7986 PE->getSubExpr()); 7987 7988 // If this is a conditional expression, then its result type is constructed 7989 // via usual arithmetic conversions and thus there might be no necessary 7990 // typedef sugar there. Recurse to operands to check for NSInteger & 7991 // Co. usage condition. 7992 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 7993 QualType TrueTy, FalseTy; 7994 StringRef TrueName, FalseName; 7995 7996 std::tie(TrueTy, TrueName) = 7997 shouldNotPrintDirectly(Context, 7998 CO->getTrueExpr()->getType(), 7999 CO->getTrueExpr()); 8000 std::tie(FalseTy, FalseName) = 8001 shouldNotPrintDirectly(Context, 8002 CO->getFalseExpr()->getType(), 8003 CO->getFalseExpr()); 8004 8005 if (TrueTy == FalseTy) 8006 return std::make_pair(TrueTy, TrueName); 8007 else if (TrueTy.isNull()) 8008 return std::make_pair(FalseTy, FalseName); 8009 else if (FalseTy.isNull()) 8010 return std::make_pair(TrueTy, TrueName); 8011 } 8012 8013 return std::make_pair(QualType(), StringRef()); 8014 } 8015 8016 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8017 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8018 /// type do not count. 8019 static bool 8020 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8021 QualType From = ICE->getSubExpr()->getType(); 8022 QualType To = ICE->getType(); 8023 // It's an integer promotion if the destination type is the promoted 8024 // source type. 
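// For example, passing a 'short' through '...' promotes it to 'int', and that
// implicit 'short' -> 'int' cast counts as an argument promotion here.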
8025 if (ICE->getCastKind() == CK_IntegralCast && 8026 From->isPromotableIntegerType() && 8027 S.Context.getPromotedIntegerType(From) == To) 8028 return true; 8029 // Look through vector types, since we do default argument promotion for 8030 // those in OpenCL. 8031 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8032 From = VecTy->getElementType(); 8033 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8034 To = VecTy->getElementType(); 8035 // It's a floating promotion if the source type is a lower rank. 8036 return ICE->getCastKind() == CK_FloatingCast && 8037 S.Context.getFloatingTypeOrder(From, To) < 0; 8038 } 8039 8040 bool 8041 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8042 const char *StartSpecifier, 8043 unsigned SpecifierLen, 8044 const Expr *E) { 8045 using namespace analyze_format_string; 8046 using namespace analyze_printf; 8047 8048 // Now type check the data expression that matches the 8049 // format specifier. 8050 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8051 if (!AT.isValid()) 8052 return true; 8053 8054 QualType ExprTy = E->getType(); 8055 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8056 ExprTy = TET->getUnderlyingExpr()->getType(); 8057 } 8058 8059 const analyze_printf::ArgType::MatchKind Match = 8060 AT.matchesType(S.Context, ExprTy); 8061 bool Pedantic = Match == analyze_printf::ArgType::NoMatchPedantic; 8062 if (Match == analyze_printf::ArgType::Match) 8063 return true; 8064 8065 // Look through argument promotions for our error message's reported type. 8066 // This includes the integral and floating promotions, but excludes array 8067 // and function pointer decay (seeing that an argument intended to be a 8068 // string has type 'char [6]' is probably more confusing than 'char *') and 8069 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8070 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8071 if (isArithmeticArgumentPromotion(S, ICE)) { 8072 E = ICE->getSubExpr(); 8073 ExprTy = E->getType(); 8074 8075 // Check if we didn't match because of an implicit cast from a 'char' 8076 // or 'short' to an 'int'. This is done because printf is a varargs 8077 // function. 8078 if (ICE->getType() == S.Context.IntTy || 8079 ICE->getType() == S.Context.UnsignedIntTy) { 8080 // All further checking is done on the subexpression. 8081 if (AT.matchesType(S.Context, ExprTy)) 8082 return true; 8083 } 8084 } 8085 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8086 // Special case for 'a', which has type 'int' in C. 8087 // Note, however, that we do /not/ want to treat multibyte constants like 8088 // 'MooV' as characters! This form is deprecated but still exists. 8089 if (ExprTy == S.Context.IntTy) 8090 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8091 ExprTy = S.Context.CharTy; 8092 } 8093 8094 // Look through enums to their underlying type. 8095 bool IsEnum = false; 8096 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8097 ExprTy = EnumTy->getDecl()->getIntegerType(); 8098 IsEnum = true; 8099 } 8100 8101 // %C in an Objective-C context prints a unichar, not a wchar_t. 8102 // If the argument is an integer of some kind, believe the %C and suggest 8103 // a cast instead of changing the conversion specifier. 
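// For example, [NSString stringWithFormat:@"%C", ch] with an integer 'ch'
// typically keeps the '%C' and suggests casting the argument to 'unichar'
// (an 'unsigned short') rather than rewriting the specifier.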
8104 QualType IntendedTy = ExprTy; 8105 if (isObjCContext() && 8106 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8107 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8108 !ExprTy->isCharType()) { 8109 // 'unichar' is defined as a typedef of unsigned short, but we should 8110 // prefer using the typedef if it is visible. 8111 IntendedTy = S.Context.UnsignedShortTy; 8112 8113 // While we are here, check if the value is an IntegerLiteral that happens 8114 // to be within the valid range. 8115 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8116 const llvm::APInt &V = IL->getValue(); 8117 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8118 return true; 8119 } 8120 8121 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8122 Sema::LookupOrdinaryName); 8123 if (S.LookupName(Result, S.getCurScope())) { 8124 NamedDecl *ND = Result.getFoundDecl(); 8125 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8126 if (TD->getUnderlyingType() == IntendedTy) 8127 IntendedTy = S.Context.getTypedefType(TD); 8128 } 8129 } 8130 } 8131 8132 // Special-case some of Darwin's platform-independence types by suggesting 8133 // casts to primitive types that are known to be large enough. 8134 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8135 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8136 QualType CastTy; 8137 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8138 if (!CastTy.isNull()) { 8139 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8140 // (long in ASTContext). Only complain to pedants. 8141 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8142 (AT.isSizeT() || AT.isPtrdiffT()) && 8143 AT.matchesType(S.Context, CastTy)) 8144 Pedantic = true; 8145 IntendedTy = CastTy; 8146 ShouldNotPrintDirectly = true; 8147 } 8148 } 8149 8150 // We may be able to offer a FixItHint if it is a supported type. 8151 PrintfSpecifier fixedFS = FS; 8152 bool Success = 8153 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8154 8155 if (Success) { 8156 // Get the fix string from the fixed format specifier 8157 SmallString<16> buf; 8158 llvm::raw_svector_ostream os(buf); 8159 fixedFS.toString(os); 8160 8161 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8162 8163 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8164 unsigned Diag = 8165 Pedantic 8166 ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8167 : diag::warn_format_conversion_argument_type_mismatch; 8168 // In this case, the specifier is wrong and should be changed to match 8169 // the argument. 8170 EmitFormatDiagnostic(S.PDiag(Diag) 8171 << AT.getRepresentativeTypeName(S.Context) 8172 << IntendedTy << IsEnum << E->getSourceRange(), 8173 E->getBeginLoc(), 8174 /*IsStringLocation*/ false, SpecRange, 8175 FixItHint::CreateReplacement(SpecRange, os.str())); 8176 } else { 8177 // The canonical type for formatting this value is different from the 8178 // actual type of the expression. (This occurs, for example, with Darwin's 8179 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8180 // should be printed as 'long' for 64-bit compatibility.) 8181 // Rather than emitting a normal format/argument mismatch, we want to 8182 // add a cast to the recommended type (and correct the format string 8183 // if necessary). 
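// For example, NSLog(@"%d", i) where 'i' is an NSInteger typically gets a
// fix-it that rewrites the specifier to "%ld" and casts the argument to
// '(long)i'.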
8184 SmallString<16> CastBuf; 8185 llvm::raw_svector_ostream CastFix(CastBuf); 8186 CastFix << "("; 8187 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8188 CastFix << ")"; 8189 8190 SmallVector<FixItHint,4> Hints; 8191 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8192 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8193 8194 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8195 // If there's already a cast present, just replace it. 8196 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8197 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8198 8199 } else if (!requiresParensToAddCast(E)) { 8200 // If the expression has high enough precedence, 8201 // just write the C-style cast. 8202 Hints.push_back( 8203 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8204 } else { 8205 // Otherwise, add parens around the expression as well as the cast. 8206 CastFix << "("; 8207 Hints.push_back( 8208 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8209 8210 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8211 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8212 } 8213 8214 if (ShouldNotPrintDirectly) { 8215 // The expression has a type that should not be printed directly. 8216 // We extract the name from the typedef because we don't want to show 8217 // the underlying type in the diagnostic. 8218 StringRef Name; 8219 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8220 Name = TypedefTy->getDecl()->getName(); 8221 else 8222 Name = CastTyName; 8223 unsigned Diag = Pedantic 8224 ? diag::warn_format_argument_needs_cast_pedantic 8225 : diag::warn_format_argument_needs_cast; 8226 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8227 << E->getSourceRange(), 8228 E->getBeginLoc(), /*IsStringLocation=*/false, 8229 SpecRange, Hints); 8230 } else { 8231 // In this case, the expression could be printed using a different 8232 // specifier, but we've decided that the specifier is probably correct 8233 // and we should cast instead. Just use the normal warning message. 8234 EmitFormatDiagnostic( 8235 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8236 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8237 << E->getSourceRange(), 8238 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8239 } 8240 } 8241 } else { 8242 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8243 SpecifierLen); 8244 // Since the warning for passing non-POD types to variadic functions 8245 // was deferred until now, we emit a warning for non-POD 8246 // arguments here. 8247 switch (S.isValidVarArgType(ExprTy)) { 8248 case Sema::VAK_Valid: 8249 case Sema::VAK_ValidInCXX11: { 8250 unsigned Diag = 8251 Pedantic 8252 ? 
diag::warn_format_conversion_argument_type_mismatch_pedantic 8253 : diag::warn_format_conversion_argument_type_mismatch; 8254 8255 EmitFormatDiagnostic( 8256 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 8257 << IsEnum << CSR << E->getSourceRange(), 8258 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8259 break; 8260 } 8261 case Sema::VAK_Undefined: 8262 case Sema::VAK_MSVCUndefined: 8263 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 8264 << S.getLangOpts().CPlusPlus11 << ExprTy 8265 << CallType 8266 << AT.getRepresentativeTypeName(S.Context) << CSR 8267 << E->getSourceRange(), 8268 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8269 checkForCStrMembers(AT, E); 8270 break; 8271 8272 case Sema::VAK_Invalid: 8273 if (ExprTy->isObjCObjectType()) 8274 EmitFormatDiagnostic( 8275 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 8276 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 8277 << AT.getRepresentativeTypeName(S.Context) << CSR 8278 << E->getSourceRange(), 8279 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8280 else 8281 // FIXME: If this is an initializer list, suggest removing the braces 8282 // or inserting a cast to the target type. 8283 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 8284 << isa<InitListExpr>(E) << ExprTy << CallType 8285 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 8286 break; 8287 } 8288 8289 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 8290 "format string specifier index out of range"); 8291 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 8292 } 8293 8294 return true; 8295 } 8296 8297 //===--- CHECK: Scanf format string checking ------------------------------===// 8298 8299 namespace { 8300 8301 class CheckScanfHandler : public CheckFormatHandler { 8302 public: 8303 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 8304 const Expr *origFormatExpr, Sema::FormatStringType type, 8305 unsigned firstDataArg, unsigned numDataArgs, 8306 const char *beg, bool hasVAListArg, 8307 ArrayRef<const Expr *> Args, unsigned formatIdx, 8308 bool inFunctionCall, Sema::VariadicCallType CallType, 8309 llvm::SmallBitVector &CheckedVarArgs, 8310 UncoveredArgHandler &UncoveredArg) 8311 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8312 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8313 inFunctionCall, CallType, CheckedVarArgs, 8314 UncoveredArg) {} 8315 8316 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 8317 const char *startSpecifier, 8318 unsigned specifierLen) override; 8319 8320 bool HandleInvalidScanfConversionSpecifier( 8321 const analyze_scanf::ScanfSpecifier &FS, 8322 const char *startSpecifier, 8323 unsigned specifierLen) override; 8324 8325 void HandleIncompleteScanList(const char *start, const char *end) override; 8326 }; 8327 8328 } // namespace 8329 8330 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 8331 const char *end) { 8332 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 8333 getLocationOfByte(end), /*IsStringLocation*/true, 8334 getSpecifierRange(start, end - start)); 8335 } 8336 8337 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 8338 const analyze_scanf::ScanfSpecifier &FS, 8339 const char *startSpecifier, 8340 unsigned specifierLen) { 8341 const analyze_scanf::ScanfConversionSpecifier &CS = 8342 FS.getConversionSpecifier(); 8343 8344 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8345 
                                        getLocationOfByte(CS.getStart()),
8346                                    startSpecifier, specifierLen,
8347                                    CS.getStart(), CS.getLength());
8348 }
8349 
8350 bool CheckScanfHandler::HandleScanfSpecifier(
8351                         const analyze_scanf::ScanfSpecifier &FS,
8352                         const char *startSpecifier,
8353                         unsigned specifierLen) {
8354   using namespace analyze_scanf;
8355   using namespace analyze_format_string;
8356 
8357   const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
8358 
8359   // Handle case where '%' and '*' don't consume an argument. These shouldn't
8360   // be used to decide if we are using positional arguments consistently.
8361   if (FS.consumesDataArgument()) {
8362     if (atFirstArg) {
8363       atFirstArg = false;
8364       usesPositionalArgs = FS.usesPositionalArg();
8365     }
8366     else if (usesPositionalArgs != FS.usesPositionalArg()) {
8367       HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
8368                                         startSpecifier, specifierLen);
8369       return false;
8370     }
8371   }
8372 
8373   // Check if the field width is non-zero.
8374   const OptionalAmount &Amt = FS.getFieldWidth();
8375   if (Amt.getHowSpecified() == OptionalAmount::Constant) {
8376     if (Amt.getConstantAmount() == 0) {
8377       const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
8378                                                    Amt.getConstantLength());
8379       EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
8380                            getLocationOfByte(Amt.getStart()),
8381                            /*IsStringLocation*/true, R,
8382                            FixItHint::CreateRemoval(R));
8383     }
8384   }
8385 
8386   if (!FS.consumesDataArgument()) {
8387     // FIXME: Technically specifying a precision or field width here
8388     // makes no sense. Worth issuing a warning at some point.
8389     return true;
8390   }
8391 
8392   // Consume the argument.
8393   unsigned argIndex = FS.getArgIndex();
8394   if (argIndex < NumDataArgs) {
8395     // The check to see if the argIndex is valid will come later.
8396     // We set the bit here because we may exit early from this
8397     // function if we encounter some other error.
8398     CoveredArgs.set(argIndex);
8399   }
8400 
8401   // Check the length modifier is valid with the given conversion specifier.
8402   if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
8403                                  S.getLangOpts()))
8404     HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8405                                 diag::warn_format_nonsensical_length);
8406   else if (!FS.hasStandardLengthModifier())
8407     HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
8408   else if (!FS.hasStandardLengthConversionCombination())
8409     HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8410                                 diag::warn_format_non_standard_conversion_spec);
8411 
8412   if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
8413     HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
8414 
8415   // The remaining checks depend on the data arguments.
8416   if (HasVAListArg)
8417     return true;
8418 
8419   if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
8420     return false;
8421 
8422   // Check that the argument type matches the format specifier.
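  // For illustration (hypothetical identifiers): given
  //
  // \code
  //   long n;
  //   scanf("%d", &n);
  // \endcode
  //
  // the match below fails and, when the specifier can be repaired for the
  // argument's type, the diagnostic carries a fix-it rewriting "%d" to "%ld".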
8423 const Expr *Ex = getDataArg(argIndex); 8424 if (!Ex) 8425 return true; 8426 8427 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 8428 8429 if (!AT.isValid()) { 8430 return true; 8431 } 8432 8433 analyze_format_string::ArgType::MatchKind Match = 8434 AT.matchesType(S.Context, Ex->getType()); 8435 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 8436 if (Match == analyze_format_string::ArgType::Match) 8437 return true; 8438 8439 ScanfSpecifier fixedFS = FS; 8440 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 8441 S.getLangOpts(), S.Context); 8442 8443 unsigned Diag = 8444 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8445 : diag::warn_format_conversion_argument_type_mismatch; 8446 8447 if (Success) { 8448 // Get the fix string from the fixed format specifier. 8449 SmallString<128> buf; 8450 llvm::raw_svector_ostream os(buf); 8451 fixedFS.toString(os); 8452 8453 EmitFormatDiagnostic( 8454 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 8455 << Ex->getType() << false << Ex->getSourceRange(), 8456 Ex->getBeginLoc(), 8457 /*IsStringLocation*/ false, 8458 getSpecifierRange(startSpecifier, specifierLen), 8459 FixItHint::CreateReplacement( 8460 getSpecifierRange(startSpecifier, specifierLen), os.str())); 8461 } else { 8462 EmitFormatDiagnostic(S.PDiag(Diag) 8463 << AT.getRepresentativeTypeName(S.Context) 8464 << Ex->getType() << false << Ex->getSourceRange(), 8465 Ex->getBeginLoc(), 8466 /*IsStringLocation*/ false, 8467 getSpecifierRange(startSpecifier, specifierLen)); 8468 } 8469 8470 return true; 8471 } 8472 8473 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8474 const Expr *OrigFormatExpr, 8475 ArrayRef<const Expr *> Args, 8476 bool HasVAListArg, unsigned format_idx, 8477 unsigned firstDataArg, 8478 Sema::FormatStringType Type, 8479 bool inFunctionCall, 8480 Sema::VariadicCallType CallType, 8481 llvm::SmallBitVector &CheckedVarArgs, 8482 UncoveredArgHandler &UncoveredArg) { 8483 // CHECK: is the format string a wide literal? 8484 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 8485 CheckFormatHandler::EmitFormatDiagnostic( 8486 S, inFunctionCall, Args[format_idx], 8487 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 8488 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 8489 return; 8490 } 8491 8492 // Str - The format string. NOTE: this is NOT null-terminated! 8493 StringRef StrRef = FExpr->getString(); 8494 const char *Str = StrRef.data(); 8495 // Account for cases where the string literal is truncated in a declaration. 8496 const ConstantArrayType *T = 8497 S.Context.getAsConstantArrayType(FExpr->getType()); 8498 assert(T && "String literal not of constant array type!"); 8499 size_t TypeSize = T->getSize().getZExtValue(); 8500 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 8501 const unsigned numDataArgs = Args.size() - firstDataArg; 8502 8503 // Emit a warning if the string literal is truncated and does not contain an 8504 // embedded null character. 8505 if (TypeSize <= StrRef.size() && 8506 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 8507 CheckFormatHandler::EmitFormatDiagnostic( 8508 S, inFunctionCall, Args[format_idx], 8509 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 8510 FExpr->getBeginLoc(), 8511 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 8512 return; 8513 } 8514 8515 // CHECK: empty format string? 
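  // For illustration ('value' is a hypothetical identifier):
  //
  // \code
  //   printf("", value);  // zero-length format string with a data argument
  // \endcode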
8516   if (StrLen == 0 && numDataArgs > 0) {
8517     CheckFormatHandler::EmitFormatDiagnostic(
8518         S, inFunctionCall, Args[format_idx],
8519         S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
8520         /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
8521     return;
8522   }
8523 
8524   if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
8525       Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
8526       Type == Sema::FST_OSTrace) {
8527     CheckPrintfHandler H(
8528         S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
8529         (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
8530         HasVAListArg, Args, format_idx, inFunctionCall, CallType,
8531         CheckedVarArgs, UncoveredArg);
8532 
8533     if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
8534                                                   S.getLangOpts(),
8535                                                   S.Context.getTargetInfo(),
8536                                                   Type == Sema::FST_FreeBSDKPrintf))
8537       H.DoneProcessing();
8538   } else if (Type == Sema::FST_Scanf) {
8539     CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
8540                         numDataArgs, Str, HasVAListArg, Args, format_idx,
8541                         inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
8542 
8543     if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
8544                                                  S.getLangOpts(),
8545                                                  S.Context.getTargetInfo()))
8546       H.DoneProcessing();
8547   } // TODO: handle other formats
8548 }
8549 
8550 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
8551   // Str - The format string. NOTE: this is NOT null-terminated!
8552   StringRef StrRef = FExpr->getString();
8553   const char *Str = StrRef.data();
8554   // Account for cases where the string literal is truncated in a declaration.
8555   const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
8556   assert(T && "String literal not of constant array type!");
8557   size_t TypeSize = T->getSize().getZExtValue();
8558   size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8559   return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
8560                                                          getLangOpts(),
8561                                                          Context.getTargetInfo());
8562 }
8563 
8564 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
8565 
8566 // Returns the related absolute value function that is larger, or 0 if one
8567 // does not exist.
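// For illustration (mirroring the switch below): each value class forms a
// widening chain that ends in 0, e.g.
//   abs   -> labs -> llabs -> 0
//   fabsf -> fabs -> fabsl -> 0
//   cabsf -> cabs -> cabsl -> 0
// with parallel chains for the __builtin_-prefixed forms.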
8568 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 8569 switch (AbsFunction) { 8570 default: 8571 return 0; 8572 8573 case Builtin::BI__builtin_abs: 8574 return Builtin::BI__builtin_labs; 8575 case Builtin::BI__builtin_labs: 8576 return Builtin::BI__builtin_llabs; 8577 case Builtin::BI__builtin_llabs: 8578 return 0; 8579 8580 case Builtin::BI__builtin_fabsf: 8581 return Builtin::BI__builtin_fabs; 8582 case Builtin::BI__builtin_fabs: 8583 return Builtin::BI__builtin_fabsl; 8584 case Builtin::BI__builtin_fabsl: 8585 return 0; 8586 8587 case Builtin::BI__builtin_cabsf: 8588 return Builtin::BI__builtin_cabs; 8589 case Builtin::BI__builtin_cabs: 8590 return Builtin::BI__builtin_cabsl; 8591 case Builtin::BI__builtin_cabsl: 8592 return 0; 8593 8594 case Builtin::BIabs: 8595 return Builtin::BIlabs; 8596 case Builtin::BIlabs: 8597 return Builtin::BIllabs; 8598 case Builtin::BIllabs: 8599 return 0; 8600 8601 case Builtin::BIfabsf: 8602 return Builtin::BIfabs; 8603 case Builtin::BIfabs: 8604 return Builtin::BIfabsl; 8605 case Builtin::BIfabsl: 8606 return 0; 8607 8608 case Builtin::BIcabsf: 8609 return Builtin::BIcabs; 8610 case Builtin::BIcabs: 8611 return Builtin::BIcabsl; 8612 case Builtin::BIcabsl: 8613 return 0; 8614 } 8615 } 8616 8617 // Returns the argument type of the absolute value function. 8618 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 8619 unsigned AbsType) { 8620 if (AbsType == 0) 8621 return QualType(); 8622 8623 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 8624 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 8625 if (Error != ASTContext::GE_None) 8626 return QualType(); 8627 8628 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 8629 if (!FT) 8630 return QualType(); 8631 8632 if (FT->getNumParams() != 1) 8633 return QualType(); 8634 8635 return FT->getParamType(0); 8636 } 8637 8638 // Returns the best absolute value function, or zero, based on type and 8639 // current absolute value function. 8640 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 8641 unsigned AbsFunctionKind) { 8642 unsigned BestKind = 0; 8643 uint64_t ArgSize = Context.getTypeSize(ArgType); 8644 for (unsigned Kind = AbsFunctionKind; Kind != 0; 8645 Kind = getLargerAbsoluteValueFunction(Kind)) { 8646 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 8647 if (Context.getTypeSize(ParamType) >= ArgSize) { 8648 if (BestKind == 0) 8649 BestKind = Kind; 8650 else if (Context.hasSameType(ParamType, ArgType)) { 8651 BestKind = Kind; 8652 break; 8653 } 8654 } 8655 } 8656 return BestKind; 8657 } 8658 8659 enum AbsoluteValueKind { 8660 AVK_Integer, 8661 AVK_Floating, 8662 AVK_Complex 8663 }; 8664 8665 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 8666 if (T->isIntegralOrEnumerationType()) 8667 return AVK_Integer; 8668 if (T->isRealFloatingType()) 8669 return AVK_Floating; 8670 if (T->isAnyComplexType()) 8671 return AVK_Complex; 8672 8673 llvm_unreachable("Type not integer, floating, or complex"); 8674 } 8675 8676 // Changes the absolute value function to a different type. Preserves whether 8677 // the function is a builtin. 
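// For illustration (mirroring the switch below): the result is the smallest
// function of the requested kind, e.g. an integer argument turns fabs/cabs
// into abs, a floating argument turns abs/cabs into fabsf, and a complex
// argument turns abs/fabs into cabsf; getBestAbsFunction then widens the
// result to fit the argument.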
8678 static unsigned changeAbsFunction(unsigned AbsKind, 8679 AbsoluteValueKind ValueKind) { 8680 switch (ValueKind) { 8681 case AVK_Integer: 8682 switch (AbsKind) { 8683 default: 8684 return 0; 8685 case Builtin::BI__builtin_fabsf: 8686 case Builtin::BI__builtin_fabs: 8687 case Builtin::BI__builtin_fabsl: 8688 case Builtin::BI__builtin_cabsf: 8689 case Builtin::BI__builtin_cabs: 8690 case Builtin::BI__builtin_cabsl: 8691 return Builtin::BI__builtin_abs; 8692 case Builtin::BIfabsf: 8693 case Builtin::BIfabs: 8694 case Builtin::BIfabsl: 8695 case Builtin::BIcabsf: 8696 case Builtin::BIcabs: 8697 case Builtin::BIcabsl: 8698 return Builtin::BIabs; 8699 } 8700 case AVK_Floating: 8701 switch (AbsKind) { 8702 default: 8703 return 0; 8704 case Builtin::BI__builtin_abs: 8705 case Builtin::BI__builtin_labs: 8706 case Builtin::BI__builtin_llabs: 8707 case Builtin::BI__builtin_cabsf: 8708 case Builtin::BI__builtin_cabs: 8709 case Builtin::BI__builtin_cabsl: 8710 return Builtin::BI__builtin_fabsf; 8711 case Builtin::BIabs: 8712 case Builtin::BIlabs: 8713 case Builtin::BIllabs: 8714 case Builtin::BIcabsf: 8715 case Builtin::BIcabs: 8716 case Builtin::BIcabsl: 8717 return Builtin::BIfabsf; 8718 } 8719 case AVK_Complex: 8720 switch (AbsKind) { 8721 default: 8722 return 0; 8723 case Builtin::BI__builtin_abs: 8724 case Builtin::BI__builtin_labs: 8725 case Builtin::BI__builtin_llabs: 8726 case Builtin::BI__builtin_fabsf: 8727 case Builtin::BI__builtin_fabs: 8728 case Builtin::BI__builtin_fabsl: 8729 return Builtin::BI__builtin_cabsf; 8730 case Builtin::BIabs: 8731 case Builtin::BIlabs: 8732 case Builtin::BIllabs: 8733 case Builtin::BIfabsf: 8734 case Builtin::BIfabs: 8735 case Builtin::BIfabsl: 8736 return Builtin::BIcabsf; 8737 } 8738 } 8739 llvm_unreachable("Unable to convert function"); 8740 } 8741 8742 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 8743 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 8744 if (!FnInfo) 8745 return 0; 8746 8747 switch (FDecl->getBuiltinID()) { 8748 default: 8749 return 0; 8750 case Builtin::BI__builtin_abs: 8751 case Builtin::BI__builtin_fabs: 8752 case Builtin::BI__builtin_fabsf: 8753 case Builtin::BI__builtin_fabsl: 8754 case Builtin::BI__builtin_labs: 8755 case Builtin::BI__builtin_llabs: 8756 case Builtin::BI__builtin_cabs: 8757 case Builtin::BI__builtin_cabsf: 8758 case Builtin::BI__builtin_cabsl: 8759 case Builtin::BIabs: 8760 case Builtin::BIlabs: 8761 case Builtin::BIllabs: 8762 case Builtin::BIfabs: 8763 case Builtin::BIfabsf: 8764 case Builtin::BIfabsl: 8765 case Builtin::BIcabs: 8766 case Builtin::BIcabsf: 8767 case Builtin::BIcabsl: 8768 return FDecl->getBuiltinID(); 8769 } 8770 llvm_unreachable("Unknown Builtin type"); 8771 } 8772 8773 // If the replacement is valid, emit a note with replacement function. 8774 // Additionally, suggest including the proper header if not already included. 
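// For illustration (hypothetical identifiers): in C,
//
// \code
//   double d = -1.5;
//   double r = abs(d);   // integer abs applied to a double
// \endcode
//
// gets a note suggesting 'fabs' with a fix-it on the callee, plus a second
// note recommending the matching header if no suitable declaration is in
// scope; in C++ the suggestion is 'std::abs' with <cstdlib> or <cmath>.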
8775 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 8776 unsigned AbsKind, QualType ArgType) { 8777 bool EmitHeaderHint = true; 8778 const char *HeaderName = nullptr; 8779 const char *FunctionName = nullptr; 8780 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 8781 FunctionName = "std::abs"; 8782 if (ArgType->isIntegralOrEnumerationType()) { 8783 HeaderName = "cstdlib"; 8784 } else if (ArgType->isRealFloatingType()) { 8785 HeaderName = "cmath"; 8786 } else { 8787 llvm_unreachable("Invalid Type"); 8788 } 8789 8790 // Lookup all std::abs 8791 if (NamespaceDecl *Std = S.getStdNamespace()) { 8792 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 8793 R.suppressDiagnostics(); 8794 S.LookupQualifiedName(R, Std); 8795 8796 for (const auto *I : R) { 8797 const FunctionDecl *FDecl = nullptr; 8798 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 8799 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 8800 } else { 8801 FDecl = dyn_cast<FunctionDecl>(I); 8802 } 8803 if (!FDecl) 8804 continue; 8805 8806 // Found std::abs(), check that they are the right ones. 8807 if (FDecl->getNumParams() != 1) 8808 continue; 8809 8810 // Check that the parameter type can handle the argument. 8811 QualType ParamType = FDecl->getParamDecl(0)->getType(); 8812 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 8813 S.Context.getTypeSize(ArgType) <= 8814 S.Context.getTypeSize(ParamType)) { 8815 // Found a function, don't need the header hint. 8816 EmitHeaderHint = false; 8817 break; 8818 } 8819 } 8820 } 8821 } else { 8822 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 8823 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 8824 8825 if (HeaderName) { 8826 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 8827 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 8828 R.suppressDiagnostics(); 8829 S.LookupName(R, S.getCurScope()); 8830 8831 if (R.isSingleResult()) { 8832 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 8833 if (FD && FD->getBuiltinID() == AbsKind) { 8834 EmitHeaderHint = false; 8835 } else { 8836 return; 8837 } 8838 } else if (!R.empty()) { 8839 return; 8840 } 8841 } 8842 } 8843 8844 S.Diag(Loc, diag::note_replace_abs_function) 8845 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 8846 8847 if (!HeaderName) 8848 return; 8849 8850 if (!EmitHeaderHint) 8851 return; 8852 8853 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 8854 << FunctionName; 8855 } 8856 8857 template <std::size_t StrLen> 8858 static bool IsStdFunction(const FunctionDecl *FDecl, 8859 const char (&Str)[StrLen]) { 8860 if (!FDecl) 8861 return false; 8862 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 8863 return false; 8864 if (!FDecl->isInStdNamespace()) 8865 return false; 8866 8867 return true; 8868 } 8869 8870 // Warn when using the wrong abs() function. 8871 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 8872 const FunctionDecl *FDecl) { 8873 if (Call->getNumArgs() != 1) 8874 return; 8875 8876 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 8877 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 8878 if (AbsKind == 0 && !IsStdAbs) 8879 return; 8880 8881 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 8882 QualType ParamType = Call->getArg(0)->getType(); 8883 8884 // Unsigned types cannot be negative. Suggest removing the absolute value 8885 // function call. 
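  // For illustration (hypothetical identifiers):
  //
  // \code
  //   unsigned int u = get();
  //   unsigned int v = abs(u);   // the argument can never be negative
  // \endcode
  //
  // triggers the warning below, and the note's fix-it removes the call to
  // 'abs', leaving '(u)'.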
8886 if (ArgType->isUnsignedIntegerType()) { 8887 const char *FunctionName = 8888 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 8889 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 8890 Diag(Call->getExprLoc(), diag::note_remove_abs) 8891 << FunctionName 8892 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 8893 return; 8894 } 8895 8896 // Taking the absolute value of a pointer is very suspicious, they probably 8897 // wanted to index into an array, dereference a pointer, call a function, etc. 8898 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 8899 unsigned DiagType = 0; 8900 if (ArgType->isFunctionType()) 8901 DiagType = 1; 8902 else if (ArgType->isArrayType()) 8903 DiagType = 2; 8904 8905 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 8906 return; 8907 } 8908 8909 // std::abs has overloads which prevent most of the absolute value problems 8910 // from occurring. 8911 if (IsStdAbs) 8912 return; 8913 8914 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 8915 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 8916 8917 // The argument and parameter are the same kind. Check if they are the right 8918 // size. 8919 if (ArgValueKind == ParamValueKind) { 8920 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 8921 return; 8922 8923 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 8924 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 8925 << FDecl << ArgType << ParamType; 8926 8927 if (NewAbsKind == 0) 8928 return; 8929 8930 emitReplacement(*this, Call->getExprLoc(), 8931 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 8932 return; 8933 } 8934 8935 // ArgValueKind != ParamValueKind 8936 // The wrong type of absolute value function was used. Attempt to find the 8937 // proper one. 8938 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 8939 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 8940 if (NewAbsKind == 0) 8941 return; 8942 8943 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 8944 << FDecl << ParamValueKind << ArgValueKind; 8945 8946 emitReplacement(*this, Call->getExprLoc(), 8947 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 8948 } 8949 8950 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 8951 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 8952 const FunctionDecl *FDecl) { 8953 if (!Call || !FDecl) return; 8954 8955 // Ignore template specializations and macros. 8956 if (inTemplateInstantiation()) return; 8957 if (Call->getExprLoc().isMacroID()) return; 8958 8959 // Only care about the one template argument, two function parameter std::max 8960 if (Call->getNumArgs() != 2) return; 8961 if (!IsStdFunction(FDecl, "max")) return; 8962 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 8963 if (!ArgList) return; 8964 if (ArgList->size() != 1) return; 8965 8966 // Check that template type argument is unsigned integer. 8967 const auto& TA = ArgList->get(0); 8968 if (TA.getKind() != TemplateArgument::Type) return; 8969 QualType ArgType = TA.getAsType(); 8970 if (!ArgType->isUnsignedIntegerType()) return; 8971 8972 // See if either argument is a literal zero. 
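  // For illustration (hypothetical identifiers):
  //
  // \code
  //   unsigned count = get();
  //   unsigned n = std::max(0u, count);   // always equal to 'count'
  // \endcode
  //
  // When exactly one operand is a literal zero, the warning below fires and
  // the note's fix-its reduce the call to '(count)'.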
8973 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 8974 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 8975 if (!MTE) return false; 8976 const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr()); 8977 if (!Num) return false; 8978 if (Num->getValue() != 0) return false; 8979 return true; 8980 }; 8981 8982 const Expr *FirstArg = Call->getArg(0); 8983 const Expr *SecondArg = Call->getArg(1); 8984 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 8985 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 8986 8987 // Only warn when exactly one argument is zero. 8988 if (IsFirstArgZero == IsSecondArgZero) return; 8989 8990 SourceRange FirstRange = FirstArg->getSourceRange(); 8991 SourceRange SecondRange = SecondArg->getSourceRange(); 8992 8993 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 8994 8995 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 8996 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 8997 8998 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 8999 SourceRange RemovalRange; 9000 if (IsFirstArgZero) { 9001 RemovalRange = SourceRange(FirstRange.getBegin(), 9002 SecondRange.getBegin().getLocWithOffset(-1)); 9003 } else { 9004 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 9005 SecondRange.getEnd()); 9006 } 9007 9008 Diag(Call->getExprLoc(), diag::note_remove_max_call) 9009 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 9010 << FixItHint::CreateRemoval(RemovalRange); 9011 } 9012 9013 //===--- CHECK: Standard memory functions ---------------------------------===// 9014 9015 /// Takes the expression passed to the size_t parameter of functions 9016 /// such as memcmp, strncat, etc and warns if it's a comparison. 9017 /// 9018 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9019 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9020 IdentifierInfo *FnName, 9021 SourceLocation FnLoc, 9022 SourceLocation RParenLoc) { 9023 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9024 if (!Size) 9025 return false; 9026 9027 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9028 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9029 return false; 9030 9031 SourceRange SizeRange = Size->getSourceRange(); 9032 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9033 << SizeRange << FnName; 9034 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9035 << FnName 9036 << FixItHint::CreateInsertion( 9037 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9038 << FixItHint::CreateRemoval(RParenLoc); 9039 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9040 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9041 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9042 ")"); 9043 9044 return true; 9045 } 9046 9047 /// Determine whether the given type is or contains a dynamic class type 9048 /// (e.g., whether it has a vtable). 9049 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9050 bool &IsContained) { 9051 // Look through array types while ignoring qualifiers. 9052 const Type *Ty = T->getBaseElementTypeUnsafe(); 9053 IsContained = false; 9054 9055 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9056 RD = RD ? RD->getDefinition() : nullptr; 9057 if (!RD || RD->isInvalidDecl()) 9058 return nullptr; 9059 9060 if (RD->isDynamicClass()) 9061 return RD; 9062 9063 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9064 // It's impossible for a class to transitively contain itself by value, so 9065 // infinite recursion is impossible. 9066 for (auto *FD : RD->fields()) { 9067 bool SubContained; 9068 if (const CXXRecordDecl *ContainedRD = 9069 getContainedDynamicClass(FD->getType(), SubContained)) { 9070 IsContained = true; 9071 return ContainedRD; 9072 } 9073 } 9074 9075 return nullptr; 9076 } 9077 9078 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9079 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9080 if (Unary->getKind() == UETT_SizeOf) 9081 return Unary; 9082 return nullptr; 9083 } 9084 9085 /// If E is a sizeof expression, returns its argument expression, 9086 /// otherwise returns NULL. 9087 static const Expr *getSizeOfExprArg(const Expr *E) { 9088 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9089 if (!SizeOf->isArgumentType()) 9090 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9091 return nullptr; 9092 } 9093 9094 /// If E is a sizeof expression, returns its argument type. 9095 static QualType getSizeOfArgType(const Expr *E) { 9096 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9097 return SizeOf->getTypeOfArgument(); 9098 return QualType(); 9099 } 9100 9101 namespace { 9102 9103 struct SearchNonTrivialToInitializeField 9104 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9105 using Super = 9106 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9107 9108 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9109 9110 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9111 SourceLocation SL) { 9112 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9113 asDerived().visitArray(PDIK, AT, SL); 9114 return; 9115 } 9116 9117 Super::visitWithKind(PDIK, FT, SL); 9118 } 9119 9120 void visitARCStrong(QualType FT, SourceLocation SL) { 9121 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9122 } 9123 void visitARCWeak(QualType FT, SourceLocation SL) { 9124 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9125 } 9126 void visitStruct(QualType FT, SourceLocation SL) { 9127 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9128 visit(FD->getType(), FD->getLocation()); 9129 } 9130 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9131 const ArrayType *AT, SourceLocation SL) { 9132 visit(getContext().getBaseElementType(AT), SL); 9133 } 9134 void visitTrivial(QualType FT, SourceLocation SL) {} 9135 9136 static void diag(QualType RT, const Expr *E, Sema &S) { 9137 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9138 } 9139 9140 ASTContext &getContext() { return S.getASTContext(); } 9141 9142 const Expr *E; 9143 Sema &S; 9144 }; 9145 9146 struct SearchNonTrivialToCopyField 9147 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9148 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9149 9150 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9151 9152 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9153 SourceLocation SL) { 9154 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9155 asDerived().visitArray(PCK, AT, SL); 9156 return; 9157 } 9158 9159 Super::visitWithKind(PCK, FT, SL); 9160 } 9161 9162 void visitARCStrong(QualType FT, SourceLocation SL) { 9163 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9164 } 9165 void visitARCWeak(QualType FT, SourceLocation SL) { 9166 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9167 } 9168 void visitStruct(QualType FT, SourceLocation SL) { 9169 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9170 visit(FD->getType(), FD->getLocation()); 9171 } 9172 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9173 SourceLocation SL) { 9174 visit(getContext().getBaseElementType(AT), SL); 9175 } 9176 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9177 SourceLocation SL) {} 9178 void visitTrivial(QualType FT, SourceLocation SL) {} 9179 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9180 9181 static void diag(QualType RT, const Expr *E, Sema &S) { 9182 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9183 } 9184 9185 ASTContext &getContext() { return S.getASTContext(); } 9186 9187 const Expr *E; 9188 Sema &S; 9189 }; 9190 9191 } 9192 9193 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9194 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9195 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9196 9197 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9198 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9199 return false; 9200 9201 return doesExprLikelyComputeSize(BO->getLHS()) || 9202 doesExprLikelyComputeSize(BO->getRHS()); 9203 } 9204 9205 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9206 } 9207 9208 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9209 /// 9210 /// \code 9211 /// #define MACRO 0 9212 /// foo(MACRO); 9213 /// foo(0); 9214 /// \endcode 9215 /// 9216 /// This should return true for the first call to foo, but not for the second 9217 /// (regardless of whether foo is a macro or function). 9218 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9219 SourceLocation CallLoc, 9220 SourceLocation ArgLoc) { 9221 if (!CallLoc.isMacroID()) 9222 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9223 9224 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9225 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9226 } 9227 9228 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 9229 /// last two arguments transposed. 9230 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 9231 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 9232 return; 9233 9234 const Expr *SizeArg = 9235 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 9236 9237 auto isLiteralZero = [](const Expr *E) { 9238 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 9239 }; 9240 9241 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 9242 SourceLocation CallLoc = Call->getRParenLoc(); 9243 SourceManager &SM = S.getSourceManager(); 9244 if (isLiteralZero(SizeArg) && 9245 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 9246 9247 SourceLocation DiagLoc = SizeArg->getExprLoc(); 9248 9249 // Some platforms #define bzero to __builtin_memset. See if this is the 9250 // case, and if so, emit a better diagnostic. 
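    // For illustration (hypothetical identifiers): calls such as
    //
    // \code
    //   memset(buf, sizeof(buf), 0);   // size and fill value transposed
    //   bzero(ptr, 0);                 // zero-length bzero
    // \endcode
    //
    // reach this point with a literal-zero length; the bzero spelling gets its
    // own wording so that a 'bzero' macro expanding to __builtin_memset is not
    // reported as a suspicious 'memset'.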
9251 if (BId == Builtin::BIbzero || 9252 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 9253 CallLoc, SM, S.getLangOpts()) == "bzero")) { 9254 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 9255 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 9256 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 9257 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 9258 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 9259 } 9260 return; 9261 } 9262 9263 // If the second argument to a memset is a sizeof expression and the third 9264 // isn't, this is also likely an error. This should catch 9265 // 'memset(buf, sizeof(buf), 0xff)'. 9266 if (BId == Builtin::BImemset && 9267 doesExprLikelyComputeSize(Call->getArg(1)) && 9268 !doesExprLikelyComputeSize(Call->getArg(2))) { 9269 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 9270 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 9271 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 9272 return; 9273 } 9274 } 9275 9276 /// Check for dangerous or invalid arguments to memset(). 9277 /// 9278 /// This issues warnings on known problematic, dangerous or unspecified 9279 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 9280 /// function calls. 9281 /// 9282 /// \param Call The call expression to diagnose. 9283 void Sema::CheckMemaccessArguments(const CallExpr *Call, 9284 unsigned BId, 9285 IdentifierInfo *FnName) { 9286 assert(BId != 0); 9287 9288 // It is possible to have a non-standard definition of memset. Validate 9289 // we have enough arguments, and if not, abort further checking. 9290 unsigned ExpectedNumArgs = 9291 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 9292 if (Call->getNumArgs() < ExpectedNumArgs) 9293 return; 9294 9295 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 9296 BId == Builtin::BIstrndup ? 1 : 2); 9297 unsigned LenArg = 9298 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 9299 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 9300 9301 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 9302 Call->getBeginLoc(), Call->getRParenLoc())) 9303 return; 9304 9305 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 9306 CheckMemaccessSize(*this, BId, Call); 9307 9308 // We have special checking when the length is a sizeof expression. 9309 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 9310 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 9311 llvm::FoldingSetNodeID SizeOfArgID; 9312 9313 // Although widely used, 'bzero' is not a standard function. Be more strict 9314 // with the argument types before allowing diagnostics and only allow the 9315 // form bzero(ptr, sizeof(...)). 9316 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9317 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 9318 return; 9319 9320 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 9321 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 9322 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 9323 9324 QualType DestTy = Dest->getType(); 9325 QualType PointeeTy; 9326 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 9327 PointeeTy = DestPtrTy->getPointeeType(); 9328 9329 // Never warn about void type pointers. This can be used to suppress 9330 // false positives. 
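      // For illustration ('object' is a hypothetical identifier): an explicit
      // cast such as
      //
      // \code
      //   memset((void *)&object, 0, sizeof(object));
      // \endcode
      //
      // makes the pointee 'void' and skips the checks below; it is the same
      // cast the note at the end of this loop offers as a fix-it.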
9331 if (PointeeTy->isVoidType()) 9332 continue; 9333 9334 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 9335 // actually comparing the expressions for equality. Because computing the 9336 // expression IDs can be expensive, we only do this if the diagnostic is 9337 // enabled. 9338 if (SizeOfArg && 9339 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 9340 SizeOfArg->getExprLoc())) { 9341 // We only compute IDs for expressions if the warning is enabled, and 9342 // cache the sizeof arg's ID. 9343 if (SizeOfArgID == llvm::FoldingSetNodeID()) 9344 SizeOfArg->Profile(SizeOfArgID, Context, true); 9345 llvm::FoldingSetNodeID DestID; 9346 Dest->Profile(DestID, Context, true); 9347 if (DestID == SizeOfArgID) { 9348 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 9349 // over sizeof(src) as well. 9350 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 9351 StringRef ReadableName = FnName->getName(); 9352 9353 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 9354 if (UnaryOp->getOpcode() == UO_AddrOf) 9355 ActionIdx = 1; // If its an address-of operator, just remove it. 9356 if (!PointeeTy->isIncompleteType() && 9357 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 9358 ActionIdx = 2; // If the pointee's size is sizeof(char), 9359 // suggest an explicit length. 9360 9361 // If the function is defined as a builtin macro, do not show macro 9362 // expansion. 9363 SourceLocation SL = SizeOfArg->getExprLoc(); 9364 SourceRange DSR = Dest->getSourceRange(); 9365 SourceRange SSR = SizeOfArg->getSourceRange(); 9366 SourceManager &SM = getSourceManager(); 9367 9368 if (SM.isMacroArgExpansion(SL)) { 9369 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 9370 SL = SM.getSpellingLoc(SL); 9371 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 9372 SM.getSpellingLoc(DSR.getEnd())); 9373 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 9374 SM.getSpellingLoc(SSR.getEnd())); 9375 } 9376 9377 DiagRuntimeBehavior(SL, SizeOfArg, 9378 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 9379 << ReadableName 9380 << PointeeTy 9381 << DestTy 9382 << DSR 9383 << SSR); 9384 DiagRuntimeBehavior(SL, SizeOfArg, 9385 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 9386 << ActionIdx 9387 << SSR); 9388 9389 break; 9390 } 9391 } 9392 9393 // Also check for cases where the sizeof argument is the exact same 9394 // type as the memory argument, and where it points to a user-defined 9395 // record type. 9396 if (SizeOfArgTy != QualType()) { 9397 if (PointeeTy->isRecordType() && 9398 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 9399 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 9400 PDiag(diag::warn_sizeof_pointer_type_memaccess) 9401 << FnName << SizeOfArgTy << ArgIdx 9402 << PointeeTy << Dest->getSourceRange() 9403 << LenExpr->getSourceRange()); 9404 break; 9405 } 9406 } 9407 } else if (DestTy->isArrayType()) { 9408 PointeeTy = DestTy; 9409 } 9410 9411 if (PointeeTy == QualType()) 9412 continue; 9413 9414 // Always complain about dynamic classes. 9415 bool IsContained; 9416 if (const CXXRecordDecl *ContainedRD = 9417 getContainedDynamicClass(PointeeTy, IsContained)) { 9418 9419 unsigned OperationType = 0; 9420 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 9421 // "overwritten" if we're warning about the destination for any call 9422 // but memcmp; otherwise a verb appropriate to the call. 
9423 if (ArgIdx != 0 || IsCmp) { 9424 if (BId == Builtin::BImemcpy) 9425 OperationType = 1; 9426 else if(BId == Builtin::BImemmove) 9427 OperationType = 2; 9428 else if (IsCmp) 9429 OperationType = 3; 9430 } 9431 9432 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9433 PDiag(diag::warn_dyn_class_memaccess) 9434 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 9435 << IsContained << ContainedRD << OperationType 9436 << Call->getCallee()->getSourceRange()); 9437 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 9438 BId != Builtin::BImemset) 9439 DiagRuntimeBehavior( 9440 Dest->getExprLoc(), Dest, 9441 PDiag(diag::warn_arc_object_memaccess) 9442 << ArgIdx << FnName << PointeeTy 9443 << Call->getCallee()->getSourceRange()); 9444 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 9445 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 9446 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 9447 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9448 PDiag(diag::warn_cstruct_memaccess) 9449 << ArgIdx << FnName << PointeeTy << 0); 9450 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 9451 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 9452 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 9453 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9454 PDiag(diag::warn_cstruct_memaccess) 9455 << ArgIdx << FnName << PointeeTy << 1); 9456 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 9457 } else { 9458 continue; 9459 } 9460 } else 9461 continue; 9462 9463 DiagRuntimeBehavior( 9464 Dest->getExprLoc(), Dest, 9465 PDiag(diag::note_bad_memaccess_silence) 9466 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 9467 break; 9468 } 9469 } 9470 9471 // A little helper routine: ignore addition and subtraction of integer literals. 9472 // This intentionally does not ignore all integer constant expressions because 9473 // we don't want to remove sizeof(). 9474 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 9475 Ex = Ex->IgnoreParenCasts(); 9476 9477 while (true) { 9478 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 9479 if (!BO || !BO->isAdditiveOp()) 9480 break; 9481 9482 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 9483 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 9484 9485 if (isa<IntegerLiteral>(RHS)) 9486 Ex = LHS; 9487 else if (isa<IntegerLiteral>(LHS)) 9488 Ex = RHS; 9489 else 9490 break; 9491 } 9492 9493 return Ex; 9494 } 9495 9496 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 9497 ASTContext &Context) { 9498 // Only handle constant-sized or VLAs, but not flexible members. 9499 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 9500 // Only issue the FIXIT for arrays of size > 1. 9501 if (CAT->getSize().getSExtValue() <= 1) 9502 return false; 9503 } else if (!Ty->isVariableArrayType()) { 9504 return false; 9505 } 9506 return true; 9507 } 9508 9509 // Warn if the user has made the 'size' argument to strlcpy or strlcat 9510 // be the size of the source, instead of the destination. 
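// For illustration (hypothetical identifiers):
//
// \code
//   char dst[32], src[64];
//   strlcpy(dst, src, sizeof(src));   // size taken from the source
// \endcode
//
// is diagnosed below; because the destination is a constant-size array, the
// note's fix-it rewrites the size argument to 'sizeof(dst)'.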
9511 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 9512 IdentifierInfo *FnName) { 9513 9514 // Don't crash if the user has the wrong number of arguments 9515 unsigned NumArgs = Call->getNumArgs(); 9516 if ((NumArgs != 3) && (NumArgs != 4)) 9517 return; 9518 9519 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 9520 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 9521 const Expr *CompareWithSrc = nullptr; 9522 9523 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 9524 Call->getBeginLoc(), Call->getRParenLoc())) 9525 return; 9526 9527 // Look for 'strlcpy(dst, x, sizeof(x))' 9528 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 9529 CompareWithSrc = Ex; 9530 else { 9531 // Look for 'strlcpy(dst, x, strlen(x))' 9532 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 9533 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 9534 SizeCall->getNumArgs() == 1) 9535 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 9536 } 9537 } 9538 9539 if (!CompareWithSrc) 9540 return; 9541 9542 // Determine if the argument to sizeof/strlen is equal to the source 9543 // argument. In principle there's all kinds of things you could do 9544 // here, for instance creating an == expression and evaluating it with 9545 // EvaluateAsBooleanCondition, but this uses a more direct technique: 9546 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 9547 if (!SrcArgDRE) 9548 return; 9549 9550 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 9551 if (!CompareWithSrcDRE || 9552 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 9553 return; 9554 9555 const Expr *OriginalSizeArg = Call->getArg(2); 9556 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 9557 << OriginalSizeArg->getSourceRange() << FnName; 9558 9559 // Output a FIXIT hint if the destination is an array (rather than a 9560 // pointer to an array). This could be enhanced to handle some 9561 // pointers if we know the actual size, like if DstArg is 'array+2' 9562 // we could say 'sizeof(array)-2'. 9563 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 9564 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 9565 return; 9566 9567 SmallString<128> sizeString; 9568 llvm::raw_svector_ostream OS(sizeString); 9569 OS << "sizeof("; 9570 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9571 OS << ")"; 9572 9573 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 9574 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 9575 OS.str()); 9576 } 9577 9578 /// Check if two expressions refer to the same declaration. 9579 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 9580 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 9581 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 9582 return D1->getDecl() == D2->getDecl(); 9583 return false; 9584 } 9585 9586 static const Expr *getStrlenExprArg(const Expr *E) { 9587 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 9588 const FunctionDecl *FD = CE->getDirectCallee(); 9589 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 9590 return nullptr; 9591 return CE->getArg(0)->IgnoreParenCasts(); 9592 } 9593 return nullptr; 9594 } 9595 9596 // Warn on anti-patterns as the 'size' argument to strncat. 
9597 // The correct size argument should look like following: 9598 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 9599 void Sema::CheckStrncatArguments(const CallExpr *CE, 9600 IdentifierInfo *FnName) { 9601 // Don't crash if the user has the wrong number of arguments. 9602 if (CE->getNumArgs() < 3) 9603 return; 9604 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 9605 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 9606 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 9607 9608 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 9609 CE->getRParenLoc())) 9610 return; 9611 9612 // Identify common expressions, which are wrongly used as the size argument 9613 // to strncat and may lead to buffer overflows. 9614 unsigned PatternType = 0; 9615 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 9616 // - sizeof(dst) 9617 if (referToTheSameDecl(SizeOfArg, DstArg)) 9618 PatternType = 1; 9619 // - sizeof(src) 9620 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 9621 PatternType = 2; 9622 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 9623 if (BE->getOpcode() == BO_Sub) { 9624 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 9625 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 9626 // - sizeof(dst) - strlen(dst) 9627 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 9628 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 9629 PatternType = 1; 9630 // - sizeof(src) - (anything) 9631 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 9632 PatternType = 2; 9633 } 9634 } 9635 9636 if (PatternType == 0) 9637 return; 9638 9639 // Generate the diagnostic. 9640 SourceLocation SL = LenArg->getBeginLoc(); 9641 SourceRange SR = LenArg->getSourceRange(); 9642 SourceManager &SM = getSourceManager(); 9643 9644 // If the function is defined as a builtin macro, do not show macro expansion. 9645 if (SM.isMacroArgExpansion(SL)) { 9646 SL = SM.getSpellingLoc(SL); 9647 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 9648 SM.getSpellingLoc(SR.getEnd())); 9649 } 9650 9651 // Check if the destination is an array (rather than a pointer to an array). 9652 QualType DstTy = DstArg->getType(); 9653 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 9654 Context); 9655 if (!isKnownSizeArray) { 9656 if (PatternType == 1) 9657 Diag(SL, diag::warn_strncat_wrong_size) << SR; 9658 else 9659 Diag(SL, diag::warn_strncat_src_size) << SR; 9660 return; 9661 } 9662 9663 if (PatternType == 1) 9664 Diag(SL, diag::warn_strncat_large_size) << SR; 9665 else 9666 Diag(SL, diag::warn_strncat_src_size) << SR; 9667 9668 SmallString<128> sizeString; 9669 llvm::raw_svector_ostream OS(sizeString); 9670 OS << "sizeof("; 9671 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9672 OS << ") - "; 9673 OS << "strlen("; 9674 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9675 OS << ") - 1"; 9676 9677 Diag(SL, diag::note_strncat_wrong_size) 9678 << FixItHint::CreateReplacement(SR, OS.str()); 9679 } 9680 9681 void 9682 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 9683 SourceLocation ReturnLoc, 9684 bool isObjCMethod, 9685 const AttrVec *Attrs, 9686 const FunctionDecl *FD) { 9687 // Check if the return value is null but should not be. 9688 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 9689 (!isObjCMethod && isNonNullType(Context, lhsType))) && 9690 CheckNonNullExpr(*this, RetValExp)) 9691 Diag(ReturnLoc, diag::warn_null_ret) 9692 << (isObjCMethod ? 
1 : 0) << RetValExp->getSourceRange(); 9693 9694 // C++11 [basic.stc.dynamic.allocation]p4: 9695 // If an allocation function declared with a non-throwing 9696 // exception-specification fails to allocate storage, it shall return 9697 // a null pointer. Any other allocation function that fails to allocate 9698 // storage shall indicate failure only by throwing an exception [...] 9699 if (FD) { 9700 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 9701 if (Op == OO_New || Op == OO_Array_New) { 9702 const FunctionProtoType *Proto 9703 = FD->getType()->castAs<FunctionProtoType>(); 9704 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 9705 CheckNonNullExpr(*this, RetValExp)) 9706 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 9707 << FD << getLangOpts().CPlusPlus11; 9708 } 9709 } 9710 } 9711 9712 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 9713 9714 /// Check for comparisons of floating point operands using != and ==. 9715 /// Issue a warning if these are no self-comparisons, as they are not likely 9716 /// to do what the programmer intended. 9717 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) { 9718 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 9719 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 9720 9721 // Special case: check for x == x (which is OK). 9722 // Do not emit warnings for such cases. 9723 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 9724 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 9725 if (DRL->getDecl() == DRR->getDecl()) 9726 return; 9727 9728 // Special case: check for comparisons against literals that can be exactly 9729 // represented by APFloat. In such cases, do not emit a warning. This 9730 // is a heuristic: often comparison against such literals are used to 9731 // detect if a value in a variable has not changed. This clearly can 9732 // lead to false negatives. 9733 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 9734 if (FLL->isExact()) 9735 return; 9736 } else 9737 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 9738 if (FLR->isExact()) 9739 return; 9740 9741 // Check for comparisons with builtin types. 9742 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 9743 if (CL->getBuiltinCallee()) 9744 return; 9745 9746 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 9747 if (CR->getBuiltinCallee()) 9748 return; 9749 9750 // Emit the diagnostic. 9751 Diag(Loc, diag::warn_floatingpoint_eq) 9752 << LHS->getSourceRange() << RHS->getSourceRange(); 9753 } 9754 9755 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 9756 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 9757 9758 namespace { 9759 9760 /// Structure recording the 'active' range of an integer-valued 9761 /// expression. 9762 struct IntRange { 9763 /// The number of bits active in the int. 9764 unsigned Width; 9765 9766 /// True if the int is known not to have negative values. 9767 bool NonNegative; 9768 9769 IntRange(unsigned Width, bool NonNegative) 9770 : Width(Width), NonNegative(NonNegative) {} 9771 9772 /// Returns the range of the bool type. 9773 static IntRange forBoolType() { 9774 return IntRange(1, true); 9775 } 9776 9777 /// Returns the range of an opaque value of the given integral type. 
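  /// For illustration: on a typical target this gives {Width = 8,
  /// NonNegative = true} for 'unsigned char', and for a C++ enumeration whose
  /// enumerators are 0..5 and which has no fixed underlying type it gives
  /// {Width = 3, NonNegative = true}.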
9778 static IntRange forValueOfType(ASTContext &C, QualType T) { 9779 return forValueOfCanonicalType(C, 9780 T->getCanonicalTypeInternal().getTypePtr()); 9781 } 9782 9783 /// Returns the range of an opaque value of a canonical integral type. 9784 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 9785 assert(T->isCanonicalUnqualified()); 9786 9787 if (const VectorType *VT = dyn_cast<VectorType>(T)) 9788 T = VT->getElementType().getTypePtr(); 9789 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 9790 T = CT->getElementType().getTypePtr(); 9791 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 9792 T = AT->getValueType().getTypePtr(); 9793 9794 if (!C.getLangOpts().CPlusPlus) { 9795 // For enum types in C code, use the underlying datatype. 9796 if (const EnumType *ET = dyn_cast<EnumType>(T)) 9797 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 9798 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 9799 // For enum types in C++, use the known bit width of the enumerators. 9800 EnumDecl *Enum = ET->getDecl(); 9801 // In C++11, enums can have a fixed underlying type. Use this type to 9802 // compute the range. 9803 if (Enum->isFixed()) { 9804 return IntRange(C.getIntWidth(QualType(T, 0)), 9805 !ET->isSignedIntegerOrEnumerationType()); 9806 } 9807 9808 unsigned NumPositive = Enum->getNumPositiveBits(); 9809 unsigned NumNegative = Enum->getNumNegativeBits(); 9810 9811 if (NumNegative == 0) 9812 return IntRange(NumPositive, true/*NonNegative*/); 9813 else 9814 return IntRange(std::max(NumPositive + 1, NumNegative), 9815 false/*NonNegative*/); 9816 } 9817 9818 const BuiltinType *BT = cast<BuiltinType>(T); 9819 assert(BT->isInteger()); 9820 9821 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 9822 } 9823 9824 /// Returns the "target" range of a canonical integral type, i.e. 9825 /// the range of values expressible in the type. 9826 /// 9827 /// This matches forValueOfCanonicalType except that enums have the 9828 /// full range of their type, not the range of their enumerators. 9829 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 9830 assert(T->isCanonicalUnqualified()); 9831 9832 if (const VectorType *VT = dyn_cast<VectorType>(T)) 9833 T = VT->getElementType().getTypePtr(); 9834 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 9835 T = CT->getElementType().getTypePtr(); 9836 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 9837 T = AT->getValueType().getTypePtr(); 9838 if (const EnumType *ET = dyn_cast<EnumType>(T)) 9839 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 9840 9841 const BuiltinType *BT = cast<BuiltinType>(T); 9842 assert(BT->isInteger()); 9843 9844 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 9845 } 9846 9847 /// Returns the supremum of two ranges: i.e. their conservative merge. 9848 static IntRange join(IntRange L, IntRange R) { 9849 return IntRange(std::max(L.Width, R.Width), 9850 L.NonNegative && R.NonNegative); 9851 } 9852 9853 /// Returns the infinum of two ranges: i.e. their aggressive merge. 
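  /// For illustration: the meet of a 32-bit possibly-negative range and an
  /// 8-bit non-negative range is an 8-bit non-negative range -- the narrower
  /// width wins and non-negativity of either operand is kept, which is what
  /// makes it a good model for bitwise-and later on.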
9854 static IntRange meet(IntRange L, IntRange R) { 9855 return IntRange(std::min(L.Width, R.Width), 9856 L.NonNegative || R.NonNegative); 9857 } 9858 }; 9859 9860 } // namespace 9861 9862 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 9863 unsigned MaxWidth) { 9864 if (value.isSigned() && value.isNegative()) 9865 return IntRange(value.getMinSignedBits(), false); 9866 9867 if (value.getBitWidth() > MaxWidth) 9868 value = value.trunc(MaxWidth); 9869 9870 // isNonNegative() just checks the sign bit without considering 9871 // signedness. 9872 return IntRange(value.getActiveBits(), true); 9873 } 9874 9875 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 9876 unsigned MaxWidth) { 9877 if (result.isInt()) 9878 return GetValueRange(C, result.getInt(), MaxWidth); 9879 9880 if (result.isVector()) { 9881 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 9882 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 9883 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 9884 R = IntRange::join(R, El); 9885 } 9886 return R; 9887 } 9888 9889 if (result.isComplexInt()) { 9890 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 9891 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 9892 return IntRange::join(R, I); 9893 } 9894 9895 // This can happen with lossless casts to intptr_t of "based" lvalues. 9896 // Assume it might use arbitrary bits. 9897 // FIXME: The only reason we need to pass the type in here is to get 9898 // the sign right on this one case. It would be nice if APValue 9899 // preserved this. 9900 assert(result.isLValue() || result.isAddrLabelDiff()); 9901 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 9902 } 9903 9904 static QualType GetExprType(const Expr *E) { 9905 QualType Ty = E->getType(); 9906 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 9907 Ty = AtomicRHS->getValueType(); 9908 return Ty; 9909 } 9910 9911 /// Pseudo-evaluate the given integer expression, estimating the 9912 /// range of values it might take. 9913 /// 9914 /// \param MaxWidth - the width to which the value will be truncated 9915 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 9916 bool InConstantContext) { 9917 E = E->IgnoreParens(); 9918 9919 // Try a full evaluation first. 9920 Expr::EvalResult result; 9921 if (E->EvaluateAsRValue(result, C, InConstantContext)) 9922 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 9923 9924 // I think we only want to look through implicit casts here; if the 9925 // user has an explicit widening cast, we should treat the value as 9926 // being of the new, wider type. 9927 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 9928 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 9929 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext); 9930 9931 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 9932 9933 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 9934 CE->getCastKind() == CK_BooleanToSignedIntegral; 9935 9936 // Assume that non-integer casts can span the full range of the type. 9937 if (!isIntegerCast) 9938 return OutputTypeRange; 9939 9940 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 9941 std::min(MaxWidth, OutputTypeRange.Width), 9942 InConstantContext); 9943 9944 // Bail out if the subexpr's range is as wide as the cast type. 
9945 if (SubRange.Width >= OutputTypeRange.Width) 9946 return OutputTypeRange; 9947 9948 // Otherwise, we take the smaller width, and we're non-negative if 9949 // either the output type or the subexpr is. 9950 return IntRange(SubRange.Width, 9951 SubRange.NonNegative || OutputTypeRange.NonNegative); 9952 } 9953 9954 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 9955 // If we can fold the condition, just take that operand. 9956 bool CondResult; 9957 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 9958 return GetExprRange(C, 9959 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 9960 MaxWidth, InConstantContext); 9961 9962 // Otherwise, conservatively merge. 9963 IntRange L = 9964 GetExprRange(C, CO->getTrueExpr(), MaxWidth, InConstantContext); 9965 IntRange R = 9966 GetExprRange(C, CO->getFalseExpr(), MaxWidth, InConstantContext); 9967 return IntRange::join(L, R); 9968 } 9969 9970 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 9971 switch (BO->getOpcode()) { 9972 case BO_Cmp: 9973 llvm_unreachable("builtin <=> should have class type"); 9974 9975 // Boolean-valued operations are single-bit and positive. 9976 case BO_LAnd: 9977 case BO_LOr: 9978 case BO_LT: 9979 case BO_GT: 9980 case BO_LE: 9981 case BO_GE: 9982 case BO_EQ: 9983 case BO_NE: 9984 return IntRange::forBoolType(); 9985 9986 // The type of the assignments is the type of the LHS, so the RHS 9987 // is not necessarily the same type. 9988 case BO_MulAssign: 9989 case BO_DivAssign: 9990 case BO_RemAssign: 9991 case BO_AddAssign: 9992 case BO_SubAssign: 9993 case BO_XorAssign: 9994 case BO_OrAssign: 9995 // TODO: bitfields? 9996 return IntRange::forValueOfType(C, GetExprType(E)); 9997 9998 // Simple assignments just pass through the RHS, which will have 9999 // been coerced to the LHS type. 10000 case BO_Assign: 10001 // TODO: bitfields? 10002 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10003 10004 // Operations with opaque sources are black-listed. 10005 case BO_PtrMemD: 10006 case BO_PtrMemI: 10007 return IntRange::forValueOfType(C, GetExprType(E)); 10008 10009 // Bitwise-and uses the *infinum* of the two source ranges. 10010 case BO_And: 10011 case BO_AndAssign: 10012 return IntRange::meet( 10013 GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext), 10014 GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext)); 10015 10016 // Left shift gets black-listed based on a judgement call. 10017 case BO_Shl: 10018 // ...except that we want to treat '1 << (blah)' as logically 10019 // positive. It's an important idiom. 10020 if (IntegerLiteral *I 10021 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10022 if (I->getValue() == 1) { 10023 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10024 return IntRange(R.Width, /*NonNegative*/ true); 10025 } 10026 } 10027 LLVM_FALLTHROUGH; 10028 10029 case BO_ShlAssign: 10030 return IntRange::forValueOfType(C, GetExprType(E)); 10031 10032 // Right shift by a constant can narrow its left argument. 10033 case BO_Shr: 10034 case BO_ShrAssign: { 10035 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10036 10037 // If the shift amount is a positive constant, drop the width by 10038 // that much. 10039 llvm::APSInt shift; 10040 if (BO->getRHS()->isIntegerConstantExpr(shift, C) && 10041 shift.isNonNegative()) { 10042 unsigned zext = shift.getZExtValue(); 10043 if (zext >= L.Width) 10044 L.Width = (L.NonNegative ? 
0 : 1); 10045 else 10046 L.Width -= zext; 10047 } 10048 10049 return L; 10050 } 10051 10052 // Comma acts as its right operand. 10053 case BO_Comma: 10054 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10055 10056 // Black-list pointer subtractions. 10057 case BO_Sub: 10058 if (BO->getLHS()->getType()->isPointerType()) 10059 return IntRange::forValueOfType(C, GetExprType(E)); 10060 break; 10061 10062 // The width of a division result is mostly determined by the size 10063 // of the LHS. 10064 case BO_Div: { 10065 // Don't 'pre-truncate' the operands. 10066 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10067 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10068 10069 // If the divisor is constant, use that. 10070 llvm::APSInt divisor; 10071 if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) { 10072 unsigned log2 = divisor.logBase2(); // floor(log_2(divisor)) 10073 if (log2 >= L.Width) 10074 L.Width = (L.NonNegative ? 0 : 1); 10075 else 10076 L.Width = std::min(L.Width - log2, MaxWidth); 10077 return L; 10078 } 10079 10080 // Otherwise, just use the LHS's width. 10081 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10082 return IntRange(L.Width, L.NonNegative && R.NonNegative); 10083 } 10084 10085 // The result of a remainder can't be larger than the result of 10086 // either side. 10087 case BO_Rem: { 10088 // Don't 'pre-truncate' the operands. 10089 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10090 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10091 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10092 10093 IntRange meet = IntRange::meet(L, R); 10094 meet.Width = std::min(meet.Width, MaxWidth); 10095 return meet; 10096 } 10097 10098 // The default behavior is okay for these. 10099 case BO_Mul: 10100 case BO_Add: 10101 case BO_Xor: 10102 case BO_Or: 10103 break; 10104 } 10105 10106 // The default case is to treat the operation as if it were closed 10107 // on the narrowest type that encompasses both operands. 10108 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10109 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10110 return IntRange::join(L, R); 10111 } 10112 10113 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 10114 switch (UO->getOpcode()) { 10115 // Boolean-valued operations are white-listed. 10116 case UO_LNot: 10117 return IntRange::forBoolType(); 10118 10119 // Operations with opaque sources are black-listed. 10120 case UO_Deref: 10121 case UO_AddrOf: // should be impossible 10122 return IntRange::forValueOfType(C, GetExprType(E)); 10123 10124 default: 10125 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext); 10126 } 10127 } 10128 10129 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 10130 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext); 10131 10132 if (const auto *BitField = E->getSourceBitField()) 10133 return IntRange(BitField->getBitWidthValue(C), 10134 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 10135 10136 return IntRange::forValueOfType(C, GetExprType(E)); 10137 } 10138 10139 static IntRange GetExprRange(ASTContext &C, const Expr *E, 10140 bool InConstantContext) { 10141 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext); 10142 } 10143 10144 /// Checks whether the given value, which currently has the given 10145 /// source semantics, has the same value when coerced through the 10146 /// target semantics. 
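/// For example (illustrative), a 'double' holding 0.5 is unchanged by a round
/// trip through 'float' semantics, whereas one holding 0.1 is not.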
10147 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 10148 const llvm::fltSemantics &Src, 10149 const llvm::fltSemantics &Tgt) { 10150 llvm::APFloat truncated = value; 10151 10152 bool ignored; 10153 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 10154 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 10155 10156 return truncated.bitwiseIsEqual(value); 10157 } 10158 10159 /// Checks whether the given value, which currently has the given 10160 /// source semantics, has the same value when coerced through the 10161 /// target semantics. 10162 /// 10163 /// The value might be a vector of floats (or a complex number). 10164 static bool IsSameFloatAfterCast(const APValue &value, 10165 const llvm::fltSemantics &Src, 10166 const llvm::fltSemantics &Tgt) { 10167 if (value.isFloat()) 10168 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 10169 10170 if (value.isVector()) { 10171 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 10172 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 10173 return false; 10174 return true; 10175 } 10176 10177 assert(value.isComplexFloat()); 10178 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 10179 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 10180 } 10181 10182 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC); 10183 10184 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 10185 // Suppress cases where we are comparing against an enum constant. 10186 if (const DeclRefExpr *DR = 10187 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 10188 if (isa<EnumConstantDecl>(DR->getDecl())) 10189 return true; 10190 10191 // Suppress cases where the '0' value is expanded from a macro. 10192 if (E->getBeginLoc().isMacroID()) 10193 return true; 10194 10195 return false; 10196 } 10197 10198 static bool isKnownToHaveUnsignedValue(Expr *E) { 10199 return E->getType()->isIntegerType() && 10200 (!E->getType()->isSignedIntegerType() || 10201 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 10202 } 10203 10204 namespace { 10205 /// The promoted range of values of a type. In general this has the 10206 /// following structure: 10207 /// 10208 /// |-----------| . . . |-----------| 10209 /// ^ ^ ^ ^ 10210 /// Min HoleMin HoleMax Max 10211 /// 10212 /// ... where there is only a hole if a signed type is promoted to unsigned 10213 /// (in which case Min and Max are the smallest and largest representable 10214 /// values). 10215 struct PromotedRange { 10216 // Min, or HoleMax if there is a hole. 10217 llvm::APSInt PromotedMin; 10218 // Max, or HoleMin if there is a hole. 10219 llvm::APSInt PromotedMax; 10220 10221 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 10222 if (R.Width == 0) 10223 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 10224 else if (R.Width >= BitWidth && !Unsigned) { 10225 // Promotion made the type *narrower*. This happens when promoting 10226 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 10227 // Treat all values of 'signed int' as being in range for now. 
10228 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 10229 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 10230 } else { 10231 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 10232 .extOrTrunc(BitWidth); 10233 PromotedMin.setIsUnsigned(Unsigned); 10234 10235 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 10236 .extOrTrunc(BitWidth); 10237 PromotedMax.setIsUnsigned(Unsigned); 10238 } 10239 } 10240 10241 // Determine whether this range is contiguous (has no hole). 10242 bool isContiguous() const { return PromotedMin <= PromotedMax; } 10243 10244 // Where a constant value is within the range. 10245 enum ComparisonResult { 10246 LT = 0x1, 10247 LE = 0x2, 10248 GT = 0x4, 10249 GE = 0x8, 10250 EQ = 0x10, 10251 NE = 0x20, 10252 InRangeFlag = 0x40, 10253 10254 Less = LE | LT | NE, 10255 Min = LE | InRangeFlag, 10256 InRange = InRangeFlag, 10257 Max = GE | InRangeFlag, 10258 Greater = GE | GT | NE, 10259 10260 OnlyValue = LE | GE | EQ | InRangeFlag, 10261 InHole = NE 10262 }; 10263 10264 ComparisonResult compare(const llvm::APSInt &Value) const { 10265 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 10266 Value.isUnsigned() == PromotedMin.isUnsigned()); 10267 if (!isContiguous()) { 10268 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 10269 if (Value.isMinValue()) return Min; 10270 if (Value.isMaxValue()) return Max; 10271 if (Value >= PromotedMin) return InRange; 10272 if (Value <= PromotedMax) return InRange; 10273 return InHole; 10274 } 10275 10276 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 10277 case -1: return Less; 10278 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 10279 case 1: 10280 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 10281 case -1: return InRange; 10282 case 0: return Max; 10283 case 1: return Greater; 10284 } 10285 } 10286 10287 llvm_unreachable("impossible compare result"); 10288 } 10289 10290 static llvm::Optional<StringRef> 10291 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 10292 if (Op == BO_Cmp) { 10293 ComparisonResult LTFlag = LT, GTFlag = GT; 10294 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 10295 10296 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 10297 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 10298 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 10299 return llvm::None; 10300 } 10301 10302 ComparisonResult TrueFlag, FalseFlag; 10303 if (Op == BO_EQ) { 10304 TrueFlag = EQ; 10305 FalseFlag = NE; 10306 } else if (Op == BO_NE) { 10307 TrueFlag = NE; 10308 FalseFlag = EQ; 10309 } else { 10310 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 10311 TrueFlag = LT; 10312 FalseFlag = GE; 10313 } else { 10314 TrueFlag = GT; 10315 FalseFlag = LE; 10316 } 10317 if (Op == BO_GE || Op == BO_LE) 10318 std::swap(TrueFlag, FalseFlag); 10319 } 10320 if (R & TrueFlag) 10321 return StringRef("true"); 10322 if (R & FalseFlag) 10323 return StringRef("false"); 10324 return llvm::None; 10325 } 10326 }; 10327 } 10328 10329 static bool HasEnumType(Expr *E) { 10330 // Strip off implicit integral promotions. 
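  // (These are typically the CK_IntegralCast nodes created when an enum
  // operand is promoted to 'int' by the usual arithmetic conversions.)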
10331 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10332 if (ICE->getCastKind() != CK_IntegralCast && 10333 ICE->getCastKind() != CK_NoOp) 10334 break; 10335 E = ICE->getSubExpr(); 10336 } 10337 10338 return E->getType()->isEnumeralType(); 10339 } 10340 10341 static int classifyConstantValue(Expr *Constant) { 10342 // The values of this enumeration are used in the diagnostics 10343 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 10344 enum ConstantValueKind { 10345 Miscellaneous = 0, 10346 LiteralTrue, 10347 LiteralFalse 10348 }; 10349 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 10350 return BL->getValue() ? ConstantValueKind::LiteralTrue 10351 : ConstantValueKind::LiteralFalse; 10352 return ConstantValueKind::Miscellaneous; 10353 } 10354 10355 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 10356 Expr *Constant, Expr *Other, 10357 const llvm::APSInt &Value, 10358 bool RhsConstant) { 10359 if (S.inTemplateInstantiation()) 10360 return false; 10361 10362 Expr *OriginalOther = Other; 10363 10364 Constant = Constant->IgnoreParenImpCasts(); 10365 Other = Other->IgnoreParenImpCasts(); 10366 10367 // Suppress warnings on tautological comparisons between values of the same 10368 // enumeration type. There are only two ways we could warn on this: 10369 // - If the constant is outside the range of representable values of 10370 // the enumeration. In such a case, we should warn about the cast 10371 // to enumeration type, not about the comparison. 10372 // - If the constant is the maximum / minimum in-range value. For an 10373 // enumeratin type, such comparisons can be meaningful and useful. 10374 if (Constant->getType()->isEnumeralType() && 10375 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 10376 return false; 10377 10378 // TODO: Investigate using GetExprRange() to get tighter bounds 10379 // on the bit ranges. 10380 QualType OtherT = Other->getType(); 10381 if (const auto *AT = OtherT->getAs<AtomicType>()) 10382 OtherT = AT->getValueType(); 10383 IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT); 10384 10385 // Whether we're treating Other as being a bool because of the form of 10386 // expression despite it having another type (typically 'int' in C). 10387 bool OtherIsBooleanDespiteType = 10388 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue(); 10389 if (OtherIsBooleanDespiteType) 10390 OtherRange = IntRange::forBoolType(); 10391 10392 // Determine the promoted range of the other type and see if a comparison of 10393 // the constant against that range is tautological. 10394 PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(), 10395 Value.isUnsigned()); 10396 auto Cmp = OtherPromotedRange.compare(Value); 10397 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant); 10398 if (!Result) 10399 return false; 10400 10401 // Suppress the diagnostic for an in-range comparison if the constant comes 10402 // from a macro or enumerator. We don't want to diagnose 10403 // 10404 // some_long_value <= INT_MAX 10405 // 10406 // when sizeof(int) == sizeof(long). 10407 bool InRange = Cmp & PromotedRange::InRangeFlag; 10408 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 10409 return false; 10410 10411 // If this is a comparison to an enum constant, include that 10412 // constant in the diagnostic. 
10413 const EnumConstantDecl *ED = nullptr; 10414 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 10415 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 10416 10417 // Should be enough for uint128 (39 decimal digits) 10418 SmallString<64> PrettySourceValue; 10419 llvm::raw_svector_ostream OS(PrettySourceValue); 10420 if (ED) 10421 OS << '\'' << *ED << "' (" << Value << ")"; 10422 else 10423 OS << Value; 10424 10425 // FIXME: We use a somewhat different formatting for the in-range cases and 10426 // cases involving boolean values for historical reasons. We should pick a 10427 // consistent way of presenting these diagnostics. 10428 if (!InRange || Other->isKnownToHaveBooleanValue()) { 10429 10430 S.DiagRuntimeBehavior( 10431 E->getOperatorLoc(), E, 10432 S.PDiag(!InRange ? diag::warn_out_of_range_compare 10433 : diag::warn_tautological_bool_compare) 10434 << OS.str() << classifyConstantValue(Constant) << OtherT 10435 << OtherIsBooleanDespiteType << *Result 10436 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 10437 } else { 10438 unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 10439 ? (HasEnumType(OriginalOther) 10440 ? diag::warn_unsigned_enum_always_true_comparison 10441 : diag::warn_unsigned_always_true_comparison) 10442 : diag::warn_tautological_constant_compare; 10443 10444 S.Diag(E->getOperatorLoc(), Diag) 10445 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 10446 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 10447 } 10448 10449 return true; 10450 } 10451 10452 /// Analyze the operands of the given comparison. Implements the 10453 /// fallback case from AnalyzeComparison. 10454 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 10455 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10456 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10457 } 10458 10459 /// Implements -Wsign-compare. 10460 /// 10461 /// \param E the binary operator to check for warnings 10462 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 10463 // The type the comparison is being performed in. 10464 QualType T = E->getLHS()->getType(); 10465 10466 // Only analyze comparison operators where both sides have been converted to 10467 // the same type. 10468 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 10469 return AnalyzeImpConvsInComparison(S, E); 10470 10471 // Don't analyze value-dependent comparisons directly. 10472 if (E->isValueDependent()) 10473 return AnalyzeImpConvsInComparison(S, E); 10474 10475 Expr *LHS = E->getLHS(); 10476 Expr *RHS = E->getRHS(); 10477 10478 if (T->isIntegralType(S.Context)) { 10479 llvm::APSInt RHSValue; 10480 llvm::APSInt LHSValue; 10481 10482 bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context); 10483 bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context); 10484 10485 // We don't care about expressions whose result is a constant. 10486 if (IsRHSIntegralLiteral && IsLHSIntegralLiteral) 10487 return AnalyzeImpConvsInComparison(S, E); 10488 10489 // We only care about expressions where just one side is literal 10490 if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) { 10491 // Is the constant on the RHS or LHS? 10492 const bool RhsConstant = IsRHSIntegralLiteral; 10493 Expr *Const = RhsConstant ? RHS : LHS; 10494 Expr *Other = RhsConstant ? LHS : RHS; 10495 const llvm::APSInt &Value = RhsConstant ? 
RHSValue : LHSValue; 10496 10497 // Check whether an integer constant comparison results in a value 10498 // of 'true' or 'false'. 10499 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 10500 return AnalyzeImpConvsInComparison(S, E); 10501 } 10502 } 10503 10504 if (!T->hasUnsignedIntegerRepresentation()) { 10505 // We don't do anything special if this isn't an unsigned integral 10506 // comparison: we're only interested in integral comparisons, and 10507 // signed comparisons only happen in cases we don't care to warn about. 10508 return AnalyzeImpConvsInComparison(S, E); 10509 } 10510 10511 LHS = LHS->IgnoreParenImpCasts(); 10512 RHS = RHS->IgnoreParenImpCasts(); 10513 10514 if (!S.getLangOpts().CPlusPlus) { 10515 // Avoid warning about comparison of integers with different signs when 10516 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 10517 // the type of `E`. 10518 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 10519 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 10520 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 10521 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 10522 } 10523 10524 // Check to see if one of the (unmodified) operands is of different 10525 // signedness. 10526 Expr *signedOperand, *unsignedOperand; 10527 if (LHS->getType()->hasSignedIntegerRepresentation()) { 10528 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 10529 "unsigned comparison between two signed integer expressions?"); 10530 signedOperand = LHS; 10531 unsignedOperand = RHS; 10532 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 10533 signedOperand = RHS; 10534 unsignedOperand = LHS; 10535 } else { 10536 return AnalyzeImpConvsInComparison(S, E); 10537 } 10538 10539 // Otherwise, calculate the effective range of the signed operand. 10540 IntRange signedRange = 10541 GetExprRange(S.Context, signedOperand, S.isConstantEvaluated()); 10542 10543 // Go ahead and analyze implicit conversions in the operands. Note 10544 // that we skip the implicit conversions on both sides. 10545 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 10546 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 10547 10548 // If the signed range is non-negative, -Wsign-compare won't fire. 10549 if (signedRange.NonNegative) 10550 return; 10551 10552 // For (in)equality comparisons, if the unsigned operand is a 10553 // constant which cannot collide with a overflowed signed operand, 10554 // then reinterpreting the signed operand as unsigned will not 10555 // change the result of the comparison. 10556 if (E->isEqualityOp()) { 10557 unsigned comparisonWidth = S.Context.getIntWidth(T); 10558 IntRange unsignedRange = 10559 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated()); 10560 10561 // We should never be unable to prove that the unsigned operand is 10562 // non-negative. 10563 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 10564 10565 if (unsignedRange.Width < comparisonWidth) 10566 return; 10567 } 10568 10569 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 10570 S.PDiag(diag::warn_mixed_sign_comparison) 10571 << LHS->getType() << RHS->getType() 10572 << LHS->getSourceRange() << RHS->getSourceRange()); 10573 } 10574 10575 /// Analyzes an attempt to assign the given value to a bitfield. 10576 /// 10577 /// Returns true if there was something fishy about the attempt. 
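/// For example (illustrative):
///   struct S { int Bits : 3; };
///   S X;
///   X.Bits = 9;  // 9 does not fit in 3 bits; the stored value is 1, so the
///                // truncating constant assignment is diagnosed below.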
10578 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 10579 SourceLocation InitLoc) { 10580 assert(Bitfield->isBitField()); 10581 if (Bitfield->isInvalidDecl()) 10582 return false; 10583 10584 // White-list bool bitfields. 10585 QualType BitfieldType = Bitfield->getType(); 10586 if (BitfieldType->isBooleanType()) 10587 return false; 10588 10589 if (BitfieldType->isEnumeralType()) { 10590 EnumDecl *BitfieldEnumDecl = BitfieldType->getAs<EnumType>()->getDecl(); 10591 // If the underlying enum type was not explicitly specified as an unsigned 10592 // type and the enum contain only positive values, MSVC++ will cause an 10593 // inconsistency by storing this as a signed type. 10594 if (S.getLangOpts().CPlusPlus11 && 10595 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 10596 BitfieldEnumDecl->getNumPositiveBits() > 0 && 10597 BitfieldEnumDecl->getNumNegativeBits() == 0) { 10598 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 10599 << BitfieldEnumDecl->getNameAsString(); 10600 } 10601 } 10602 10603 if (Bitfield->getType()->isBooleanType()) 10604 return false; 10605 10606 // Ignore value- or type-dependent expressions. 10607 if (Bitfield->getBitWidth()->isValueDependent() || 10608 Bitfield->getBitWidth()->isTypeDependent() || 10609 Init->isValueDependent() || 10610 Init->isTypeDependent()) 10611 return false; 10612 10613 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 10614 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 10615 10616 Expr::EvalResult Result; 10617 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 10618 Expr::SE_AllowSideEffects)) { 10619 // The RHS is not constant. If the RHS has an enum type, make sure the 10620 // bitfield is wide enough to hold all the values of the enum without 10621 // truncation. 10622 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 10623 EnumDecl *ED = EnumTy->getDecl(); 10624 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 10625 10626 // Enum types are implicitly signed on Windows, so check if there are any 10627 // negative enumerators to see if the enum was intended to be signed or 10628 // not. 10629 bool SignedEnum = ED->getNumNegativeBits() > 0; 10630 10631 // Check for surprising sign changes when assigning enum values to a 10632 // bitfield of different signedness. If the bitfield is signed and we 10633 // have exactly the right number of bits to store this unsigned enum, 10634 // suggest changing the enum to an unsigned type. This typically happens 10635 // on Windows where unfixed enums always use an underlying type of 'int'. 10636 unsigned DiagID = 0; 10637 if (SignedEnum && !SignedBitfield) { 10638 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 10639 } else if (SignedBitfield && !SignedEnum && 10640 ED->getNumPositiveBits() == FieldWidth) { 10641 DiagID = diag::warn_signed_bitfield_enum_conversion; 10642 } 10643 10644 if (DiagID) { 10645 S.Diag(InitLoc, DiagID) << Bitfield << ED; 10646 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 10647 SourceRange TypeRange = 10648 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 10649 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 10650 << SignedEnum << TypeRange; 10651 } 10652 10653 // Compute the required bitwidth. If the enum has negative values, we need 10654 // one more bit than the normal number of positive bits to represent the 10655 // sign bit. 10656 unsigned BitsNeeded = SignedEnum ? 
std::max(ED->getNumPositiveBits() + 1, 10657 ED->getNumNegativeBits()) 10658 : ED->getNumPositiveBits(); 10659 10660 // Check the bitwidth. 10661 if (BitsNeeded > FieldWidth) { 10662 Expr *WidthExpr = Bitfield->getBitWidth(); 10663 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 10664 << Bitfield << ED; 10665 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 10666 << BitsNeeded << ED << WidthExpr->getSourceRange(); 10667 } 10668 } 10669 10670 return false; 10671 } 10672 10673 llvm::APSInt Value = Result.Val.getInt(); 10674 10675 unsigned OriginalWidth = Value.getBitWidth(); 10676 10677 if (!Value.isSigned() || Value.isNegative()) 10678 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 10679 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 10680 OriginalWidth = Value.getMinSignedBits(); 10681 10682 if (OriginalWidth <= FieldWidth) 10683 return false; 10684 10685 // Compute the value which the bitfield will contain. 10686 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 10687 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 10688 10689 // Check whether the stored value is equal to the original value. 10690 TruncatedValue = TruncatedValue.extend(OriginalWidth); 10691 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 10692 return false; 10693 10694 // Special-case bitfields of width 1: booleans are naturally 0/1, and 10695 // therefore don't strictly fit into a signed bitfield of width 1. 10696 if (FieldWidth == 1 && Value == 1) 10697 return false; 10698 10699 std::string PrettyValue = Value.toString(10); 10700 std::string PrettyTrunc = TruncatedValue.toString(10); 10701 10702 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 10703 << PrettyValue << PrettyTrunc << OriginalInit->getType() 10704 << Init->getSourceRange(); 10705 10706 return true; 10707 } 10708 10709 /// Analyze the given simple or compound assignment for warning-worthy 10710 /// operations. 10711 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 10712 // Just recurse on the LHS. 10713 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10714 10715 // We want to recurse on the RHS as normal unless we're assigning to 10716 // a bitfield. 10717 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 10718 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 10719 E->getOperatorLoc())) { 10720 // Recurse, ignoring any implicit conversions on the RHS. 10721 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 10722 E->getOperatorLoc()); 10723 } 10724 } 10725 10726 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10727 10728 // Diagnose implicitly sequentially-consistent atomic assignment. 10729 if (E->getLHS()->getType()->isAtomicType()) 10730 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 10731 } 10732 10733 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 10734 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 10735 SourceLocation CContext, unsigned diag, 10736 bool pruneControlFlow = false) { 10737 if (pruneControlFlow) { 10738 S.DiagRuntimeBehavior(E->getExprLoc(), E, 10739 S.PDiag(diag) 10740 << SourceType << T << E->getSourceRange() 10741 << SourceRange(CContext)); 10742 return; 10743 } 10744 S.Diag(E->getExprLoc(), diag) 10745 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 10746 } 10747 10748 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 
10749 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 10750 SourceLocation CContext, 10751 unsigned diag, bool pruneControlFlow = false) { 10752 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 10753 } 10754 10755 /// Diagnose an implicit cast from a floating point value to an integer value. 10756 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 10757 SourceLocation CContext) { 10758 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 10759 const bool PruneWarnings = S.inTemplateInstantiation(); 10760 10761 Expr *InnerE = E->IgnoreParenImpCasts(); 10762 // We also want to warn on, e.g., "int i = -1.234" 10763 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 10764 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 10765 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 10766 10767 const bool IsLiteral = 10768 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 10769 10770 llvm::APFloat Value(0.0); 10771 bool IsConstant = 10772 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 10773 if (!IsConstant) { 10774 return DiagnoseImpCast(S, E, T, CContext, 10775 diag::warn_impcast_float_integer, PruneWarnings); 10776 } 10777 10778 bool isExact = false; 10779 10780 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 10781 T->hasUnsignedIntegerRepresentation()); 10782 llvm::APFloat::opStatus Result = Value.convertToInteger( 10783 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 10784 10785 if (Result == llvm::APFloat::opOK && isExact) { 10786 if (IsLiteral) return; 10787 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 10788 PruneWarnings); 10789 } 10790 10791 // Conversion of a floating-point value to a non-bool integer where the 10792 // integral part cannot be represented by the integer type is undefined. 10793 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 10794 return DiagnoseImpCast( 10795 S, E, T, CContext, 10796 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 10797 : diag::warn_impcast_float_to_integer_out_of_range, 10798 PruneWarnings); 10799 10800 unsigned DiagID = 0; 10801 if (IsLiteral) { 10802 // Warn on floating point literal to integer. 10803 DiagID = diag::warn_impcast_literal_float_to_integer; 10804 } else if (IntegerValue == 0) { 10805 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 10806 return DiagnoseImpCast(S, E, T, CContext, 10807 diag::warn_impcast_float_integer, PruneWarnings); 10808 } 10809 // Warn on non-zero to zero conversion. 10810 DiagID = diag::warn_impcast_float_to_integer_zero; 10811 } else { 10812 if (IntegerValue.isUnsigned()) { 10813 if (!IntegerValue.isMaxValue()) { 10814 return DiagnoseImpCast(S, E, T, CContext, 10815 diag::warn_impcast_float_integer, PruneWarnings); 10816 } 10817 } else { // IntegerValue.isSigned() 10818 if (!IntegerValue.isMaxSignedValue() && 10819 !IntegerValue.isMinSignedValue()) { 10820 return DiagnoseImpCast(S, E, T, CContext, 10821 diag::warn_impcast_float_integer, PruneWarnings); 10822 } 10823 } 10824 // Warn on evaluatable floating point expression to integer conversion. 10825 DiagID = diag::warn_impcast_float_to_integer; 10826 } 10827 10828 // FIXME: Force the precision of the source value down so we don't print 10829 // digits which are usually useless (we don't really care here if we 10830 // truncate a digit by accident in edge cases). 
Ideally, APFloat::toString 10831 // would automatically print the shortest representation, but it's a bit 10832 // tricky to implement. 10833 SmallString<16> PrettySourceValue; 10834 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 10835 precision = (precision * 59 + 195) / 196; 10836 Value.toString(PrettySourceValue, precision); 10837 10838 SmallString<16> PrettyTargetValue; 10839 if (IsBool) 10840 PrettyTargetValue = Value.isZero() ? "false" : "true"; 10841 else 10842 IntegerValue.toString(PrettyTargetValue); 10843 10844 if (PruneWarnings) { 10845 S.DiagRuntimeBehavior(E->getExprLoc(), E, 10846 S.PDiag(DiagID) 10847 << E->getType() << T.getUnqualifiedType() 10848 << PrettySourceValue << PrettyTargetValue 10849 << E->getSourceRange() << SourceRange(CContext)); 10850 } else { 10851 S.Diag(E->getExprLoc(), DiagID) 10852 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 10853 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 10854 } 10855 } 10856 10857 /// Analyze the given compound assignment for the possible losing of 10858 /// floating-point precision. 10859 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 10860 assert(isa<CompoundAssignOperator>(E) && 10861 "Must be compound assignment operation"); 10862 // Recurse on the LHS and RHS in here 10863 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10864 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10865 10866 if (E->getLHS()->getType()->isAtomicType()) 10867 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 10868 10869 // Now check the outermost expression 10870 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 10871 const auto *RBT = cast<CompoundAssignOperator>(E) 10872 ->getComputationResultType() 10873 ->getAs<BuiltinType>(); 10874 10875 // The below checks assume source is floating point. 10876 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 10877 10878 // If source is floating point but target is an integer. 10879 if (ResultBT->isInteger()) 10880 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 10881 E->getExprLoc(), diag::warn_impcast_float_integer); 10882 10883 if (!ResultBT->isFloatingPoint()) 10884 return; 10885 10886 // If both source and target are floating points, warn about losing precision. 10887 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 10888 QualType(ResultBT, 0), QualType(RBT, 0)); 10889 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 10890 // warn about dropping FP rank. 
10891 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 10892 diag::warn_impcast_float_result_precision); 10893 } 10894 10895 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 10896 IntRange Range) { 10897 if (!Range.Width) return "0"; 10898 10899 llvm::APSInt ValueInRange = Value; 10900 ValueInRange.setIsSigned(!Range.NonNegative); 10901 ValueInRange = ValueInRange.trunc(Range.Width); 10902 return ValueInRange.toString(10); 10903 } 10904 10905 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 10906 if (!isa<ImplicitCastExpr>(Ex)) 10907 return false; 10908 10909 Expr *InnerE = Ex->IgnoreParenImpCasts(); 10910 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 10911 const Type *Source = 10912 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 10913 if (Target->isDependentType()) 10914 return false; 10915 10916 const BuiltinType *FloatCandidateBT = 10917 dyn_cast<BuiltinType>(ToBool ? Source : Target); 10918 const Type *BoolCandidateType = ToBool ? Target : Source; 10919 10920 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 10921 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 10922 } 10923 10924 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 10925 SourceLocation CC) { 10926 unsigned NumArgs = TheCall->getNumArgs(); 10927 for (unsigned i = 0; i < NumArgs; ++i) { 10928 Expr *CurrA = TheCall->getArg(i); 10929 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 10930 continue; 10931 10932 bool IsSwapped = ((i > 0) && 10933 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 10934 IsSwapped |= ((i < (NumArgs - 1)) && 10935 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 10936 if (IsSwapped) { 10937 // Warn on this floating-point to bool conversion. 10938 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 10939 CurrA->getType(), CC, 10940 diag::warn_impcast_floating_point_to_bool); 10941 } 10942 } 10943 } 10944 10945 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 10946 SourceLocation CC) { 10947 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 10948 E->getExprLoc())) 10949 return; 10950 10951 // Don't warn on functions which have return type nullptr_t. 10952 if (isa<CallExpr>(E)) 10953 return; 10954 10955 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 10956 const Expr::NullPointerConstantKind NullKind = 10957 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 10958 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 10959 return; 10960 10961 // Return if target type is a safe conversion. 10962 if (T->isAnyPointerType() || T->isBlockPointerType() || 10963 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 10964 return; 10965 10966 SourceLocation Loc = E->getSourceRange().getBegin(); 10967 10968 // Venture through the macro stacks to get to the source of macro arguments. 10969 // The new location is a better location than the complete location that was 10970 // passed in. 10971 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 10972 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 10973 10974 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
10975 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 10976 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 10977 Loc, S.SourceMgr, S.getLangOpts()); 10978 if (MacroName == "NULL") 10979 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 10980 } 10981 10982 // Only warn if the null and context location are in the same macro expansion. 10983 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 10984 return; 10985 10986 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 10987 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 10988 << FixItHint::CreateReplacement(Loc, 10989 S.getFixItZeroLiteralForType(T, Loc)); 10990 } 10991 10992 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 10993 ObjCArrayLiteral *ArrayLiteral); 10994 10995 static void 10996 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 10997 ObjCDictionaryLiteral *DictionaryLiteral); 10998 10999 /// Check a single element within a collection literal against the 11000 /// target element type. 11001 static void checkObjCCollectionLiteralElement(Sema &S, 11002 QualType TargetElementType, 11003 Expr *Element, 11004 unsigned ElementKind) { 11005 // Skip a bitcast to 'id' or qualified 'id'. 11006 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 11007 if (ICE->getCastKind() == CK_BitCast && 11008 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 11009 Element = ICE->getSubExpr(); 11010 } 11011 11012 QualType ElementType = Element->getType(); 11013 ExprResult ElementResult(Element); 11014 if (ElementType->getAs<ObjCObjectPointerType>() && 11015 S.CheckSingleAssignmentConstraints(TargetElementType, 11016 ElementResult, 11017 false, false) 11018 != Sema::Compatible) { 11019 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 11020 << ElementType << ElementKind << TargetElementType 11021 << Element->getSourceRange(); 11022 } 11023 11024 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 11025 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 11026 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 11027 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 11028 } 11029 11030 /// Check an Objective-C array literal being converted to the given 11031 /// target type. 11032 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11033 ObjCArrayLiteral *ArrayLiteral) { 11034 if (!S.NSArrayDecl) 11035 return; 11036 11037 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11038 if (!TargetObjCPtr) 11039 return; 11040 11041 if (TargetObjCPtr->isUnspecialized() || 11042 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11043 != S.NSArrayDecl->getCanonicalDecl()) 11044 return; 11045 11046 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11047 if (TypeArgs.size() != 1) 11048 return; 11049 11050 QualType TargetElementType = TypeArgs[0]; 11051 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 11052 checkObjCCollectionLiteralElement(S, TargetElementType, 11053 ArrayLiteral->getElement(I), 11054 0); 11055 } 11056 } 11057 11058 /// Check an Objective-C dictionary literal being converted to the given 11059 /// target type. 
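/// For example (illustrative), converting the literal @{ @"key" : @1 } to
/// 'NSDictionary<NSString *, NSString *> *' should diagnose the object
/// element, since an NSNumber is not an NSString.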
11060 static void 11061 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11062 ObjCDictionaryLiteral *DictionaryLiteral) { 11063 if (!S.NSDictionaryDecl) 11064 return; 11065 11066 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11067 if (!TargetObjCPtr) 11068 return; 11069 11070 if (TargetObjCPtr->isUnspecialized() || 11071 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11072 != S.NSDictionaryDecl->getCanonicalDecl()) 11073 return; 11074 11075 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11076 if (TypeArgs.size() != 2) 11077 return; 11078 11079 QualType TargetKeyType = TypeArgs[0]; 11080 QualType TargetObjectType = TypeArgs[1]; 11081 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 11082 auto Element = DictionaryLiteral->getKeyValueElement(I); 11083 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 11084 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 11085 } 11086 } 11087 11088 // Helper function to filter out cases for constant width constant conversion. 11089 // Don't warn on char array initialization or for non-decimal values. 11090 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 11091 SourceLocation CC) { 11092 // If initializing from a constant, and the constant starts with '0', 11093 // then it is a binary, octal, or hexadecimal. Allow these constants 11094 // to fill all the bits, even if there is a sign change. 11095 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 11096 const char FirstLiteralCharacter = 11097 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 11098 if (FirstLiteralCharacter == '0') 11099 return false; 11100 } 11101 11102 // If the CC location points to a '{', and the type is char, then assume 11103 // assume it is an array initialization. 11104 if (CC.isValid() && T->isCharType()) { 11105 const char FirstContextCharacter = 11106 S.getSourceManager().getCharacterData(CC)[0]; 11107 if (FirstContextCharacter == '{') 11108 return false; 11109 } 11110 11111 return true; 11112 } 11113 11114 static void 11115 CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC, 11116 bool *ICContext = nullptr) { 11117 if (E->isTypeDependent() || E->isValueDependent()) return; 11118 11119 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 11120 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 11121 if (Source == Target) return; 11122 if (Target->isDependentType()) return; 11123 11124 // If the conversion context location is invalid don't complain. We also 11125 // don't want to emit a warning if the issue occurs from the expansion of 11126 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 11127 // delay this check as long as possible. Once we detect we are in that 11128 // scenario, we just return. 11129 if (CC.isInvalid()) 11130 return; 11131 11132 if (Source->isAtomicType()) 11133 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 11134 11135 // Diagnose implicit casts to bool. 11136 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 11137 if (isa<StringLiteral>(E)) 11138 // Warn on string literal to bool. Checks for string literals in logical 11139 // and expressions, for instance, assert(0 && "error here"), are 11140 // prevented by a check in AnalyzeImplicitConversions(). 
11141 return DiagnoseImpCast(S, E, T, CC, 11142 diag::warn_impcast_string_literal_to_bool); 11143 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 11144 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 11145 // This covers the literal expressions that evaluate to Objective-C 11146 // objects. 11147 return DiagnoseImpCast(S, E, T, CC, 11148 diag::warn_impcast_objective_c_literal_to_bool); 11149 } 11150 if (Source->isPointerType() || Source->canDecayToPointerType()) { 11151 // Warn on pointer to bool conversion that is always true. 11152 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 11153 SourceRange(CC)); 11154 } 11155 } 11156 11157 // Check implicit casts from Objective-C collection literals to specialized 11158 // collection types, e.g., NSArray<NSString *> *. 11159 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 11160 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 11161 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 11162 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 11163 11164 // Strip vector types. 11165 if (isa<VectorType>(Source)) { 11166 if (!isa<VectorType>(Target)) { 11167 if (S.SourceMgr.isInSystemMacro(CC)) 11168 return; 11169 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 11170 } 11171 11172 // If the vector cast is cast between two vectors of the same size, it is 11173 // a bitcast, not a conversion. 11174 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 11175 return; 11176 11177 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 11178 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 11179 } 11180 if (auto VecTy = dyn_cast<VectorType>(Target)) 11181 Target = VecTy->getElementType().getTypePtr(); 11182 11183 // Strip complex types. 11184 if (isa<ComplexType>(Source)) { 11185 if (!isa<ComplexType>(Target)) { 11186 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 11187 return; 11188 11189 return DiagnoseImpCast(S, E, T, CC, 11190 S.getLangOpts().CPlusPlus 11191 ? diag::err_impcast_complex_scalar 11192 : diag::warn_impcast_complex_scalar); 11193 } 11194 11195 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 11196 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 11197 } 11198 11199 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 11200 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 11201 11202 // If the source is floating point... 11203 if (SourceBT && SourceBT->isFloatingPoint()) { 11204 // ...and the target is floating point... 11205 if (TargetBT && TargetBT->isFloatingPoint()) { 11206 // ...then warn if we're dropping FP rank. 11207 11208 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11209 QualType(SourceBT, 0), QualType(TargetBT, 0)); 11210 if (Order > 0) { 11211 // Don't warn about float constants that are precisely 11212 // representable in the target type. 11213 Expr::EvalResult result; 11214 if (E->EvaluateAsRValue(result, S.Context)) { 11215 // Value might be a float, a float vector, or a float complex. 11216 if (IsSameFloatAfterCast(result.Val, 11217 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 11218 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 11219 return; 11220 } 11221 11222 if (S.SourceMgr.isInSystemMacro(CC)) 11223 return; 11224 11225 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 11226 } 11227 // ... 
or possibly if we're increasing rank, too 11228 else if (Order < 0) { 11229 if (S.SourceMgr.isInSystemMacro(CC)) 11230 return; 11231 11232 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 11233 } 11234 return; 11235 } 11236 11237 // If the target is integral, always warn. 11238 if (TargetBT && TargetBT->isInteger()) { 11239 if (S.SourceMgr.isInSystemMacro(CC)) 11240 return; 11241 11242 DiagnoseFloatingImpCast(S, E, T, CC); 11243 } 11244 11245 // Detect the case where a call result is converted from floating-point to 11246 // to bool, and the final argument to the call is converted from bool, to 11247 // discover this typo: 11248 // 11249 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 11250 // 11251 // FIXME: This is an incredibly special case; is there some more general 11252 // way to detect this class of misplaced-parentheses bug? 11253 if (Target->isBooleanType() && isa<CallExpr>(E)) { 11254 // Check last argument of function call to see if it is an 11255 // implicit cast from a type matching the type the result 11256 // is being cast to. 11257 CallExpr *CEx = cast<CallExpr>(E); 11258 if (unsigned NumArgs = CEx->getNumArgs()) { 11259 Expr *LastA = CEx->getArg(NumArgs - 1); 11260 Expr *InnerE = LastA->IgnoreParenImpCasts(); 11261 if (isa<ImplicitCastExpr>(LastA) && 11262 InnerE->getType()->isBooleanType()) { 11263 // Warn on this floating-point to bool conversion 11264 DiagnoseImpCast(S, E, T, CC, 11265 diag::warn_impcast_floating_point_to_bool); 11266 } 11267 } 11268 } 11269 return; 11270 } 11271 11272 // Valid casts involving fixed point types should be accounted for here. 11273 if (Source->isFixedPointType()) { 11274 if (Target->isUnsaturatedFixedPointType()) { 11275 Expr::EvalResult Result; 11276 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 11277 S.isConstantEvaluated())) { 11278 APFixedPoint Value = Result.Val.getFixedPoint(); 11279 APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 11280 APFixedPoint MinVal = S.Context.getFixedPointMin(T); 11281 if (Value > MaxVal || Value < MinVal) { 11282 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11283 S.PDiag(diag::warn_impcast_fixed_point_range) 11284 << Value.toString() << T 11285 << E->getSourceRange() 11286 << clang::SourceRange(CC)); 11287 return; 11288 } 11289 } 11290 } else if (Target->isIntegerType()) { 11291 Expr::EvalResult Result; 11292 if (!S.isConstantEvaluated() && 11293 E->EvaluateAsFixedPoint(Result, S.Context, 11294 Expr::SE_AllowSideEffects)) { 11295 APFixedPoint FXResult = Result.Val.getFixedPoint(); 11296 11297 bool Overflowed; 11298 llvm::APSInt IntResult = FXResult.convertToInt( 11299 S.Context.getIntWidth(T), 11300 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 11301 11302 if (Overflowed) { 11303 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11304 S.PDiag(diag::warn_impcast_fixed_point_range) 11305 << FXResult.toString() << T 11306 << E->getSourceRange() 11307 << clang::SourceRange(CC)); 11308 return; 11309 } 11310 } 11311 } 11312 } else if (Target->isUnsaturatedFixedPointType()) { 11313 if (Source->isIntegerType()) { 11314 Expr::EvalResult Result; 11315 if (!S.isConstantEvaluated() && 11316 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 11317 llvm::APSInt Value = Result.Val.getInt(); 11318 11319 bool Overflowed; 11320 APFixedPoint IntResult = APFixedPoint::getFromIntValue( 11321 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 11322 11323 if (Overflowed) { 11324 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11325 
S.PDiag(diag::warn_impcast_fixed_point_range) 11326 << Value.toString(/*radix=*/10) << T 11327 << E->getSourceRange() 11328 << clang::SourceRange(CC)); 11329 return; 11330 } 11331 } 11332 } 11333 } 11334 11335 DiagnoseNullConversion(S, E, T, CC); 11336 11337 S.DiscardMisalignedMemberAddress(Target, E); 11338 11339 if (!Source->isIntegerType() || !Target->isIntegerType()) 11340 return; 11341 11342 // TODO: remove this early return once the false positives for constant->bool 11343 // in templates, macros, etc, are reduced or removed. 11344 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 11345 return; 11346 11347 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated()); 11348 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 11349 11350 if (SourceRange.Width > TargetRange.Width) { 11351 // If the source is a constant, use a default-on diagnostic. 11352 // TODO: this should happen for bitfield stores, too. 11353 Expr::EvalResult Result; 11354 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 11355 S.isConstantEvaluated())) { 11356 llvm::APSInt Value(32); 11357 Value = Result.Val.getInt(); 11358 11359 if (S.SourceMgr.isInSystemMacro(CC)) 11360 return; 11361 11362 std::string PrettySourceValue = Value.toString(10); 11363 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 11364 11365 S.DiagRuntimeBehavior( 11366 E->getExprLoc(), E, 11367 S.PDiag(diag::warn_impcast_integer_precision_constant) 11368 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11369 << E->getSourceRange() << clang::SourceRange(CC)); 11370 return; 11371 } 11372 11373 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 11374 if (S.SourceMgr.isInSystemMacro(CC)) 11375 return; 11376 11377 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 11378 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 11379 /* pruneControlFlow */ true); 11380 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 11381 } 11382 11383 if (TargetRange.Width > SourceRange.Width) { 11384 if (auto *UO = dyn_cast<UnaryOperator>(E)) 11385 if (UO->getOpcode() == UO_Minus) 11386 if (Source->isUnsignedIntegerType()) { 11387 if (Target->isUnsignedIntegerType()) 11388 return DiagnoseImpCast(S, E, T, CC, 11389 diag::warn_impcast_high_order_zero_bits); 11390 if (Target->isSignedIntegerType()) 11391 return DiagnoseImpCast(S, E, T, CC, 11392 diag::warn_impcast_nonnegative_result); 11393 } 11394 } 11395 11396 if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative && 11397 SourceRange.NonNegative && Source->isSignedIntegerType()) { 11398 // Warn when doing a signed to signed conversion, warn if the positive 11399 // source value is exactly the width of the target type, which will 11400 // cause a negative value to be stored. 
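    // Illustrative example (not part of the original comment): with a 16-bit
    // signed target, a non-negative source value such as 65535 needs all 16
    // bits, so the stored result wraps around to -1 even though no high-order
    // bits are dropped.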
11401 11402 Expr::EvalResult Result; 11403 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 11404 !S.SourceMgr.isInSystemMacro(CC)) { 11405 llvm::APSInt Value = Result.Val.getInt(); 11406 if (isSameWidthConstantConversion(S, E, T, CC)) { 11407 std::string PrettySourceValue = Value.toString(10); 11408 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 11409 11410 S.DiagRuntimeBehavior( 11411 E->getExprLoc(), E, 11412 S.PDiag(diag::warn_impcast_integer_precision_constant) 11413 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11414 << E->getSourceRange() << clang::SourceRange(CC)); 11415 return; 11416 } 11417 } 11418 11419 // Fall through for non-constants to give a sign conversion warning. 11420 } 11421 11422 if ((TargetRange.NonNegative && !SourceRange.NonNegative) || 11423 (!TargetRange.NonNegative && SourceRange.NonNegative && 11424 SourceRange.Width == TargetRange.Width)) { 11425 if (S.SourceMgr.isInSystemMacro(CC)) 11426 return; 11427 11428 unsigned DiagID = diag::warn_impcast_integer_sign; 11429 11430 // Traditionally, gcc has warned about this under -Wsign-compare. 11431 // We also want to warn about it in -Wconversion. 11432 // So if -Wconversion is off, use a completely identical diagnostic 11433 // in the sign-compare group. 11434 // The conditional-checking code will be OK with that. 11435 if (ICContext) { 11436 DiagID = diag::warn_impcast_integer_sign_conditional; 11437 *ICContext = true; 11438 } 11439 11440 return DiagnoseImpCast(S, E, T, CC, DiagID); 11441 } 11442 11443 // Diagnose conversions between different enumeration types. 11444 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 11445 // type, to give us better diagnostics. 11446 QualType SourceType = E->getType(); 11447 if (!S.getLangOpts().CPlusPlus) { 11448 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 11449 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 11450 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 11451 SourceType = S.Context.getTypeDeclType(Enum); 11452 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 11453 } 11454 } 11455 11456 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 11457 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 11458 if (SourceEnum->getDecl()->hasNameForLinkage() && 11459 TargetEnum->getDecl()->hasNameForLinkage() && 11460 SourceEnum != TargetEnum) { 11461 if (S.SourceMgr.isInSystemMacro(CC)) 11462 return; 11463 11464 return DiagnoseImpCast(S, E, SourceType, T, CC, 11465 diag::warn_impcast_different_enum_types); 11466 } 11467 } 11468 11469 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E, 11470 SourceLocation CC, QualType T); 11471 11472 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 11473 SourceLocation CC, bool &ICContext) { 11474 E = E->IgnoreParenImpCasts(); 11475 11476 if (isa<ConditionalOperator>(E)) 11477 return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T); 11478 11479 AnalyzeImplicitConversions(S, E, CC); 11480 if (E->getType() != T) 11481 return CheckImplicitConversion(S, E, T, CC, &ICContext); 11482 } 11483 11484 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E, 11485 SourceLocation CC, QualType T) { 11486 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 11487 11488 bool Suspicious = false; 11489 CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious); 11490 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 11491 11492 // If
-Wconversion would have warned about either of the candidates 11493 // for a signedness conversion to the context type... 11494 if (!Suspicious) return; 11495 11496 // ...but it's currently ignored... 11497 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 11498 return; 11499 11500 // ...then check whether it would have warned about either of the 11501 // candidates for a signedness conversion to the condition type. 11502 if (E->getType() == T) return; 11503 11504 Suspicious = false; 11505 CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(), 11506 E->getType(), CC, &Suspicious); 11507 if (!Suspicious) 11508 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 11509 E->getType(), CC, &Suspicious); 11510 } 11511 11512 /// Check conversion of given expression to boolean. 11513 /// Input argument E is a logical expression. 11514 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 11515 if (S.getLangOpts().Bool) 11516 return; 11517 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 11518 return; 11519 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 11520 } 11521 11522 /// AnalyzeImplicitConversions - Find and report any interesting 11523 /// implicit conversions in the given expression. There are a couple 11524 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 11525 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, 11526 SourceLocation CC) { 11527 QualType T = OrigE->getType(); 11528 Expr *E = OrigE->IgnoreParenImpCasts(); 11529 11530 if (E->isTypeDependent() || E->isValueDependent()) 11531 return; 11532 11533 // For conditional operators, we analyze the arguments as if they 11534 // were being fed directly into the output. 11535 if (isa<ConditionalOperator>(E)) { 11536 ConditionalOperator *CO = cast<ConditionalOperator>(E); 11537 CheckConditionalOperator(S, CO, CC, T); 11538 return; 11539 } 11540 11541 // Check implicit argument conversions for function calls. 11542 if (CallExpr *Call = dyn_cast<CallExpr>(E)) 11543 CheckImplicitArgumentConversions(S, Call, CC); 11544 11545 // Go ahead and check any implicit conversions we might have skipped. 11546 // The non-canonical typecheck is just an optimization; 11547 // CheckImplicitConversion will filter out dead implicit conversions. 11548 if (E->getType() != T) 11549 CheckImplicitConversion(S, E, T, CC); 11550 11551 // Now continue drilling into this expression. 11552 11553 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 11554 // The bound subexpressions in a PseudoObjectExpr are not reachable 11555 // as transitive children. 11556 // FIXME: Use a more uniform representation for this. 11557 for (auto *SE : POE->semantics()) 11558 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 11559 AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC); 11560 } 11561 11562 // Skip past explicit casts. 11563 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 11564 E = CE->getSubExpr()->IgnoreParenImpCasts(); 11565 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 11566 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11567 return AnalyzeImplicitConversions(S, E, CC); 11568 } 11569 11570 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 11571 // Do a somewhat different check with comparison operators. 11572 if (BO->isComparisonOp()) 11573 return AnalyzeComparison(S, BO); 11574 11575 // And with simple assignments. 
11576 if (BO->getOpcode() == BO_Assign) 11577 return AnalyzeAssignment(S, BO); 11578 // And with compound assignments. 11579 if (BO->isAssignmentOp()) 11580 return AnalyzeCompoundAssignment(S, BO); 11581 } 11582 11583 // These break the otherwise-useful invariant below. Fortunately, 11584 // we don't really need to recurse into them, because any internal 11585 // expressions should have been analyzed already when they were 11586 // built into statements. 11587 if (isa<StmtExpr>(E)) return; 11588 11589 // Don't descend into unevaluated contexts. 11590 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 11591 11592 // Now just recurse over the expression's children. 11593 CC = E->getExprLoc(); 11594 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 11595 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 11596 for (Stmt *SubStmt : E->children()) { 11597 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 11598 if (!ChildExpr) 11599 continue; 11600 11601 if (IsLogicalAndOperator && 11602 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 11603 // Ignore checking string literals that are in logical and operators. 11604 // This is a common pattern for asserts. 11605 continue; 11606 AnalyzeImplicitConversions(S, ChildExpr, CC); 11607 } 11608 11609 if (BO && BO->isLogicalOp()) { 11610 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 11611 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 11612 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 11613 11614 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 11615 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 11616 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 11617 } 11618 11619 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 11620 if (U->getOpcode() == UO_LNot) { 11621 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 11622 } else if (U->getOpcode() != UO_AddrOf) { 11623 if (U->getSubExpr()->getType()->isAtomicType()) 11624 S.Diag(U->getSubExpr()->getBeginLoc(), 11625 diag::warn_atomic_implicit_seq_cst); 11626 } 11627 } 11628 } 11629 11630 /// Diagnose integer type and any valid implicit conversion to it. 11631 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) { 11632 // Taking into account implicit conversions, 11633 // allow any integer. 11634 if (!E->getType()->isIntegerType()) { 11635 S.Diag(E->getBeginLoc(), 11636 diag::err_opencl_enqueue_kernel_invalid_local_size_type); 11637 return true; 11638 } 11639 // Potentially emit standard warnings for implicit conversions if enabled 11640 // using -Wconversion. 11641 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 11642 return false; 11643 } 11644 11645 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 11646 // Returns true when emitting a warning about taking the address of a reference. 
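// Illustrative example (assumed, not from the original source):
//
//   int &getRef();             // hypothetical function returning a reference
//   if (&getRef()) { ... }     // the address of a reference is never null
//
// The caller supplies the appropriate "address of reference" diagnostic and
// this helper additionally notes the declaration of getRef().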
11647 static bool CheckForReference(Sema &SemaRef, const Expr *E, 11648 const PartialDiagnostic &PD) { 11649 E = E->IgnoreParenImpCasts(); 11650 11651 const FunctionDecl *FD = nullptr; 11652 11653 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 11654 if (!DRE->getDecl()->getType()->isReferenceType()) 11655 return false; 11656 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 11657 if (!M->getMemberDecl()->getType()->isReferenceType()) 11658 return false; 11659 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 11660 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 11661 return false; 11662 FD = Call->getDirectCallee(); 11663 } else { 11664 return false; 11665 } 11666 11667 SemaRef.Diag(E->getExprLoc(), PD); 11668 11669 // If possible, point to location of function. 11670 if (FD) { 11671 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 11672 } 11673 11674 return true; 11675 } 11676 11677 // Returns true if the SourceLocation is expanded from any macro body. 11678 // Returns false if the SourceLocation is invalid, is from not in a macro 11679 // expansion, or is from expanded from a top-level macro argument. 11680 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 11681 if (Loc.isInvalid()) 11682 return false; 11683 11684 while (Loc.isMacroID()) { 11685 if (SM.isMacroBodyExpansion(Loc)) 11686 return true; 11687 Loc = SM.getImmediateMacroCallerLoc(Loc); 11688 } 11689 11690 return false; 11691 } 11692 11693 /// Diagnose pointers that are always non-null. 11694 /// \param E the expression containing the pointer 11695 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 11696 /// compared to a null pointer 11697 /// \param IsEqual True when the comparison is equal to a null pointer 11698 /// \param Range Extra SourceRange to highlight in the diagnostic 11699 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 11700 Expr::NullPointerConstantKind NullKind, 11701 bool IsEqual, SourceRange Range) { 11702 if (!E) 11703 return; 11704 11705 // Don't warn inside macros. 11706 if (E->getExprLoc().isMacroID()) { 11707 const SourceManager &SM = getSourceManager(); 11708 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 11709 IsInAnyMacroBody(SM, Range.getBegin())) 11710 return; 11711 } 11712 E = E->IgnoreImpCasts(); 11713 11714 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 11715 11716 if (isa<CXXThisExpr>(E)) { 11717 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 11718 : diag::warn_this_bool_conversion; 11719 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 11720 return; 11721 } 11722 11723 bool IsAddressOf = false; 11724 11725 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 11726 if (UO->getOpcode() != UO_AddrOf) 11727 return; 11728 IsAddressOf = true; 11729 E = UO->getSubExpr(); 11730 } 11731 11732 if (IsAddressOf) { 11733 unsigned DiagID = IsCompare 11734 ? diag::warn_address_of_reference_null_compare 11735 : diag::warn_address_of_reference_bool_conversion; 11736 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 11737 << IsEqual; 11738 if (CheckForReference(*this, E, PD)) { 11739 return; 11740 } 11741 } 11742 11743 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 11744 bool IsParam = isa<NonNullAttr>(NonnullAttr); 11745 std::string Str; 11746 llvm::raw_string_ostream S(Str); 11747 E->printPretty(S, nullptr, getPrintingPolicy()); 11748 unsigned DiagID = IsCompare ? 
diag::warn_nonnull_expr_compare 11749 : diag::warn_cast_nonnull_to_bool; 11750 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 11751 << E->getSourceRange() << Range << IsEqual; 11752 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 11753 }; 11754 11755 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 11756 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 11757 if (auto *Callee = Call->getDirectCallee()) { 11758 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 11759 ComplainAboutNonnullParamOrCall(A); 11760 return; 11761 } 11762 } 11763 } 11764 11765 // Expect to find a single Decl. Skip anything more complicated. 11766 ValueDecl *D = nullptr; 11767 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 11768 D = R->getDecl(); 11769 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 11770 D = M->getMemberDecl(); 11771 } 11772 11773 // Weak Decls can be null. 11774 if (!D || D->isWeak()) 11775 return; 11776 11777 // Check for parameter decl with nonnull attribute 11778 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 11779 if (getCurFunction() && 11780 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 11781 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 11782 ComplainAboutNonnullParamOrCall(A); 11783 return; 11784 } 11785 11786 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 11787 // Skip function template not specialized yet. 11788 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11789 return; 11790 auto ParamIter = llvm::find(FD->parameters(), PV); 11791 assert(ParamIter != FD->param_end()); 11792 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 11793 11794 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 11795 if (!NonNull->args_size()) { 11796 ComplainAboutNonnullParamOrCall(NonNull); 11797 return; 11798 } 11799 11800 for (const ParamIdx &ArgNo : NonNull->args()) { 11801 if (ArgNo.getASTIndex() == ParamNo) { 11802 ComplainAboutNonnullParamOrCall(NonNull); 11803 return; 11804 } 11805 } 11806 } 11807 } 11808 } 11809 } 11810 11811 QualType T = D->getType(); 11812 const bool IsArray = T->isArrayType(); 11813 const bool IsFunction = T->isFunctionType(); 11814 11815 // Address of function is used to silence the function warning. 11816 if (IsAddressOf && IsFunction) { 11817 return; 11818 } 11819 11820 // Found nothing. 11821 if (!IsAddressOf && !IsFunction && !IsArray) 11822 return; 11823 11824 // Pretty print the expression for the diagnostic. 11825 std::string Str; 11826 llvm::raw_string_ostream S(Str); 11827 E->printPretty(S, nullptr, getPrintingPolicy()); 11828 11829 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 11830 : diag::warn_impcast_pointer_to_bool; 11831 enum { 11832 AddressOf, 11833 FunctionPointer, 11834 ArrayPointer 11835 } DiagType; 11836 if (IsAddressOf) 11837 DiagType = AddressOf; 11838 else if (IsFunction) 11839 DiagType = FunctionPointer; 11840 else if (IsArray) 11841 DiagType = ArrayPointer; 11842 else 11843 llvm_unreachable("Could not determine diagnostic."); 11844 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 11845 << Range << IsEqual; 11846 11847 if (!IsFunction) 11848 return; 11849 11850 // Suggest '&' to silence the function warning. 11851 Diag(E->getExprLoc(), diag::note_function_warning_silence) 11852 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 11853 11854 // Check to see if '()' fixit should be emitted. 
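  // Illustrative example (assumed, not from the original source):
  //
  //   bool haveData();          // hypothetical function
  //   if (haveData) { ... }     // warns: the function itself is never null
  //
  // Because the return type here is bool, the note below suggests inserting
  // "()" so that the function is actually called.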
11855 QualType ReturnType; 11856 UnresolvedSet<4> NonTemplateOverloads; 11857 tryExprAsCall(*E, ReturnType, NonTemplateOverloads); 11858 if (ReturnType.isNull()) 11859 return; 11860 11861 if (IsCompare) { 11862 // There are two cases here. If there is a null constant, only suggest the 11863 // fixit for a pointer return type. If the null is 0, then suggest it if the 11864 // return type is a pointer or an integer type. 11865 if (!ReturnType->isPointerType()) { 11866 if (NullKind == Expr::NPCK_ZeroExpression || 11867 NullKind == Expr::NPCK_ZeroLiteral) { 11868 if (!ReturnType->isIntegerType()) 11869 return; 11870 } else { 11871 return; 11872 } 11873 } 11874 } else { // !IsCompare 11875 // For function to bool, only suggest if the function pointer has bool 11876 // return type. 11877 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool)) 11878 return; 11879 } 11880 Diag(E->getExprLoc(), diag::note_function_to_function_call) 11881 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()"); 11882 } 11883 11884 /// Diagnoses "dangerous" implicit conversions within the given 11885 /// expression (which is a full expression). Implements -Wconversion 11886 /// and -Wsign-compare. 11887 /// 11888 /// \param CC the "context" location of the implicit conversion, i.e. 11889 /// the location of the syntactic entity requiring the implicit 11890 /// conversion 11891 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) { 11892 // Don't diagnose in unevaluated contexts. 11893 if (isUnevaluatedContext()) 11894 return; 11895 11896 // Don't diagnose for value- or type-dependent expressions. 11897 if (E->isTypeDependent() || E->isValueDependent()) 11898 return; 11899 11900 // Check for array bounds violations in cases where the check isn't triggered 11901 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 11902 // ArraySubscriptExpr is on the RHS of a variable initialization. 11903 CheckArrayAccess(E); 11904 11905 // This is not the right CC for (e.g.) a variable initialization. 11906 AnalyzeImplicitConversions(*this, E, CC); 11907 } 11908 11909 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 11910 /// Input argument E is a logical expression. 11911 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 11912 ::CheckBoolLikeConversion(*this, E, CC); 11913 } 11914 11915 /// Diagnose when the expression is an integer constant expression and its 11916 /// evaluation results in integer overflow. 11917 void Sema::CheckForIntOverflow (Expr *E) { 11918 // Use a work list to deal with nested struct initializers. 11919 SmallVector<Expr *, 2> Exprs(1, E); 11920 11921 do { 11922 Expr *OriginalE = Exprs.pop_back_val(); 11923 Expr *E = OriginalE->IgnoreParenCasts(); 11924 11925 if (isa<BinaryOperator>(E)) { 11926 E->EvaluateForOverflow(Context); 11927 continue; 11928 } 11929 11930 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 11931 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 11932 else if (isa<ObjCBoxedExpr>(OriginalE)) 11933 E->EvaluateForOverflow(Context); 11934 else if (auto Call = dyn_cast<CallExpr>(E)) 11935 Exprs.append(Call->arg_begin(), Call->arg_end()); 11936 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 11937 Exprs.append(Message->arg_begin(), Message->arg_end()); 11938 } while (!Exprs.empty()); 11939 } 11940 11941 namespace { 11942 11943 /// Visitor for expressions which looks for unsequenced operations on the 11944 /// same object.
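/// Illustrative example (not part of the original comment):
///
///   int i = 0;
///   int x = i++ + i++;   // warning: multiple unsequenced modifications to 'i'
///
/// The two increments are unsequenced with respect to each other, so the
/// checker reports them under -Wunsequenced.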
11945 class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> { 11946 using Base = EvaluatedExprVisitor<SequenceChecker>; 11947 11948 /// A tree of sequenced regions within an expression. Two regions are 11949 /// unsequenced if one is an ancestor or a descendent of the other. When we 11950 /// finish processing an expression with sequencing, such as a comma 11951 /// expression, we fold its tree nodes into its parent, since they are 11952 /// unsequenced with respect to nodes we will visit later. 11953 class SequenceTree { 11954 struct Value { 11955 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 11956 unsigned Parent : 31; 11957 unsigned Merged : 1; 11958 }; 11959 SmallVector<Value, 8> Values; 11960 11961 public: 11962 /// A region within an expression which may be sequenced with respect 11963 /// to some other region. 11964 class Seq { 11965 friend class SequenceTree; 11966 11967 unsigned Index; 11968 11969 explicit Seq(unsigned N) : Index(N) {} 11970 11971 public: 11972 Seq() : Index(0) {} 11973 }; 11974 11975 SequenceTree() { Values.push_back(Value(0)); } 11976 Seq root() const { return Seq(0); } 11977 11978 /// Create a new sequence of operations, which is an unsequenced 11979 /// subset of \p Parent. This sequence of operations is sequenced with 11980 /// respect to other children of \p Parent. 11981 Seq allocate(Seq Parent) { 11982 Values.push_back(Value(Parent.Index)); 11983 return Seq(Values.size() - 1); 11984 } 11985 11986 /// Merge a sequence of operations into its parent. 11987 void merge(Seq S) { 11988 Values[S.Index].Merged = true; 11989 } 11990 11991 /// Determine whether two operations are unsequenced. This operation 11992 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 11993 /// should have been merged into its parent as appropriate. 11994 bool isUnsequenced(Seq Cur, Seq Old) { 11995 unsigned C = representative(Cur.Index); 11996 unsigned Target = representative(Old.Index); 11997 while (C >= Target) { 11998 if (C == Target) 11999 return true; 12000 C = Values[C].Parent; 12001 } 12002 return false; 12003 } 12004 12005 private: 12006 /// Pick a representative for a sequence. 12007 unsigned representative(unsigned K) { 12008 if (Values[K].Merged) 12009 // Perform path compression as we go. 12010 return Values[K].Parent = representative(Values[K].Parent); 12011 return K; 12012 } 12013 }; 12014 12015 /// An object for which we can track unsequenced uses. 12016 using Object = NamedDecl *; 12017 12018 /// Different flavors of object usage which we track. We only track the 12019 /// least-sequenced usage of each kind. 12020 enum UsageKind { 12021 /// A read of an object. Multiple unsequenced reads are OK. 12022 UK_Use, 12023 12024 /// A modification of an object which is sequenced before the value 12025 /// computation of the expression, such as ++n in C++. 12026 UK_ModAsValue, 12027 12028 /// A modification of an object which is not sequenced before the value 12029 /// computation of the expression, such as n++. 12030 UK_ModAsSideEffect, 12031 12032 UK_Count = UK_ModAsSideEffect + 1 12033 }; 12034 12035 struct Usage { 12036 Expr *Use; 12037 SequenceTree::Seq Seq; 12038 12039 Usage() : Use(nullptr), Seq() {} 12040 }; 12041 12042 struct UsageInfo { 12043 Usage Uses[UK_Count]; 12044 12045 /// Have we issued a diagnostic for this variable already? 
12046 bool Diagnosed; 12047 12048 UsageInfo() : Uses(), Diagnosed(false) {} 12049 }; 12050 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 12051 12052 Sema &SemaRef; 12053 12054 /// Sequenced regions within the expression. 12055 SequenceTree Tree; 12056 12057 /// Declaration modifications and references which we have seen. 12058 UsageInfoMap UsageMap; 12059 12060 /// The region we are currently within. 12061 SequenceTree::Seq Region; 12062 12063 /// Filled in with declarations which were modified as a side-effect 12064 /// (that is, post-increment operations). 12065 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 12066 12067 /// Expressions to check later. We defer checking these to reduce 12068 /// stack usage. 12069 SmallVectorImpl<Expr *> &WorkList; 12070 12071 /// RAII object wrapping the visitation of a sequenced subexpression of an 12072 /// expression. At the end of this process, the side-effects of the evaluation 12073 /// become sequenced with respect to the value computation of the result, so 12074 /// we downgrade any UK_ModAsSideEffect within the evaluation to 12075 /// UK_ModAsValue. 12076 struct SequencedSubexpression { 12077 SequencedSubexpression(SequenceChecker &Self) 12078 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 12079 Self.ModAsSideEffect = &ModAsSideEffect; 12080 } 12081 12082 ~SequencedSubexpression() { 12083 for (auto &M : llvm::reverse(ModAsSideEffect)) { 12084 UsageInfo &U = Self.UsageMap[M.first]; 12085 auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect]; 12086 Self.addUsage(U, M.first, SideEffectUsage.Use, UK_ModAsValue); 12087 SideEffectUsage = M.second; 12088 } 12089 Self.ModAsSideEffect = OldModAsSideEffect; 12090 } 12091 12092 SequenceChecker &Self; 12093 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 12094 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 12095 }; 12096 12097 /// RAII object wrapping the visitation of a subexpression which we might 12098 /// choose to evaluate as a constant. If any subexpression is evaluated and 12099 /// found to be non-constant, this allows us to suppress the evaluation of 12100 /// the outer expression. 12101 class EvaluationTracker { 12102 public: 12103 EvaluationTracker(SequenceChecker &Self) 12104 : Self(Self), Prev(Self.EvalTracker) { 12105 Self.EvalTracker = this; 12106 } 12107 12108 ~EvaluationTracker() { 12109 Self.EvalTracker = Prev; 12110 if (Prev) 12111 Prev->EvalOK &= EvalOK; 12112 } 12113 12114 bool evaluate(const Expr *E, bool &Result) { 12115 if (!EvalOK || E->isValueDependent()) 12116 return false; 12117 EvalOK = E->EvaluateAsBooleanCondition( 12118 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 12119 return EvalOK; 12120 } 12121 12122 private: 12123 SequenceChecker &Self; 12124 EvaluationTracker *Prev; 12125 bool EvalOK = true; 12126 } *EvalTracker = nullptr; 12127 12128 /// Find the object which is produced by the specified expression, 12129 /// if any. 
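  /// For example (illustrative, not part of the original comment): when
  /// looking for a modified object, '++x' yields the declaration of 'x', and
  /// the comma expression 'a, b = c' looks through the comma and yields 'b'.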
12130 Object getObject(Expr *E, bool Mod) const { 12131 E = E->IgnoreParenCasts(); 12132 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12133 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 12134 return getObject(UO->getSubExpr(), Mod); 12135 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12136 if (BO->getOpcode() == BO_Comma) 12137 return getObject(BO->getRHS(), Mod); 12138 if (Mod && BO->isAssignmentOp()) 12139 return getObject(BO->getLHS(), Mod); 12140 } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 12141 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 12142 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 12143 return ME->getMemberDecl(); 12144 } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12145 // FIXME: If this is a reference, map through to its value. 12146 return DRE->getDecl(); 12147 return nullptr; 12148 } 12149 12150 /// Note that an object was modified or used by an expression. 12151 void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) { 12152 Usage &U = UI.Uses[UK]; 12153 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) { 12154 if (UK == UK_ModAsSideEffect && ModAsSideEffect) 12155 ModAsSideEffect->push_back(std::make_pair(O, U)); 12156 U.Use = Ref; 12157 U.Seq = Region; 12158 } 12159 } 12160 12161 /// Check whether a modification or use conflicts with a prior usage. 12162 void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind, 12163 bool IsModMod) { 12164 if (UI.Diagnosed) 12165 return; 12166 12167 const Usage &U = UI.Uses[OtherKind]; 12168 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) 12169 return; 12170 12171 Expr *Mod = U.Use; 12172 Expr *ModOrUse = Ref; 12173 if (OtherKind == UK_Use) 12174 std::swap(Mod, ModOrUse); 12175 12176 SemaRef.DiagRuntimeBehavior( 12177 Mod->getExprLoc(), {Mod, ModOrUse}, 12178 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod 12179 : diag::warn_unsequenced_mod_use) 12180 << O << SourceRange(ModOrUse->getExprLoc())); 12181 UI.Diagnosed = true; 12182 } 12183 12184 void notePreUse(Object O, Expr *Use) { 12185 UsageInfo &U = UsageMap[O]; 12186 // Uses conflict with other modifications. 12187 checkUsage(O, U, Use, UK_ModAsValue, false); 12188 } 12189 12190 void notePostUse(Object O, Expr *Use) { 12191 UsageInfo &U = UsageMap[O]; 12192 checkUsage(O, U, Use, UK_ModAsSideEffect, false); 12193 addUsage(U, O, Use, UK_Use); 12194 } 12195 12196 void notePreMod(Object O, Expr *Mod) { 12197 UsageInfo &U = UsageMap[O]; 12198 // Modifications conflict with other modifications and with uses. 12199 checkUsage(O, U, Mod, UK_ModAsValue, true); 12200 checkUsage(O, U, Mod, UK_Use, false); 12201 } 12202 12203 void notePostMod(Object O, Expr *Use, UsageKind UK) { 12204 UsageInfo &U = UsageMap[O]; 12205 checkUsage(O, U, Use, UK_ModAsSideEffect, true); 12206 addUsage(U, O, Use, UK); 12207 } 12208 12209 public: 12210 SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList) 12211 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 12212 Visit(E); 12213 } 12214 12215 void VisitStmt(Stmt *S) { 12216 // Skip all statements which aren't expressions for now. 12217 } 12218 12219 void VisitExpr(Expr *E) { 12220 // By default, just recurse to evaluated subexpressions. 
12221 Base::VisitStmt(E); 12222 } 12223 12224 void VisitCastExpr(CastExpr *E) { 12225 Object O = Object(); 12226 if (E->getCastKind() == CK_LValueToRValue) 12227 O = getObject(E->getSubExpr(), false); 12228 12229 if (O) 12230 notePreUse(O, E); 12231 VisitExpr(E); 12232 if (O) 12233 notePostUse(O, E); 12234 } 12235 12236 void VisitSequencedExpressions(Expr *SequencedBefore, Expr *SequencedAfter) { 12237 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 12238 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 12239 SequenceTree::Seq OldRegion = Region; 12240 12241 { 12242 SequencedSubexpression SeqBefore(*this); 12243 Region = BeforeRegion; 12244 Visit(SequencedBefore); 12245 } 12246 12247 Region = AfterRegion; 12248 Visit(SequencedAfter); 12249 12250 Region = OldRegion; 12251 12252 Tree.merge(BeforeRegion); 12253 Tree.merge(AfterRegion); 12254 } 12255 12256 void VisitArraySubscriptExpr(ArraySubscriptExpr *ASE) { 12257 // C++17 [expr.sub]p1: 12258 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 12259 // expression E1 is sequenced before the expression E2. 12260 if (SemaRef.getLangOpts().CPlusPlus17) 12261 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 12262 else 12263 Base::VisitStmt(ASE); 12264 } 12265 12266 void VisitBinComma(BinaryOperator *BO) { 12267 // C++11 [expr.comma]p1: 12268 // Every value computation and side effect associated with the left 12269 // expression is sequenced before every value computation and side 12270 // effect associated with the right expression. 12271 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12272 } 12273 12274 void VisitBinAssign(BinaryOperator *BO) { 12275 // The modification is sequenced after the value computation of the LHS 12276 // and RHS, so check it before inspecting the operands and update the 12277 // map afterwards. 12278 Object O = getObject(BO->getLHS(), true); 12279 if (!O) 12280 return VisitExpr(BO); 12281 12282 notePreMod(O, BO); 12283 12284 // C++11 [expr.ass]p7: 12285 // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated 12286 // only once. 12287 // 12288 // Therefore, for a compound assignment operator, O is considered used 12289 // everywhere except within the evaluation of E1 itself. 12290 if (isa<CompoundAssignOperator>(BO)) 12291 notePreUse(O, BO); 12292 12293 Visit(BO->getLHS()); 12294 12295 if (isa<CompoundAssignOperator>(BO)) 12296 notePostUse(O, BO); 12297 12298 Visit(BO->getRHS()); 12299 12300 // C++11 [expr.ass]p1: 12301 // the assignment is sequenced [...] before the value computation of the 12302 // assignment expression. 12303 // C11 6.5.16/3 has no such rule. 12304 notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12305 : UK_ModAsSideEffect); 12306 } 12307 12308 void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) { 12309 VisitBinAssign(CAO); 12310 } 12311 12312 void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12313 void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12314 void VisitUnaryPreIncDec(UnaryOperator *UO) { 12315 Object O = getObject(UO->getSubExpr(), true); 12316 if (!O) 12317 return VisitExpr(UO); 12318 12319 notePreMod(O, UO); 12320 Visit(UO->getSubExpr()); 12321 // C++11 [expr.pre.incr]p1: 12322 // the expression ++x is equivalent to x+=1 12323 notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 12324 : UK_ModAsSideEffect); 12325 } 12326 12327 void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12328 void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12329 void VisitUnaryPostIncDec(UnaryOperator *UO) { 12330 Object O = getObject(UO->getSubExpr(), true); 12331 if (!O) 12332 return VisitExpr(UO); 12333 12334 notePreMod(O, UO); 12335 Visit(UO->getSubExpr()); 12336 notePostMod(O, UO, UK_ModAsSideEffect); 12337 } 12338 12339 /// Don't visit the RHS of '&&' or '||' if it might not be evaluated. 12340 void VisitBinLOr(BinaryOperator *BO) { 12341 // The side-effects of the LHS of an '&&' are sequenced before the 12342 // value computation of the RHS, and hence before the value computation 12343 // of the '&&' itself, unless the LHS evaluates to zero. We treat them 12344 // as if they were unconditionally sequenced. 12345 EvaluationTracker Eval(*this); 12346 { 12347 SequencedSubexpression Sequenced(*this); 12348 Visit(BO->getLHS()); 12349 } 12350 12351 bool Result; 12352 if (Eval.evaluate(BO->getLHS(), Result)) { 12353 if (!Result) 12354 Visit(BO->getRHS()); 12355 } else { 12356 // Check for unsequenced operations in the RHS, treating it as an 12357 // entirely separate evaluation. 12358 // 12359 // FIXME: If there are operations in the RHS which are unsequenced 12360 // with respect to operations outside the RHS, and those operations 12361 // are unconditionally evaluated, diagnose them. 12362 WorkList.push_back(BO->getRHS()); 12363 } 12364 } 12365 void VisitBinLAnd(BinaryOperator *BO) { 12366 EvaluationTracker Eval(*this); 12367 { 12368 SequencedSubexpression Sequenced(*this); 12369 Visit(BO->getLHS()); 12370 } 12371 12372 bool Result; 12373 if (Eval.evaluate(BO->getLHS(), Result)) { 12374 if (Result) 12375 Visit(BO->getRHS()); 12376 } else { 12377 WorkList.push_back(BO->getRHS()); 12378 } 12379 } 12380 12381 // Only visit the condition, unless we can be sure which subexpression will 12382 // be chosen. 12383 void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) { 12384 EvaluationTracker Eval(*this); 12385 { 12386 SequencedSubexpression Sequenced(*this); 12387 Visit(CO->getCond()); 12388 } 12389 12390 bool Result; 12391 if (Eval.evaluate(CO->getCond(), Result)) 12392 Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr()); 12393 else { 12394 WorkList.push_back(CO->getTrueExpr()); 12395 WorkList.push_back(CO->getFalseExpr()); 12396 } 12397 } 12398 12399 void VisitCallExpr(CallExpr *CE) { 12400 // C++11 [intro.execution]p15: 12401 // When calling a function [...], every value computation and side effect 12402 // associated with any argument expression, or with the postfix expression 12403 // designating the called function, is sequenced before execution of every 12404 // expression or statement in the body of the function [and thus before 12405 // the value computation of its result]. 12406 SequencedSubexpression Sequenced(*this); 12407 Base::VisitCallExpr(CE); 12408 12409 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 12410 } 12411 12412 void VisitCXXConstructExpr(CXXConstructExpr *CCE) { 12413 // This is a call, so all subexpressions are sequenced before the result. 12414 SequencedSubexpression Sequenced(*this); 12415 12416 if (!CCE->isListInitialization()) 12417 return VisitExpr(CCE); 12418 12419 // In C++11, list initializations are sequenced. 
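      // Illustrative example (assumed, not from the original comment): given
      // a type 'S' with an (int, int) constructor, the list-initialization
      //
      //   S s{i++, i++};   // no -Wunsequenced warning
      //
      // is not diagnosed: each initializer gets its own region below, and the
      // regions are only merged back into the parent afterwards.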
12420 SmallVector<SequenceTree::Seq, 32> Elts; 12421 SequenceTree::Seq Parent = Region; 12422 for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(), 12423 E = CCE->arg_end(); 12424 I != E; ++I) { 12425 Region = Tree.allocate(Parent); 12426 Elts.push_back(Region); 12427 Visit(*I); 12428 } 12429 12430 // Forget that the initializers are sequenced. 12431 Region = Parent; 12432 for (unsigned I = 0; I < Elts.size(); ++I) 12433 Tree.merge(Elts[I]); 12434 } 12435 12436 void VisitInitListExpr(InitListExpr *ILE) { 12437 if (!SemaRef.getLangOpts().CPlusPlus11) 12438 return VisitExpr(ILE); 12439 12440 // In C++11, list initializations are sequenced. 12441 SmallVector<SequenceTree::Seq, 32> Elts; 12442 SequenceTree::Seq Parent = Region; 12443 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 12444 Expr *E = ILE->getInit(I); 12445 if (!E) continue; 12446 Region = Tree.allocate(Parent); 12447 Elts.push_back(Region); 12448 Visit(E); 12449 } 12450 12451 // Forget that the initializers are sequenced. 12452 Region = Parent; 12453 for (unsigned I = 0; I < Elts.size(); ++I) 12454 Tree.merge(Elts[I]); 12455 } 12456 }; 12457 12458 } // namespace 12459 12460 void Sema::CheckUnsequencedOperations(Expr *E) { 12461 SmallVector<Expr *, 8> WorkList; 12462 WorkList.push_back(E); 12463 while (!WorkList.empty()) { 12464 Expr *Item = WorkList.pop_back_val(); 12465 SequenceChecker(*this, Item, WorkList); 12466 } 12467 } 12468 12469 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 12470 bool IsConstexpr) { 12471 llvm::SaveAndRestore<bool> ConstantContext( 12472 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 12473 CheckImplicitConversions(E, CheckLoc); 12474 if (!E->isInstantiationDependent()) 12475 CheckUnsequencedOperations(E); 12476 if (!IsConstexpr && !E->isValueDependent()) 12477 CheckForIntOverflow(E); 12478 DiagnoseMisalignedMembers(); 12479 } 12480 12481 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 12482 FieldDecl *BitField, 12483 Expr *Init) { 12484 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 12485 } 12486 12487 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 12488 SourceLocation Loc) { 12489 if (!PType->isVariablyModifiedType()) 12490 return; 12491 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 12492 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 12493 return; 12494 } 12495 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 12496 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 12497 return; 12498 } 12499 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 12500 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 12501 return; 12502 } 12503 12504 const ArrayType *AT = S.Context.getAsArrayType(PType); 12505 if (!AT) 12506 return; 12507 12508 if (AT->getSizeModifier() != ArrayType::Star) { 12509 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 12510 return; 12511 } 12512 12513 S.Diag(Loc, diag::err_array_star_in_function_definition); 12514 } 12515 12516 /// CheckParmsForFunctionDef - Check that the parameters of the given 12517 /// function are appropriate for the definition of a function. This 12518 /// takes care of any checks that cannot be performed on the 12519 /// declaration itself, e.g., that the types of each of the function 12520 /// parameters are complete. 
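/// Illustrative example (assumed, not from the original comment):
///
///   struct Incomplete;
///   void f(struct Incomplete p);      // fine as a mere declaration
///   void f(struct Incomplete p) { }   // error: parameter has incomplete type
///
/// Completeness of the parameter type is only required here, at the
/// definition.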
12521 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 12522 bool CheckParameterNames) { 12523 bool HasInvalidParm = false; 12524 for (ParmVarDecl *Param : Parameters) { 12525 // C99 6.7.5.3p4: the parameters in a parameter type list in a 12526 // function declarator that is part of a function definition of 12527 // that function shall not have incomplete type. 12528 // 12529 // This is also C++ [dcl.fct]p6. 12530 if (!Param->isInvalidDecl() && 12531 RequireCompleteType(Param->getLocation(), Param->getType(), 12532 diag::err_typecheck_decl_incomplete_type)) { 12533 Param->setInvalidDecl(); 12534 HasInvalidParm = true; 12535 } 12536 12537 // C99 6.9.1p5: If the declarator includes a parameter type list, the 12538 // declaration of each parameter shall include an identifier. 12539 if (CheckParameterNames && 12540 Param->getIdentifier() == nullptr && 12541 !Param->isImplicit() && 12542 !getLangOpts().CPlusPlus) 12543 Diag(Param->getLocation(), diag::err_parameter_name_omitted); 12544 12545 // C99 6.7.5.3p12: 12546 // If the function declarator is not part of a definition of that 12547 // function, parameters may have incomplete type and may use the [*] 12548 // notation in their sequences of declarator specifiers to specify 12549 // variable length array types. 12550 QualType PType = Param->getOriginalType(); 12551 // FIXME: This diagnostic should point the '[*]' if source-location 12552 // information is added for it. 12553 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 12554 12555 // If the parameter is a c++ class type and it has to be destructed in the 12556 // callee function, declare the destructor so that it can be called by the 12557 // callee function. Do not perform any direct access check on the dtor here. 12558 if (!Param->isInvalidDecl()) { 12559 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 12560 if (!ClassDecl->isInvalidDecl() && 12561 !ClassDecl->hasIrrelevantDestructor() && 12562 !ClassDecl->isDependentContext() && 12563 ClassDecl->isParamDestroyedInCallee()) { 12564 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 12565 MarkFunctionReferenced(Param->getLocation(), Destructor); 12566 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 12567 } 12568 } 12569 } 12570 12571 // Parameters with the pass_object_size attribute only need to be marked 12572 // constant at function definitions. Because we lack information about 12573 // whether we're on a declaration or definition when we're instantiating the 12574 // attribute, we need to check for constness here. 12575 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 12576 if (!Param->getType().isConstQualified()) 12577 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 12578 << Attr->getSpelling() << 1; 12579 12580 // Check for parameter names shadowing fields from the class. 12581 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 12582 // The owning context for the parameter should be the function, but we 12583 // want to see if this function's declaration context is a record. 12584 DeclContext *DC = Param->getDeclContext(); 12585 if (DC && DC->isFunctionOrMethod()) { 12586 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 12587 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 12588 RD, /*DeclIsField*/ false); 12589 } 12590 } 12591 } 12592 12593 return HasInvalidParm; 12594 } 12595 12596 /// A helper function to get the alignment of a Decl referred to by DeclRefExpr 12597 /// or MemberExpr. 
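/// For example (illustrative, not part of the original comment): given
///
///   alignas(4) char Buf[8];
///   int *P = (int *)Buf;   // no -Wcast-align warning
///
/// the declared alignment of 'Buf' (4 bytes) rather than the alignment of
/// 'char' (1 byte) is used as the source alignment in CheckCastAlign below.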
12598 static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign, 12599 ASTContext &Context) { 12600 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) 12601 return Context.getDeclAlign(DRE->getDecl()); 12602 12603 if (const auto *ME = dyn_cast<MemberExpr>(E)) 12604 return Context.getDeclAlign(ME->getMemberDecl()); 12605 12606 return TypeAlign; 12607 } 12608 12609 /// CheckCastAlign - Implements -Wcast-align, which warns when a 12610 /// pointer cast increases the alignment requirements. 12611 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 12612 // This is actually a lot of work to potentially be doing on every 12613 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 12614 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 12615 return; 12616 12617 // Ignore dependent types. 12618 if (T->isDependentType() || Op->getType()->isDependentType()) 12619 return; 12620 12621 // Require that the destination be a pointer type. 12622 const PointerType *DestPtr = T->getAs<PointerType>(); 12623 if (!DestPtr) return; 12624 12625 // If the destination has alignment 1, we're done. 12626 QualType DestPointee = DestPtr->getPointeeType(); 12627 if (DestPointee->isIncompleteType()) return; 12628 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 12629 if (DestAlign.isOne()) return; 12630 12631 // Require that the source be a pointer type. 12632 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 12633 if (!SrcPtr) return; 12634 QualType SrcPointee = SrcPtr->getPointeeType(); 12635 12636 // Whitelist casts from cv void*. We already implicitly 12637 // whitelisted casts to cv void*, since they have alignment 1. 12638 // Also whitelist casts involving incomplete types, which implicitly 12639 // includes 'void'. 12640 if (SrcPointee->isIncompleteType()) return; 12641 12642 CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee); 12643 12644 if (auto *CE = dyn_cast<CastExpr>(Op)) { 12645 if (CE->getCastKind() == CK_ArrayToPointerDecay) 12646 SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context); 12647 } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) { 12648 if (UO->getOpcode() == UO_AddrOf) 12649 SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context); 12650 } 12651 12652 if (SrcAlign >= DestAlign) return; 12653 12654 Diag(TRange.getBegin(), diag::warn_cast_align) 12655 << Op->getType() << T 12656 << static_cast<unsigned>(SrcAlign.getQuantity()) 12657 << static_cast<unsigned>(DestAlign.getQuantity()) 12658 << TRange << Op->getSourceRange(); 12659 } 12660 12661 /// Check whether this array fits the idiom of a size-one tail padded 12662 /// array member of a struct. 12663 /// 12664 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 12665 /// commonly used to emulate flexible arrays in C89 code. 12666 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 12667 const NamedDecl *ND) { 12668 if (Size != 1 || !ND) return false; 12669 12670 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 12671 if (!FD) return false; 12672 12673 // Don't consider sizes resulting from macro expansions or template argument 12674 // substitution to form C89 tail-padded arrays. 12675 12676 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 12677 while (TInfo) { 12678 TypeLoc TL = TInfo->getTypeLoc(); 12679 // Look through typedefs. 
12680 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 12681 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 12682 TInfo = TDL->getTypeSourceInfo(); 12683 continue; 12684 } 12685 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 12686 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 12687 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 12688 return false; 12689 } 12690 break; 12691 } 12692 12693 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 12694 if (!RD) return false; 12695 if (RD->isUnion()) return false; 12696 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 12697 if (!CRD->isStandardLayout()) return false; 12698 } 12699 12700 // See if this is the last field decl in the record. 12701 const Decl *D = FD; 12702 while ((D = D->getNextDeclInContext())) 12703 if (isa<FieldDecl>(D)) 12704 return false; 12705 return true; 12706 } 12707 12708 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 12709 const ArraySubscriptExpr *ASE, 12710 bool AllowOnePastEnd, bool IndexNegated) { 12711 // Already diagnosed by the constant evaluator. 12712 if (isConstantEvaluated()) 12713 return; 12714 12715 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 12716 if (IndexExpr->isValueDependent()) 12717 return; 12718 12719 const Type *EffectiveType = 12720 BaseExpr->getType()->getPointeeOrArrayElementType(); 12721 BaseExpr = BaseExpr->IgnoreParenCasts(); 12722 const ConstantArrayType *ArrayTy = 12723 Context.getAsConstantArrayType(BaseExpr->getType()); 12724 12725 if (!ArrayTy) 12726 return; 12727 12728 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 12729 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 12730 return; 12731 12732 Expr::EvalResult Result; 12733 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 12734 return; 12735 12736 llvm::APSInt index = Result.Val.getInt(); 12737 if (IndexNegated) 12738 index = -index; 12739 12740 const NamedDecl *ND = nullptr; 12741 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 12742 ND = DRE->getDecl(); 12743 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 12744 ND = ME->getMemberDecl(); 12745 12746 if (index.isUnsigned() || !index.isNegative()) { 12747 // It is possible that the type of the base expression after 12748 // IgnoreParenCasts is incomplete, even though the type of the base 12749 // expression before IgnoreParenCasts is complete (see PR39746 for an 12750 // example). In this case we have no information about whether the array 12751 // access exceeds the array bounds. However we can still diagnose an array 12752 // access which precedes the array bounds. 
12753 if (BaseType->isIncompleteType()) 12754 return; 12755 12756 llvm::APInt size = ArrayTy->getSize(); 12757 if (!size.isStrictlyPositive()) 12758 return; 12759 12760 if (BaseType != EffectiveType) { 12761 // Make sure we're comparing apples to apples when comparing index to size 12762 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 12763 uint64_t array_typesize = Context.getTypeSize(BaseType); 12764 // Handle ptrarith_typesize being zero, such as when casting to void* 12765 if (!ptrarith_typesize) ptrarith_typesize = 1; 12766 if (ptrarith_typesize != array_typesize) { 12767 // There's a cast to a different size type involved 12768 uint64_t ratio = array_typesize / ptrarith_typesize; 12769 // TODO: Be smarter about handling cases where array_typesize is not a 12770 // multiple of ptrarith_typesize 12771 if (ptrarith_typesize * ratio == array_typesize) 12772 size *= llvm::APInt(size.getBitWidth(), ratio); 12773 } 12774 } 12775 12776 if (size.getBitWidth() > index.getBitWidth()) 12777 index = index.zext(size.getBitWidth()); 12778 else if (size.getBitWidth() < index.getBitWidth()) 12779 size = size.zext(index.getBitWidth()); 12780 12781 // For array subscripting the index must be less than size, but for pointer 12782 // arithmetic also allow the index (offset) to be equal to size since 12783 // computing the next address after the end of the array is legal and 12784 // commonly done e.g. in C++ iterators and range-based for loops. 12785 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 12786 return; 12787 12788 // Also don't warn for arrays of size 1 which are members of some 12789 // structure. These are often used to approximate flexible arrays in C89 12790 // code. 12791 if (IsTailPaddedMemberArray(*this, size, ND)) 12792 return; 12793 12794 // Suppress the warning if the subscript expression (as identified by the 12795 // ']' location) and the index expression are both from macro expansions 12796 // within a system header. 12797 if (ASE) { 12798 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 12799 ASE->getRBracketLoc()); 12800 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 12801 SourceLocation IndexLoc = 12802 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 12803 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 12804 return; 12805 } 12806 } 12807 12808 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 12809 if (ASE) 12810 DiagID = diag::warn_array_index_exceeds_bounds; 12811 12812 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 12813 PDiag(DiagID) << index.toString(10, true) 12814 << size.toString(10, true) 12815 << (unsigned)size.getLimitedValue(~0U) 12816 << IndexExpr->getSourceRange()); 12817 } else { 12818 unsigned DiagID = diag::warn_array_index_precedes_bounds; 12819 if (!ASE) { 12820 DiagID = diag::warn_ptr_arith_precedes_bounds; 12821 if (index.isNegative()) index = -index; 12822 } 12823 12824 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 12825 PDiag(DiagID) << index.toString(10, true) 12826 << IndexExpr->getSourceRange()); 12827 } 12828 12829 if (!ND) { 12830 // Try harder to find a NamedDecl to point at in the note. 
12831 while (const ArraySubscriptExpr *ASE = 12832 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 12833 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 12834 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 12835 ND = DRE->getDecl(); 12836 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 12837 ND = ME->getMemberDecl(); 12838 } 12839 12840 if (ND) 12841 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 12842 PDiag(diag::note_array_index_out_of_bounds) 12843 << ND->getDeclName()); 12844 } 12845 12846 void Sema::CheckArrayAccess(const Expr *expr) { 12847 int AllowOnePastEnd = 0; 12848 while (expr) { 12849 expr = expr->IgnoreParenImpCasts(); 12850 switch (expr->getStmtClass()) { 12851 case Stmt::ArraySubscriptExprClass: { 12852 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 12853 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 12854 AllowOnePastEnd > 0); 12855 expr = ASE->getBase(); 12856 break; 12857 } 12858 case Stmt::MemberExprClass: { 12859 expr = cast<MemberExpr>(expr)->getBase(); 12860 break; 12861 } 12862 case Stmt::OMPArraySectionExprClass: { 12863 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 12864 if (ASE->getLowerBound()) 12865 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 12866 /*ASE=*/nullptr, AllowOnePastEnd > 0); 12867 return; 12868 } 12869 case Stmt::UnaryOperatorClass: { 12870 // Only unwrap the * and & unary operators 12871 const UnaryOperator *UO = cast<UnaryOperator>(expr); 12872 expr = UO->getSubExpr(); 12873 switch (UO->getOpcode()) { 12874 case UO_AddrOf: 12875 AllowOnePastEnd++; 12876 break; 12877 case UO_Deref: 12878 AllowOnePastEnd--; 12879 break; 12880 default: 12881 return; 12882 } 12883 break; 12884 } 12885 case Stmt::ConditionalOperatorClass: { 12886 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 12887 if (const Expr *lhs = cond->getLHS()) 12888 CheckArrayAccess(lhs); 12889 if (const Expr *rhs = cond->getRHS()) 12890 CheckArrayAccess(rhs); 12891 return; 12892 } 12893 case Stmt::CXXOperatorCallExprClass: { 12894 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 12895 for (const auto *Arg : OCE->arguments()) 12896 CheckArrayAccess(Arg); 12897 return; 12898 } 12899 default: 12900 return; 12901 } 12902 } 12903 } 12904 12905 //===--- CHECK: Objective-C retain cycles ----------------------------------// 12906 12907 namespace { 12908 12909 struct RetainCycleOwner { 12910 VarDecl *Variable = nullptr; 12911 SourceRange Range; 12912 SourceLocation Loc; 12913 bool Indirect = false; 12914 12915 RetainCycleOwner() = default; 12916 12917 void setLocsFrom(Expr *e) { 12918 Loc = e->getExprLoc(); 12919 Range = e->getSourceRange(); 12920 } 12921 }; 12922 12923 } // namespace 12924 12925 /// Consider whether capturing the given variable can possibly lead to 12926 /// a retain cycle. 12927 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 12928 // In ARC, it's captured strongly iff the variable has __strong 12929 // lifetime. In MRR, it's captured strongly if the variable is 12930 // __block and has an appropriate type. 
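  // For instance (illustrative, not part of the original comment): under ARC
  // a local 'id obj' has __strong lifetime by default, so a block that uses
  // 'obj' captures it strongly, whereas a '__weak id obj' is skipped by the
  // check below.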
  if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
    return false;

  owner.Variable = var;
  if (ref)
    owner.setLocsFrom(ref);
  return true;
}

static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      ObjCIvarDecl *ivar = ref->getDecl();
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                            ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
        return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
                              ->getSourceExpr());
      continue;
    }

    // Array ivars?
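    // Anything else (array ivars, arbitrary message sends, ...) is not
    // modeled here, so conservatively report that no owner was found.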

    return false;
  }
}

namespace {

struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
  ASTContext &Context;
  VarDecl *Variable;
  Expr *Capturer = nullptr;
  bool VarWillBeReleased = false;

  FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
      : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
        Context(Context), Variable(variable) {}

  void VisitDeclRefExpr(DeclRefExpr *ref) {
    if (ref->getDecl() == Variable && !Capturer)
      Capturer = ref;
  }

  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
    if (Capturer) return;
    Visit(ref->getBase());
    if (Capturer && ref->isFreeIvar())
      Capturer = ref;
  }

  void VisitBlockExpr(BlockExpr *block) {
    // Look inside nested blocks
    if (block->getBlockDecl()->capturesVariable(Variable))
      Visit(block->getBlockDecl()->getBody());
  }

  void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
    if (Capturer) return;
    if (OVE->getSourceExpr())
      Visit(OVE->getSourceExpr());
  }

  void VisitBinaryOperator(BinaryOperator *BinOp) {
    if (!Variable || VarWillBeReleased || BinOp->getOpcode() != BO_Assign)
      return;
    Expr *LHS = BinOp->getLHS();
    if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
      if (DRE->getDecl() != Variable)
        return;
      if (Expr *RHS = BinOp->getRHS()) {
        RHS = RHS->IgnoreParenCasts();
        llvm::APSInt Value;
        VarWillBeReleased =
            (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0);
      }
    }
  }
};

} // namespace

/// Check whether the given argument is a block which captures a
/// variable.
static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  e = e->IgnoreParenCasts();

  // Look through [^{...} copy] and Block_copy(^{...}).
  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
    Selector Cmd = ME->getSelector();
    if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
      e = ME->getInstanceReceiver();
      if (!e)
        return nullptr;
      e = e->IgnoreParenCasts();
    }
  } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
    if (CE->getNumArgs() == 1) {
      FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
      if (Fn) {
        const IdentifierInfo *FnI = Fn->getIdentifier();
        if (FnI && FnI->isStr("_Block_copy")) {
          e = CE->getArg(0)->IgnoreParenCasts();
        }
      }
    }
  }

  BlockExpr *block = dyn_cast<BlockExpr>(e);
  if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
    return nullptr;

  FindCaptureVisitor visitor(S.Context, owner.Variable);
  visitor.Visit(block->getBlockDecl()->getBody());
  return visitor.VarWillBeReleased ? nullptr : visitor.Capturer;
}

static void diagnoseRetainCycle(Sema &S, Expr *capturer,
                                RetainCycleOwner &owner) {
  assert(capturer);
  assert(owner.Variable && owner.Loc.isValid());

  S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
    << owner.Variable << capturer->getSourceRange();
  S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
    << owner.Indirect << owner.Range;
}

/// Check for a keyword selector that starts with the word 'add' or
/// 'set'.
static bool isSetterLikeSelector(Selector sel) {
  if (sel.isUnarySelector()) return false;

  StringRef str = sel.getNameForSlot(0);
  while (!str.empty() && str.front() == '_') str = str.substr(1);
  if (str.startswith("set"))
    str = str.substr(3);
  else if (str.startswith("add")) {
    // Specially whitelist 'addOperationWithBlock:'.
    if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
      return false;
    str = str.substr(3);
  } else
    return false;

  if (str.empty()) return true;
  return !isLowercase(str.front());
}

static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
                                                    ObjCMessageExpr *Message) {
  bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
      Message->getReceiverInterface(), NSAPI::ClassId_NSMutableArray);
  if (!IsMutableArray) {
    return None;
  }

  Selector Sel = Message->getSelector();

  Optional<NSAPI::NSArrayMethodKind> MKOpt =
      S.NSAPIObj->getNSArrayMethodKind(Sel);
  if (!MKOpt) {
    return None;
  }

  NSAPI::NSArrayMethodKind MK = *MKOpt;

  switch (MK) {
  case NSAPI::NSMutableArr_addObject:
  case NSAPI::NSMutableArr_insertObjectAtIndex:
  case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
    return 0;
  case NSAPI::NSMutableArr_replaceObjectAtIndex:
    return 1;

  default:
    return None;
  }

  return None;
}

static Optional<int>
GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
      Message->getReceiverInterface(), NSAPI::ClassId_NSMutableDictionary);
  if (!IsMutableDictionary) {
    return None;
  }

  Selector Sel = Message->getSelector();

  Optional<NSAPI::NSDictionaryMethodKind> MKOpt =
      S.NSAPIObj->getNSDictionaryMethodKind(Sel);
  if (!MKOpt) {
    return None;
  }

  NSAPI::NSDictionaryMethodKind MK = *MKOpt;

  switch (MK) {
  case NSAPI::NSMutableDict_setObjectForKey:
  case NSAPI::NSMutableDict_setValueForKey:
  case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
    return 0;

  default:
    return None;
  }

  return None;
}

static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
      Message->getReceiverInterface(), NSAPI::ClassId_NSMutableSet);

  bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
      Message->getReceiverInterface(), NSAPI::ClassId_NSMutableOrderedSet);
  if (!IsMutableSet && !IsMutableOrderedSet) {
    return None;
  }

  Selector Sel = Message->getSelector();

  Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel);
  if (!MKOpt) {
    return None;
  }

  NSAPI::NSSetMethodKind MK = *MKOpt;

  switch (MK) {
  case NSAPI::NSMutableSet_addObject:
  case NSAPI::NSOrderedSet_setObjectAtIndex:
  case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
  case NSAPI::NSOrderedSet_insertObjectAtIndex:
    return 0;
  case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
    return 1;
  }

  return None;
}

void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  if (!Message->isInstanceMessage()) {
    return;
  }

  Optional<int> ArgOpt;

  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}

/// Check a message send to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
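  // The typical pattern being diagnosed is e.g.
  //   [self setHandler:^{ [self doSomething]; }];
  // where the receiver strongly retains the block and the block strongly
  // captures the receiver's owner.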
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
      return;
  } else {
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    owner.Variable = getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(*this, capturer, owner);
    }
  }
}

/// Check a property assign to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
  RetainCycleOwner owner;
  if (!findRetainCycleOwner(*this, receiver, owner))
    return;

  if (Expr *capturer = findCapturingExpr(*this, argument, owner))
    diagnoseRetainCycle(*this, capturer, owner);
}

void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
  RetainCycleOwner Owner;
  if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
    return;

  // Because we don't have an expression for the variable, we have to set the
  // location explicitly here.
  Owner.Loc = Var->getLocation();
  Owner.Range = Var->getSourceRange();

  if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
    diagnoseRetainCycle(*this, Capturer, Owner);
}

static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
                                     Expr *RHS, bool isProperty) {
  // Check if RHS is an Objective-C object literal, which also can get
  // immediately zapped in a weak reference. Note that we explicitly
  // allow ObjCStringLiterals, since those are designed to never really die.
  RHS = RHS->IgnoreParenImpCasts();

  // This enum needs to match with the 'select' in
  // warn_objc_arc_literal_assign (off-by-1).
  Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
  if (Kind == Sema::LK_String || Kind == Sema::LK_None)
    return false;

  S.Diag(Loc, diag::warn_arc_literal_assign)
    << (unsigned) Kind
    << (isProperty ? 0 : 1)
    << RHS->getSourceRange();

  return true;
}

static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
                                    Qualifiers::ObjCLifetime LT,
                                    Expr *RHS, bool isProperty) {
  // Strip off any implicit casts added to get to the ARC-specific cast.
  while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
    if (cast->getCastKind() == CK_ARCConsumeObject) {
      S.Diag(Loc, diag::warn_arc_retained_assign)
          << (LT == Qualifiers::OCL_ExplicitNone)
          << (isProperty ? 0 : 1)
          << RHS->getSourceRange();
      return true;
    }
    RHS = cast->getSubExpr();
  }

  if (LT == Qualifiers::OCL_Weak &&
      checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
    return true;

  return false;
}

bool Sema::checkUnsafeAssigns(SourceLocation Loc,
                              QualType LHS, Expr *RHS) {
  Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();

  if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
    return false;

  if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false))
    return true;

  return false;
}

void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
                                  Expr *LHS, Expr *RHS) {
  QualType LHSType;
  // A PropertyRef on the LHS has a pseudo-object type, so its type needs to
  // be obtained directly from the property declaration.
  ObjCPropertyRefExpr *PRE
    = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
  if (PRE && !PRE->isImplicitProperty()) {
    const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
    if (PD)
      LHSType = PD->getType();
  }

  if (LHSType.isNull())
    LHSType = LHS->getType();

  Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();

  if (LT == Qualifiers::OCL_Weak) {
    if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
      getCurFunction()->markSafeWeakUse(LHS);
  }

  if (checkUnsafeAssigns(Loc, LHSType, RHS))
    return;

  // FIXME: Check for other lifetimes.
  if (LT != Qualifiers::OCL_None)
    return;

  if (PRE) {
    if (PRE->isImplicitProperty())
      return;
    const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
    if (!PD)
      return;

    unsigned Attributes = PD->getPropertyAttributes();
    if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) {
      // When the 'assign' attribute was not explicitly written by the user,
      // ignore it and rely on the property type itself for lifetime info.
      unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
      if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) &&
          LHSType->isObjCRetainableType())
        return;

      while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
        if (cast->getCastKind() == CK_ARCConsumeObject) {
          Diag(Loc, diag::warn_arc_retained_property_assign)
              << RHS->getSourceRange();
          return;
        }
        RHS = cast->getSubExpr();
      }
    } else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) {
      if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
        return;
    }
  }
}

//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//

static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
                                        SourceLocation StmtLoc,
                                        const NullStmt *Body) {
  // Do not warn if the body is a macro that expands to nothing, e.g.:
  //
  //   #define CALL(x)
  //   if (condition)
  //     CALL(0);
  if (Body->hasLeadingEmptyMacro())
    return false;

  // Get line numbers of statement and body.
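  // The statement uses its presumed line (respecting #line), while the body's
  // semicolon is resolved to its spelling line; if either lookup fails, do
  // not warn.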
  bool StmtLineInvalid;
  unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc,
                                                      &StmtLineInvalid);
  if (StmtLineInvalid)
    return false;

  bool BodyLineInvalid;
  unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
                                                      &BodyLineInvalid);
  if (BodyLineInvalid)
    return false;

  // Warn if null statement and body are on the same line.
  if (StmtLine != BodyLine)
    return false;

  return true;
}

void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
                                 const Stmt *Body,
                                 unsigned DiagID) {
  // Since this is a syntactic check, don't emit diagnostics for template
  // instantiations; they would just add noise.
  if (CurrentInstantiationScope)
    return;

  // The body should be a null statement.
  const NullStmt *NBody = dyn_cast<NullStmt>(Body);
  if (!NBody)
    return;

  // Do the usual checks.
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  Diag(NBody->getSemiLoc(), DiagID);
  Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
}

void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
                                 const Stmt *PossibleBody) {
  assert(!CurrentInstantiationScope); // Ensured by caller

  SourceLocation StmtLoc;
  const Stmt *Body;
  unsigned DiagID;
  if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
    StmtLoc = FS->getRParenLoc();
    Body = FS->getBody();
    DiagID = diag::warn_empty_for_body;
  } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
    StmtLoc = WS->getCond()->getSourceRange().getEnd();
    Body = WS->getBody();
    DiagID = diag::warn_empty_while_body;
  } else
    return; // Neither `for' nor `while'.

  // The body should be a null statement.
  const NullStmt *NBody = dyn_cast<NullStmt>(Body);
  if (!NBody)
    return;

  // Skip expensive checks if diagnostic is disabled.
  if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
    return;

  // Do the usual checks.
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  // `for(...);' and `while(...);' are popular idioms, so in order to keep
  // noise level low, emit diagnostics only if for/while is followed by a
  // CompoundStmt, e.g.:
  //   for (int i = 0; i < n; i++);
  //   {
  //     a(i);
  //   }
  // or if for/while is followed by a statement with more indentation
  // than for/while itself:
  //   for (int i = 0; i < n; i++);
  //     a(i);
  bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
  if (!ProbableTypo) {
    bool BodyColInvalid;
    unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
        PossibleBody->getBeginLoc(), &BodyColInvalid);
    if (BodyColInvalid)
      return;

    bool StmtColInvalid;
    unsigned StmtCol =
        SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
    if (StmtColInvalid)
      return;

    if (BodyCol > StmtCol)
      ProbableTypo = true;
  }

  if (ProbableTypo) {
    Diag(NBody->getSemiLoc(), DiagID);
    Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
  }
}

//===--- CHECK: Warn on self move with std::move. -------------------------===//

/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
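/// For example, `x = std::move(x);` or `s.field = std::move(s.field);` is
/// diagnosed, while moves between distinct objects are not.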
void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                            SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
    return;

  if (inTemplateInstantiation())
    return;

  // Strip parens and casts away.
  LHSExpr = LHSExpr->IgnoreParenImpCasts();
  RHSExpr = RHSExpr->IgnoreParenImpCasts();

  // Check for a call expression.
  const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
  if (!CE || CE->getNumArgs() != 1)
    return;

  // Check for a call to std::move.
  if (!CE->isCallToStdMove())
    return;

  // Get the argument from std::move.
  RHSExpr = CE->getArg(0);

  const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
  const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);

  // Two DeclRefExprs: check that the decls are the same.
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  // Member variables require a different approach to check for self moves.
  // Two MemberExprs are the same if every nested MemberExpr refers to the
  // same Decl and the base Exprs are either DeclRefExprs with the same Decl
  // or CXXThisExprs.
  const Expr *LHSBase = LHSExpr;
  const Expr *RHSBase = RHSExpr;
  const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
  const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
  if (!LHSME || !RHSME)
    return;

  while (LHSME && RHSME) {
    if (LHSME->getMemberDecl()->getCanonicalDecl() !=
        RHSME->getMemberDecl()->getCanonicalDecl())
      return;

    LHSBase = LHSME->getBase();
    RHSBase = RHSME->getBase();
    LHSME = dyn_cast<MemberExpr>(LHSBase);
    RHSME = dyn_cast<MemberExpr>(RHSBase);
  }

  LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
  RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
}

//===--- Layout compatibility ----------------------------------------------//

static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);

/// Check if two enumeration types are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
  // C++11 [dcl.enum] p8:
  //   Two enumeration types are layout-compatible if they have the same
  //   underlying type.
  return ED1->isComplete() && ED2->isComplete() &&
         C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
}

/// Check if two fields are layout-compatible.
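/// The field types are compared recursively; bit-fields additionally must
/// both be bit-fields of the same width.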
static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
                               FieldDecl *Field2) {
  if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
    return false;

  if (Field1->isBitField() != Field2->isBitField())
    return false;

  if (Field1->isBitField()) {
    // Make sure that the bit-fields are the same length.
    unsigned Bits1 = Field1->getBitWidthValue(C);
    unsigned Bits2 = Field2->getBitWidthValue(C);

    if (Bits1 != Bits2)
      return false;
  }

  return true;
}

/// Check if two standard-layout structs are layout-compatible.
/// (C++11 [class.mem] p17)
static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
                                     RecordDecl *RD2) {
  // If both records are C++ classes, check that base classes match.
  if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
    // If one of the records is a CXXRecordDecl we are in C++ mode,
    // thus the other one is a CXXRecordDecl, too.
    const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
    // Check number of base classes.
    if (D1CXX->getNumBases() != D2CXX->getNumBases())
      return false;

    // Check the base classes.
    for (CXXRecordDecl::base_class_const_iterator
             Base1 = D1CXX->bases_begin(),
             BaseEnd1 = D1CXX->bases_end(),
             Base2 = D2CXX->bases_begin();
         Base1 != BaseEnd1; ++Base1, ++Base2) {
      if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
        return false;
    }
  } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
    // If only RD2 is a C++ class, it should have zero base classes.
    if (D2CXX->getNumBases() > 0)
      return false;
  }

  // Check the fields.
  RecordDecl::field_iterator Field2 = RD2->field_begin(),
                             Field2End = RD2->field_end(),
                             Field1 = RD1->field_begin(),
                             Field1End = RD1->field_end();
  for (; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
    if (!isLayoutCompatible(C, *Field1, *Field2))
      return false;
  }
  if (Field1 != Field1End || Field2 != Field2End)
    return false;

  return true;
}

/// Check if two standard-layout unions are layout-compatible.
/// (C++11 [class.mem] p18)
static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
                                    RecordDecl *RD2) {
  llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
  for (auto *Field2 : RD2->fields())
    UnmatchedFields.insert(Field2);

  for (auto *Field1 : RD1->fields()) {
    llvm::SmallPtrSet<FieldDecl *, 8>::iterator
        I = UnmatchedFields.begin(),
        E = UnmatchedFields.end();

    for (; I != E; ++I) {
      if (isLayoutCompatible(C, Field1, *I)) {
        bool Result = UnmatchedFields.erase(*I);
        (void) Result;
        assert(Result);
        break;
      }
    }
    if (I == E)
      return false;
  }

  return UnmatchedFields.empty();
}

static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
                               RecordDecl *RD2) {
  if (RD1->isUnion() != RD2->isUnion())
    return false;

  if (RD1->isUnion())
    return isLayoutCompatibleUnion(C, RD1, RD2);
  else
    return isLayoutCompatibleStruct(C, RD1, RD2);
}

/// Check if two types are layout-compatible in C++11 sense.
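/// Identical types are trivially layout-compatible. Otherwise, top-level
/// cv-qualifiers are ignored and only enumeration types and standard-layout
/// records are compared structurally.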
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
  if (T1.isNull() || T2.isNull())
    return false;

  // C++11 [basic.types] p11:
  //   If two types T1 and T2 are the same type, then T1 and T2 are
  //   layout-compatible types.
  if (C.hasSameType(T1, T2))
    return true;

  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}

//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//

/// Given a type tag expression find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// a constant context.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue,
                            bool isConstantEvaluated) {
  while (true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      } else
        return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
                                                     isConstantEvaluated)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}

/// Retrieve the C type corresponding to type tag TypeExpr.
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
/// kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// a constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}

void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
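  // Attribute indices are 1-based as written in the source;
  // ParamIdx::getASTIndex() yields the 0-based position into ExprArgs, and
  // getSourceIndex() is used only for diagnostics.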
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluated())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(Context,
                                             Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    //   Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
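    // For example, a `char *` argument against a required `signed char *` is
    // accepted on targets where plain `char` is signed.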
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else if (IsPointerAttr)
    mismatch = !isLayoutCompatible(Context,
                                   ArgumentType->getPointeeType(),
                                   RequiredType->getPointeeType());
  else
    mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->getAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
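    // (Static data members and member functions are not affected by the
    // enclosing record's packing.)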
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not within the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize the offset of the whole access.
  CharUnits Offset;
  for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
       I++) {
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
  }

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // The offset may be properly aligned, but the effective alignment may
      // still be lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit for it.
    // Intuitively, walking the chain of member expressions from right to
    // left, we start with the alignment required by the field type, but some
    // packed attribute in that chain has reduced the alignment. Another
    // packed structure may increase it again, but if we got here that
    // increase was not enough. So pointing at the first FieldDecl that is
    // either packed itself or whose RecordDecl is packed seems reasonable.
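    // E.g. for `&s.inner.x` where the struct containing `x` is packed, `x`
    // itself is reported as the culprit.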
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}