1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements extra semantic analysis beyond what is enforced 11 // by the C type system. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "clang/AST/APValue.h" 16 #include "clang/AST/ASTContext.h" 17 #include "clang/AST/Attr.h" 18 #include "clang/AST/AttrIterator.h" 19 #include "clang/AST/CharUnits.h" 20 #include "clang/AST/Decl.h" 21 #include "clang/AST/DeclBase.h" 22 #include "clang/AST/DeclCXX.h" 23 #include "clang/AST/DeclObjC.h" 24 #include "clang/AST/DeclarationName.h" 25 #include "clang/AST/EvaluatedExprVisitor.h" 26 #include "clang/AST/Expr.h" 27 #include "clang/AST/ExprCXX.h" 28 #include "clang/AST/ExprObjC.h" 29 #include "clang/AST/ExprOpenMP.h" 30 #include "clang/AST/FormatString.h" 31 #include "clang/AST/NSAPI.h" 32 #include "clang/AST/NonTrivialTypeVisitor.h" 33 #include "clang/AST/OperationKinds.h" 34 #include "clang/AST/Stmt.h" 35 #include "clang/AST/TemplateBase.h" 36 #include "clang/AST/Type.h" 37 #include "clang/AST/TypeLoc.h" 38 #include "clang/AST/UnresolvedSet.h" 39 #include "clang/Basic/AddressSpaces.h" 40 #include "clang/Basic/CharInfo.h" 41 #include "clang/Basic/Diagnostic.h" 42 #include "clang/Basic/IdentifierTable.h" 43 #include "clang/Basic/LLVM.h" 44 #include "clang/Basic/LangOptions.h" 45 #include "clang/Basic/OpenCLOptions.h" 46 #include "clang/Basic/OperatorKinds.h" 47 #include "clang/Basic/PartialDiagnostic.h" 48 #include "clang/Basic/SourceLocation.h" 49 #include "clang/Basic/SourceManager.h" 50 #include "clang/Basic/Specifiers.h" 51 #include "clang/Basic/SyncScope.h" 52 #include "clang/Basic/TargetBuiltins.h" 53 #include 
"clang/Basic/TargetCXXABI.h" 54 #include "clang/Basic/TargetInfo.h" 55 #include "clang/Basic/TypeTraits.h" 56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. 57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/None.h" 71 #include "llvm/ADT/Optional.h" 72 #include "llvm/ADT/STLExtras.h" 73 #include "llvm/ADT/SmallBitVector.h" 74 #include "llvm/ADT/SmallPtrSet.h" 75 #include "llvm/ADT/SmallString.h" 76 #include "llvm/ADT/SmallVector.h" 77 #include "llvm/ADT/StringRef.h" 78 #include "llvm/ADT/StringSwitch.h" 79 #include "llvm/ADT/Triple.h" 80 #include "llvm/Support/AtomicOrdering.h" 81 #include "llvm/Support/Casting.h" 82 #include "llvm/Support/Compiler.h" 83 #include "llvm/Support/ConvertUTF.h" 84 #include "llvm/Support/ErrorHandling.h" 85 #include "llvm/Support/Format.h" 86 #include "llvm/Support/Locale.h" 87 #include "llvm/Support/MathExtras.h" 88 #include "llvm/Support/raw_ostream.h" 89 #include <algorithm> 90 #include <cassert> 91 #include <cstddef> 92 #include <cstdint> 93 #include <functional> 94 #include <limits> 95 #include <string> 96 #include <tuple> 97 #include <utility> 98 99 using namespace clang; 100 using namespace sema; 101 102 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 103 unsigned ByteNo) const { 104 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 105 Context.getTargetInfo()); 106 } 107 108 /// Checks that a call expression's argument count is the desired number. 109 /// This is useful when doing custom type-checking. Returns true on error. 
110 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { 111 unsigned argCount = call->getNumArgs(); 112 if (argCount == desiredArgCount) return false; 113 114 if (argCount < desiredArgCount) 115 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args) 116 << 0 /*function call*/ << desiredArgCount << argCount 117 << call->getSourceRange(); 118 119 // Highlight all the excess arguments. 120 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(), 121 call->getArg(argCount - 1)->getEndLoc()); 122 123 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) 124 << 0 /*function call*/ << desiredArgCount << argCount 125 << call->getArg(1)->getSourceRange(); 126 } 127 128 /// Check that the first argument to __builtin_annotation is an integer 129 /// and the second argument is a non-wide string literal. 130 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 131 if (checkArgCount(S, TheCall, 2)) 132 return true; 133 134 // First argument should be an integer. 135 Expr *ValArg = TheCall->getArg(0); 136 QualType Ty = ValArg->getType(); 137 if (!Ty->isIntegerType()) { 138 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 139 << ValArg->getSourceRange(); 140 return true; 141 } 142 143 // Second argument should be a constant string. 144 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 145 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 146 if (!Literal || !Literal->isAscii()) { 147 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 148 << StrArg->getSourceRange(); 149 return true; 150 } 151 152 TheCall->setType(Ty); 153 return false; 154 } 155 156 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 157 // We need at least one argument. 
158 if (TheCall->getNumArgs() < 1) { 159 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 160 << 0 << 1 << TheCall->getNumArgs() 161 << TheCall->getCallee()->getSourceRange(); 162 return true; 163 } 164 165 // All arguments should be wide string literals. 166 for (Expr *Arg : TheCall->arguments()) { 167 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); 168 if (!Literal || !Literal->isWide()) { 169 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str) 170 << Arg->getSourceRange(); 171 return true; 172 } 173 } 174 175 return false; 176 } 177 178 /// Check that the argument to __builtin_addressof is a glvalue, and set the 179 /// result type to the corresponding pointer type. 180 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { 181 if (checkArgCount(S, TheCall, 1)) 182 return true; 183 184 ExprResult Arg(TheCall->getArg(0)); 185 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc()); 186 if (ResultType.isNull()) 187 return true; 188 189 TheCall->setArg(0, Arg.get()); 190 TheCall->setType(ResultType); 191 return false; 192 } 193 194 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) { 195 if (checkArgCount(S, TheCall, 3)) 196 return true; 197 198 // First two arguments should be integers. 199 for (unsigned I = 0; I < 2; ++I) { 200 ExprResult Arg = TheCall->getArg(I); 201 QualType Ty = Arg.get()->getType(); 202 if (!Ty->isIntegerType()) { 203 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int) 204 << Ty << Arg.get()->getSourceRange(); 205 return true; 206 } 207 InitializedEntity Entity = InitializedEntity::InitializeParameter( 208 S.getASTContext(), Ty, /*consume*/ false); 209 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 210 if (Arg.isInvalid()) 211 return true; 212 TheCall->setArg(I, Arg.get()); 213 } 214 215 // Third argument should be a pointer to a non-const integer. 
216 // IRGen correctly handles volatile, restrict, and address spaces, and 217 // the other qualifiers aren't possible. 218 { 219 ExprResult Arg = TheCall->getArg(2); 220 QualType Ty = Arg.get()->getType(); 221 const auto *PtrTy = Ty->getAs<PointerType>(); 222 if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() && 223 !PtrTy->getPointeeType().isConstQualified())) { 224 S.Diag(Arg.get()->getBeginLoc(), 225 diag::err_overflow_builtin_must_be_ptr_int) 226 << Ty << Arg.get()->getSourceRange(); 227 return true; 228 } 229 InitializedEntity Entity = InitializedEntity::InitializeParameter( 230 S.getASTContext(), Ty, /*consume*/ false); 231 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 232 if (Arg.isInvalid()) 233 return true; 234 TheCall->setArg(2, Arg.get()); 235 } 236 return false; 237 } 238 239 static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl, 240 CallExpr *TheCall, unsigned SizeIdx, 241 unsigned DstSizeIdx, 242 StringRef LikelyMacroName) { 243 if (TheCall->getNumArgs() <= SizeIdx || 244 TheCall->getNumArgs() <= DstSizeIdx) 245 return; 246 247 const Expr *SizeArg = TheCall->getArg(SizeIdx); 248 const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx); 249 250 llvm::APSInt Size, DstSize; 251 252 // find out if both sizes are known at compile time 253 if (!SizeArg->EvaluateAsInt(Size, S.Context) || 254 !DstSizeArg->EvaluateAsInt(DstSize, S.Context)) 255 return; 256 257 if (Size.ule(DstSize)) 258 return; 259 260 // Confirmed overflow, so generate the diagnostic. 261 StringRef FunctionName = FDecl->getName(); 262 SourceLocation SL = TheCall->getBeginLoc(); 263 SourceManager &SM = S.getSourceManager(); 264 // If we're in an expansion of a macro whose name corresponds to this builtin, 265 // use the simple macro name and location. 
266 if (SL.isMacroID() && Lexer::getImmediateMacroName(SL, SM, S.getLangOpts()) == 267 LikelyMacroName) { 268 FunctionName = LikelyMacroName; 269 SL = SM.getImmediateMacroCallerLoc(SL); 270 } 271 272 S.Diag(SL, diag::warn_memcpy_chk_overflow) 273 << FunctionName << DstSize.toString(/*Radix=*/10) 274 << Size.toString(/*Radix=*/10); 275 } 276 277 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) { 278 if (checkArgCount(S, BuiltinCall, 2)) 279 return true; 280 281 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc(); 282 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts(); 283 Expr *Call = BuiltinCall->getArg(0); 284 Expr *Chain = BuiltinCall->getArg(1); 285 286 if (Call->getStmtClass() != Stmt::CallExprClass) { 287 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call) 288 << Call->getSourceRange(); 289 return true; 290 } 291 292 auto CE = cast<CallExpr>(Call); 293 if (CE->getCallee()->getType()->isBlockPointerType()) { 294 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call) 295 << Call->getSourceRange(); 296 return true; 297 } 298 299 const Decl *TargetDecl = CE->getCalleeDecl(); 300 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) 301 if (FD->getBuiltinID()) { 302 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call) 303 << Call->getSourceRange(); 304 return true; 305 } 306 307 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) { 308 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call) 309 << Call->getSourceRange(); 310 return true; 311 } 312 313 ExprResult ChainResult = S.UsualUnaryConversions(Chain); 314 if (ChainResult.isInvalid()) 315 return true; 316 if (!ChainResult.get()->getType()->isPointerType()) { 317 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer) 318 << Chain->getSourceRange(); 319 return true; 320 } 321 322 QualType ReturnTy = CE->getCallReturnType(S.Context); 323 QualType ArgTys[2] = { ReturnTy, 
ChainResult.get()->getType() }; 324 QualType BuiltinTy = S.Context.getFunctionType( 325 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo()); 326 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy); 327 328 Builtin = 329 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get(); 330 331 BuiltinCall->setType(CE->getType()); 332 BuiltinCall->setValueKind(CE->getValueKind()); 333 BuiltinCall->setObjectKind(CE->getObjectKind()); 334 BuiltinCall->setCallee(Builtin); 335 BuiltinCall->setArg(1, ChainResult.get()); 336 337 return false; 338 } 339 340 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, 341 Scope::ScopeFlags NeededScopeFlags, 342 unsigned DiagID) { 343 // Scopes aren't available during instantiation. Fortunately, builtin 344 // functions cannot be template args so they cannot be formed through template 345 // instantiation. Therefore checking once during the parse is sufficient. 346 if (SemaRef.inTemplateInstantiation()) 347 return false; 348 349 Scope *S = SemaRef.getCurScope(); 350 while (S && !S->isSEHExceptScope()) 351 S = S->getParent(); 352 if (!S || !(S->getFlags() & NeededScopeFlags)) { 353 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 354 SemaRef.Diag(TheCall->getExprLoc(), DiagID) 355 << DRE->getDecl()->getIdentifier(); 356 return true; 357 } 358 359 return false; 360 } 361 362 static inline bool isBlockPointer(Expr *Arg) { 363 return Arg->getType()->isBlockPointerType(); 364 } 365 366 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local 367 /// void*, which is a requirement of device side enqueue. 
368 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) { 369 const BlockPointerType *BPT = 370 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 371 ArrayRef<QualType> Params = 372 BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes(); 373 unsigned ArgCounter = 0; 374 bool IllegalParams = false; 375 // Iterate through the block parameters until either one is found that is not 376 // a local void*, or the block is valid. 377 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end(); 378 I != E; ++I, ++ArgCounter) { 379 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() || 380 (*I)->getPointeeType().getQualifiers().getAddressSpace() != 381 LangAS::opencl_local) { 382 // Get the location of the error. If a block literal has been passed 383 // (BlockExpr) then we can point straight to the offending argument, 384 // else we just point to the variable reference. 385 SourceLocation ErrorLoc; 386 if (isa<BlockExpr>(BlockArg)) { 387 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl(); 388 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc(); 389 } else if (isa<DeclRefExpr>(BlockArg)) { 390 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc(); 391 } 392 S.Diag(ErrorLoc, 393 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args); 394 IllegalParams = true; 395 } 396 } 397 398 return IllegalParams; 399 } 400 401 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) { 402 if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) { 403 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension) 404 << 1 << Call->getDirectCallee() << "cl_khr_subgroups"; 405 return true; 406 } 407 return false; 408 } 409 410 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) { 411 if (checkArgCount(S, TheCall, 2)) 412 return true; 413 414 if (checkOpenCLSubgroupExt(S, TheCall)) 415 return true; 416 417 // First argument is an ndrange_t type. 
418 Expr *NDRangeArg = TheCall->getArg(0); 419 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 420 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 421 << TheCall->getDirectCallee() << "'ndrange_t'"; 422 return true; 423 } 424 425 Expr *BlockArg = TheCall->getArg(1); 426 if (!isBlockPointer(BlockArg)) { 427 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 428 << TheCall->getDirectCallee() << "block"; 429 return true; 430 } 431 return checkOpenCLBlockArgs(S, BlockArg); 432 } 433 434 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the 435 /// get_kernel_work_group_size 436 /// and get_kernel_preferred_work_group_size_multiple builtin functions. 437 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) { 438 if (checkArgCount(S, TheCall, 1)) 439 return true; 440 441 Expr *BlockArg = TheCall->getArg(0); 442 if (!isBlockPointer(BlockArg)) { 443 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 444 << TheCall->getDirectCallee() << "block"; 445 return true; 446 } 447 return checkOpenCLBlockArgs(S, BlockArg); 448 } 449 450 /// Diagnose integer type and any valid implicit conversion to it. 451 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, 452 const QualType &IntType); 453 454 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall, 455 unsigned Start, unsigned End) { 456 bool IllegalParams = false; 457 for (unsigned I = Start; I <= End; ++I) 458 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I), 459 S.Context.getSizeType()); 460 return IllegalParams; 461 } 462 463 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all 464 /// 'local void*' parameter of passed block. 
465 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall, 466 Expr *BlockArg, 467 unsigned NumNonVarArgs) { 468 const BlockPointerType *BPT = 469 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 470 unsigned NumBlockParams = 471 BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams(); 472 unsigned TotalNumArgs = TheCall->getNumArgs(); 473 474 // For each argument passed to the block, a corresponding uint needs to 475 // be passed to describe the size of the local memory. 476 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) { 477 S.Diag(TheCall->getBeginLoc(), 478 diag::err_opencl_enqueue_kernel_local_size_args); 479 return true; 480 } 481 482 // Check that the sizes of the local memory are specified by integers. 483 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs, 484 TotalNumArgs - 1); 485 } 486 487 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different 488 /// overload formats specified in Table 6.13.17.1. 489 /// int enqueue_kernel(queue_t queue, 490 /// kernel_enqueue_flags_t flags, 491 /// const ndrange_t ndrange, 492 /// void (^block)(void)) 493 /// int enqueue_kernel(queue_t queue, 494 /// kernel_enqueue_flags_t flags, 495 /// const ndrange_t ndrange, 496 /// uint num_events_in_wait_list, 497 /// clk_event_t *event_wait_list, 498 /// clk_event_t *event_ret, 499 /// void (^block)(void)) 500 /// int enqueue_kernel(queue_t queue, 501 /// kernel_enqueue_flags_t flags, 502 /// const ndrange_t ndrange, 503 /// void (^block)(local void*, ...), 504 /// uint size0, ...) 505 /// int enqueue_kernel(queue_t queue, 506 /// kernel_enqueue_flags_t flags, 507 /// const ndrange_t ndrange, 508 /// uint num_events_in_wait_list, 509 /// clk_event_t *event_wait_list, 510 /// clk_event_t *event_ret, 511 /// void (^block)(local void*, ...), 512 /// uint size0, ...) 
513 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) { 514 unsigned NumArgs = TheCall->getNumArgs(); 515 516 if (NumArgs < 4) { 517 S.Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args); 518 return true; 519 } 520 521 Expr *Arg0 = TheCall->getArg(0); 522 Expr *Arg1 = TheCall->getArg(1); 523 Expr *Arg2 = TheCall->getArg(2); 524 Expr *Arg3 = TheCall->getArg(3); 525 526 // First argument always needs to be a queue_t type. 527 if (!Arg0->getType()->isQueueT()) { 528 S.Diag(TheCall->getArg(0)->getBeginLoc(), 529 diag::err_opencl_builtin_expected_type) 530 << TheCall->getDirectCallee() << S.Context.OCLQueueTy; 531 return true; 532 } 533 534 // Second argument always needs to be a kernel_enqueue_flags_t enum value. 535 if (!Arg1->getType()->isIntegerType()) { 536 S.Diag(TheCall->getArg(1)->getBeginLoc(), 537 diag::err_opencl_builtin_expected_type) 538 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)"; 539 return true; 540 } 541 542 // Third argument is always an ndrange_t type. 543 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 544 S.Diag(TheCall->getArg(2)->getBeginLoc(), 545 diag::err_opencl_builtin_expected_type) 546 << TheCall->getDirectCallee() << "'ndrange_t'"; 547 return true; 548 } 549 550 // With four arguments, there is only one form that the function could be 551 // called in: no events and no variable arguments. 552 if (NumArgs == 4) { 553 // check that the last argument is the right block type. 
554 if (!isBlockPointer(Arg3)) { 555 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type) 556 << TheCall->getDirectCallee() << "block"; 557 return true; 558 } 559 // we have a block type, check the prototype 560 const BlockPointerType *BPT = 561 cast<BlockPointerType>(Arg3->getType().getCanonicalType()); 562 if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) { 563 S.Diag(Arg3->getBeginLoc(), 564 diag::err_opencl_enqueue_kernel_blocks_no_args); 565 return true; 566 } 567 return false; 568 } 569 // we can have block + varargs. 570 if (isBlockPointer(Arg3)) 571 return (checkOpenCLBlockArgs(S, Arg3) || 572 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4)); 573 // last two cases with either exactly 7 args or 7 args and varargs. 574 if (NumArgs >= 7) { 575 // check common block argument. 576 Expr *Arg6 = TheCall->getArg(6); 577 if (!isBlockPointer(Arg6)) { 578 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type) 579 << TheCall->getDirectCallee() << "block"; 580 return true; 581 } 582 if (checkOpenCLBlockArgs(S, Arg6)) 583 return true; 584 585 // Forth argument has to be any integer type. 586 if (!Arg3->getType()->isIntegerType()) { 587 S.Diag(TheCall->getArg(3)->getBeginLoc(), 588 diag::err_opencl_builtin_expected_type) 589 << TheCall->getDirectCallee() << "integer"; 590 return true; 591 } 592 // check remaining common arguments. 593 Expr *Arg4 = TheCall->getArg(4); 594 Expr *Arg5 = TheCall->getArg(5); 595 596 // Fifth argument is always passed as a pointer to clk_event_t. 597 if (!Arg4->isNullPointerConstant(S.Context, 598 Expr::NPC_ValueDependentIsNotNull) && 599 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) { 600 S.Diag(TheCall->getArg(4)->getBeginLoc(), 601 diag::err_opencl_builtin_expected_type) 602 << TheCall->getDirectCallee() 603 << S.Context.getPointerType(S.Context.OCLClkEventTy); 604 return true; 605 } 606 607 // Sixth argument is always passed as a pointer to clk_event_t. 
608 if (!Arg5->isNullPointerConstant(S.Context, 609 Expr::NPC_ValueDependentIsNotNull) && 610 !(Arg5->getType()->isPointerType() && 611 Arg5->getType()->getPointeeType()->isClkEventT())) { 612 S.Diag(TheCall->getArg(5)->getBeginLoc(), 613 diag::err_opencl_builtin_expected_type) 614 << TheCall->getDirectCallee() 615 << S.Context.getPointerType(S.Context.OCLClkEventTy); 616 return true; 617 } 618 619 if (NumArgs == 7) 620 return false; 621 622 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7); 623 } 624 625 // None of the specific case has been detected, give generic error 626 S.Diag(TheCall->getBeginLoc(), 627 diag::err_opencl_enqueue_kernel_incorrect_args); 628 return true; 629 } 630 631 /// Returns OpenCL access qual. 632 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) { 633 return D->getAttr<OpenCLAccessAttr>(); 634 } 635 636 /// Returns true if pipe element type is different from the pointer. 637 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) { 638 const Expr *Arg0 = Call->getArg(0); 639 // First argument type should always be pipe. 640 if (!Arg0->getType()->isPipeType()) { 641 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 642 << Call->getDirectCallee() << Arg0->getSourceRange(); 643 return true; 644 } 645 OpenCLAccessAttr *AccessQual = 646 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl()); 647 // Validates the access qualifier is compatible with the call. 648 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be 649 // read_only and write_only, and assumed to be read_only if no qualifier is 650 // specified. 
651 switch (Call->getDirectCallee()->getBuiltinID()) { 652 case Builtin::BIread_pipe: 653 case Builtin::BIreserve_read_pipe: 654 case Builtin::BIcommit_read_pipe: 655 case Builtin::BIwork_group_reserve_read_pipe: 656 case Builtin::BIsub_group_reserve_read_pipe: 657 case Builtin::BIwork_group_commit_read_pipe: 658 case Builtin::BIsub_group_commit_read_pipe: 659 if (!(!AccessQual || AccessQual->isReadOnly())) { 660 S.Diag(Arg0->getBeginLoc(), 661 diag::err_opencl_builtin_pipe_invalid_access_modifier) 662 << "read_only" << Arg0->getSourceRange(); 663 return true; 664 } 665 break; 666 case Builtin::BIwrite_pipe: 667 case Builtin::BIreserve_write_pipe: 668 case Builtin::BIcommit_write_pipe: 669 case Builtin::BIwork_group_reserve_write_pipe: 670 case Builtin::BIsub_group_reserve_write_pipe: 671 case Builtin::BIwork_group_commit_write_pipe: 672 case Builtin::BIsub_group_commit_write_pipe: 673 if (!(AccessQual && AccessQual->isWriteOnly())) { 674 S.Diag(Arg0->getBeginLoc(), 675 diag::err_opencl_builtin_pipe_invalid_access_modifier) 676 << "write_only" << Arg0->getSourceRange(); 677 return true; 678 } 679 break; 680 default: 681 break; 682 } 683 return false; 684 } 685 686 /// Returns true if pipe element type is different from the pointer. 687 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { 688 const Expr *Arg0 = Call->getArg(0); 689 const Expr *ArgIdx = Call->getArg(Idx); 690 const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); 691 const QualType EltTy = PipeTy->getElementType(); 692 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); 693 // The Idx argument should be a pointer and the type of the pointer and 694 // the type of pipe element should also be the same. 
695 if (!ArgTy || 696 !S.Context.hasSameType( 697 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { 698 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 699 << Call->getDirectCallee() << S.Context.getPointerType(EltTy) 700 << ArgIdx->getType() << ArgIdx->getSourceRange(); 701 return true; 702 } 703 return false; 704 } 705 706 // Performs semantic analysis for the read/write_pipe call. 707 // \param S Reference to the semantic analyzer. 708 // \param Call A pointer to the builtin call. 709 // \return True if a semantic error has been found, false otherwise. 710 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { 711 // OpenCL v2.0 s6.13.16.2 - The built-in read/write 712 // functions have two forms. 713 switch (Call->getNumArgs()) { 714 case 2: 715 if (checkOpenCLPipeArg(S, Call)) 716 return true; 717 // The call with 2 arguments should be 718 // read/write_pipe(pipe T, T*). 719 // Check packet type T. 720 if (checkOpenCLPipePacketType(S, Call, 1)) 721 return true; 722 break; 723 724 case 4: { 725 if (checkOpenCLPipeArg(S, Call)) 726 return true; 727 // The call with 4 arguments should be 728 // read/write_pipe(pipe T, reserve_id_t, uint, T*). 729 // Check reserve_id_t. 730 if (!Call->getArg(1)->getType()->isReserveIDT()) { 731 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 732 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 733 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 734 return true; 735 } 736 737 // Check the index. 738 const Expr *Arg2 = Call->getArg(2); 739 if (!Arg2->getType()->isIntegerType() && 740 !Arg2->getType()->isUnsignedIntegerType()) { 741 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 742 << Call->getDirectCallee() << S.Context.UnsignedIntTy 743 << Arg2->getType() << Arg2->getSourceRange(); 744 return true; 745 } 746 747 // Check packet type T. 
748 if (checkOpenCLPipePacketType(S, Call, 3)) 749 return true; 750 } break; 751 default: 752 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num) 753 << Call->getDirectCallee() << Call->getSourceRange(); 754 return true; 755 } 756 757 return false; 758 } 759 760 // Performs a semantic analysis on the {work_group_/sub_group_ 761 // /_}reserve_{read/write}_pipe 762 // \param S Reference to the semantic analyzer. 763 // \param Call The call to the builtin function to be analyzed. 764 // \return True if a semantic error was found, false otherwise. 765 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) { 766 if (checkArgCount(S, Call, 2)) 767 return true; 768 769 if (checkOpenCLPipeArg(S, Call)) 770 return true; 771 772 // Check the reserve size. 773 if (!Call->getArg(1)->getType()->isIntegerType() && 774 !Call->getArg(1)->getType()->isUnsignedIntegerType()) { 775 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 776 << Call->getDirectCallee() << S.Context.UnsignedIntTy 777 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 778 return true; 779 } 780 781 // Since return type of reserve_read/write_pipe built-in function is 782 // reserve_id_t, which is not defined in the builtin def file , we used int 783 // as return type and need to override the return type of these functions. 784 Call->setType(S.Context.OCLReserveIDTy); 785 786 return false; 787 } 788 789 // Performs a semantic analysis on {work_group_/sub_group_ 790 // /_}commit_{read/write}_pipe 791 // \param S Reference to the semantic analyzer. 792 // \param Call The call to the builtin function to be analyzed. 793 // \return True if a semantic error was found, false otherwise. 794 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) { 795 if (checkArgCount(S, Call, 2)) 796 return true; 797 798 if (checkOpenCLPipeArg(S, Call)) 799 return true; 800 801 // Check reserve_id_t. 
802 if (!Call->getArg(1)->getType()->isReserveIDT()) { 803 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 804 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 805 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 806 return true; 807 } 808 809 return false; 810 } 811 812 // Performs a semantic analysis on the call to built-in Pipe 813 // Query Functions. 814 // \param S Reference to the semantic analyzer. 815 // \param Call The call to the builtin function to be analyzed. 816 // \return True if a semantic error was found, false otherwise. 817 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) { 818 if (checkArgCount(S, Call, 1)) 819 return true; 820 821 if (!Call->getArg(0)->getType()->isPipeType()) { 822 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 823 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange(); 824 return true; 825 } 826 827 return false; 828 } 829 830 // OpenCL v2.0 s6.13.9 - Address space qualifier functions. 831 // Performs semantic analysis for the to_global/local/private call. 832 // \param S Reference to the semantic analyzer. 833 // \param BuiltinID ID of the builtin function. 834 // \param Call A pointer to the builtin call. 835 // \return True if a semantic error has been found, false otherwise. 
836 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 837 CallExpr *Call) { 838 if (Call->getNumArgs() != 1) { 839 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num) 840 << Call->getDirectCallee() << Call->getSourceRange(); 841 return true; 842 } 843 844 auto RT = Call->getArg(0)->getType(); 845 if (!RT->isPointerType() || RT->getPointeeType() 846 .getAddressSpace() == LangAS::opencl_constant) { 847 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 848 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 849 return true; 850 } 851 852 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 853 S.Diag(Call->getArg(0)->getBeginLoc(), 854 diag::warn_opencl_generic_address_space_arg) 855 << Call->getDirectCallee()->getNameInfo().getAsString() 856 << Call->getArg(0)->getSourceRange(); 857 } 858 859 RT = RT->getPointeeType(); 860 auto Qual = RT.getQualifiers(); 861 switch (BuiltinID) { 862 case Builtin::BIto_global: 863 Qual.setAddressSpace(LangAS::opencl_global); 864 break; 865 case Builtin::BIto_local: 866 Qual.setAddressSpace(LangAS::opencl_local); 867 break; 868 case Builtin::BIto_private: 869 Qual.setAddressSpace(LangAS::opencl_private); 870 break; 871 default: 872 llvm_unreachable("Invalid builtin function"); 873 } 874 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 875 RT.getUnqualifiedType(), Qual))); 876 877 return false; 878 } 879 880 // Emit an error and return true if the current architecture is not in the list 881 // of supported architectures. 
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  // Nothing to diagnose when the current target is one of the supported ones.
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

/// Main entry point for builtin-specific semantic checking of a call.
/// For some builtins this also rewrites the call's result type via setType.
/// Returns the (possibly updated) call, or ExprError() on failure.
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  // ICEArguments is a bitmask: bit N set means argument N must be an ICE.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    // SemaBuiltinConstantArg diagnoses and returns true on failure.
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    // Clear the bit so the loop terminates once every ICE arg is checked.
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    // MSVC's __va_start has a different form on ARM targets.
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    // fpclassify takes 5 classification values plus the operand: 6 args.
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_object_size:
    // The second argument (the "type" of object-size query) must be 0..3.
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI_setjmp:
  case Builtin::BI_setjmpex:
    if (checkArgCount(*this, TheCall, 1))
      return true;
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_constant_p:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  // All GNU __sync atomics (and their sized variants) funnel into one
  // overload checker below.
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
// Expand one case per C11/OpenCL atomic builtin from Builtins.def.
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct: {
    // We first want to ensure we are called with 2 arguments
    if (checkArgCount(*this, TheCall, 2))
      return ExprError();
    // Ensure that the first argument is of type 'struct XX *'
    const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
    const QualType PtrArgType = PtrArg->getType();
    if (!PtrArgType->isPointerType() ||
        !PtrArgType->getPointeeType()->isRecordType()) {
      Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
          << "structure pointer";
      return ExprError();
    }

    // Ensure that the second argument is of type 'FunctionType'
    const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
    const QualType FnPtrArgType = FnPtrArg->getType();
    if (!FnPtrArgType->isPointerType()) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    const auto *FuncType =
        FnPtrArgType->getPointeeType()->getAs<FunctionType>();

    if (!FuncType) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    // If a prototype is visible, require a printf-shaped callback:
    // variadic, returning int, first parameter 'const char *'.
    if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
      if (!FT->getNumParams()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
      QualType PT = FT->getParamType(0);
      if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
          !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
          !PT->getPointeeType().isConstQualified()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
    }

    TheCall->setType(Context.IntTy);
    break;
  }

  // check secure string manipulation functions where overflows
  // are detectable at compile time
  case Builtin::BI__builtin___memcpy_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "memcpy");
    break;
  case Builtin::BI__builtin___memmove_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "memmove");
    break;
  case Builtin::BI__builtin___memset_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "memset");
    break;
  case Builtin::BI__builtin___strlcat_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strlcat");
    break;
  case Builtin::BI__builtin___strlcpy_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strlcpy");
    break;
  case Builtin::BI__builtin___strncat_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strncat");
    break;
  case Builtin::BI__builtin___strncpy_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strncpy");
    break;
  case Builtin::BI__builtin___stpncpy_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "stpncpy");
    break;
  case Builtin::BI__builtin___memccpy_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4, "memccpy");
    break;
  case Builtin::BI__builtin___snprintf_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3, "snprintf");
    break;
  case Builtin::BI__builtin___vsnprintf_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3, "vsnprintf");
    break;
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    // Sub-group variants additionally require the subgroups extension.
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    TheCall->setType(Context.UnsignedIntTy);
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
  // Dispatch to the per-target checker for target-specific builtins.
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::arm:
    case llvm::Triple::armeb:
    case llvm::Triple::thumb:
    case llvm::Triple::thumbeb:
      if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::aarch64:
    case llvm::Triple::aarch64_be:
      if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::hexagon:
      if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
      if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::systemz:
      if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::ppc:
    case llvm::Triple::ppc64:
    case llvm::Triple::ppc64le:
      if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    default:
      break;
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  // IsQuad is 0 or 1 and doubles the lane count via the shifts below.
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ?
31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    // IsInt64Long picks 'long' vs 'long long' to match the target's int64_t.
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
                                : Context.LongLongTy;
  case NeonTypeFlags::Poly8:
    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Poly16:
    return IsPolyUnsigned ?
Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Poly64:
    if (IsInt64Long)
      return Context.UnsignedLongTy;
    else
      return Context.UnsignedLongLongTy;
  case NeonTypeFlags::Poly128:
    // No element QualType for 128-bit poly; falls through to unreachable.
    break;
  case NeonTypeFlags::Float16:
    return Context.HalfTy;
  case NeonTypeFlags::Float32:
    return Context.FloatTy;
  case NeonTypeFlags::Float64:
    return Context.DoubleTy;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  uint64_t mask = 0;
  unsigned TV = 0;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  // The generated tables set mask/PtrArgNum/HasConstPtr per builtin.
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    // The type code must name a bit set in the builtin's allowed-type mask.
    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    // Strip one implicit cast so we diagnose against the written argument.
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long =
        Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
    // Build the expected pointer type from the validated type-code immediate.
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  // i/l/u are the argument index and inclusive [l, u+l] range, filled in by
  // the generated immediate-check tables.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  // Loads (ldrex/ldaex) take one argument; stores (strex/stlex) take two.
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(IsLdrex ?
0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ARC-managed pointees cannot be accessed through these builtins.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // Loads produce the pointee's value type; nothing else to check.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}

bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // Exclusive load/store builtins: 32-bit ARM handles at most 64-bit values.
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
1705 switch (BuiltinID) { 1706 default: return false; 1707 case ARM::BI__builtin_arm_ssat: 1708 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 1709 case ARM::BI__builtin_arm_usat: 1710 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 1711 case ARM::BI__builtin_arm_ssat16: 1712 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 1713 case ARM::BI__builtin_arm_usat16: 1714 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 1715 case ARM::BI__builtin_arm_vcvtr_f: 1716 case ARM::BI__builtin_arm_vcvtr_d: 1717 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 1718 case ARM::BI__builtin_arm_dmb: 1719 case ARM::BI__builtin_arm_dsb: 1720 case ARM::BI__builtin_arm_isb: 1721 case ARM::BI__builtin_arm_dbg: 1722 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 1723 } 1724 } 1725 1726 bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, 1727 CallExpr *TheCall) { 1728 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 1729 BuiltinID == AArch64::BI__builtin_arm_ldaex || 1730 BuiltinID == AArch64::BI__builtin_arm_strex || 1731 BuiltinID == AArch64::BI__builtin_arm_stlex) { 1732 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 1733 } 1734 1735 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 1736 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 1737 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 1738 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 1739 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 1740 } 1741 1742 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 1743 BuiltinID == AArch64::BI__builtin_arm_wsr64) 1744 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1745 1746 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 1747 BuiltinID == AArch64::BI__builtin_arm_rsrp || 1748 BuiltinID == AArch64::BI__builtin_arm_wsr || 1749 BuiltinID == AArch64::BI__builtin_arm_wsrp) 1750 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1751 1752 // Only check the valid encoding range. 
Any constant in this range would be 1753 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 1754 // an exception for incorrect registers. This matches MSVC behavior. 1755 if (BuiltinID == AArch64::BI_ReadStatusReg || 1756 BuiltinID == AArch64::BI_WriteStatusReg) 1757 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 1758 1759 if (BuiltinID == AArch64::BI__getReg) 1760 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 1761 1762 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) 1763 return true; 1764 1765 // For intrinsics which take an immediate value as part of the instruction, 1766 // range check them here. 1767 unsigned i = 0, l = 0, u = 0; 1768 switch (BuiltinID) { 1769 default: return false; 1770 case AArch64::BI__builtin_arm_dmb: 1771 case AArch64::BI__builtin_arm_dsb: 1772 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 1773 } 1774 1775 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 1776 } 1777 1778 bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) { 1779 struct BuiltinAndString { 1780 unsigned BuiltinID; 1781 const char *Str; 1782 }; 1783 1784 static BuiltinAndString ValidCPU[] = { 1785 { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65" }, 1786 { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65" }, 1787 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65" }, 1788 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65" }, 1789 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65" }, 1790 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65" }, 1791 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65" }, 1792 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65" }, 1793 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65" }, 1794 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65" }, 1795 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65" }, 1796 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65" }, 1797 
{ Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65" }, 1798 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65" }, 1799 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65" }, 1800 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65" }, 1801 { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65" }, 1802 { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65" }, 1803 { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65" }, 1804 }; 1805 1806 static BuiltinAndString ValidHVX[] = { 1807 { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65" }, 1808 { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65" }, 1809 { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65" }, 1810 { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65" }, 1811 { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65" }, 1812 { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65" }, 1813 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65" }, 1814 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65" }, 1815 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65" }, 1816 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65" }, 1817 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65" }, 1818 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65" }, 1819 { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65" }, 1820 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65" }, 1821 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65" }, 1822 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65" }, 1823 { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65" }, 1824 { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65" }, 1825 { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65" }, 1826 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, "v60,v62,v65" }, 1827 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65" }, 1828 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65" }, 1829 { 
Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65" }, 1830 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65" }, 1831 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65" }, 1832 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65" }, 1833 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65" }, 1834 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65" }, 1835 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65" }, 1836 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65" }, 1837 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65" }, 1838 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65" }, 1839 { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65" }, 1840 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65" }, 1841 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65" }, 1842 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65" }, 1843 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65" }, 1844 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65" }, 1845 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65" }, 1846 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65" }, 1847 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65" }, 1848 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65" }, 1849 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65" }, 1850 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65" }, 1851 { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65" }, 1852 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65" }, 1853 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65" }, 1854 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65" }, 1855 { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65" }, 1856 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65" }, 1857 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65" }, 1858 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, 
"v60,v62,v65" }, 1859 { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65" }, 1860 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65" }, 1861 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65" }, 1862 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65" }, 1863 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65" }, 1864 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65" }, 1865 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65" }, 1866 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65" }, 1867 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65" }, 1868 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65" }, 1869 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65" }, 1870 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65" }, 1871 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65" }, 1872 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65" }, 1873 { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65" }, 1874 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65" }, 1875 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65" }, 1876 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65" }, 1877 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65" }, 1878 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65" }, 1879 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65" }, 1880 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65" }, 1881 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65" }, 1882 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65" }, 1883 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65" }, 1884 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65" }, 1885 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65" }, 1886 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65" }, 1887 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65" }, 1888 { 
Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65" }, 1889 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65" }, 1890 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65" }, 1891 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65" }, 1892 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65" }, 1893 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65" }, 1894 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65" }, 1895 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65" }, 1896 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65" }, 1897 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65" }, 1898 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65" }, 1899 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65" }, 1900 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65" }, 1901 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65" }, 1902 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65" }, 1903 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65" }, 1904 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65" }, 1905 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65" }, 1906 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65" }, 1907 { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65" }, 1908 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65" }, 1909 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65" }, 1910 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65" }, 1911 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65" }, 1912 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65" }, 1913 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65" }, 1914 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65" }, 1915 { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65" }, 1916 { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65" }, 1917 { 
Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65" }, 1918 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65" }, 1919 { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65" }, 1920 { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65" }, 1921 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65" }, 1922 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65" }, 1923 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65" }, 1924 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65" }, 1925 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65" }, 1926 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65" }, 1927 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65" }, 1928 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65" }, 1929 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65" }, 1930 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65" }, 1931 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65" }, 1932 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65" }, 1933 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65" }, 1934 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65" }, 1935 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65" }, 1936 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65" }, 1937 { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65" }, 1938 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65" }, 1939 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65" }, 1940 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65" }, 1941 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65" }, 1942 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65" }, 1943 { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65" }, 1944 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65" }, 1945 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65" }, 1946 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65" }, 1947 
{ Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65" }, 1948 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65" }, 1949 { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65" }, 1950 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65" }, 1951 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65" }, 1952 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65" }, 1953 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65" }, 1954 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65" }, 1955 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65" }, 1956 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65" }, 1957 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65" }, 1958 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65" }, 1959 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65" }, 1960 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65" }, 1961 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65" }, 1962 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65" }, 1963 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65" }, 1964 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65" }, 1965 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65" }, 1966 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65" }, 1967 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65" }, 1968 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65" }, 1969 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65" }, 1970 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65" }, 1971 { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65" }, 1972 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65" }, 1973 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65" }, 1974 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65" }, 1975 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65" }, 1976 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, 
"v60,v62,v65" }, 1977 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65" }, 1978 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65" }, 1979 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65" }, 1980 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65" }, 1981 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65" }, 1982 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65" }, 1983 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65" }, 1984 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65" }, 1985 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65" }, 1986 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65" }, 1987 { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65" }, 1988 { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65" }, 1989 { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65" }, 1990 { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65" }, 1991 { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65" }, 1992 { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65" }, 1993 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65" }, 1994 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65" }, 1995 { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65" }, 1996 { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65" }, 1997 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65" }, 1998 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65" }, 1999 { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65" }, 2000 { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65" }, 2001 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65" }, 2002 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65" }, 2003 { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65" }, 2004 { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65" }, 2005 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65" }, 2006 { 
Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65" }, 2007 { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65" }, 2008 { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65" }, 2009 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65" }, 2010 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65" }, 2011 { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65" }, 2012 { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65" }, 2013 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65" }, 2014 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65" }, 2015 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65" }, 2016 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65" }, 2017 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65" }, 2018 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65" }, 2019 { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65" }, 2020 { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65" }, 2021 { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65" }, 2022 { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65" }, 2023 { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65" }, 2024 { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65" }, 2025 { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65" }, 2026 { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65" }, 2027 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65" }, 2028 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65" }, 2029 { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65" }, 2030 { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65" }, 2031 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65" }, 2032 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65" }, 2033 { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65" }, 2034 { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65" }, 2035 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65" }, 2036 { 
Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65" }, 2037 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65" }, 2038 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65" }, 2039 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65" }, 2040 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65" }, 2041 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65" }, 2042 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65" }, 2043 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65" }, 2044 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65" }, 2045 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65" }, 2046 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65" }, 2047 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65" }, 2048 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65" }, 2049 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65" }, 2050 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65" }, 2051 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65" }, 2052 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65" }, 2053 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65" }, 2054 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65" }, 2055 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65" }, 2056 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65" }, 2057 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65" }, 2058 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65" }, 2059 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65" }, 2060 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65" }, 2061 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65" }, 2062 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65" }, 2063 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, 
"v60,v62,v65" }, 2064 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65" }, 2065 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65" }, 2066 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65" }, 2067 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65" }, 2068 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65" }, 2069 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, "v60,v62,v65" }, 2070 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65" }, 2071 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65" }, 2072 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65" }, 2073 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65" }, 2074 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65" }, 2075 { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65" }, 2076 { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65" }, 2077 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65" }, 2078 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65" }, 2079 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65" }, 2080 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65" }, 2081 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65" }, 2082 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65" }, 2083 { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65" }, 2084 { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65" }, 2085 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65" }, 2086 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65" }, 2087 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65" }, 2088 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65" }, 2089 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65" }, 2090 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65" }, 2091 { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65" }, 2092 { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, 
"v60,v62,v65" }, 2093 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65" }, 2094 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65" }, 2095 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65" }, 2096 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65" }, 2097 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65" }, 2098 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65" }, 2099 { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65" }, 2100 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65" }, 2101 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65" }, 2102 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65" }, 2103 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65" }, 2104 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65" }, 2105 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65" }, 2106 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65" }, 2107 { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65" }, 2108 { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65" }, 2109 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65" }, 2110 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65" }, 2111 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65" }, 2112 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65" }, 2113 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65" }, 2114 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65" }, 2115 { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65" }, 2116 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65" }, 2117 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65" }, 2118 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65" }, 2119 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65" }, 2120 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65" }, 2121 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65" }, 2122 { 
Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65" }, 2123 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65" }, 2124 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65" }, 2125 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65" }, 2126 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65" }, 2127 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65" }, 2128 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65" }, 2129 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65" }, 2130 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65" }, 2131 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65" }, 2132 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65" }, 2133 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65" }, 2134 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65" }, 2135 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65" }, 2136 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65" }, 2137 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65" }, 2138 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65" }, 2139 { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65" }, 2140 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65" }, 2141 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65" }, 2142 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65" }, 2143 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65" }, 2144 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65" }, 2145 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65" }, 2146 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65" }, 2147 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65" }, 2148 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65" }, 2149 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65" }, 2150 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65" }, 2151 { 
Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65" }, 2152 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65" }, 2153 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65" }, 2154 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65" }, 2155 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65" }, 2156 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65" }, 2157 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65" }, 2158 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65" }, 2159 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65" }, 2160 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65" }, 2161 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65" }, 2162 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65" }, 2163 { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65" }, 2164 { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65" }, 2165 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65" }, 2166 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65" }, 2167 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65" }, 2168 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65" }, 2169 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65" }, 2170 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65" }, 2171 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65" }, 2172 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65" }, 2173 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65" }, 2174 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65" }, 2175 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65" }, 2176 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65" }, 2177 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65" }, 2178 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65" }, 2179 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65" }, 2180 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65" }, 2181 { 
Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65" }, 2182 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65" }, 2183 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65" }, 2184 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65" }, 2185 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65" }, 2186 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65" }, 2187 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65" }, 2188 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65" }, 2189 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65" }, 2190 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65" }, 2191 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65" }, 2192 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65" }, 2193 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65" }, 2194 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65" }, 2195 { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65" }, 2196 { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65" }, 2197 { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65" }, 2198 { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65" }, 2199 { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65" }, 2200 { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65" }, 2201 { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65" }, 2202 { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65" }, 2203 { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65" }, 2204 { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65" }, 2205 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65" }, 2206 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65" }, 2207 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65" }, 2208 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65" }, 2209 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65" }, 2210 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65" }, 
2211 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65" }, 2212 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65" }, 2213 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65" }, 2214 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65" }, 2215 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65" }, 2216 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65" }, 2217 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65" }, 2218 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65" }, 2219 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65" }, 2220 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65" }, 2221 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65" }, 2222 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65" }, 2223 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65" }, 2224 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65" }, 2225 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65" }, 2226 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65" }, 2227 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65" }, 2228 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65" }, 2229 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65" }, 2230 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65" }, 2231 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65" }, 2232 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65" }, 2233 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65" }, 2234 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65" }, 2235 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65" }, 2236 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65" }, 2237 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65" }, 2238 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65" }, 2239 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65" }, 2240 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65" }, 2241 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65" }, 2242 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65" }, 2243 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65" }, 2244 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65" }, 2245 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65" }, 2246 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65" }, 2247 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65" }, 2248 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65" }, 2249 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65" }, 2250 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65" }, 2251 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65" }, 2252 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65" }, 2253 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65" }, 2254 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65" }, 2255 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65" }, 2256 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65" }, 2257 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65" }, 2258 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65" }, 2259 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65" }, 2260 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65" }, 2261 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65" }, 2262 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65" }, 2263 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65" }, 2264 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65" }, 2265 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65" }, 2266 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65" }, 2267 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65" }, 2268 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65" }, 2269 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65" }, 2270 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65" }, 2271 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65" }, 2272 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65" }, 2273 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65" }, 2274 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65" }, 2275 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65" }, 2276 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65" }, 2277 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65" }, 2278 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65" }, 2279 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65" }, 2280 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65" }, 2281 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65" }, 2282 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65" }, 2283 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65" }, 2284 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65" }, 2285 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65" }, 2286 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65" }, 2287 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65" }, 2288 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65" }, 2289 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65" }, 2290 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65" }, 2291 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65" }, 2292 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65" }, 2293 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65" }, 2294 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65" }, 2295 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65" }, 2296 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65" }, 2297 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65" }, 2298 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65" }, 
2299 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65" }, 2300 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65" }, 2301 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65" }, 2302 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65" }, 2303 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65" }, 2304 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65" }, 2305 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65" }, 2306 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65" }, 2307 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65" }, 2308 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65" }, 2309 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65" }, 2310 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65" }, 2311 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65" }, 2312 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65" }, 2313 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65" }, 2314 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65" }, 2315 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65" }, 2316 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65" }, 2317 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65" }, 2318 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65" }, 2319 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65" }, 2320 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65" }, 2321 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65" }, 2322 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65" }, 2323 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65" }, 2324 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65" }, 2325 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65" }, 2326 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65" }, 2327 { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65" }, 2328 
{ Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65" }, 2329 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65" }, 2330 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65" }, 2331 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65" }, 2332 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65" }, 2333 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65" }, 2334 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65" }, 2335 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65" }, 2336 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65" }, 2337 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65" }, 2338 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65" }, 2339 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65" }, 2340 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65" }, 2341 { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65" }, 2342 { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65" }, 2343 { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65" }, 2344 { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65" }, 2345 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65" }, 2346 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65" }, 2347 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65" }, 2348 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65" }, 2349 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65" }, 2350 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65" }, 2351 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65" }, 2352 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65" }, 2353 { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65" }, 2354 { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65" }, 2355 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65" }, 2356 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65" }, 2357 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, 
"v60,v62,v65" }, 2358 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65" }, 2359 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65" }, 2360 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65" }, 2361 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65" }, 2362 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65" }, 2363 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65" }, 2364 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65" }, 2365 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65" }, 2366 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65" }, 2367 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65" }, 2368 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65" }, 2369 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65" }, 2370 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65" }, 2371 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" }, 2372 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" }, 2373 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" }, 2374 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" }, 2375 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65" }, 2376 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65" }, 2377 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65" }, 2378 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65" }, 2379 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65" }, 2380 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65" }, 2381 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65" }, 2382 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65" }, 2383 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65" }, 2384 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65" }, 2385 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65" }, 2386 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65" 
}, 2387 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65" }, 2388 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65" }, 2389 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65" }, 2390 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65" }, 2391 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65" }, 2392 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65" }, 2393 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65" }, 2394 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65" }, 2395 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65" }, 2396 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65" }, 2397 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65" }, 2398 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65" }, 2399 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" }, 2400 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" }, 2401 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" }, 2402 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" }, 2403 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65" }, 2404 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65" }, 2405 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65" }, 2406 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65" }, 2407 { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65" }, 2408 { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65" }, 2409 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65" }, 2410 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65" }, 2411 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65" }, 2412 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65" }, 2413 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, "v62,v65" }, 2414 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65" }, 2415 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65" }, 2416 { 
Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65" }, 2417 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65" }, 2418 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65" }, 2419 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65" }, 2420 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65" }, 2421 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65" }, 2422 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65" }, 2423 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65" }, 2424 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65" }, 2425 { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65" }, 2426 { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65" }, 2427 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65" }, 2428 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65" }, 2429 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65" }, 2430 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65" }, 2431 { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65" }, 2432 { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65" }, 2433 { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65" }, 2434 { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65" }, 2435 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65" }, 2436 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65" }, 2437 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65" }, 2438 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65" }, 2439 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65" }, 2440 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65" }, 2441 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65" }, 2442 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65" }, 2443 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65" }, 2444 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65" }, 2445 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, 
"v60,v62,v65" }, 2446 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65" }, 2447 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65" }, 2448 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65" }, 2449 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65" }, 2450 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65" }, 2451 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65" }, 2452 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65" }, 2453 { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65" }, 2454 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65" }, 2455 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65" }, 2456 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65" }, 2457 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65" }, 2458 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65" }, 2459 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65" }, 2460 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65" }, 2461 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, "v62,v65" }, 2462 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65" }, 2463 { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65" }, 2464 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65" }, 2465 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65" }, 2466 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65" }, 2467 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65" }, 2468 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65" }, 2469 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65" }, 2470 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65" }, 2471 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65" }, 2472 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65" }, 2473 { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65" }, 2474 { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65" }, 2475 { 
Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65" }, 2476 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65" }, 2477 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65" }, 2478 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65" }, 2479 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65" }, 2480 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65" }, 2481 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65" }, 2482 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65" }, 2483 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65" }, 2484 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65" }, 2485 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65" }, 2486 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65" }, 2487 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65" }, 2488 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65" }, 2489 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65" }, 2490 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65" }, 2491 { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65" }, 2492 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65" }, 2493 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65" }, 2494 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65" }, 2495 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65" }, 2496 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65" }, 2497 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65" }, 2498 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65" }, 2499 { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65" }, 2500 { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65" }, 2501 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65" }, 2502 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65" }, 2503 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65" }, 2504 { 
Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65" }, 2505 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65" }, 2506 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65" }, 2507 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65" }, 2508 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65" }, 2509 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65" }, 2510 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65" }, 2511 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65" }, 2512 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65" }, 2513 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65" }, 2514 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65" }, 2515 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65" }, 2516 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65" }, 2517 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65" }, 2518 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65" }, 2519 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65" }, 2520 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65" }, 2521 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65" }, 2522 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65" }, 2523 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65" }, 2524 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65" }, 2525 { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65" }, 2526 { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65" }, 2527 { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65" }, 2528 { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65" }, 2529 { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65" }, 2530 { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65" }, 2531 }; 2532 2533 // Sort the tables on first execution so we can binary search them. 
2534 auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) { 2535 return LHS.BuiltinID < RHS.BuiltinID; 2536 }; 2537 static const bool SortOnce = 2538 (std::sort(std::begin(ValidCPU), std::end(ValidCPU), SortCmp), 2539 std::sort(std::begin(ValidHVX), std::end(ValidHVX), SortCmp), true); 2540 (void)SortOnce; 2541 auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) { 2542 return BI.BuiltinID < BuiltinID; 2543 }; 2544 2545 const TargetInfo &TI = Context.getTargetInfo(); 2546 2547 const BuiltinAndString *FC = 2548 std::lower_bound(std::begin(ValidCPU), std::end(ValidCPU), BuiltinID, 2549 LowerBoundCmp); 2550 if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) { 2551 const TargetOptions &Opts = TI.getTargetOpts(); 2552 StringRef CPU = Opts.CPU; 2553 if (!CPU.empty()) { 2554 assert(CPU.startswith("hexagon") && "Unexpected CPU name"); 2555 CPU.consume_front("hexagon"); 2556 SmallVector<StringRef, 3> CPUs; 2557 StringRef(FC->Str).split(CPUs, ','); 2558 if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; })) 2559 return Diag(TheCall->getBeginLoc(), 2560 diag::err_hexagon_builtin_unsupported_cpu); 2561 } 2562 } 2563 2564 const BuiltinAndString *FH = 2565 std::lower_bound(std::begin(ValidHVX), std::end(ValidHVX), BuiltinID, 2566 LowerBoundCmp); 2567 if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) { 2568 if (!TI.hasFeature("hvx")) 2569 return Diag(TheCall->getBeginLoc(), 2570 diag::err_hexagon_builtin_requires_hvx); 2571 2572 SmallVector<StringRef, 3> HVXs; 2573 StringRef(FH->Str).split(HVXs, ','); 2574 bool IsValid = llvm::any_of(HVXs, 2575 [&TI] (StringRef V) { 2576 std::string F = "hvx" + V.str(); 2577 return TI.hasFeature(F); 2578 }); 2579 if (!IsValid) 2580 return Diag(TheCall->getBeginLoc(), 2581 diag::err_hexagon_builtin_unsupported_hvx); 2582 } 2583 2584 return false; 2585 } 2586 2587 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2588 struct ArgInfo { 2589 uint8_t 
OpNum; 2590 bool IsSigned; 2591 uint8_t BitWidth; 2592 uint8_t Align; 2593 }; 2594 struct BuiltinInfo { 2595 unsigned BuiltinID; 2596 ArgInfo Infos[2]; 2597 }; 2598 2599 static BuiltinInfo Infos[] = { 2600 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2601 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2602 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2603 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} }, 2604 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2605 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2606 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2607 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2608 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2609 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2610 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2611 2612 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2613 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2614 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2615 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2616 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2617 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2618 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2619 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2620 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2621 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2622 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2623 2624 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2625 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2626 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2627 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2628 { 
Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2629 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2630 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2631 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2632 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2633 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2634 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2635 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2636 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2637 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2638 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2639 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2640 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2641 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2642 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2643 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2644 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2645 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2646 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2647 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2648 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2649 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2650 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2651 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2652 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2653 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2654 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2655 { 
Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2656 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2657 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2658 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2659 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2660 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2661 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2662 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2663 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2664 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2665 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2666 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2667 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2668 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2669 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2670 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2671 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2672 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2673 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2674 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2675 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2676 {{ 1, false, 6, 0 }} }, 2677 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2678 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2679 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2680 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2681 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2682 { 
Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2683 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2684 {{ 1, false, 5, 0 }} }, 2685 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2686 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2687 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2688 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2689 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2690 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2691 { 2, false, 5, 0 }} }, 2692 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2693 { 2, false, 6, 0 }} }, 2694 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2695 { 3, false, 5, 0 }} }, 2696 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2697 { 3, false, 6, 0 }} }, 2698 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2699 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2700 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2701 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2702 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2703 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2704 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2705 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2706 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2707 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2708 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2709 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2710 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2711 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2712 { 
Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2713 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2714 {{ 2, false, 4, 0 }, 2715 { 3, false, 5, 0 }} }, 2716 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2717 {{ 2, false, 4, 0 }, 2718 { 3, false, 5, 0 }} }, 2719 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2720 {{ 2, false, 4, 0 }, 2721 { 3, false, 5, 0 }} }, 2722 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2723 {{ 2, false, 4, 0 }, 2724 { 3, false, 5, 0 }} }, 2725 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2726 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2727 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2728 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2729 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2730 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2731 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2732 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2733 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2734 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2735 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2736 { 2, false, 5, 0 }} }, 2737 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2738 { 2, false, 6, 0 }} }, 2739 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2740 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2741 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2742 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2743 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2744 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2745 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2746 { 
Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2747 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2748 {{ 1, false, 4, 0 }} }, 2749 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2750 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2751 {{ 1, false, 4, 0 }} }, 2752 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2753 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2754 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2755 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2756 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2757 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2758 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2759 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2760 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2761 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2762 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2763 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2764 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2765 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2766 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2767 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2768 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2769 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2770 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2771 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2772 {{ 3, false, 1, 0 }} }, 2773 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2774 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 
}} }, 2775 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2776 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2777 {{ 3, false, 1, 0 }} }, 2778 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2779 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2780 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2781 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2782 {{ 3, false, 1, 0 }} }, 2783 }; 2784 2785 // Use a dynamically initialized static to sort the table exactly once on 2786 // first run. 2787 static const bool SortOnce = 2788 (std::sort(std::begin(Infos), std::end(Infos), 2789 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2790 return LHS.BuiltinID < RHS.BuiltinID; 2791 }), 2792 true); 2793 (void)SortOnce; 2794 2795 const BuiltinInfo *F = 2796 std::lower_bound(std::begin(Infos), std::end(Infos), BuiltinID, 2797 [](const BuiltinInfo &BI, unsigned BuiltinID) { 2798 return BI.BuiltinID < BuiltinID; 2799 }); 2800 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2801 return false; 2802 2803 bool Error = false; 2804 2805 for (const ArgInfo &A : F->Infos) { 2806 // Ignore empty ArgInfo elements. 2807 if (A.BitWidth == 0) 2808 continue; 2809 2810 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2811 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 2812 if (!A.Align) { 2813 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 2814 } else { 2815 unsigned M = 1 << A.Align; 2816 Min *= M; 2817 Max *= M; 2818 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) | 2819 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 2820 } 2821 } 2822 return Error; 2823 } 2824 2825 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 2826 CallExpr *TheCall) { 2827 return CheckHexagonBuiltinCpu(BuiltinID, TheCall) || 2828 CheckHexagonBuiltinArgument(BuiltinID, TheCall); 2829 } 2830 2831 2832 // CheckMipsBuiltinFunctionCall - Checks the constant value passed to the 2833 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 2834 // ordering for DSP is unspecified. MSA is ordered by the data format used 2835 // by the underlying instruction i.e., df/m, df/n and then by size. 2836 // 2837 // FIXME: The size tests here should instead be tablegen'd along with the 2838 // definitions from include/clang/Basic/BuiltinsMips.def. 2839 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 2840 // be too. 2841 bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2842 unsigned i = 0, l = 0, u = 0, m = 0; 2843 switch (BuiltinID) { 2844 default: return false; 2845 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 2846 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 2847 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 2848 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 2849 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 2850 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 2851 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 2852 // MSA instrinsics. Instructions (which the intrinsics maps to) which use the 2853 // df/m field. 
2854 // These intrinsics take an unsigned 3 bit immediate. 2855 case Mips::BI__builtin_msa_bclri_b: 2856 case Mips::BI__builtin_msa_bnegi_b: 2857 case Mips::BI__builtin_msa_bseti_b: 2858 case Mips::BI__builtin_msa_sat_s_b: 2859 case Mips::BI__builtin_msa_sat_u_b: 2860 case Mips::BI__builtin_msa_slli_b: 2861 case Mips::BI__builtin_msa_srai_b: 2862 case Mips::BI__builtin_msa_srari_b: 2863 case Mips::BI__builtin_msa_srli_b: 2864 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 2865 case Mips::BI__builtin_msa_binsli_b: 2866 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 2867 // These intrinsics take an unsigned 4 bit immediate. 2868 case Mips::BI__builtin_msa_bclri_h: 2869 case Mips::BI__builtin_msa_bnegi_h: 2870 case Mips::BI__builtin_msa_bseti_h: 2871 case Mips::BI__builtin_msa_sat_s_h: 2872 case Mips::BI__builtin_msa_sat_u_h: 2873 case Mips::BI__builtin_msa_slli_h: 2874 case Mips::BI__builtin_msa_srai_h: 2875 case Mips::BI__builtin_msa_srari_h: 2876 case Mips::BI__builtin_msa_srli_h: 2877 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 2878 case Mips::BI__builtin_msa_binsli_h: 2879 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 2880 // These intrinsics take an unsigned 5 bit immediate. 2881 // The first block of intrinsics actually have an unsigned 5 bit field, 2882 // not a df/n field. 
2883 case Mips::BI__builtin_msa_clei_u_b: 2884 case Mips::BI__builtin_msa_clei_u_h: 2885 case Mips::BI__builtin_msa_clei_u_w: 2886 case Mips::BI__builtin_msa_clei_u_d: 2887 case Mips::BI__builtin_msa_clti_u_b: 2888 case Mips::BI__builtin_msa_clti_u_h: 2889 case Mips::BI__builtin_msa_clti_u_w: 2890 case Mips::BI__builtin_msa_clti_u_d: 2891 case Mips::BI__builtin_msa_maxi_u_b: 2892 case Mips::BI__builtin_msa_maxi_u_h: 2893 case Mips::BI__builtin_msa_maxi_u_w: 2894 case Mips::BI__builtin_msa_maxi_u_d: 2895 case Mips::BI__builtin_msa_mini_u_b: 2896 case Mips::BI__builtin_msa_mini_u_h: 2897 case Mips::BI__builtin_msa_mini_u_w: 2898 case Mips::BI__builtin_msa_mini_u_d: 2899 case Mips::BI__builtin_msa_addvi_b: 2900 case Mips::BI__builtin_msa_addvi_h: 2901 case Mips::BI__builtin_msa_addvi_w: 2902 case Mips::BI__builtin_msa_addvi_d: 2903 case Mips::BI__builtin_msa_bclri_w: 2904 case Mips::BI__builtin_msa_bnegi_w: 2905 case Mips::BI__builtin_msa_bseti_w: 2906 case Mips::BI__builtin_msa_sat_s_w: 2907 case Mips::BI__builtin_msa_sat_u_w: 2908 case Mips::BI__builtin_msa_slli_w: 2909 case Mips::BI__builtin_msa_srai_w: 2910 case Mips::BI__builtin_msa_srari_w: 2911 case Mips::BI__builtin_msa_srli_w: 2912 case Mips::BI__builtin_msa_srlri_w: 2913 case Mips::BI__builtin_msa_subvi_b: 2914 case Mips::BI__builtin_msa_subvi_h: 2915 case Mips::BI__builtin_msa_subvi_w: 2916 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 2917 case Mips::BI__builtin_msa_binsli_w: 2918 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 2919 // These intrinsics take an unsigned 6 bit immediate. 
2920 case Mips::BI__builtin_msa_bclri_d: 2921 case Mips::BI__builtin_msa_bnegi_d: 2922 case Mips::BI__builtin_msa_bseti_d: 2923 case Mips::BI__builtin_msa_sat_s_d: 2924 case Mips::BI__builtin_msa_sat_u_d: 2925 case Mips::BI__builtin_msa_slli_d: 2926 case Mips::BI__builtin_msa_srai_d: 2927 case Mips::BI__builtin_msa_srari_d: 2928 case Mips::BI__builtin_msa_srli_d: 2929 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 2930 case Mips::BI__builtin_msa_binsli_d: 2931 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 2932 // These intrinsics take a signed 5 bit immediate. 2933 case Mips::BI__builtin_msa_ceqi_b: 2934 case Mips::BI__builtin_msa_ceqi_h: 2935 case Mips::BI__builtin_msa_ceqi_w: 2936 case Mips::BI__builtin_msa_ceqi_d: 2937 case Mips::BI__builtin_msa_clti_s_b: 2938 case Mips::BI__builtin_msa_clti_s_h: 2939 case Mips::BI__builtin_msa_clti_s_w: 2940 case Mips::BI__builtin_msa_clti_s_d: 2941 case Mips::BI__builtin_msa_clei_s_b: 2942 case Mips::BI__builtin_msa_clei_s_h: 2943 case Mips::BI__builtin_msa_clei_s_w: 2944 case Mips::BI__builtin_msa_clei_s_d: 2945 case Mips::BI__builtin_msa_maxi_s_b: 2946 case Mips::BI__builtin_msa_maxi_s_h: 2947 case Mips::BI__builtin_msa_maxi_s_w: 2948 case Mips::BI__builtin_msa_maxi_s_d: 2949 case Mips::BI__builtin_msa_mini_s_b: 2950 case Mips::BI__builtin_msa_mini_s_h: 2951 case Mips::BI__builtin_msa_mini_s_w: 2952 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 2953 // These intrinsics take an unsigned 8 bit immediate. 
2954 case Mips::BI__builtin_msa_andi_b: 2955 case Mips::BI__builtin_msa_nori_b: 2956 case Mips::BI__builtin_msa_ori_b: 2957 case Mips::BI__builtin_msa_shf_b: 2958 case Mips::BI__builtin_msa_shf_h: 2959 case Mips::BI__builtin_msa_shf_w: 2960 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 2961 case Mips::BI__builtin_msa_bseli_b: 2962 case Mips::BI__builtin_msa_bmnzi_b: 2963 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 2964 // df/n format 2965 // These intrinsics take an unsigned 4 bit immediate. 2966 case Mips::BI__builtin_msa_copy_s_b: 2967 case Mips::BI__builtin_msa_copy_u_b: 2968 case Mips::BI__builtin_msa_insve_b: 2969 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 2970 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 2971 // These intrinsics take an unsigned 3 bit immediate. 2972 case Mips::BI__builtin_msa_copy_s_h: 2973 case Mips::BI__builtin_msa_copy_u_h: 2974 case Mips::BI__builtin_msa_insve_h: 2975 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 2976 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 2977 // These intrinsics take an unsigned 2 bit immediate. 2978 case Mips::BI__builtin_msa_copy_s_w: 2979 case Mips::BI__builtin_msa_copy_u_w: 2980 case Mips::BI__builtin_msa_insve_w: 2981 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 2982 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 2983 // These intrinsics take an unsigned 1 bit immediate. 2984 case Mips::BI__builtin_msa_copy_s_d: 2985 case Mips::BI__builtin_msa_copy_u_d: 2986 case Mips::BI__builtin_msa_insve_d: 2987 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 2988 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 2989 // Memory offsets and immediate loads. 2990 // These intrinsics take a signed 10 bit immediate. 
2991 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 2992 case Mips::BI__builtin_msa_ldi_h: 2993 case Mips::BI__builtin_msa_ldi_w: 2994 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 2995 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 2996 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 2997 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 2998 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 2999 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3000 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3001 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3002 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3003 } 3004 3005 if (!m) 3006 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3007 3008 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3009 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3010 } 3011 3012 bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3013 unsigned i = 0, l = 0, u = 0; 3014 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3015 BuiltinID == PPC::BI__builtin_divdeu || 3016 BuiltinID == PPC::BI__builtin_bpermd; 3017 bool IsTarget64Bit = Context.getTargetInfo() 3018 .getTypeWidth(Context 3019 .getTargetInfo() 3020 .getIntPtrType()) == 64; 3021 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3022 BuiltinID == PPC::BI__builtin_divweu || 3023 BuiltinID == PPC::BI__builtin_divde || 3024 BuiltinID == PPC::BI__builtin_divdeu; 3025 3026 if (Is64BitBltin && !IsTarget64Bit) 3027 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3028 << TheCall->getSourceRange(); 3029 3030 if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) || 3031 (BuiltinID == PPC::BI__builtin_bpermd && 3032 
!Context.getTargetInfo().hasFeature("bpermd"))) 3033 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3034 << TheCall->getSourceRange(); 3035 3036 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3037 if (!Context.getTargetInfo().hasFeature("vsx")) 3038 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3039 << TheCall->getSourceRange(); 3040 return false; 3041 }; 3042 3043 switch (BuiltinID) { 3044 default: return false; 3045 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3046 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3047 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3048 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3049 case PPC::BI__builtin_tbegin: 3050 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3051 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3052 case PPC::BI__builtin_tabortwc: 3053 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3054 case PPC::BI__builtin_tabortwci: 3055 case PPC::BI__builtin_tabortdci: 3056 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3057 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3058 case PPC::BI__builtin_vsx_xxpermdi: 3059 case PPC::BI__builtin_vsx_xxsldwi: 3060 return SemaBuiltinVSX(TheCall); 3061 case PPC::BI__builtin_unpack_vector_int128: 3062 return SemaVSXCheck(TheCall) || 3063 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3064 case PPC::BI__builtin_pack_vector_int128: 3065 return SemaVSXCheck(TheCall); 3066 } 3067 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3068 } 3069 3070 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3071 CallExpr *TheCall) { 3072 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3073 Expr *Arg = TheCall->getArg(0); 3074 llvm::APSInt AbortCode(32); 3075 if (Arg->isIntegerConstantExpr(AbortCode, Context) && 3076 AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256) 3077 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3078 << 
Arg->getSourceRange(); 3079 } 3080 3081 // For intrinsics which take an immediate value as part of the instruction, 3082 // range check them here. 3083 unsigned i = 0, l = 0, u = 0; 3084 switch (BuiltinID) { 3085 default: return false; 3086 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3087 case SystemZ::BI__builtin_s390_verimb: 3088 case SystemZ::BI__builtin_s390_verimh: 3089 case SystemZ::BI__builtin_s390_verimf: 3090 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3091 case SystemZ::BI__builtin_s390_vfaeb: 3092 case SystemZ::BI__builtin_s390_vfaeh: 3093 case SystemZ::BI__builtin_s390_vfaef: 3094 case SystemZ::BI__builtin_s390_vfaebs: 3095 case SystemZ::BI__builtin_s390_vfaehs: 3096 case SystemZ::BI__builtin_s390_vfaefs: 3097 case SystemZ::BI__builtin_s390_vfaezb: 3098 case SystemZ::BI__builtin_s390_vfaezh: 3099 case SystemZ::BI__builtin_s390_vfaezf: 3100 case SystemZ::BI__builtin_s390_vfaezbs: 3101 case SystemZ::BI__builtin_s390_vfaezhs: 3102 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3103 case SystemZ::BI__builtin_s390_vfisb: 3104 case SystemZ::BI__builtin_s390_vfidb: 3105 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3106 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3107 case SystemZ::BI__builtin_s390_vftcisb: 3108 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3109 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3110 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3111 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3112 case SystemZ::BI__builtin_s390_vstrcb: 3113 case SystemZ::BI__builtin_s390_vstrch: 3114 case SystemZ::BI__builtin_s390_vstrcf: 3115 case SystemZ::BI__builtin_s390_vstrczb: 3116 case SystemZ::BI__builtin_s390_vstrczh: 3117 case SystemZ::BI__builtin_s390_vstrczf: 3118 case SystemZ::BI__builtin_s390_vstrcbs: 3119 case SystemZ::BI__builtin_s390_vstrchs: 3120 case 
SystemZ::BI__builtin_s390_vstrcfs: 3121 case SystemZ::BI__builtin_s390_vstrczbs: 3122 case SystemZ::BI__builtin_s390_vstrczhs: 3123 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3124 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3125 case SystemZ::BI__builtin_s390_vfminsb: 3126 case SystemZ::BI__builtin_s390_vfmaxsb: 3127 case SystemZ::BI__builtin_s390_vfmindb: 3128 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3129 } 3130 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3131 } 3132 3133 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3134 /// This checks that the target supports __builtin_cpu_supports and 3135 /// that the string argument is constant and valid. 3136 static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) { 3137 Expr *Arg = TheCall->getArg(0); 3138 3139 // Check if the argument is a string literal. 3140 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3141 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3142 << Arg->getSourceRange(); 3143 3144 // Check the contents of the string. 3145 StringRef Feature = 3146 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3147 if (!S.Context.getTargetInfo().validateCpuSupports(Feature)) 3148 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3149 << Arg->getSourceRange(); 3150 return false; 3151 } 3152 3153 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3154 /// This checks that the target supports __builtin_cpu_is and 3155 /// that the string argument is constant and valid. 3156 static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) { 3157 Expr *Arg = TheCall->getArg(0); 3158 3159 // Check if the argument is a string literal. 3160 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3161 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3162 << Arg->getSourceRange(); 3163 3164 // Check the contents of the string. 
3165 StringRef Feature = 3166 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3167 if (!S.Context.getTargetInfo().validateCpuIs(Feature)) 3168 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3169 << Arg->getSourceRange(); 3170 return false; 3171 } 3172 3173 // Check if the rounding mode is legal. 3174 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3175 // Indicates if this instruction has rounding control or just SAE. 3176 bool HasRC = false; 3177 3178 unsigned ArgNum = 0; 3179 switch (BuiltinID) { 3180 default: 3181 return false; 3182 case X86::BI__builtin_ia32_vcvttsd2si32: 3183 case X86::BI__builtin_ia32_vcvttsd2si64: 3184 case X86::BI__builtin_ia32_vcvttsd2usi32: 3185 case X86::BI__builtin_ia32_vcvttsd2usi64: 3186 case X86::BI__builtin_ia32_vcvttss2si32: 3187 case X86::BI__builtin_ia32_vcvttss2si64: 3188 case X86::BI__builtin_ia32_vcvttss2usi32: 3189 case X86::BI__builtin_ia32_vcvttss2usi64: 3190 ArgNum = 1; 3191 break; 3192 case X86::BI__builtin_ia32_maxpd512: 3193 case X86::BI__builtin_ia32_maxps512: 3194 case X86::BI__builtin_ia32_minpd512: 3195 case X86::BI__builtin_ia32_minps512: 3196 ArgNum = 2; 3197 break; 3198 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3199 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3200 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3201 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3202 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3203 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3204 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3205 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3206 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3207 case X86::BI__builtin_ia32_exp2pd_mask: 3208 case X86::BI__builtin_ia32_exp2ps_mask: 3209 case X86::BI__builtin_ia32_getexppd512_mask: 3210 case X86::BI__builtin_ia32_getexpps512_mask: 3211 case X86::BI__builtin_ia32_rcp28pd_mask: 3212 case X86::BI__builtin_ia32_rcp28ps_mask: 3213 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3214 
case X86::BI__builtin_ia32_rsqrt28ps_mask: 3215 case X86::BI__builtin_ia32_vcomisd: 3216 case X86::BI__builtin_ia32_vcomiss: 3217 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3218 ArgNum = 3; 3219 break; 3220 case X86::BI__builtin_ia32_cmppd512_mask: 3221 case X86::BI__builtin_ia32_cmpps512_mask: 3222 case X86::BI__builtin_ia32_cmpsd_mask: 3223 case X86::BI__builtin_ia32_cmpss_mask: 3224 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3225 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3226 case X86::BI__builtin_ia32_getexpss128_round_mask: 3227 case X86::BI__builtin_ia32_maxsd_round_mask: 3228 case X86::BI__builtin_ia32_maxss_round_mask: 3229 case X86::BI__builtin_ia32_minsd_round_mask: 3230 case X86::BI__builtin_ia32_minss_round_mask: 3231 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3232 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3233 case X86::BI__builtin_ia32_reducepd512_mask: 3234 case X86::BI__builtin_ia32_reduceps512_mask: 3235 case X86::BI__builtin_ia32_rndscalepd_mask: 3236 case X86::BI__builtin_ia32_rndscaleps_mask: 3237 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3238 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3239 ArgNum = 4; 3240 break; 3241 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3242 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3243 case X86::BI__builtin_ia32_fixupimmps512_mask: 3244 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3245 case X86::BI__builtin_ia32_fixupimmsd_mask: 3246 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3247 case X86::BI__builtin_ia32_fixupimmss_mask: 3248 case X86::BI__builtin_ia32_fixupimmss_maskz: 3249 case X86::BI__builtin_ia32_rangepd512_mask: 3250 case X86::BI__builtin_ia32_rangeps512_mask: 3251 case X86::BI__builtin_ia32_rangesd128_round_mask: 3252 case X86::BI__builtin_ia32_rangess128_round_mask: 3253 case X86::BI__builtin_ia32_reducesd_mask: 3254 case X86::BI__builtin_ia32_reducess_mask: 3255 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3256 case 
X86::BI__builtin_ia32_rndscaless_round_mask: 3257 ArgNum = 5; 3258 break; 3259 case X86::BI__builtin_ia32_vcvtsd2si64: 3260 case X86::BI__builtin_ia32_vcvtsd2si32: 3261 case X86::BI__builtin_ia32_vcvtsd2usi32: 3262 case X86::BI__builtin_ia32_vcvtsd2usi64: 3263 case X86::BI__builtin_ia32_vcvtss2si32: 3264 case X86::BI__builtin_ia32_vcvtss2si64: 3265 case X86::BI__builtin_ia32_vcvtss2usi32: 3266 case X86::BI__builtin_ia32_vcvtss2usi64: 3267 case X86::BI__builtin_ia32_sqrtpd512: 3268 case X86::BI__builtin_ia32_sqrtps512: 3269 ArgNum = 1; 3270 HasRC = true; 3271 break; 3272 case X86::BI__builtin_ia32_addpd512: 3273 case X86::BI__builtin_ia32_addps512: 3274 case X86::BI__builtin_ia32_divpd512: 3275 case X86::BI__builtin_ia32_divps512: 3276 case X86::BI__builtin_ia32_mulpd512: 3277 case X86::BI__builtin_ia32_mulps512: 3278 case X86::BI__builtin_ia32_subpd512: 3279 case X86::BI__builtin_ia32_subps512: 3280 case X86::BI__builtin_ia32_cvtsi2sd64: 3281 case X86::BI__builtin_ia32_cvtsi2ss32: 3282 case X86::BI__builtin_ia32_cvtsi2ss64: 3283 case X86::BI__builtin_ia32_cvtusi2sd64: 3284 case X86::BI__builtin_ia32_cvtusi2ss32: 3285 case X86::BI__builtin_ia32_cvtusi2ss64: 3286 ArgNum = 2; 3287 HasRC = true; 3288 break; 3289 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3290 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3291 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3292 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3293 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3294 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3295 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3296 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3297 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3298 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3299 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3300 ArgNum = 3; 3301 HasRC = true; 3302 break; 3303 case X86::BI__builtin_ia32_addss_round_mask: 3304 case X86::BI__builtin_ia32_addsd_round_mask: 3305 case X86::BI__builtin_ia32_divss_round_mask: 3306 case 
X86::BI__builtin_ia32_divsd_round_mask: 3307 case X86::BI__builtin_ia32_mulss_round_mask: 3308 case X86::BI__builtin_ia32_mulsd_round_mask: 3309 case X86::BI__builtin_ia32_subss_round_mask: 3310 case X86::BI__builtin_ia32_subsd_round_mask: 3311 case X86::BI__builtin_ia32_scalefpd512_mask: 3312 case X86::BI__builtin_ia32_scalefps512_mask: 3313 case X86::BI__builtin_ia32_scalefsd_round_mask: 3314 case X86::BI__builtin_ia32_scalefss_round_mask: 3315 case X86::BI__builtin_ia32_getmantpd512_mask: 3316 case X86::BI__builtin_ia32_getmantps512_mask: 3317 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 3318 case X86::BI__builtin_ia32_sqrtsd_round_mask: 3319 case X86::BI__builtin_ia32_sqrtss_round_mask: 3320 case X86::BI__builtin_ia32_vfmaddsd3_mask: 3321 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 3322 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 3323 case X86::BI__builtin_ia32_vfmaddss3_mask: 3324 case X86::BI__builtin_ia32_vfmaddss3_maskz: 3325 case X86::BI__builtin_ia32_vfmaddss3_mask3: 3326 case X86::BI__builtin_ia32_vfmaddpd512_mask: 3327 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 3328 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 3329 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 3330 case X86::BI__builtin_ia32_vfmaddps512_mask: 3331 case X86::BI__builtin_ia32_vfmaddps512_maskz: 3332 case X86::BI__builtin_ia32_vfmaddps512_mask3: 3333 case X86::BI__builtin_ia32_vfmsubps512_mask3: 3334 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 3335 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 3336 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 3337 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 3338 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 3339 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 3340 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 3341 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 3342 ArgNum = 4; 3343 HasRC = true; 3344 break; 3345 case X86::BI__builtin_ia32_getmantsd_round_mask: 3346 case X86::BI__builtin_ia32_getmantss_round_mask: 3347 ArgNum 
= 5; 3348 HasRC = true; 3349 break; 3350 } 3351 3352 llvm::APSInt Result; 3353 3354 // We can't check the value of a dependent argument. 3355 Expr *Arg = TheCall->getArg(ArgNum); 3356 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3357 return false; 3358 3359 // Check constant-ness first. 3360 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3361 return true; 3362 3363 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 3364 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 3365 // combined with ROUND_NO_EXC. 3366 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 3367 Result == 8/*ROUND_NO_EXC*/ || 3368 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 3369 return false; 3370 3371 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 3372 << Arg->getSourceRange(); 3373 } 3374 3375 // Check if the gather/scatter scale is legal. 3376 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3377 CallExpr *TheCall) { 3378 unsigned ArgNum = 0; 3379 switch (BuiltinID) { 3380 default: 3381 return false; 3382 case X86::BI__builtin_ia32_gatherpfdpd: 3383 case X86::BI__builtin_ia32_gatherpfdps: 3384 case X86::BI__builtin_ia32_gatherpfqpd: 3385 case X86::BI__builtin_ia32_gatherpfqps: 3386 case X86::BI__builtin_ia32_scatterpfdpd: 3387 case X86::BI__builtin_ia32_scatterpfdps: 3388 case X86::BI__builtin_ia32_scatterpfqpd: 3389 case X86::BI__builtin_ia32_scatterpfqps: 3390 ArgNum = 3; 3391 break; 3392 case X86::BI__builtin_ia32_gatherd_pd: 3393 case X86::BI__builtin_ia32_gatherd_pd256: 3394 case X86::BI__builtin_ia32_gatherq_pd: 3395 case X86::BI__builtin_ia32_gatherq_pd256: 3396 case X86::BI__builtin_ia32_gatherd_ps: 3397 case X86::BI__builtin_ia32_gatherd_ps256: 3398 case X86::BI__builtin_ia32_gatherq_ps: 3399 case X86::BI__builtin_ia32_gatherq_ps256: 3400 case X86::BI__builtin_ia32_gatherd_q: 3401 case X86::BI__builtin_ia32_gatherd_q256: 3402 case 
X86::BI__builtin_ia32_gatherq_q: 3403 case X86::BI__builtin_ia32_gatherq_q256: 3404 case X86::BI__builtin_ia32_gatherd_d: 3405 case X86::BI__builtin_ia32_gatherd_d256: 3406 case X86::BI__builtin_ia32_gatherq_d: 3407 case X86::BI__builtin_ia32_gatherq_d256: 3408 case X86::BI__builtin_ia32_gather3div2df: 3409 case X86::BI__builtin_ia32_gather3div2di: 3410 case X86::BI__builtin_ia32_gather3div4df: 3411 case X86::BI__builtin_ia32_gather3div4di: 3412 case X86::BI__builtin_ia32_gather3div4sf: 3413 case X86::BI__builtin_ia32_gather3div4si: 3414 case X86::BI__builtin_ia32_gather3div8sf: 3415 case X86::BI__builtin_ia32_gather3div8si: 3416 case X86::BI__builtin_ia32_gather3siv2df: 3417 case X86::BI__builtin_ia32_gather3siv2di: 3418 case X86::BI__builtin_ia32_gather3siv4df: 3419 case X86::BI__builtin_ia32_gather3siv4di: 3420 case X86::BI__builtin_ia32_gather3siv4sf: 3421 case X86::BI__builtin_ia32_gather3siv4si: 3422 case X86::BI__builtin_ia32_gather3siv8sf: 3423 case X86::BI__builtin_ia32_gather3siv8si: 3424 case X86::BI__builtin_ia32_gathersiv8df: 3425 case X86::BI__builtin_ia32_gathersiv16sf: 3426 case X86::BI__builtin_ia32_gatherdiv8df: 3427 case X86::BI__builtin_ia32_gatherdiv16sf: 3428 case X86::BI__builtin_ia32_gathersiv8di: 3429 case X86::BI__builtin_ia32_gathersiv16si: 3430 case X86::BI__builtin_ia32_gatherdiv8di: 3431 case X86::BI__builtin_ia32_gatherdiv16si: 3432 case X86::BI__builtin_ia32_scatterdiv2df: 3433 case X86::BI__builtin_ia32_scatterdiv2di: 3434 case X86::BI__builtin_ia32_scatterdiv4df: 3435 case X86::BI__builtin_ia32_scatterdiv4di: 3436 case X86::BI__builtin_ia32_scatterdiv4sf: 3437 case X86::BI__builtin_ia32_scatterdiv4si: 3438 case X86::BI__builtin_ia32_scatterdiv8sf: 3439 case X86::BI__builtin_ia32_scatterdiv8si: 3440 case X86::BI__builtin_ia32_scattersiv2df: 3441 case X86::BI__builtin_ia32_scattersiv2di: 3442 case X86::BI__builtin_ia32_scattersiv4df: 3443 case X86::BI__builtin_ia32_scattersiv4di: 3444 case X86::BI__builtin_ia32_scattersiv4sf: 3445 
case X86::BI__builtin_ia32_scattersiv4si: 3446 case X86::BI__builtin_ia32_scattersiv8sf: 3447 case X86::BI__builtin_ia32_scattersiv8si: 3448 case X86::BI__builtin_ia32_scattersiv8df: 3449 case X86::BI__builtin_ia32_scattersiv16sf: 3450 case X86::BI__builtin_ia32_scatterdiv8df: 3451 case X86::BI__builtin_ia32_scatterdiv16sf: 3452 case X86::BI__builtin_ia32_scattersiv8di: 3453 case X86::BI__builtin_ia32_scattersiv16si: 3454 case X86::BI__builtin_ia32_scatterdiv8di: 3455 case X86::BI__builtin_ia32_scatterdiv16si: 3456 ArgNum = 4; 3457 break; 3458 } 3459 3460 llvm::APSInt Result; 3461 3462 // We can't check the value of a dependent argument. 3463 Expr *Arg = TheCall->getArg(ArgNum); 3464 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3465 return false; 3466 3467 // Check constant-ness first. 3468 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3469 return true; 3470 3471 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 3472 return false; 3473 3474 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 3475 << Arg->getSourceRange(); 3476 } 3477 3478 static bool isX86_32Builtin(unsigned BuiltinID) { 3479 // These builtins only work on x86-32 targets. 3480 switch (BuiltinID) { 3481 case X86::BI__builtin_ia32_readeflags_u32: 3482 case X86::BI__builtin_ia32_writeeflags_u32: 3483 return true; 3484 } 3485 3486 return false; 3487 } 3488 3489 bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3490 if (BuiltinID == X86::BI__builtin_cpu_supports) 3491 return SemaBuiltinCpuSupports(*this, TheCall); 3492 3493 if (BuiltinID == X86::BI__builtin_cpu_is) 3494 return SemaBuiltinCpuIs(*this, TheCall); 3495 3496 // Check for 32-bit only builtins on a 64-bit target. 
3497 const llvm::Triple &TT = Context.getTargetInfo().getTriple(); 3498 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 3499 return Diag(TheCall->getCallee()->getBeginLoc(), 3500 diag::err_32_bit_builtin_64_bit_tgt); 3501 3502 // If the intrinsic has rounding or SAE make sure its valid. 3503 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 3504 return true; 3505 3506 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 3507 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 3508 return true; 3509 3510 // For intrinsics which take an immediate value as part of the instruction, 3511 // range check them here. 3512 int i = 0, l = 0, u = 0; 3513 switch (BuiltinID) { 3514 default: 3515 return false; 3516 case X86::BI__builtin_ia32_vec_ext_v2si: 3517 case X86::BI__builtin_ia32_vec_ext_v2di: 3518 case X86::BI__builtin_ia32_vextractf128_pd256: 3519 case X86::BI__builtin_ia32_vextractf128_ps256: 3520 case X86::BI__builtin_ia32_vextractf128_si256: 3521 case X86::BI__builtin_ia32_extract128i256: 3522 case X86::BI__builtin_ia32_extractf64x4_mask: 3523 case X86::BI__builtin_ia32_extracti64x4_mask: 3524 case X86::BI__builtin_ia32_extractf32x8_mask: 3525 case X86::BI__builtin_ia32_extracti32x8_mask: 3526 case X86::BI__builtin_ia32_extractf64x2_256_mask: 3527 case X86::BI__builtin_ia32_extracti64x2_256_mask: 3528 case X86::BI__builtin_ia32_extractf32x4_256_mask: 3529 case X86::BI__builtin_ia32_extracti32x4_256_mask: 3530 i = 1; l = 0; u = 1; 3531 break; 3532 case X86::BI__builtin_ia32_vec_set_v2di: 3533 case X86::BI__builtin_ia32_vinsertf128_pd256: 3534 case X86::BI__builtin_ia32_vinsertf128_ps256: 3535 case X86::BI__builtin_ia32_vinsertf128_si256: 3536 case X86::BI__builtin_ia32_insert128i256: 3537 case X86::BI__builtin_ia32_insertf32x8: 3538 case X86::BI__builtin_ia32_inserti32x8: 3539 case X86::BI__builtin_ia32_insertf64x4: 3540 case X86::BI__builtin_ia32_inserti64x4: 3541 case X86::BI__builtin_ia32_insertf64x2_256: 3542 
case X86::BI__builtin_ia32_inserti64x2_256: 3543 case X86::BI__builtin_ia32_insertf32x4_256: 3544 case X86::BI__builtin_ia32_inserti32x4_256: 3545 i = 2; l = 0; u = 1; 3546 break; 3547 case X86::BI__builtin_ia32_vpermilpd: 3548 case X86::BI__builtin_ia32_vec_ext_v4hi: 3549 case X86::BI__builtin_ia32_vec_ext_v4si: 3550 case X86::BI__builtin_ia32_vec_ext_v4sf: 3551 case X86::BI__builtin_ia32_vec_ext_v4di: 3552 case X86::BI__builtin_ia32_extractf32x4_mask: 3553 case X86::BI__builtin_ia32_extracti32x4_mask: 3554 case X86::BI__builtin_ia32_extractf64x2_512_mask: 3555 case X86::BI__builtin_ia32_extracti64x2_512_mask: 3556 i = 1; l = 0; u = 3; 3557 break; 3558 case X86::BI_mm_prefetch: 3559 case X86::BI__builtin_ia32_vec_ext_v8hi: 3560 case X86::BI__builtin_ia32_vec_ext_v8si: 3561 i = 1; l = 0; u = 7; 3562 break; 3563 case X86::BI__builtin_ia32_sha1rnds4: 3564 case X86::BI__builtin_ia32_blendpd: 3565 case X86::BI__builtin_ia32_shufpd: 3566 case X86::BI__builtin_ia32_vec_set_v4hi: 3567 case X86::BI__builtin_ia32_vec_set_v4si: 3568 case X86::BI__builtin_ia32_vec_set_v4di: 3569 case X86::BI__builtin_ia32_shuf_f32x4_256: 3570 case X86::BI__builtin_ia32_shuf_f64x2_256: 3571 case X86::BI__builtin_ia32_shuf_i32x4_256: 3572 case X86::BI__builtin_ia32_shuf_i64x2_256: 3573 case X86::BI__builtin_ia32_insertf64x2_512: 3574 case X86::BI__builtin_ia32_inserti64x2_512: 3575 case X86::BI__builtin_ia32_insertf32x4: 3576 case X86::BI__builtin_ia32_inserti32x4: 3577 i = 2; l = 0; u = 3; 3578 break; 3579 case X86::BI__builtin_ia32_vpermil2pd: 3580 case X86::BI__builtin_ia32_vpermil2pd256: 3581 case X86::BI__builtin_ia32_vpermil2ps: 3582 case X86::BI__builtin_ia32_vpermil2ps256: 3583 i = 3; l = 0; u = 3; 3584 break; 3585 case X86::BI__builtin_ia32_cmpb128_mask: 3586 case X86::BI__builtin_ia32_cmpw128_mask: 3587 case X86::BI__builtin_ia32_cmpd128_mask: 3588 case X86::BI__builtin_ia32_cmpq128_mask: 3589 case X86::BI__builtin_ia32_cmpb256_mask: 3590 case X86::BI__builtin_ia32_cmpw256_mask: 3591 
case X86::BI__builtin_ia32_cmpd256_mask: 3592 case X86::BI__builtin_ia32_cmpq256_mask: 3593 case X86::BI__builtin_ia32_cmpb512_mask: 3594 case X86::BI__builtin_ia32_cmpw512_mask: 3595 case X86::BI__builtin_ia32_cmpd512_mask: 3596 case X86::BI__builtin_ia32_cmpq512_mask: 3597 case X86::BI__builtin_ia32_ucmpb128_mask: 3598 case X86::BI__builtin_ia32_ucmpw128_mask: 3599 case X86::BI__builtin_ia32_ucmpd128_mask: 3600 case X86::BI__builtin_ia32_ucmpq128_mask: 3601 case X86::BI__builtin_ia32_ucmpb256_mask: 3602 case X86::BI__builtin_ia32_ucmpw256_mask: 3603 case X86::BI__builtin_ia32_ucmpd256_mask: 3604 case X86::BI__builtin_ia32_ucmpq256_mask: 3605 case X86::BI__builtin_ia32_ucmpb512_mask: 3606 case X86::BI__builtin_ia32_ucmpw512_mask: 3607 case X86::BI__builtin_ia32_ucmpd512_mask: 3608 case X86::BI__builtin_ia32_ucmpq512_mask: 3609 case X86::BI__builtin_ia32_vpcomub: 3610 case X86::BI__builtin_ia32_vpcomuw: 3611 case X86::BI__builtin_ia32_vpcomud: 3612 case X86::BI__builtin_ia32_vpcomuq: 3613 case X86::BI__builtin_ia32_vpcomb: 3614 case X86::BI__builtin_ia32_vpcomw: 3615 case X86::BI__builtin_ia32_vpcomd: 3616 case X86::BI__builtin_ia32_vpcomq: 3617 case X86::BI__builtin_ia32_vec_set_v8hi: 3618 case X86::BI__builtin_ia32_vec_set_v8si: 3619 i = 2; l = 0; u = 7; 3620 break; 3621 case X86::BI__builtin_ia32_vpermilpd256: 3622 case X86::BI__builtin_ia32_roundps: 3623 case X86::BI__builtin_ia32_roundpd: 3624 case X86::BI__builtin_ia32_roundps256: 3625 case X86::BI__builtin_ia32_roundpd256: 3626 case X86::BI__builtin_ia32_getmantpd128_mask: 3627 case X86::BI__builtin_ia32_getmantpd256_mask: 3628 case X86::BI__builtin_ia32_getmantps128_mask: 3629 case X86::BI__builtin_ia32_getmantps256_mask: 3630 case X86::BI__builtin_ia32_getmantpd512_mask: 3631 case X86::BI__builtin_ia32_getmantps512_mask: 3632 case X86::BI__builtin_ia32_vec_ext_v16qi: 3633 case X86::BI__builtin_ia32_vec_ext_v16hi: 3634 i = 1; l = 0; u = 15; 3635 break; 3636 case X86::BI__builtin_ia32_pblendd128: 3637 case 
X86::BI__builtin_ia32_blendps: 3638 case X86::BI__builtin_ia32_blendpd256: 3639 case X86::BI__builtin_ia32_shufpd256: 3640 case X86::BI__builtin_ia32_roundss: 3641 case X86::BI__builtin_ia32_roundsd: 3642 case X86::BI__builtin_ia32_rangepd128_mask: 3643 case X86::BI__builtin_ia32_rangepd256_mask: 3644 case X86::BI__builtin_ia32_rangepd512_mask: 3645 case X86::BI__builtin_ia32_rangeps128_mask: 3646 case X86::BI__builtin_ia32_rangeps256_mask: 3647 case X86::BI__builtin_ia32_rangeps512_mask: 3648 case X86::BI__builtin_ia32_getmantsd_round_mask: 3649 case X86::BI__builtin_ia32_getmantss_round_mask: 3650 case X86::BI__builtin_ia32_vec_set_v16qi: 3651 case X86::BI__builtin_ia32_vec_set_v16hi: 3652 i = 2; l = 0; u = 15; 3653 break; 3654 case X86::BI__builtin_ia32_vec_ext_v32qi: 3655 i = 1; l = 0; u = 31; 3656 break; 3657 case X86::BI__builtin_ia32_cmpps: 3658 case X86::BI__builtin_ia32_cmpss: 3659 case X86::BI__builtin_ia32_cmppd: 3660 case X86::BI__builtin_ia32_cmpsd: 3661 case X86::BI__builtin_ia32_cmpps256: 3662 case X86::BI__builtin_ia32_cmppd256: 3663 case X86::BI__builtin_ia32_cmpps128_mask: 3664 case X86::BI__builtin_ia32_cmppd128_mask: 3665 case X86::BI__builtin_ia32_cmpps256_mask: 3666 case X86::BI__builtin_ia32_cmppd256_mask: 3667 case X86::BI__builtin_ia32_cmpps512_mask: 3668 case X86::BI__builtin_ia32_cmppd512_mask: 3669 case X86::BI__builtin_ia32_cmpsd_mask: 3670 case X86::BI__builtin_ia32_cmpss_mask: 3671 case X86::BI__builtin_ia32_vec_set_v32qi: 3672 i = 2; l = 0; u = 31; 3673 break; 3674 case X86::BI__builtin_ia32_permdf256: 3675 case X86::BI__builtin_ia32_permdi256: 3676 case X86::BI__builtin_ia32_permdf512: 3677 case X86::BI__builtin_ia32_permdi512: 3678 case X86::BI__builtin_ia32_vpermilps: 3679 case X86::BI__builtin_ia32_vpermilps256: 3680 case X86::BI__builtin_ia32_vpermilpd512: 3681 case X86::BI__builtin_ia32_vpermilps512: 3682 case X86::BI__builtin_ia32_pshufd: 3683 case X86::BI__builtin_ia32_pshufd256: 3684 case X86::BI__builtin_ia32_pshufd512: 
3685 case X86::BI__builtin_ia32_pshufhw: 3686 case X86::BI__builtin_ia32_pshufhw256: 3687 case X86::BI__builtin_ia32_pshufhw512: 3688 case X86::BI__builtin_ia32_pshuflw: 3689 case X86::BI__builtin_ia32_pshuflw256: 3690 case X86::BI__builtin_ia32_pshuflw512: 3691 case X86::BI__builtin_ia32_vcvtps2ph: 3692 case X86::BI__builtin_ia32_vcvtps2ph_mask: 3693 case X86::BI__builtin_ia32_vcvtps2ph256: 3694 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 3695 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 3696 case X86::BI__builtin_ia32_rndscaleps_128_mask: 3697 case X86::BI__builtin_ia32_rndscalepd_128_mask: 3698 case X86::BI__builtin_ia32_rndscaleps_256_mask: 3699 case X86::BI__builtin_ia32_rndscalepd_256_mask: 3700 case X86::BI__builtin_ia32_rndscaleps_mask: 3701 case X86::BI__builtin_ia32_rndscalepd_mask: 3702 case X86::BI__builtin_ia32_reducepd128_mask: 3703 case X86::BI__builtin_ia32_reducepd256_mask: 3704 case X86::BI__builtin_ia32_reducepd512_mask: 3705 case X86::BI__builtin_ia32_reduceps128_mask: 3706 case X86::BI__builtin_ia32_reduceps256_mask: 3707 case X86::BI__builtin_ia32_reduceps512_mask: 3708 case X86::BI__builtin_ia32_prold512: 3709 case X86::BI__builtin_ia32_prolq512: 3710 case X86::BI__builtin_ia32_prold128: 3711 case X86::BI__builtin_ia32_prold256: 3712 case X86::BI__builtin_ia32_prolq128: 3713 case X86::BI__builtin_ia32_prolq256: 3714 case X86::BI__builtin_ia32_prord512: 3715 case X86::BI__builtin_ia32_prorq512: 3716 case X86::BI__builtin_ia32_prord128: 3717 case X86::BI__builtin_ia32_prord256: 3718 case X86::BI__builtin_ia32_prorq128: 3719 case X86::BI__builtin_ia32_prorq256: 3720 case X86::BI__builtin_ia32_fpclasspd128_mask: 3721 case X86::BI__builtin_ia32_fpclasspd256_mask: 3722 case X86::BI__builtin_ia32_fpclassps128_mask: 3723 case X86::BI__builtin_ia32_fpclassps256_mask: 3724 case X86::BI__builtin_ia32_fpclassps512_mask: 3725 case X86::BI__builtin_ia32_fpclasspd512_mask: 3726 case X86::BI__builtin_ia32_fpclasssd_mask: 3727 case 
X86::BI__builtin_ia32_fpclassss_mask: 3728 case X86::BI__builtin_ia32_pslldqi128_byteshift: 3729 case X86::BI__builtin_ia32_pslldqi256_byteshift: 3730 case X86::BI__builtin_ia32_pslldqi512_byteshift: 3731 case X86::BI__builtin_ia32_psrldqi128_byteshift: 3732 case X86::BI__builtin_ia32_psrldqi256_byteshift: 3733 case X86::BI__builtin_ia32_psrldqi512_byteshift: 3734 case X86::BI__builtin_ia32_kshiftliqi: 3735 case X86::BI__builtin_ia32_kshiftlihi: 3736 case X86::BI__builtin_ia32_kshiftlisi: 3737 case X86::BI__builtin_ia32_kshiftlidi: 3738 case X86::BI__builtin_ia32_kshiftriqi: 3739 case X86::BI__builtin_ia32_kshiftrihi: 3740 case X86::BI__builtin_ia32_kshiftrisi: 3741 case X86::BI__builtin_ia32_kshiftridi: 3742 i = 1; l = 0; u = 255; 3743 break; 3744 case X86::BI__builtin_ia32_vperm2f128_pd256: 3745 case X86::BI__builtin_ia32_vperm2f128_ps256: 3746 case X86::BI__builtin_ia32_vperm2f128_si256: 3747 case X86::BI__builtin_ia32_permti256: 3748 case X86::BI__builtin_ia32_pblendw128: 3749 case X86::BI__builtin_ia32_pblendw256: 3750 case X86::BI__builtin_ia32_blendps256: 3751 case X86::BI__builtin_ia32_pblendd256: 3752 case X86::BI__builtin_ia32_palignr128: 3753 case X86::BI__builtin_ia32_palignr256: 3754 case X86::BI__builtin_ia32_palignr512: 3755 case X86::BI__builtin_ia32_alignq512: 3756 case X86::BI__builtin_ia32_alignd512: 3757 case X86::BI__builtin_ia32_alignd128: 3758 case X86::BI__builtin_ia32_alignd256: 3759 case X86::BI__builtin_ia32_alignq128: 3760 case X86::BI__builtin_ia32_alignq256: 3761 case X86::BI__builtin_ia32_vcomisd: 3762 case X86::BI__builtin_ia32_vcomiss: 3763 case X86::BI__builtin_ia32_shuf_f32x4: 3764 case X86::BI__builtin_ia32_shuf_f64x2: 3765 case X86::BI__builtin_ia32_shuf_i32x4: 3766 case X86::BI__builtin_ia32_shuf_i64x2: 3767 case X86::BI__builtin_ia32_shufpd512: 3768 case X86::BI__builtin_ia32_shufps: 3769 case X86::BI__builtin_ia32_shufps256: 3770 case X86::BI__builtin_ia32_shufps512: 3771 case X86::BI__builtin_ia32_dbpsadbw128: 3772 case 
X86::BI__builtin_ia32_dbpsadbw256: 3773 case X86::BI__builtin_ia32_dbpsadbw512: 3774 case X86::BI__builtin_ia32_vpshldd128: 3775 case X86::BI__builtin_ia32_vpshldd256: 3776 case X86::BI__builtin_ia32_vpshldd512: 3777 case X86::BI__builtin_ia32_vpshldq128: 3778 case X86::BI__builtin_ia32_vpshldq256: 3779 case X86::BI__builtin_ia32_vpshldq512: 3780 case X86::BI__builtin_ia32_vpshldw128: 3781 case X86::BI__builtin_ia32_vpshldw256: 3782 case X86::BI__builtin_ia32_vpshldw512: 3783 case X86::BI__builtin_ia32_vpshrdd128: 3784 case X86::BI__builtin_ia32_vpshrdd256: 3785 case X86::BI__builtin_ia32_vpshrdd512: 3786 case X86::BI__builtin_ia32_vpshrdq128: 3787 case X86::BI__builtin_ia32_vpshrdq256: 3788 case X86::BI__builtin_ia32_vpshrdq512: 3789 case X86::BI__builtin_ia32_vpshrdw128: 3790 case X86::BI__builtin_ia32_vpshrdw256: 3791 case X86::BI__builtin_ia32_vpshrdw512: 3792 i = 2; l = 0; u = 255; 3793 break; 3794 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3795 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3796 case X86::BI__builtin_ia32_fixupimmps512_mask: 3797 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3798 case X86::BI__builtin_ia32_fixupimmsd_mask: 3799 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3800 case X86::BI__builtin_ia32_fixupimmss_mask: 3801 case X86::BI__builtin_ia32_fixupimmss_maskz: 3802 case X86::BI__builtin_ia32_fixupimmpd128_mask: 3803 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 3804 case X86::BI__builtin_ia32_fixupimmpd256_mask: 3805 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 3806 case X86::BI__builtin_ia32_fixupimmps128_mask: 3807 case X86::BI__builtin_ia32_fixupimmps128_maskz: 3808 case X86::BI__builtin_ia32_fixupimmps256_mask: 3809 case X86::BI__builtin_ia32_fixupimmps256_maskz: 3810 case X86::BI__builtin_ia32_pternlogd512_mask: 3811 case X86::BI__builtin_ia32_pternlogd512_maskz: 3812 case X86::BI__builtin_ia32_pternlogq512_mask: 3813 case X86::BI__builtin_ia32_pternlogq512_maskz: 3814 case X86::BI__builtin_ia32_pternlogd128_mask: 
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    // Third operand is a full 8-bit immediate.
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    // Prefetch hint operand: only values 2 and 3 are accepted.
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't need to necessarily
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  // A first-arg of 0 means the callee takes a va_list rather than
  // individual variadic arguments.
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  // Attribute indices are 1-based; convert to 0-based AST indices.
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    // An index of 0 would refer to the implicit 'this'; there is no such
    // format argument in our argument list, so the attribute can't apply.
    if(FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability
        = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
          dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
            dyn_cast<InitListExpr>(CLE->getInitializer()))
          // Evaluate the first union member instead of the literal itself.
          Expr = ILE->getInit(0);
  }

  // Null means the expression is non-dependent and constant-evaluates
  // to a false (zero) boolean condition.
  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

/// Emit warn_null_arg if ArgExpr constant-evaluates to null at this call site.
static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
}

/// If Format is an NSString format attribute, store its 0-based format
/// argument index in Idx and return true.
bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  // CFString-formatting functions take the format string as their third
  // argument (index 2); otherwise look for an NSString format attribute.
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  }
  else
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  // Nothing to check if this isn't a formatting call or the format
  // argument wasn't supplied.
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  // The format may be an ObjC @"..." literal or a plain string literal.
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
      dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  // Warn on %s in the format string and point back at the callee.
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
      << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
      << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(ASTContext &ctx, QualType type) {
  if (auto nullability = type->getNullability(ctx))
    return *nullability == NullabilityKind::NonNull;

  return false;
}

/// Collect every argument position that must be non-null — from nonnull
/// attributes on the declaration, on its parameters, or from nullability
/// annotations on the prototype — and warn on any that evaluate to null.
static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        // Ignore indices past the actual argument list (e.g. variadic
        // attributes on a shorter call).
        if (IdxAST >= Args.size())
          continue;
        // Lazily size the bit vector only when something is non-null.
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() ||
          isNonNullType(S.Context, PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(S.Context, paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
  }
}

/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
///
/// \param FDecl    the callee declaration, if known (may be null).
/// \param Proto    the callee prototype, if known (may be null).
/// \param ThisArg  the implicit object argument for member calls, else null.
/// \param Args     the explicit call arguments.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Determine how many fixed parameters the callee declares; every
    // argument beyond that is a variadic argument.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip arguments already validated by format-string checking.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}

/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
void Sema::CheckConstructorCall(FunctionDecl *FDecl,
                                ArrayRef<const Expr *> Args,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  VariadicCallType CallType =
      Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}

/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
4132 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4133 const FunctionProtoType *Proto) { 4134 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4135 isa<CXXMethodDecl>(FDecl); 4136 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4137 IsMemberOperatorCall; 4138 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4139 TheCall->getCallee()); 4140 Expr** Args = TheCall->getArgs(); 4141 unsigned NumArgs = TheCall->getNumArgs(); 4142 4143 Expr *ImplicitThis = nullptr; 4144 if (IsMemberOperatorCall) { 4145 // If this is a call to a member operator, hide the first argument 4146 // from checkCall. 4147 // FIXME: Our choice of AST representation here is less than ideal. 4148 ImplicitThis = Args[0]; 4149 ++Args; 4150 --NumArgs; 4151 } else if (IsMemberFunction) 4152 ImplicitThis = 4153 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4154 4155 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4156 IsMemberFunction, TheCall->getRParenLoc(), 4157 TheCall->getCallee()->getSourceRange(), CallType); 4158 4159 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4160 // None of the checks below are needed for functions that don't have 4161 // simple names (e.g., C++ conversion functions). 4162 if (!FnInfo) 4163 return false; 4164 4165 CheckAbsoluteValueFunction(TheCall, FDecl); 4166 CheckMaxUnsignedZero(TheCall, FDecl); 4167 4168 if (getLangOpts().ObjC) 4169 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4170 4171 unsigned CMId = FDecl->getMemoryFunctionKind(); 4172 if (CMId == 0) 4173 return false; 4174 4175 // Handle memory setting and copying functions. 
4176 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) 4177 CheckStrlcpycatArguments(TheCall, FnInfo); 4178 else if (CMId == Builtin::BIstrncat) 4179 CheckStrncatArguments(TheCall, FnInfo); 4180 else 4181 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4182 4183 return false; 4184 } 4185 4186 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4187 ArrayRef<const Expr *> Args) { 4188 VariadicCallType CallType = 4189 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 4190 4191 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4192 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4193 CallType); 4194 4195 return false; 4196 } 4197 4198 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4199 const FunctionProtoType *Proto) { 4200 QualType Ty; 4201 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4202 Ty = V->getType().getNonReferenceType(); 4203 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4204 Ty = F->getType().getNonReferenceType(); 4205 else 4206 return false; 4207 4208 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4209 !Ty->isFunctionProtoType()) 4210 return false; 4211 4212 VariadicCallType CallType; 4213 if (!Proto || !Proto->isVariadic()) { 4214 CallType = VariadicDoesNotApply; 4215 } else if (Ty->isBlockPointerType()) { 4216 CallType = VariadicBlock; 4217 } else { // Ty->isFunctionPointerType() 4218 CallType = VariadicFunction; 4219 } 4220 4221 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4222 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4223 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4224 TheCall->getCallee()->getSourceRange(), CallType); 4225 4226 return false; 4227 } 4228 4229 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4230 /// such as function pointers returned from functions. 
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
                                                  TheCall->getCallee());
  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
            llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Check that a C ABI atomic ordering value is valid for the given atomic
/// operation: loads reject release/acq_rel orderings and stores reject
/// consume/acquire/acq_rel orderings; all other operations accept any
/// valid ordering.
static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
  // Reject values that aren't valid C ABI orderings at all.
  if (!llvm::isValidAtomicOrderingCABI(Ordering))
    return false;

  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("There is no ordering argument for an init");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
    // A load may not have release semantics.
    return OrderingCABI != llvm::AtomicOrderingCABI::release &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    // A store may not have acquire (or consume) semantics.
    return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
           OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  default:
    return true;
  }
}

ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                         AtomicExpr::AtomicOp Op) {
  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // All the non-OpenCL operations take one of the following forms.
4278 // The OpenCL operations take the __c11 forms with one extra argument for 4279 // synchronization scope. 4280 enum { 4281 // C __c11_atomic_init(A *, C) 4282 Init, 4283 4284 // C __c11_atomic_load(A *, int) 4285 Load, 4286 4287 // void __atomic_load(A *, CP, int) 4288 LoadCopy, 4289 4290 // void __atomic_store(A *, CP, int) 4291 Copy, 4292 4293 // C __c11_atomic_add(A *, M, int) 4294 Arithmetic, 4295 4296 // C __atomic_exchange_n(A *, CP, int) 4297 Xchg, 4298 4299 // void __atomic_exchange(A *, C *, CP, int) 4300 GNUXchg, 4301 4302 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4303 C11CmpXchg, 4304 4305 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4306 GNUCmpXchg 4307 } Form = Init; 4308 4309 const unsigned NumForm = GNUCmpXchg + 1; 4310 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4311 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4312 // where: 4313 // C is an appropriate type, 4314 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4315 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4316 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4317 // the int parameters are for orderings. 
4318 4319 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4320 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4321 "need to update code for modified forms"); 4322 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4323 AtomicExpr::AO__c11_atomic_fetch_xor + 1 == 4324 AtomicExpr::AO__atomic_load, 4325 "need to update code for modified C11 atomics"); 4326 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4327 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4328 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4329 Op <= AtomicExpr::AO__c11_atomic_fetch_xor) || 4330 IsOpenCL; 4331 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4332 Op == AtomicExpr::AO__atomic_store_n || 4333 Op == AtomicExpr::AO__atomic_exchange_n || 4334 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4335 bool IsAddSub = false; 4336 bool IsMinMax = false; 4337 4338 switch (Op) { 4339 case AtomicExpr::AO__c11_atomic_init: 4340 case AtomicExpr::AO__opencl_atomic_init: 4341 Form = Init; 4342 break; 4343 4344 case AtomicExpr::AO__c11_atomic_load: 4345 case AtomicExpr::AO__opencl_atomic_load: 4346 case AtomicExpr::AO__atomic_load_n: 4347 Form = Load; 4348 break; 4349 4350 case AtomicExpr::AO__atomic_load: 4351 Form = LoadCopy; 4352 break; 4353 4354 case AtomicExpr::AO__c11_atomic_store: 4355 case AtomicExpr::AO__opencl_atomic_store: 4356 case AtomicExpr::AO__atomic_store: 4357 case AtomicExpr::AO__atomic_store_n: 4358 Form = Copy; 4359 break; 4360 4361 case AtomicExpr::AO__c11_atomic_fetch_add: 4362 case AtomicExpr::AO__c11_atomic_fetch_sub: 4363 case AtomicExpr::AO__opencl_atomic_fetch_add: 4364 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4365 case AtomicExpr::AO__opencl_atomic_fetch_min: 4366 case AtomicExpr::AO__opencl_atomic_fetch_max: 4367 case AtomicExpr::AO__atomic_fetch_add: 4368 case AtomicExpr::AO__atomic_fetch_sub: 4369 case AtomicExpr::AO__atomic_add_fetch: 4370 case AtomicExpr::AO__atomic_sub_fetch: 4371 IsAddSub = true; 4372 LLVM_FALLTHROUGH; 4373 
case AtomicExpr::AO__c11_atomic_fetch_and: 4374 case AtomicExpr::AO__c11_atomic_fetch_or: 4375 case AtomicExpr::AO__c11_atomic_fetch_xor: 4376 case AtomicExpr::AO__opencl_atomic_fetch_and: 4377 case AtomicExpr::AO__opencl_atomic_fetch_or: 4378 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4379 case AtomicExpr::AO__atomic_fetch_and: 4380 case AtomicExpr::AO__atomic_fetch_or: 4381 case AtomicExpr::AO__atomic_fetch_xor: 4382 case AtomicExpr::AO__atomic_fetch_nand: 4383 case AtomicExpr::AO__atomic_and_fetch: 4384 case AtomicExpr::AO__atomic_or_fetch: 4385 case AtomicExpr::AO__atomic_xor_fetch: 4386 case AtomicExpr::AO__atomic_nand_fetch: 4387 Form = Arithmetic; 4388 break; 4389 4390 case AtomicExpr::AO__atomic_fetch_min: 4391 case AtomicExpr::AO__atomic_fetch_max: 4392 IsMinMax = true; 4393 Form = Arithmetic; 4394 break; 4395 4396 case AtomicExpr::AO__c11_atomic_exchange: 4397 case AtomicExpr::AO__opencl_atomic_exchange: 4398 case AtomicExpr::AO__atomic_exchange_n: 4399 Form = Xchg; 4400 break; 4401 4402 case AtomicExpr::AO__atomic_exchange: 4403 Form = GNUXchg; 4404 break; 4405 4406 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4407 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4408 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4409 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4410 Form = C11CmpXchg; 4411 break; 4412 4413 case AtomicExpr::AO__atomic_compare_exchange: 4414 case AtomicExpr::AO__atomic_compare_exchange_n: 4415 Form = GNUCmpXchg; 4416 break; 4417 } 4418 4419 unsigned AdjustedNumArgs = NumArgs[Form]; 4420 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4421 ++AdjustedNumArgs; 4422 // Check we have the right number of arguments. 
4423 if (TheCall->getNumArgs() < AdjustedNumArgs) { 4424 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 4425 << 0 << AdjustedNumArgs << TheCall->getNumArgs() 4426 << TheCall->getCallee()->getSourceRange(); 4427 return ExprError(); 4428 } else if (TheCall->getNumArgs() > AdjustedNumArgs) { 4429 Diag(TheCall->getArg(AdjustedNumArgs)->getBeginLoc(), 4430 diag::err_typecheck_call_too_many_args) 4431 << 0 << AdjustedNumArgs << TheCall->getNumArgs() 4432 << TheCall->getCallee()->getSourceRange(); 4433 return ExprError(); 4434 } 4435 4436 // Inspect the first argument of the atomic operation. 4437 Expr *Ptr = TheCall->getArg(0); 4438 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4439 if (ConvertedPtr.isInvalid()) 4440 return ExprError(); 4441 4442 Ptr = ConvertedPtr.get(); 4443 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4444 if (!pointerType) { 4445 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 4446 << Ptr->getType() << Ptr->getSourceRange(); 4447 return ExprError(); 4448 } 4449 4450 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4451 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4452 QualType ValType = AtomTy; // 'C' 4453 if (IsC11) { 4454 if (!AtomTy->isAtomicType()) { 4455 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic) 4456 << Ptr->getType() << Ptr->getSourceRange(); 4457 return ExprError(); 4458 } 4459 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4460 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4461 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_atomic) 4462 << (AtomTy.isConstQualified() ? 
0 : 1) << Ptr->getType() 4463 << Ptr->getSourceRange(); 4464 return ExprError(); 4465 } 4466 ValType = AtomTy->getAs<AtomicType>()->getValueType(); 4467 } else if (Form != Load && Form != LoadCopy) { 4468 if (ValType.isConstQualified()) { 4469 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_pointer) 4470 << Ptr->getType() << Ptr->getSourceRange(); 4471 return ExprError(); 4472 } 4473 } 4474 4475 // For an arithmetic operation, the implied arithmetic must be well-formed. 4476 if (Form == Arithmetic) { 4477 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4478 if (IsAddSub && !ValType->isIntegerType() 4479 && !ValType->isPointerType()) { 4480 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4481 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4482 return ExprError(); 4483 } 4484 if (IsMinMax) { 4485 const BuiltinType *BT = ValType->getAs<BuiltinType>(); 4486 if (!BT || (BT->getKind() != BuiltinType::Int && 4487 BT->getKind() != BuiltinType::UInt)) { 4488 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_int32_or_ptr); 4489 return ExprError(); 4490 } 4491 } 4492 if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) { 4493 Diag(DRE->getBeginLoc(), diag::err_atomic_op_bitwise_needs_atomic_int) 4494 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4495 return ExprError(); 4496 } 4497 if (IsC11 && ValType->isPointerType() && 4498 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4499 diag::err_incomplete_type)) { 4500 return ExprError(); 4501 } 4502 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4503 // For __atomic_*_n operations, the value type must be a scalar integral or 4504 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 
4505 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4506 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4507 return ExprError(); 4508 } 4509 4510 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4511 !AtomTy->isScalarType()) { 4512 // For GNU atomics, require a trivially-copyable type. This is not part of 4513 // the GNU atomics specification, but we enforce it for sanity. 4514 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_trivial_copy) 4515 << Ptr->getType() << Ptr->getSourceRange(); 4516 return ExprError(); 4517 } 4518 4519 switch (ValType.getObjCLifetime()) { 4520 case Qualifiers::OCL_None: 4521 case Qualifiers::OCL_ExplicitNone: 4522 // okay 4523 break; 4524 4525 case Qualifiers::OCL_Weak: 4526 case Qualifiers::OCL_Strong: 4527 case Qualifiers::OCL_Autoreleasing: 4528 // FIXME: Can this happen? By this point, ValType should be known 4529 // to be trivially copyable. 4530 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 4531 << ValType << Ptr->getSourceRange(); 4532 return ExprError(); 4533 } 4534 4535 // All atomic operations have an overload which takes a pointer to a volatile 4536 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4537 // into the result or the other operands. Similarly atomic_load takes a 4538 // pointer to a const 'A'. 4539 ValType.removeLocalVolatile(); 4540 ValType.removeLocalConst(); 4541 QualType ResultType = ValType; 4542 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4543 Form == Init) 4544 ResultType = Context.VoidTy; 4545 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4546 ResultType = Context.BoolTy; 4547 4548 // The type of a parameter passed 'by value'. In the GNU atomics, such 4549 // arguments are actually passed as pointers. 
4550 QualType ByValType = ValType; // 'CP' 4551 bool IsPassedByAddress = false; 4552 if (!IsC11 && !IsN) { 4553 ByValType = Ptr->getType(); 4554 IsPassedByAddress = true; 4555 } 4556 4557 // The first argument's non-CV pointer type is used to deduce the type of 4558 // subsequent arguments, except for: 4559 // - weak flag (always converted to bool) 4560 // - memory order (always converted to int) 4561 // - scope (always converted to int) 4562 for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) { 4563 QualType Ty; 4564 if (i < NumVals[Form] + 1) { 4565 switch (i) { 4566 case 0: 4567 // The first argument is always a pointer. It has a fixed type. 4568 // It is always dereferenced, a nullptr is undefined. 4569 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4570 // Nothing else to do: we already know all we want about this pointer. 4571 continue; 4572 case 1: 4573 // The second argument is the non-atomic operand. For arithmetic, this 4574 // is always passed by value, and for a compare_exchange it is always 4575 // passed by address. For the rest, GNU uses by-address and C11 uses 4576 // by-value. 4577 assert(Form != Load); 4578 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 4579 Ty = ValType; 4580 else if (Form == Copy || Form == Xchg) { 4581 if (IsPassedByAddress) 4582 // The value pointer is always dereferenced, a nullptr is undefined. 4583 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4584 Ty = ByValType; 4585 } else if (Form == Arithmetic) 4586 Ty = Context.getPointerDiffType(); 4587 else { 4588 Expr *ValArg = TheCall->getArg(i); 4589 // The value pointer is always dereferenced, a nullptr is undefined. 4590 CheckNonNullArgument(*this, ValArg, DRE->getBeginLoc()); 4591 LangAS AS = LangAS::Default; 4592 // Keep address space of non-atomic pointer type. 
4593 if (const PointerType *PtrTy = 4594 ValArg->getType()->getAs<PointerType>()) { 4595 AS = PtrTy->getPointeeType().getAddressSpace(); 4596 } 4597 Ty = Context.getPointerType( 4598 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 4599 } 4600 break; 4601 case 2: 4602 // The third argument to compare_exchange / GNU exchange is the desired 4603 // value, either by-value (for the C11 and *_n variant) or as a pointer. 4604 if (IsPassedByAddress) 4605 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4606 Ty = ByValType; 4607 break; 4608 case 3: 4609 // The fourth argument to GNU compare_exchange is a 'weak' flag. 4610 Ty = Context.BoolTy; 4611 break; 4612 } 4613 } else { 4614 // The order(s) and scope are always converted to int. 4615 Ty = Context.IntTy; 4616 } 4617 4618 InitializedEntity Entity = 4619 InitializedEntity::InitializeParameter(Context, Ty, false); 4620 ExprResult Arg = TheCall->getArg(i); 4621 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 4622 if (Arg.isInvalid()) 4623 return true; 4624 TheCall->setArg(i, Arg.get()); 4625 } 4626 4627 // Permute the arguments into a 'consistent' order. 4628 SmallVector<Expr*, 5> SubExprs; 4629 SubExprs.push_back(Ptr); 4630 switch (Form) { 4631 case Init: 4632 // Note, AtomicExpr::getVal1() has a special case for this atomic. 4633 SubExprs.push_back(TheCall->getArg(1)); // Val1 4634 break; 4635 case Load: 4636 SubExprs.push_back(TheCall->getArg(1)); // Order 4637 break; 4638 case LoadCopy: 4639 case Copy: 4640 case Arithmetic: 4641 case Xchg: 4642 SubExprs.push_back(TheCall->getArg(2)); // Order 4643 SubExprs.push_back(TheCall->getArg(1)); // Val1 4644 break; 4645 case GNUXchg: 4646 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
4647 SubExprs.push_back(TheCall->getArg(3)); // Order 4648 SubExprs.push_back(TheCall->getArg(1)); // Val1 4649 SubExprs.push_back(TheCall->getArg(2)); // Val2 4650 break; 4651 case C11CmpXchg: 4652 SubExprs.push_back(TheCall->getArg(3)); // Order 4653 SubExprs.push_back(TheCall->getArg(1)); // Val1 4654 SubExprs.push_back(TheCall->getArg(4)); // OrderFail 4655 SubExprs.push_back(TheCall->getArg(2)); // Val2 4656 break; 4657 case GNUCmpXchg: 4658 SubExprs.push_back(TheCall->getArg(4)); // Order 4659 SubExprs.push_back(TheCall->getArg(1)); // Val1 4660 SubExprs.push_back(TheCall->getArg(5)); // OrderFail 4661 SubExprs.push_back(TheCall->getArg(2)); // Val2 4662 SubExprs.push_back(TheCall->getArg(3)); // Weak 4663 break; 4664 } 4665 4666 if (SubExprs.size() >= 2 && Form != Init) { 4667 llvm::APSInt Result(32); 4668 if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && 4669 !isValidOrderingForOp(Result.getSExtValue(), Op)) 4670 Diag(SubExprs[1]->getBeginLoc(), 4671 diag::warn_atomic_op_has_invalid_memory_order) 4672 << SubExprs[1]->getSourceRange(); 4673 } 4674 4675 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 4676 auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1); 4677 llvm::APSInt Result(32); 4678 if (Scope->isIntegerConstantExpr(Result, Context) && 4679 !ScopeModel->isValid(Result.getZExtValue())) { 4680 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 4681 << Scope->getSourceRange(); 4682 } 4683 SubExprs.push_back(Scope); 4684 } 4685 4686 AtomicExpr *AE = 4687 new (Context) AtomicExpr(TheCall->getCallee()->getBeginLoc(), SubExprs, 4688 ResultType, Op, TheCall->getRParenLoc()); 4689 4690 if ((Op == AtomicExpr::AO__c11_atomic_load || 4691 Op == AtomicExpr::AO__c11_atomic_store || 4692 Op == AtomicExpr::AO__opencl_atomic_load || 4693 Op == AtomicExpr::AO__opencl_atomic_store ) && 4694 Context.AtomicUsesUnsupportedLibcall(AE)) 4695 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 4696 << ((Op == 
AtomicExpr::AO__c11_atomic_load || 4697 Op == AtomicExpr::AO__opencl_atomic_load) 4698 ? 0 4699 : 1); 4700 4701 return AE; 4702 } 4703 4704 /// checkBuiltinArgument - Given a call to a builtin function, perform 4705 /// normal type-checking on the given argument, updating the call in 4706 /// place. This is useful when a builtin function requires custom 4707 /// type-checking for some of its arguments but not necessarily all of 4708 /// them. 4709 /// 4710 /// Returns true on error. 4711 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 4712 FunctionDecl *Fn = E->getDirectCallee(); 4713 assert(Fn && "builtin call without direct callee!"); 4714 4715 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 4716 InitializedEntity Entity = 4717 InitializedEntity::InitializeParameter(S.Context, Param); 4718 4719 ExprResult Arg = E->getArg(0); 4720 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 4721 if (Arg.isInvalid()) 4722 return true; 4723 4724 E->setArg(ArgIndex, Arg.get()); 4725 return false; 4726 } 4727 4728 /// We have a call to a function like __sync_fetch_and_add, which is an 4729 /// overloaded function based on the pointer type of its first argument. 4730 /// The main ActOnCallExpr routines have already promoted the types of 4731 /// arguments because all of these calls are prototyped as void(...). 4732 /// 4733 /// This function goes through and does final semantic checking for these 4734 /// builtins, as well as generating any warnings. 4735 ExprResult 4736 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 4737 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 4738 Expr *Callee = TheCall->getCallee(); 4739 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 4740 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 4741 4742 // Ensure that we have at least one argument to do type inference from. 
4743 if (TheCall->getNumArgs() < 1) { 4744 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 4745 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 4746 return ExprError(); 4747 } 4748 4749 // Inspect the first argument of the atomic builtin. This should always be 4750 // a pointer type, whose element is an integral scalar or pointer type. 4751 // Because it is a pointer type, we don't have to worry about any implicit 4752 // casts here. 4753 // FIXME: We don't allow floating point scalars as input. 4754 Expr *FirstArg = TheCall->getArg(0); 4755 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 4756 if (FirstArgResult.isInvalid()) 4757 return ExprError(); 4758 FirstArg = FirstArgResult.get(); 4759 TheCall->setArg(0, FirstArg); 4760 4761 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 4762 if (!pointerType) { 4763 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 4764 << FirstArg->getType() << FirstArg->getSourceRange(); 4765 return ExprError(); 4766 } 4767 4768 QualType ValType = pointerType->getPointeeType(); 4769 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 4770 !ValType->isBlockPointerType()) { 4771 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 4772 << FirstArg->getType() << FirstArg->getSourceRange(); 4773 return ExprError(); 4774 } 4775 4776 if (ValType.isConstQualified()) { 4777 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 4778 << FirstArg->getType() << FirstArg->getSourceRange(); 4779 return ExprError(); 4780 } 4781 4782 switch (ValType.getObjCLifetime()) { 4783 case Qualifiers::OCL_None: 4784 case Qualifiers::OCL_ExplicitNone: 4785 // okay 4786 break; 4787 4788 case Qualifiers::OCL_Weak: 4789 case Qualifiers::OCL_Strong: 4790 case Qualifiers::OCL_Autoreleasing: 4791 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 4792 << ValType << FirstArg->getSourceRange(); 4793 return 
ExprError(); 4794 } 4795 4796 // Strip any qualifiers off ValType. 4797 ValType = ValType.getUnqualifiedType(); 4798 4799 // The majority of builtins return a value, but a few have special return 4800 // types, so allow them to override appropriately below. 4801 QualType ResultType = ValType; 4802 4803 // We need to figure out which concrete builtin this maps onto. For example, 4804 // __sync_fetch_and_add with a 2 byte object turns into 4805 // __sync_fetch_and_add_2. 4806 #define BUILTIN_ROW(x) \ 4807 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 4808 Builtin::BI##x##_8, Builtin::BI##x##_16 } 4809 4810 static const unsigned BuiltinIndices[][5] = { 4811 BUILTIN_ROW(__sync_fetch_and_add), 4812 BUILTIN_ROW(__sync_fetch_and_sub), 4813 BUILTIN_ROW(__sync_fetch_and_or), 4814 BUILTIN_ROW(__sync_fetch_and_and), 4815 BUILTIN_ROW(__sync_fetch_and_xor), 4816 BUILTIN_ROW(__sync_fetch_and_nand), 4817 4818 BUILTIN_ROW(__sync_add_and_fetch), 4819 BUILTIN_ROW(__sync_sub_and_fetch), 4820 BUILTIN_ROW(__sync_and_and_fetch), 4821 BUILTIN_ROW(__sync_or_and_fetch), 4822 BUILTIN_ROW(__sync_xor_and_fetch), 4823 BUILTIN_ROW(__sync_nand_and_fetch), 4824 4825 BUILTIN_ROW(__sync_val_compare_and_swap), 4826 BUILTIN_ROW(__sync_bool_compare_and_swap), 4827 BUILTIN_ROW(__sync_lock_test_and_set), 4828 BUILTIN_ROW(__sync_lock_release), 4829 BUILTIN_ROW(__sync_swap) 4830 }; 4831 #undef BUILTIN_ROW 4832 4833 // Determine the index of the size. 
4834 unsigned SizeIndex; 4835 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 4836 case 1: SizeIndex = 0; break; 4837 case 2: SizeIndex = 1; break; 4838 case 4: SizeIndex = 2; break; 4839 case 8: SizeIndex = 3; break; 4840 case 16: SizeIndex = 4; break; 4841 default: 4842 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 4843 << FirstArg->getType() << FirstArg->getSourceRange(); 4844 return ExprError(); 4845 } 4846 4847 // Each of these builtins has one pointer argument, followed by some number of 4848 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 4849 // that we ignore. Find out which row of BuiltinIndices to read from as well 4850 // as the number of fixed args. 4851 unsigned BuiltinID = FDecl->getBuiltinID(); 4852 unsigned BuiltinIndex, NumFixed = 1; 4853 bool WarnAboutSemanticsChange = false; 4854 switch (BuiltinID) { 4855 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 4856 case Builtin::BI__sync_fetch_and_add: 4857 case Builtin::BI__sync_fetch_and_add_1: 4858 case Builtin::BI__sync_fetch_and_add_2: 4859 case Builtin::BI__sync_fetch_and_add_4: 4860 case Builtin::BI__sync_fetch_and_add_8: 4861 case Builtin::BI__sync_fetch_and_add_16: 4862 BuiltinIndex = 0; 4863 break; 4864 4865 case Builtin::BI__sync_fetch_and_sub: 4866 case Builtin::BI__sync_fetch_and_sub_1: 4867 case Builtin::BI__sync_fetch_and_sub_2: 4868 case Builtin::BI__sync_fetch_and_sub_4: 4869 case Builtin::BI__sync_fetch_and_sub_8: 4870 case Builtin::BI__sync_fetch_and_sub_16: 4871 BuiltinIndex = 1; 4872 break; 4873 4874 case Builtin::BI__sync_fetch_and_or: 4875 case Builtin::BI__sync_fetch_and_or_1: 4876 case Builtin::BI__sync_fetch_and_or_2: 4877 case Builtin::BI__sync_fetch_and_or_4: 4878 case Builtin::BI__sync_fetch_and_or_8: 4879 case Builtin::BI__sync_fetch_and_or_16: 4880 BuiltinIndex = 2; 4881 break; 4882 4883 case Builtin::BI__sync_fetch_and_and: 4884 case Builtin::BI__sync_fetch_and_and_1: 4885 case 
Builtin::BI__sync_fetch_and_and_2: 4886 case Builtin::BI__sync_fetch_and_and_4: 4887 case Builtin::BI__sync_fetch_and_and_8: 4888 case Builtin::BI__sync_fetch_and_and_16: 4889 BuiltinIndex = 3; 4890 break; 4891 4892 case Builtin::BI__sync_fetch_and_xor: 4893 case Builtin::BI__sync_fetch_and_xor_1: 4894 case Builtin::BI__sync_fetch_and_xor_2: 4895 case Builtin::BI__sync_fetch_and_xor_4: 4896 case Builtin::BI__sync_fetch_and_xor_8: 4897 case Builtin::BI__sync_fetch_and_xor_16: 4898 BuiltinIndex = 4; 4899 break; 4900 4901 case Builtin::BI__sync_fetch_and_nand: 4902 case Builtin::BI__sync_fetch_and_nand_1: 4903 case Builtin::BI__sync_fetch_and_nand_2: 4904 case Builtin::BI__sync_fetch_and_nand_4: 4905 case Builtin::BI__sync_fetch_and_nand_8: 4906 case Builtin::BI__sync_fetch_and_nand_16: 4907 BuiltinIndex = 5; 4908 WarnAboutSemanticsChange = true; 4909 break; 4910 4911 case Builtin::BI__sync_add_and_fetch: 4912 case Builtin::BI__sync_add_and_fetch_1: 4913 case Builtin::BI__sync_add_and_fetch_2: 4914 case Builtin::BI__sync_add_and_fetch_4: 4915 case Builtin::BI__sync_add_and_fetch_8: 4916 case Builtin::BI__sync_add_and_fetch_16: 4917 BuiltinIndex = 6; 4918 break; 4919 4920 case Builtin::BI__sync_sub_and_fetch: 4921 case Builtin::BI__sync_sub_and_fetch_1: 4922 case Builtin::BI__sync_sub_and_fetch_2: 4923 case Builtin::BI__sync_sub_and_fetch_4: 4924 case Builtin::BI__sync_sub_and_fetch_8: 4925 case Builtin::BI__sync_sub_and_fetch_16: 4926 BuiltinIndex = 7; 4927 break; 4928 4929 case Builtin::BI__sync_and_and_fetch: 4930 case Builtin::BI__sync_and_and_fetch_1: 4931 case Builtin::BI__sync_and_and_fetch_2: 4932 case Builtin::BI__sync_and_and_fetch_4: 4933 case Builtin::BI__sync_and_and_fetch_8: 4934 case Builtin::BI__sync_and_and_fetch_16: 4935 BuiltinIndex = 8; 4936 break; 4937 4938 case Builtin::BI__sync_or_and_fetch: 4939 case Builtin::BI__sync_or_and_fetch_1: 4940 case Builtin::BI__sync_or_and_fetch_2: 4941 case Builtin::BI__sync_or_and_fetch_4: 4942 case 
Builtin::BI__sync_or_and_fetch_8: 4943 case Builtin::BI__sync_or_and_fetch_16: 4944 BuiltinIndex = 9; 4945 break; 4946 4947 case Builtin::BI__sync_xor_and_fetch: 4948 case Builtin::BI__sync_xor_and_fetch_1: 4949 case Builtin::BI__sync_xor_and_fetch_2: 4950 case Builtin::BI__sync_xor_and_fetch_4: 4951 case Builtin::BI__sync_xor_and_fetch_8: 4952 case Builtin::BI__sync_xor_and_fetch_16: 4953 BuiltinIndex = 10; 4954 break; 4955 4956 case Builtin::BI__sync_nand_and_fetch: 4957 case Builtin::BI__sync_nand_and_fetch_1: 4958 case Builtin::BI__sync_nand_and_fetch_2: 4959 case Builtin::BI__sync_nand_and_fetch_4: 4960 case Builtin::BI__sync_nand_and_fetch_8: 4961 case Builtin::BI__sync_nand_and_fetch_16: 4962 BuiltinIndex = 11; 4963 WarnAboutSemanticsChange = true; 4964 break; 4965 4966 case Builtin::BI__sync_val_compare_and_swap: 4967 case Builtin::BI__sync_val_compare_and_swap_1: 4968 case Builtin::BI__sync_val_compare_and_swap_2: 4969 case Builtin::BI__sync_val_compare_and_swap_4: 4970 case Builtin::BI__sync_val_compare_and_swap_8: 4971 case Builtin::BI__sync_val_compare_and_swap_16: 4972 BuiltinIndex = 12; 4973 NumFixed = 2; 4974 break; 4975 4976 case Builtin::BI__sync_bool_compare_and_swap: 4977 case Builtin::BI__sync_bool_compare_and_swap_1: 4978 case Builtin::BI__sync_bool_compare_and_swap_2: 4979 case Builtin::BI__sync_bool_compare_and_swap_4: 4980 case Builtin::BI__sync_bool_compare_and_swap_8: 4981 case Builtin::BI__sync_bool_compare_and_swap_16: 4982 BuiltinIndex = 13; 4983 NumFixed = 2; 4984 ResultType = Context.BoolTy; 4985 break; 4986 4987 case Builtin::BI__sync_lock_test_and_set: 4988 case Builtin::BI__sync_lock_test_and_set_1: 4989 case Builtin::BI__sync_lock_test_and_set_2: 4990 case Builtin::BI__sync_lock_test_and_set_4: 4991 case Builtin::BI__sync_lock_test_and_set_8: 4992 case Builtin::BI__sync_lock_test_and_set_16: 4993 BuiltinIndex = 14; 4994 break; 4995 4996 case Builtin::BI__sync_lock_release: 4997 case Builtin::BI__sync_lock_release_1: 4998 case 
Builtin::BI__sync_lock_release_2: 4999 case Builtin::BI__sync_lock_release_4: 5000 case Builtin::BI__sync_lock_release_8: 5001 case Builtin::BI__sync_lock_release_16: 5002 BuiltinIndex = 15; 5003 NumFixed = 0; 5004 ResultType = Context.VoidTy; 5005 break; 5006 5007 case Builtin::BI__sync_swap: 5008 case Builtin::BI__sync_swap_1: 5009 case Builtin::BI__sync_swap_2: 5010 case Builtin::BI__sync_swap_4: 5011 case Builtin::BI__sync_swap_8: 5012 case Builtin::BI__sync_swap_16: 5013 BuiltinIndex = 16; 5014 break; 5015 } 5016 5017 // Now that we know how many fixed arguments we expect, first check that we 5018 // have at least that many. 5019 if (TheCall->getNumArgs() < 1+NumFixed) { 5020 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5021 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5022 << Callee->getSourceRange(); 5023 return ExprError(); 5024 } 5025 5026 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5027 << Callee->getSourceRange(); 5028 5029 if (WarnAboutSemanticsChange) { 5030 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5031 << Callee->getSourceRange(); 5032 } 5033 5034 // Get the decl for the concrete builtin from this, we can tell what the 5035 // concrete integer type we should convert to is. 5036 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5037 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5038 FunctionDecl *NewBuiltinDecl; 5039 if (NewBuiltinID == BuiltinID) 5040 NewBuiltinDecl = FDecl; 5041 else { 5042 // Perform builtin lookup to avoid redeclaring it. 
5043 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 5044 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 5045 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 5046 assert(Res.getFoundDecl()); 5047 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 5048 if (!NewBuiltinDecl) 5049 return ExprError(); 5050 } 5051 5052 // The first argument --- the pointer --- has a fixed type; we 5053 // deduce the types of the rest of the arguments accordingly. Walk 5054 // the remaining arguments, converting them to the deduced value type. 5055 for (unsigned i = 0; i != NumFixed; ++i) { 5056 ExprResult Arg = TheCall->getArg(i+1); 5057 5058 // GCC does an implicit conversion to the pointer or integer ValType. This 5059 // can fail in some cases (1i -> int**), check for this error case now. 5060 // Initialize the argument. 5061 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 5062 ValType, /*consume*/ false); 5063 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5064 if (Arg.isInvalid()) 5065 return ExprError(); 5066 5067 // Okay, we have something that *can* be converted to the right type. Check 5068 // to see if there is a potentially weird extension going on here. This can 5069 // happen when you do an atomic operation on something like an char* and 5070 // pass in 42. The 42 gets converted to char. This is even more strange 5071 // for things like 45.123 -> char, etc. 5072 // FIXME: Do this check. 5073 TheCall->setArg(i+1, Arg.get()); 5074 } 5075 5076 ASTContext& Context = this->getASTContext(); 5077 5078 // Create a new DeclRefExpr to refer to the new decl. 5079 DeclRefExpr* NewDRE = DeclRefExpr::Create( 5080 Context, 5081 DRE->getQualifierLoc(), 5082 SourceLocation(), 5083 NewBuiltinDecl, 5084 /*enclosing*/ false, 5085 DRE->getLocation(), 5086 Context.BuiltinFnTy, 5087 DRE->getValueKind()); 5088 5089 // Set the callee in the CallExpr. 5090 // FIXME: This loses syntactic information. 
5091 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 5092 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 5093 CK_BuiltinFnToFnPtr); 5094 TheCall->setCallee(PromotedCall.get()); 5095 5096 // Change the result type of the call to match the original value type. This 5097 // is arbitrary, but the codegen for these builtins ins design to handle it 5098 // gracefully. 5099 TheCall->setType(ResultType); 5100 5101 return TheCallResult; 5102 } 5103 5104 /// SemaBuiltinNontemporalOverloaded - We have a call to 5105 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 5106 /// overloaded function based on the pointer type of its last argument. 5107 /// 5108 /// This function goes through and does final semantic checking for these 5109 /// builtins. 5110 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 5111 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 5112 DeclRefExpr *DRE = 5113 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5114 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5115 unsigned BuiltinID = FDecl->getBuiltinID(); 5116 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 5117 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 5118 "Unexpected nontemporal load/store builtin!"); 5119 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 5120 unsigned numArgs = isStore ? 2 : 1; 5121 5122 // Ensure that we have the proper number of arguments. 5123 if (checkArgCount(*this, TheCall, numArgs)) 5124 return ExprError(); 5125 5126 // Inspect the last argument of the nontemporal builtin. This should always 5127 // be a pointer type, from which we imply the type of the memory access. 5128 // Because it is a pointer type, we don't have to worry about any implicit 5129 // casts here. 
5130 Expr *PointerArg = TheCall->getArg(numArgs - 1); 5131 ExprResult PointerArgResult = 5132 DefaultFunctionArrayLvalueConversion(PointerArg); 5133 5134 if (PointerArgResult.isInvalid()) 5135 return ExprError(); 5136 PointerArg = PointerArgResult.get(); 5137 TheCall->setArg(numArgs - 1, PointerArg); 5138 5139 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 5140 if (!pointerType) { 5141 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 5142 << PointerArg->getType() << PointerArg->getSourceRange(); 5143 return ExprError(); 5144 } 5145 5146 QualType ValType = pointerType->getPointeeType(); 5147 5148 // Strip any qualifiers off ValType. 5149 ValType = ValType.getUnqualifiedType(); 5150 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5151 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 5152 !ValType->isVectorType()) { 5153 Diag(DRE->getBeginLoc(), 5154 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 5155 << PointerArg->getType() << PointerArg->getSourceRange(); 5156 return ExprError(); 5157 } 5158 5159 if (!isStore) { 5160 TheCall->setType(ValType); 5161 return TheCallResult; 5162 } 5163 5164 ExprResult ValArg = TheCall->getArg(0); 5165 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5166 Context, ValType, /*consume*/ false); 5167 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 5168 if (ValArg.isInvalid()) 5169 return ExprError(); 5170 5171 TheCall->setArg(0, ValArg.get()); 5172 TheCall->setType(Context.VoidTy); 5173 return TheCallResult; 5174 } 5175 5176 /// CheckObjCString - Checks that the argument to the builtin 5177 /// CFString constructor is correct 5178 /// Note: It might also make sense to do the UTF-16 conversion here (would 5179 /// simplify the backend). 
5180 bool Sema::CheckObjCString(Expr *Arg) { 5181 Arg = Arg->IgnoreParenCasts(); 5182 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 5183 5184 if (!Literal || !Literal->isAscii()) { 5185 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 5186 << Arg->getSourceRange(); 5187 return true; 5188 } 5189 5190 if (Literal->containsNonAsciiOrNull()) { 5191 StringRef String = Literal->getString(); 5192 unsigned NumBytes = String.size(); 5193 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 5194 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 5195 llvm::UTF16 *ToPtr = &ToBuf[0]; 5196 5197 llvm::ConversionResult Result = 5198 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 5199 ToPtr + NumBytes, llvm::strictConversion); 5200 // Check for conversion failure. 5201 if (Result != llvm::conversionOK) 5202 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 5203 << Arg->getSourceRange(); 5204 } 5205 return false; 5206 } 5207 5208 /// CheckObjCString - Checks that the format string argument to the os_log() 5209 /// and os_trace() functions is correct, and converts it to const char *. 
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    // The format may also be given as an Objective-C string literal
    // (@"..."); unwrap it to the underlying C string literal.
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  // Only ordinary or UTF-8 narrow string literals are acceptable formats.
  if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  // Convert the literal exactly as if it were being passed to a parameter of
  // type 'const char *'.
  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}

/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64;
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    // Determine the calling convention of the enclosing function; fall back
    // to the C convention when there is no enclosing function declaration.
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->getAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
      if ((IsWindows && CC == CC_X86_64SysV) ||
          (!IsWindows && CC == CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_va_start_used_in_wrong_abi_function)
               << !IsWindows;
    }
    return false;
  }

  // __builtin_ms_va_start only exists on x86-64 and AArch64 targets.
  if (IsMSVAStart)
    return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
  return false;
}

/// Check that a va_start-like builtin appears inside a variadic function,
/// block, or Objective-C method, diagnosing and returning true if it does
/// not. On success, if \p LastParam is non-null it receives the enclosing
/// entity's last named parameter (null when there are no named parameters).
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
                                             ParmVarDecl **LastParam = nullptr) {
  // Determine whether the current function, block, or obj-c method is variadic
  // and get its parameter list.
  bool IsVariadic = false;
  ArrayRef<ParmVarDecl *> Params;
  DeclContext *Caller = S.CurContext;
  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
    IsVariadic = Block->isVariadic();
    Params = Block->parameters();
  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
    IsVariadic = FD->isVariadic();
    Params = FD->parameters();
  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
    IsVariadic = MD->isVariadic();
    // FIXME: This isn't correct for methods (results in bogus warning).
    Params = MD->parameters();
  } else if (isa<CapturedDecl>(Caller)) {
    // We don't support va_start in a CapturedDecl.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
    return true;
  } else {
    // This must be some other declcontext that parses exprs.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
    return true;
  }

  if (!IsVariadic) {
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
    return true;
  }

  if (LastParam)
    *LastParam = Params.empty() ? nullptr : Params.back();

  return false;
}

/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity.  Emit an error and return true on failure; return false
/// on success.
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();

  // Reject the builtin when it does not match the target/calling-convention.
  if (checkVAStartABI(*this, BuiltinID, Fn))
    return true;

  if (TheCall->getNumArgs() > 2) {
    Diag(TheCall->getArg(2)->getBeginLoc(),
         diag::err_typecheck_call_too_many_args)
        << 0 /*function call*/ << 2 << TheCall->getNumArgs()
        << Fn->getSourceRange()
        << SourceRange(TheCall->getArg(2)->getBeginLoc(),
                       (*(TheCall->arg_end() - 1))->getEndLoc());
    return true;
  }

  if (TheCall->getNumArgs() < 2) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 2 << TheCall->getNumArgs();
  }

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, TheCall, 0))
    return true;

  // Check that the current function is variadic, and get its last parameter.
  ParmVarDecl *LastParam;
  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
    return true;

  // Verify that the second argument to the builtin is the last argument of the
  // current function or method.
  bool SecondArgIsLastNamedArgument = false;
  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();

  // These are valid if SecondArgIsLastNamedArgument is false after the next
  // block.
  QualType Type;
  SourceLocation ParamLoc;
  bool IsCRegister = false;

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
    if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
      SecondArgIsLastNamedArgument = PV == LastParam;

      Type = PV->getType();
      ParamLoc = PV->getLocation();
      // C (but not C++) makes va_start on a 'register' parameter undefined.
      IsCRegister =
          PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
    }
  }

  if (!SecondArgIsLastNamedArgument)
    Diag(TheCall->getArg(1)->getBeginLoc(),
         diag::warn_second_arg_of_va_start_not_last_named_param);
  else if (IsCRegister || Type->isReferenceType() ||
           Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
             // Promotable integers are UB, but enumerations need a bit of
             // extra checking to see what their promotable type actually is.
             if (!Type->isPromotableIntegerType())
               return false;
             if (!Type->isEnumeralType())
               return true;
             const EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
             return !(ED &&
                      Context.typesAreCompatible(ED->getPromotionType(), Type));
           }()) {
    unsigned Reason = 0;
    if (Type->isReferenceType()) Reason = 1;
    else if (IsCRegister) Reason = 2;
    Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
    Diag(ParamLoc, diag::note_parameter_type) << Type;
  }

  TheCall->setType(Context.VoidTy);
  return false;
}

/// Check the arguments of the Windows-on-ARM '__va_start' builtin, which has
/// an explicit signature rather than being variadic like __builtin_va_start.
bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
  // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
  //                 const char *named_addr);

  Expr *Func = Call->getCallee();

  if (Call->getNumArgs() < 3)
    return Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 3 << Call->getNumArgs();

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, Call, 0))
    return true;

  // Check that the current function is variadic.
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  // Argument 1 must be (convertible to) 'const char *'.
  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() ||
      Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy)
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0                                      /* qualifier difference */
        << 3                                      /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  // Argument 2 must be 'size_t'.
  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0                          /* qualifier difference */
        << 3                          /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}

/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends.  This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 << 2 << TheCall->getNumArgs() /*function call*/;
  if (TheCall->getNumArgs() > 2)
    return Diag(TheCall->getArg(2)->getBeginLoc(),
                diag::err_typecheck_call_too_many_args)
           << 0 /*function call*/ << 2 << TheCall->getNumArgs()
           << SourceRange(TheCall->getArg(2)->getBeginLoc(),
                          (*(TheCall->arg_end() - 1))->getEndLoc());

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  // Defer further checking for dependent operands until instantiation.
  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends.  This is declared to take (...), so we have
/// to check everything.  We expect the last argument to be a floating point
/// value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (TheCall->getNumArgs() < NumArgs)
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
  if (TheCall->getNumArgs() > NumArgs)
    return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
                diag::err_typecheck_call_too_many_args)
           << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
           << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
                          (*(TheCall->arg_end() - 1))->getEndLoc());

  // The classified operand is always the last argument.
  Expr *OrigArg = TheCall->getArg(NumArgs-1);

  // Can't check a dependent argument; defer to instantiation.
  if (OrigArg->isTypeDependent())
    return false;

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  // If this is an implicit conversion from float -> float, double, or
  // long double, remove it.
  if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
    // Only remove standard FloatCasts, leaving other casts inplace
    if (Cast->getCastKind() == CK_FloatingCast) {
      Expr *CastArg = Cast->getSubExpr();
      if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
        assert(
            (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
             Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) ||
             Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) &&
            "promotion from float to either float, double, or long double is "
            "the only expected cast here");
        Cast->setSubExpr(nullptr);
        TheCall->setArg(NumArgs-1, CastArg);
      }
    }
  }

  return false;
}

// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
// two arguments and takes compile time constant for the third argument.
// Example builtins are :
// vector double vec_xxpermdi(vector double, vector double, int);
// vector short vec_xxsldwi(vector short, vector short, int);
bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
  unsigned ExpectedNumArgs = 3;
  if (TheCall->getNumArgs() < ExpectedNumArgs)
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
           << TheCall->getSourceRange();

  if (TheCall->getNumArgs() > ExpectedNumArgs)
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
           << TheCall->getSourceRange();

  // Check the third argument is a compile time constant
  llvm::APSInt Value;
  if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context))
    return Diag(TheCall->getBeginLoc(),
                diag::err_vsx_builtin_nonconstant_argument)
           << 3 /* argument index */ << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(2)->getBeginLoc(),
                          TheCall->getArg(2)->getEndLoc());

  QualType Arg1Ty = TheCall->getArg(0)->getType();
  QualType Arg2Ty = TheCall->getArg(1)->getType();

  // Check the type of argument 1 and argument 2 are vectors.
  SourceLocation BuiltinLoc = TheCall->getBeginLoc();
  if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
      (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // Check the first two arguments are the same type.
  if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // When default clang type checking is turned off and the customized type
  // checking is used, the returning type of the function must be explicitly
  // set. Otherwise it is _Bool by default.
  TheCall->setType(Arg1Ty);

  return false;
}

/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return ExprError(Diag(TheCall->getEndLoc(),
                          diag::err_typecheck_call_too_few_args_at_least)
                     << 0 /*function call*/ << 2 << TheCall->getNumArgs()
                     << TheCall->getSourceRange());

  // Determine which of the following types of shufflevector we're checking:
  // 1) unary, vector mask: (lhs, mask)
  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
  QualType resType = TheCall->getArg(0)->getType();
  unsigned numElements = 0;

  if (!TheCall->getArg(0)->isTypeDependent() &&
      !TheCall->getArg(1)->isTypeDependent()) {
    QualType LHSType = TheCall->getArg(0)->getType();
    QualType RHSType = TheCall->getArg(1)->getType();

    if (!LHSType->isVectorType() || !RHSType->isVectorType())
      return ExprError(
          Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
          << TheCall->getDirectCallee()
          << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                         TheCall->getArg(1)->getEndLoc()));

    numElements = LHSType->getAs<VectorType>()->getNumElements();
    unsigned numResElements = TheCall->getNumArgs() - 2;

    // Check to see if we have a call with 2 vector arguments, the unary shuffle
    // with mask. If so, verify that RHS is an integer vector type with the
    // same number of elts as lhs.
    if (TheCall->getNumArgs() == 2) {
      if (!RHSType->hasIntegerRepresentation() ||
          RHSType->getAs<VectorType>()->getNumElements() != numElements)
        return ExprError(Diag(TheCall->getBeginLoc(),
                              diag::err_vec_builtin_incompatible_vector)
                         << TheCall->getDirectCallee()
                         << SourceRange(TheCall->getArg(1)->getBeginLoc(),
                                        TheCall->getArg(1)->getEndLoc()));
    } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_vec_builtin_incompatible_vector)
                       << TheCall->getDirectCallee()
                       << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                                      TheCall->getArg(1)->getEndLoc()));
    } else if (numElements != numResElements) {
      // The result has as many elements as there are indices; build the
      // corresponding vector type with the input element type.
      QualType eltType = LHSType->getAs<VectorType>()->getElementType();
      resType = Context.getVectorType(eltType, numResElements,
                                      VectorType::GenericVector);
    }
  }

  // Each index must be a non-dependent integer constant expression.
  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
    if (TheCall->getArg(i)->isTypeDependent() ||
        TheCall->getArg(i)->isValueDependent())
      continue;

    llvm::APSInt Result(32);
    if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_nonconstant_argument)
                       << TheCall->getArg(i)->getSourceRange());

    // Allow -1 which will be translated to undef in the IR.
    if (Result.isSigned() && Result.isAllOnesValue())
      continue;

    // Valid indices select from the concatenation of both inputs, hence the
    // upper bound of numElements*2.
    if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_argument_too_large)
                       << TheCall->getArg(i)->getSourceRange());
  }

  // Transfer ownership of the argument expressions to the new
  // ShuffleVectorExpr, detaching them from the CallExpr.
  SmallVector<Expr*, 32> exprs;

  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
    exprs.push_back(TheCall->getArg(i));
    TheCall->setArg(i, nullptr);
  }

  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
                                         TheCall->getCallee()->getBeginLoc(),
                                         TheCall->getRParenLoc());
}

/// SemaConvertVectorExpr - Handle __builtin_convertvector
ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                       SourceLocation BuiltinLoc,
                                       SourceLocation RParenLoc) {
  ExprValueKind VK = VK_RValue;
  ExprObjectKind OK = OK_Ordinary;
  QualType DstTy = TInfo->getType();
  QualType SrcTy = E->getType();

  // Both the operand and the destination type must be vectors (or dependent).
  if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector)
                     << E->getSourceRange());
  if (!DstTy->isVectorType() && !DstTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector_type));

  // The element counts must match exactly.
  if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
    unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements();
    unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements();
    if (SrcElts != DstElts)
      return ExprError(Diag(BuiltinLoc,
                            diag::err_convertvector_incompatible_vector)
                       << E->getSourceRange());
  }

  return new (Context)
      ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
}

/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
// This is declared to take (const void*, ...) and can take two
// optional constant int args.
bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs > 3)
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();

  // Argument 0 is checked for us and the remaining arguments must be
  // constant integers.  Arg 1 (rw) must be 0..1, arg 2 (locality) 0..3.
  for (unsigned i = 1; i != NumArgs; ++i)
    if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
      return true;

  return false;
}

/// SemaBuiltinAssume - Handle __assume (MS Extension).
// __assume does not evaluate its arguments, and should warn if its argument
// has side effects.
bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);
  if (Arg->isInstantiationDependent()) return false;

  if (Arg->HasSideEffects(Context))
    Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
        << Arg->getSourceRange()
        << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();

  return false;
}

/// Handle __builtin_alloca_with_align. This is declared
/// as (size_t, size_t) where the second size_t must be a power of 2 greater
/// than 8.
bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
  // The alignment must be a constant integer.
  Expr *Arg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
    // Passing alignof(T) is likely a mistake: alignof yields the minimum
    // alignment, while the builtin wants a requested alignment.
    if (const auto *UE =
            dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
      if (UE->getKind() == UETT_AlignOf ||
          UE->getKind() == UETT_PreferredAlignOf)
        Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
            << Arg->getSourceRange();

    llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << Arg->getSourceRange();

    if (Result < Context.getCharWidth())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small)
             << (unsigned)Context.getCharWidth() << Arg->getSourceRange();

    if (Result > std::numeric_limits<int32_t>::max())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big)
             << std::numeric_limits<int32_t>::max() << Arg->getSourceRange();
  }

  return false;
}

/// Handle __builtin_assume_aligned. This is declared
/// as (const void*, size_t, ...) and can take one optional constant int arg.
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs > 3)
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();

  // The alignment must be a constant integer.
  Expr *Arg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, 1, Result))
      return true;

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << Arg->getSourceRange();
  }

  // The optional third argument (offset) is converted as if passed to a
  // size_t parameter.
  if (NumArgs > 2) {
    ExprResult Arg(TheCall->getArg(2));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
      Context.getSizeType(), false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());
  }

  return false;
}

/// Check calls to __builtin_os_log_format() and
/// __builtin_os_log_format_buffer_size(), validating the buffer and format
/// arguments and promoting the variadic data arguments.
bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  // The size variant takes only a format string; the format variant also
  // takes a destination buffer as its first argument.
  unsigned NumArgs = TheCall->getNumArgs();
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << TheCall->getSourceRange();
  }
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << TheCall->getSourceRange();
  }
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    // Each argument's size is encoded in a single byte of the os_log buffer,
    // so it must fit in 0xff bytes.
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}

/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression.
5891 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 5892 llvm::APSInt &Result) { 5893 Expr *Arg = TheCall->getArg(ArgNum); 5894 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5895 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5896 5897 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 5898 5899 if (!Arg->isIntegerConstantExpr(Result, Context)) 5900 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 5901 << FDecl->getDeclName() << Arg->getSourceRange(); 5902 5903 return false; 5904 } 5905 5906 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 5907 /// TheCall is a constant expression in the range [Low, High]. 5908 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 5909 int Low, int High, bool RangeIsError) { 5910 llvm::APSInt Result; 5911 5912 // We can't check the value of a dependent argument. 5913 Expr *Arg = TheCall->getArg(ArgNum); 5914 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5915 return false; 5916 5917 // Check constant-ness first. 5918 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5919 return true; 5920 5921 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 5922 if (RangeIsError) 5923 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 5924 << Result.toString(10) << Low << High << Arg->getSourceRange(); 5925 else 5926 // Defer the warning until we know if the code will be emitted so that 5927 // dead code can ignore this. 5928 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 5929 PDiag(diag::warn_argument_invalid_range) 5930 << Result.toString(10) << Low << High 5931 << Arg->getSourceRange()); 5932 } 5933 5934 return false; 5935 } 5936 5937 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 5938 /// TheCall is a constant expression is a multiple of Num.. 
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}

/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.  The register string is either
  // a bare register name or a colon-separated encoding (e.g. "cp15:0:c1").
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // On 32-bit ARM the fields carry "cp"/"p" and "c" prefixes; strip them
      // before validating the numeric parts below.
      ValidString &= Fields[0].startswith_lower("cp") ||
                     Fields[0].startswith_lower("p");
      if (ValidString)
        Fields[0] =
            Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);

      ValidString &= Fields[2].startswith_lower("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].startswith_lower("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    // Per-field upper bounds from the ACLE register encoding.
    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i=0; i<Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // If the register name is one of those that appear in the condition below
    // and the special register builtin being used is one of the write builtins,
    // then we require that the argument provided for writing to the register
    // is an integer constant expression. This is because it will be lowered to
    // an MSR (immediate) instruction, so we need to know the immediate at
    // compile time.
    if (TheCall->getNumArgs() != 2)
      return false;

    std::string RegLower = Reg.lower();
    if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
        RegLower != "pan" && RegLower != "uao")
      return false;

    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  return false;
}

/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());

  Expr *Arg = TheCall->getArg(1);
  llvm::APSInt Result;

  // TODO: This is less than ideal. Overload this to take a value.
  if (SemaBuiltinConstantArg(TheCall, 1, Result))
    return true;

  if (Result != 1)
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
           << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());

  return false;
}

/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
/// This checks that the target supports __builtin_setjmp.
bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
  return false;
}

namespace {

// Collects format-string expressions that leave some variadic data argument
// unreferenced, so that a single combined diagnostic can be emitted once all
// candidate format strings for a call have been checked (see Diagnose()).
class UncoveredArgHandler {
  // Sentinel states for FirstUncoveredArg: Unknown = nothing recorded yet,
  // AllCovered = some format string covered every argument, so no diagnostic
  // should be emitted.
  enum { Unknown = -1, AllCovered = -2 };

  // Highest first-uncovered data-argument index seen so far, or a sentinel.
  signed FirstUncoveredArg = Unknown;
  // All format-string expressions whose first uncovered argument index equals
  // FirstUncoveredArg; they are reported together.
  SmallVector<const Expr *, 4> DiagnosticExprs;

public:
  UncoveredArgHandler() = default;

  bool hasUncoveredArg() const {
    return (FirstUncoveredArg >= 0);
  }

  unsigned getUncoveredArg() const {
    assert(hasUncoveredArg() && "no uncovered argument");
    return FirstUncoveredArg;
  }

  void setAllCovered() {
    // A string has been found with all arguments covered, so clear out
    // the diagnostics.
    DiagnosticExprs.clear();
    FirstUncoveredArg = AllCovered;
  }

  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
    assert(NewFirstUncoveredArg >= 0 && "Outside range");

    // Don't update if a previous string covers all arguments.
    if (FirstUncoveredArg == AllCovered)
      return;

    // UncoveredArgHandler tracks the highest uncovered argument index
    // and with it all the strings that match this index.
  // Overflow: retry the whole addition with the offset sign-extended to
  // twice the width. The assert guards the doubling itself from wrapping.
  if (Ov) {
    assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
           "index (intermediate) result too big");
    Offset = Offset.sext(2 * BitWidth);
    sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
    return;
  }

  Offset = ResOffset;
}

namespace {

// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
class FormatStringLiteral {
  // The underlying literal; never null.
  const StringLiteral *FExpr;
  // Character offset into the literal at which the format string starts.
  int64_t Offset;

public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  bool isAscii() const { return FExpr->isAscii(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  // Byte positions handed in by the format-string parser are relative to the
  // offsetted string, so shift them back before querying the literal.
  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);
  }

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};

} // namespace

static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
                              const Expr *OrigFormatExpr,
                              ArrayRef<const Expr *> Args,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg,
                              Sema::FormatStringType Type,
                              bool inFunctionCall,
                              Sema::VariadicCallType CallType,
                              llvm::SmallBitVector &CheckedVarArgs,
                              UncoveredArgHandler &UncoveredArg);

// Determine if an expression is a string literal or constant string.
// If this function returns false on the arguments to a function expecting a
// format string, we will usually need to emit a warning.
// True string literals are then checked by CheckFormatString.
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
                      bool HasVAListArg, unsigned format_idx,
                      unsigned firstDataArg, Sema::FormatStringType Type,
                      Sema::VariadicCallType CallType, bool InFunctionCall,
                      llvm::SmallBitVector &CheckedVarArgs,
                      UncoveredArgHandler &UncoveredArg,
                      llvm::APSInt Offset) {
 tryAgain:
  assert(Offset.isSigned() && "invalid offset");

  if (E->isTypeDependent() || E->isValueDependent())
    return SLCT_NotALiteral;

  E = E->IgnoreParenCasts();

  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
    // Technically -Wformat-nonliteral does not warn about this case.
    // The behavior of printf and friends in this case is implementation
    // dependent.  Ideally if the format string cannot be null then
    // it should have a 'nonnull' attribute in the function prototype.
    return SLCT_UncheckedLiteral;

  switch (E->getStmtClass()) {
  case Stmt::BinaryConditionalOperatorClass:
  case Stmt::ConditionalOperatorClass: {
    // The expression is a literal if both sub-expressions were, and it was
    // completely checked only if both sub-expressions were checked.
    const AbstractConditionalOperator *C =
        cast<AbstractConditionalOperator>(E);

    // Determine whether it is necessary to check both sub-expressions, for
    // example, because the condition expression is a constant that can be
    // evaluated at compile time.
    bool CheckLeft = true, CheckRight = true;

    bool Cond;
    if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext())) {
      if (Cond)
        CheckRight = false;
      else
        CheckLeft = false;
    }

    // We need to maintain the offsets for the right and the left hand side
    // separately to check if every possible indexed expression is a valid
    // string literal. They might have different offsets for different string
    // literals in the end.
    StringLiteralCheckType Left;
    if (!CheckLeft)
      Left = SLCT_UncheckedLiteral;
    else {
      Left = checkFormatStringExpr(S, C->getTrueExpr(), Args,
                                   HasVAListArg, format_idx, firstDataArg,
                                   Type, CallType, InFunctionCall,
                                   CheckedVarArgs, UncoveredArg, Offset);
      if (Left == SLCT_NotALiteral || !CheckRight) {
        return Left;
      }
    }

    StringLiteralCheckType Right =
        checkFormatStringExpr(S, C->getFalseExpr(), Args,
                              HasVAListArg, format_idx, firstDataArg,
                              Type, CallType, InFunctionCall, CheckedVarArgs,
                              UncoveredArg, Offset);

    // Return the weaker of the two results (enum order: NotALiteral <
    // UncheckedLiteral < CheckedLiteral).
    return (CheckLeft && Left < Right) ? Left : Right;
  }

  case Stmt::ImplicitCastExprClass:
    // Peel the cast and re-examine the operand; goto (not recursion) keeps
    // the accumulated Offset.
    E = cast<ImplicitCastExpr>(E)->getSubExpr();
    goto tryAgain;

  case Stmt::OpaqueValueExprClass:
    if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
      E = src;
      goto tryAgain;
    }
    return SLCT_NotALiteral;

  case Stmt::PredefinedExprClass:
    // While __func__, etc., are technically not string literals, they
    // cannot contain format specifiers and thus are not a security
    // liability.
    return SLCT_UncheckedLiteral;

  case Stmt::DeclRefExprClass: {
    const DeclRefExpr *DR = cast<DeclRefExpr>(E);

    // As an exception, do not flag errors for variables binding to
    // const string literals.
    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
      bool isConstant = false;
      QualType T = DR->getType();

      if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
        isConstant = AT->getElementType().isConstant(S.Context);
      } else if (const PointerType *PT = T->getAs<PointerType>()) {
        isConstant = T.isConstant(S.Context) &&
                     PT->getPointeeType().isConstant(S.Context);
      } else if (T->isObjCObjectPointerType()) {
        // In ObjC, there is usually no "const ObjectPointer" type,
        // so don't check if the pointee type is constant.
        isConstant = T.isConstant(S.Context);
      }

      if (isConstant) {
        if (const Expr *Init = VD->getAnyInitializer()) {
          // Look through initializers like const char c[] = { "foo" }
          if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
            if (InitList->isStringLiteralInit())
              Init = InitList->getInit(0)->IgnoreParenImpCasts();
          }
          return checkFormatStringExpr(S, Init, Args,
                                       HasVAListArg, format_idx,
                                       firstDataArg, Type, CallType,
                                       /*InFunctionCall*/ false, CheckedVarArgs,
                                       UncoveredArg, Offset);
        }
      }

      // For vprintf* functions (i.e., HasVAListArg==true), we add a
      // special check to see if the format string is a function parameter
      // of the function calling the printf function.  If the function
      // has an attribute indicating it is a printf-like function, then we
      // should suppress warnings concerning non-literals being used in a call
      // to a vprintf function.  For example:
      //
      // void
      // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){
      //      va_list ap;
      //      va_start(ap, fmt);
      //      vprintf(fmt, ap);  // Do NOT emit a warning about "fmt".
      //      ...
      // }
      if (HasVAListArg) {
        if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
          if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
            int PVIndex = PV->getFunctionScopeIndex() + 1;
            for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) {
              // adjust for implicit parameter
              if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
                if (MD->isInstance())
                  ++PVIndex;
              // We also check if the formats are compatible.
              // We can't pass a 'scanf' string to a 'printf' function.
              if (PVIndex == PVFormat->getFormatIdx() &&
                  Type == S.GetFormatStringType(PVFormat))
                return SLCT_UncheckedLiteral;
            }
          }
        }
      }
    }

    return SLCT_NotALiteral;
  }

  case Stmt::CallExprClass:
  case Stmt::CXXMemberCallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);
    if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
      // A callee with format_arg attributes forwards one of its arguments as
      // the format string; check that argument instead. Only the first
      // attribute's result is returned.
      bool IsFirst = true;
      StringLiteralCheckType CommonResult;
      for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
        const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
        StringLiteralCheckType Result = checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
        if (IsFirst) {
          CommonResult = Result;
          IsFirst = false;
        }
      }
      if (!IsFirst)
        return CommonResult;

      if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
        unsigned BuiltinID = FD->getBuiltinID();
        if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
            BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
          const Expr *Arg = CE->getArg(0);
          return checkFormatStringExpr(S, Arg, Args,
                                       HasVAListArg, format_idx,
                                       firstDataArg, Type, CallType,
                                       InFunctionCall, CheckedVarArgs,
                                       UncoveredArg, Offset);
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCMessageExprClass: {
    const auto *ME = cast<ObjCMessageExpr>(E);
    if (const auto *ND = ME->getMethodDecl()) {
      if (const auto *FA = ND->getAttr<FormatArgAttr>()) {
        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
                        firstDataArg, Type, InFunctionCall, CallType,
                        CheckedVarArgs, UncoveredArg);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    llvm::APSInt LResult;
    llvm::APSInt RResult;

    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
    if (BinOp->isAdditiveOp()) {
      bool LIsInt = BinOp->getLHS()->EvaluateAsInt(LResult, S.Context);
      bool RIsInt = BinOp->getRHS()->EvaluateAsInt(RResult, S.Context);

      // Exactly one side must be a constant integer; the other side is the
      // candidate string. "int - string" is not handled (only subtraction
      // with the addend on the right is meaningful).
      if (LIsInt != RIsInt) {
        BinaryOperatorKind BinOpKind = BinOp->getOpcode();

        if (LIsInt) {
          if (BinOpKind == BO_Add) {
            sumOffsets(Offset, LResult, BinOpKind, RIsInt);
            E = BinOp->getRHS();
            goto tryAgain;
          }
        } else {
          sumOffsets(Offset, RResult, BinOpKind, RIsInt);
          E = BinOp->getLHS();
          goto tryAgain;
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::UnaryOperatorClass: {
    // &str[i] is treated like str + i.
    const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
    auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
    if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
      llvm::APSInt IndexResult;
      if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context)) {
        sumOffsets(Offset, IndexResult, BO_Add, /*RHS is int*/ true);
        E = ASE->getBase();
        goto tryAgain;
      }
    }

    return SLCT_NotALiteral;
  }

  default:
    return SLCT_NotALiteral;
  }
}

Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
  return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
      .Case("scanf", FST_Scanf)
      .Cases("printf", "printf0", FST_Printf)
      .Cases("NSString", "CFString", FST_NSString)
      .Case("strftime", FST_Strftime)
      .Case("strfmon", FST_Strfmon)
      .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
      .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
      .Case("os_trace", FST_OSLog)
      .Case("os_log", FST_OSLog)
      .Default(FST_Unknown);
}

/// CheckFormatArguments - Check calls to printf and scanf (and similar
/// functions) for correct use of format strings.
/// Returns true if a format string has been fully checked.
bool Sema::CheckFormatArguments(const FormatAttr *Format,
                                ArrayRef<const Expr *> Args,
                                bool IsCXXMember,
                                VariadicCallType CallType,
                                SourceLocation Loc, SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // Attribute-driven entry point: decode the format attribute into indices,
  // then delegate to the index-based overload below.
  FormatStringInfo FSI;
  if (getFormatStringInfo(Format, IsCXXMember, &FSI))
    return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx,
                                FSI.FirstDataArg, GetFormatStringType(Format),
                                CallType, Loc, Range, CheckedVarArgs);
  return false;
}

bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                bool HasVAListArg, unsigned format_idx,
                                unsigned firstDataArg, FormatStringType Type,
                                VariadicCallType CallType,
                                SourceLocation Loc, SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;
    return false;
  }

  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time.  Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT =
      checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
                            format_idx, firstDataArg, Type, CallType,
                            /*IsFunctionCall*/ true, CheckedVarArgs,
                            UncoveredArg,
                            // 64-bit *signed* (isUnsigned=false) zero offset;
                            // checkFormatStringExpr asserts it is signed.
                            /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);
  }

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)
    return false;

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
    return false;

  // If there are no arguments specified, warn with -Wformat-security, otherwise
  // warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
        << OrigFormatExpr->getSourceRange();
    switch (Type) {
    default:
      break;
    case FST_Kprintf:
    case FST_FreeBSDKPrintf:
    case FST_Printf:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      break;
    case FST_NSString:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
      break;
    }
  } else {
    Diag(FormatLoc, diag::warn_format_nonliteral)
        << OrigFormatExpr->getSourceRange();
  }
  return false;
}

namespace {

// Base callback handler for the format-string parser: maps parse events
// (bad specifiers, positions, null bytes, ...) to Sema diagnostics and
// tracks which data arguments the format string consumed.
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
protected:
  Sema &S;
  const FormatStringLiteral *FExpr;
  const Expr *OrigFormatExpr;
  const Sema::FormatStringType FSType;
  const unsigned FirstDataArg;
  const unsigned NumDataArgs;
  const char *Beg; // Start of format string.
  const bool HasVAListArg;
  // All arguments of the call being checked (format + data arguments).
  ArrayRef<const Expr *> Args;
  unsigned FormatIdx;
  // Bit i set => data argument i was consumed by some conversion specifier.
  llvm::SmallBitVector CoveredArgs;
  bool usesPositionalArgs = false;
  bool atFirstArg = true;
  bool inFunctionCall;
  Sema::VariadicCallType CallType;
  llvm::SmallBitVector &CheckedVarArgs;
  UncoveredArgHandler &UncoveredArg;

public:
  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, const char *beg, bool hasVAListArg,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType callType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
        FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
        HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx),
        inFunctionCall(inFunctionCall), CallType(callType),
        CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
    CoveredArgs.resize(numDataArgs);
    CoveredArgs.reset();
  }

  void DoneProcessing();

  void HandleIncompleteSpecifier(const char *startSpecifier,
                                 unsigned specifierLen) override;

  void HandleInvalidLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen,
      unsigned DiagID);

  void HandleNonStandardLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen);

  void HandleNonStandardConversionSpecifier(
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen);

  void HandlePosition(const char *startPos, unsigned posLen) override;

  void HandleInvalidPosition(const char *startSpecifier,
                             unsigned specifierLen,
                             analyze_format_string::PositionContext p) override;

  void HandleZeroPosition(const char *startPos, unsigned posLen) override;

  void HandleNullChar(const char *nullCharacter) override;

  template <typename Range>
  static void
  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
                       const PartialDiagnostic &PDiag, SourceLocation StringLoc,
                       bool IsStringLocation, Range StringRange,
                       ArrayRef<FixItHint> Fixit = None);

protected:
  bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
                                        const char *startSpec,
                                        unsigned specifierLen,
                                        const char *csStart, unsigned csLen);

  void HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                         const char *startSpec,
                                         unsigned specifierLen);

  SourceRange getFormatStringRange();
  CharSourceRange getSpecifierRange(const char *startSpecifier,
                                    unsigned specifierLen);
  SourceLocation getLocationOfByte(const char *x);

  const Expr *getDataArg(unsigned i) const;

  bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
                    const analyze_format_string::ConversionSpecifier &CS,
                    const char *startSpecifier, unsigned specifierLen,
                    unsigned argIndex);

  template <typename Range>
  void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                            bool IsStringLocation, Range StringRange,
                            ArrayRef<FixItHint> Fixit = None);
};

} // namespace

SourceRange CheckFormatHandler::getFormatStringRange() {
  return OrigFormatExpr->getSourceRange();
}

CharSourceRange CheckFormatHandler::
getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
  SourceLocation Start = getLocationOfByte(startSpecifier);
  SourceLocation End   = getLocationOfByte(startSpecifier + specifierLen - 1);
  // Advance the end SourceLocation by one due to half-open ranges.
  End = End.getLocWithOffset(1);

  return CharSourceRange::getCharRange(Start, End);
}

// Translate a pointer into the format-string buffer (relative to Beg) into
// a source location inside the literal.
SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
  return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
                                  S.getLangOpts(), S.Context.getTargetInfo());
}

void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
                                                   unsigned specifierLen){
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
                       getLocationOfByte(startSpecifier),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen));
}

void CheckFormatHandler::HandleInvalidLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    // Known correction: warn, then attach a note with a replacement fix-it.
    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    // No known correction; for a nonsensical modifier suggest removing it.
    FixItHint Hint;
    if (DiagID == diag::warn_format_nonsensical_length)
      Hint = FixItHint::CreateRemoval(LMRange);

    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen),
                         Hint);
  }
}

void CheckFormatHandler::HandleNonStandardLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandleNonStandardConversionSpecifier(
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  // See if we know how to fix this conversion specifier.
  Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
  if (FixedCS) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
    S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
      << FixedCS->toString()
      << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandlePosition(const char *startPos,
                                        unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void
CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
                                     analyze_format_string::PositionContext p) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
                         << (unsigned) p,
                       getLocationOfByte(startPos), /*IsStringLocation*/true,
                       getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleZeroPosition(const char *startPos,
                                            unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
  if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
    // The presence of a null character is likely an error.
    EmitFormatDiagnostic(
        S.PDiag(diag::warn_printf_format_string_contains_null_char),
        getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
        getFormatStringRange());
  }
}

// Note that this may return NULL if there was an error parsing or building
// one of the argument expressions.
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}

void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (!HasVAListArg) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  // One warning, with the ranges of every format string that left this
  // argument uncovered attached.
  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
                                  S, IsFunctionCall, DiagnosticExprs[0],
                                  PDiag, Loc, /*IsStringLocation*/false,
                                  DiagnosticExprs[0]->getSourceRange());
}

bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
  std::string CodePointStr;
  if (!llvm::sys::locale::isPrint(*csStart)) {
    llvm::UTF32 CodePoint;
    const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
    const llvm::UTF8 *E =
        reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
    llvm::ConversionResult Result =
        llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);

    // Not valid UTF-8: fall back to showing the raw first byte.
    if (Result != llvm::conversionOK) {
      unsigned char FirstChar = *csStart;
      CodePoint = (llvm::UTF32)FirstChar;
    }

    llvm::raw_string_ostream OS(CodePointStr);
    if (CodePoint < 256)
      OS << "\\x" << llvm::format("%02x", CodePoint);
    else if (CodePoint <= 0xFFFF)
      OS << "\\u" << llvm::format("%04x", CodePoint);
    else
      OS << "\\U" << llvm::format("%08x", CodePoint);
    OS.flush();
    Specifier = CodePointStr;
  }

  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
      /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));

  return keepGoing;
}

void
CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                                      const char *startSpec,
                                                      unsigned specifierLen) {
  EmitFormatDiagnostic(
S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 7020 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 7021 } 7022 7023 bool 7024 CheckFormatHandler::CheckNumArgs( 7025 const analyze_format_string::FormatSpecifier &FS, 7026 const analyze_format_string::ConversionSpecifier &CS, 7027 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 7028 7029 if (argIndex >= NumDataArgs) { 7030 PartialDiagnostic PDiag = FS.usesPositionalArg() 7031 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 7032 << (argIndex+1) << NumDataArgs) 7033 : S.PDiag(diag::warn_printf_insufficient_data_args); 7034 EmitFormatDiagnostic( 7035 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 7036 getSpecifierRange(startSpecifier, specifierLen)); 7037 7038 // Since more arguments than conversion tokens are given, by extension 7039 // all arguments are covered, so mark this as so. 7040 UncoveredArg.setAllCovered(); 7041 return false; 7042 } 7043 return true; 7044 } 7045 7046 template<typename Range> 7047 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 7048 SourceLocation Loc, 7049 bool IsStringLocation, 7050 Range StringRange, 7051 ArrayRef<FixItHint> FixIt) { 7052 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 7053 Loc, IsStringLocation, StringRange, FixIt); 7054 } 7055 7056 /// If the format string is not within the function call, emit a note 7057 /// so that the function call and string are in diagnostic messages. 7058 /// 7059 /// \param InFunctionCall if true, the format string is within the function 7060 /// call and only one diagnostic message will be produced. Otherwise, an 7061 /// extra note will be emitted pointing to location of the format string. 7062 /// 7063 /// \param ArgumentExpr the expression that is passed as the format string 7064 /// argument in the function call. Used for getting locations when two 7065 /// diagnostics are emitted. 
7066 /// 7067 /// \param PDiag the callee should already have provided any strings for the 7068 /// diagnostic message. This function only adds locations and fixits 7069 /// to diagnostics. 7070 /// 7071 /// \param Loc primary location for diagnostic. If two diagnostics are 7072 /// required, one will be at Loc and a new SourceLocation will be created for 7073 /// the other one. 7074 /// 7075 /// \param IsStringLocation if true, Loc points to the format string should be 7076 /// used for the note. Otherwise, Loc points to the argument list and will 7077 /// be used with PDiag. 7078 /// 7079 /// \param StringRange some or all of the string to highlight. This is 7080 /// templated so it can accept either a CharSourceRange or a SourceRange. 7081 /// 7082 /// \param FixIt optional fix it hint for the format string. 7083 template <typename Range> 7084 void CheckFormatHandler::EmitFormatDiagnostic( 7085 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 7086 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 7087 Range StringRange, ArrayRef<FixItHint> FixIt) { 7088 if (InFunctionCall) { 7089 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 7090 D << StringRange; 7091 D << FixIt; 7092 } else { 7093 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 7094 << ArgumentExpr->getSourceRange(); 7095 7096 const Sema::SemaDiagnosticBuilder &Note = 7097 S.Diag(IsStringLocation ? 
Loc : StringRange.getBegin(), 7098 diag::note_format_string_defined); 7099 7100 Note << StringRange; 7101 Note << FixIt; 7102 } 7103 } 7104 7105 //===--- CHECK: Printf format string checking ------------------------------===// 7106 7107 namespace { 7108 7109 class CheckPrintfHandler : public CheckFormatHandler { 7110 public: 7111 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 7112 const Expr *origFormatExpr, 7113 const Sema::FormatStringType type, unsigned firstDataArg, 7114 unsigned numDataArgs, bool isObjC, const char *beg, 7115 bool hasVAListArg, ArrayRef<const Expr *> Args, 7116 unsigned formatIdx, bool inFunctionCall, 7117 Sema::VariadicCallType CallType, 7118 llvm::SmallBitVector &CheckedVarArgs, 7119 UncoveredArgHandler &UncoveredArg) 7120 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 7121 numDataArgs, beg, hasVAListArg, Args, formatIdx, 7122 inFunctionCall, CallType, CheckedVarArgs, 7123 UncoveredArg) {} 7124 7125 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 7126 7127 /// Returns true if '%@' specifiers are allowed in the format string. 
7128 bool allowsObjCArg() const { 7129 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7130 FSType == Sema::FST_OSTrace; 7131 } 7132 7133 bool HandleInvalidPrintfConversionSpecifier( 7134 const analyze_printf::PrintfSpecifier &FS, 7135 const char *startSpecifier, 7136 unsigned specifierLen) override; 7137 7138 void handleInvalidMaskType(StringRef MaskType) override; 7139 7140 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7141 const char *startSpecifier, 7142 unsigned specifierLen) override; 7143 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7144 const char *StartSpecifier, 7145 unsigned SpecifierLen, 7146 const Expr *E); 7147 7148 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7149 const char *startSpecifier, unsigned specifierLen); 7150 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7151 const analyze_printf::OptionalAmount &Amt, 7152 unsigned type, 7153 const char *startSpecifier, unsigned specifierLen); 7154 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7155 const analyze_printf::OptionalFlag &flag, 7156 const char *startSpecifier, unsigned specifierLen); 7157 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 7158 const analyze_printf::OptionalFlag &ignoredFlag, 7159 const analyze_printf::OptionalFlag &flag, 7160 const char *startSpecifier, unsigned specifierLen); 7161 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 7162 const Expr *E); 7163 7164 void HandleEmptyObjCModifierFlag(const char *startFlag, 7165 unsigned flagLen) override; 7166 7167 void HandleInvalidObjCModifierFlag(const char *startFlag, 7168 unsigned flagLen) override; 7169 7170 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 7171 const char *flagsEnd, 7172 const char *conversionPosition) 7173 override; 7174 }; 7175 7176 } // namespace 7177 7178 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 7179 const 
analyze_printf::PrintfSpecifier &FS, 7180 const char *startSpecifier, 7181 unsigned specifierLen) { 7182 const analyze_printf::PrintfConversionSpecifier &CS = 7183 FS.getConversionSpecifier(); 7184 7185 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 7186 getLocationOfByte(CS.getStart()), 7187 startSpecifier, specifierLen, 7188 CS.getStart(), CS.getLength()); 7189 } 7190 7191 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 7192 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 7193 } 7194 7195 bool CheckPrintfHandler::HandleAmount( 7196 const analyze_format_string::OptionalAmount &Amt, 7197 unsigned k, const char *startSpecifier, 7198 unsigned specifierLen) { 7199 if (Amt.hasDataArgument()) { 7200 if (!HasVAListArg) { 7201 unsigned argIndex = Amt.getArgIndex(); 7202 if (argIndex >= NumDataArgs) { 7203 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 7204 << k, 7205 getLocationOfByte(Amt.getStart()), 7206 /*IsStringLocation*/true, 7207 getSpecifierRange(startSpecifier, specifierLen)); 7208 // Don't do any more checking. We will just emit 7209 // spurious errors. 7210 return false; 7211 } 7212 7213 // Type check the data argument. It should be an 'int'. 7214 // Although not in conformance with C99, we also allow the argument to be 7215 // an 'unsigned int' as that is a reasonably safe case. GCC also 7216 // doesn't emit a warning for that case. 
7217 CoveredArgs.set(argIndex); 7218 const Expr *Arg = getDataArg(argIndex); 7219 if (!Arg) 7220 return false; 7221 7222 QualType T = Arg->getType(); 7223 7224 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 7225 assert(AT.isValid()); 7226 7227 if (!AT.matchesType(S.Context, T)) { 7228 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 7229 << k << AT.getRepresentativeTypeName(S.Context) 7230 << T << Arg->getSourceRange(), 7231 getLocationOfByte(Amt.getStart()), 7232 /*IsStringLocation*/true, 7233 getSpecifierRange(startSpecifier, specifierLen)); 7234 // Don't do any more checking. We will just emit 7235 // spurious errors. 7236 return false; 7237 } 7238 } 7239 } 7240 return true; 7241 } 7242 7243 void CheckPrintfHandler::HandleInvalidAmount( 7244 const analyze_printf::PrintfSpecifier &FS, 7245 const analyze_printf::OptionalAmount &Amt, 7246 unsigned type, 7247 const char *startSpecifier, 7248 unsigned specifierLen) { 7249 const analyze_printf::PrintfConversionSpecifier &CS = 7250 FS.getConversionSpecifier(); 7251 7252 FixItHint fixit = 7253 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 7254 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 7255 Amt.getConstantLength())) 7256 : FixItHint(); 7257 7258 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 7259 << type << CS.toString(), 7260 getLocationOfByte(Amt.getStart()), 7261 /*IsStringLocation*/true, 7262 getSpecifierRange(startSpecifier, specifierLen), 7263 fixit); 7264 } 7265 7266 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7267 const analyze_printf::OptionalFlag &flag, 7268 const char *startSpecifier, 7269 unsigned specifierLen) { 7270 // Warn about pointless flag with a fixit removal. 
7271 const analyze_printf::PrintfConversionSpecifier &CS = 7272 FS.getConversionSpecifier(); 7273 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 7274 << flag.toString() << CS.toString(), 7275 getLocationOfByte(flag.getPosition()), 7276 /*IsStringLocation*/true, 7277 getSpecifierRange(startSpecifier, specifierLen), 7278 FixItHint::CreateRemoval( 7279 getSpecifierRange(flag.getPosition(), 1))); 7280 } 7281 7282 void CheckPrintfHandler::HandleIgnoredFlag( 7283 const analyze_printf::PrintfSpecifier &FS, 7284 const analyze_printf::OptionalFlag &ignoredFlag, 7285 const analyze_printf::OptionalFlag &flag, 7286 const char *startSpecifier, 7287 unsigned specifierLen) { 7288 // Warn about ignored flag with a fixit removal. 7289 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 7290 << ignoredFlag.toString() << flag.toString(), 7291 getLocationOfByte(ignoredFlag.getPosition()), 7292 /*IsStringLocation*/true, 7293 getSpecifierRange(startSpecifier, specifierLen), 7294 FixItHint::CreateRemoval( 7295 getSpecifierRange(ignoredFlag.getPosition(), 1))); 7296 } 7297 7298 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 7299 unsigned flagLen) { 7300 // Warn about an empty flag. 7301 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 7302 getLocationOfByte(startFlag), 7303 /*IsStringLocation*/true, 7304 getSpecifierRange(startFlag, flagLen)); 7305 } 7306 7307 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 7308 unsigned flagLen) { 7309 // Warn about an invalid flag. 
7310 auto Range = getSpecifierRange(startFlag, flagLen); 7311 StringRef flag(startFlag, flagLen); 7312 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 7313 getLocationOfByte(startFlag), 7314 /*IsStringLocation*/true, 7315 Range, FixItHint::CreateRemoval(Range)); 7316 } 7317 7318 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 7319 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 7320 // Warn about using '[...]' without a '@' conversion. 7321 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 7322 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 7323 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 7324 getLocationOfByte(conversionPosition), 7325 /*IsStringLocation*/true, 7326 Range, FixItHint::CreateRemoval(Range)); 7327 } 7328 7329 // Determines if the specified is a C++ class or struct containing 7330 // a member with the specified name and kind (e.g. a CXXMethodDecl named 7331 // "c_str()"). 7332 template<typename MemberKind> 7333 static llvm::SmallPtrSet<MemberKind*, 1> 7334 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 7335 const RecordType *RT = Ty->getAs<RecordType>(); 7336 llvm::SmallPtrSet<MemberKind*, 1> Results; 7337 7338 if (!RT) 7339 return Results; 7340 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 7341 if (!RD || !RD->getDefinition()) 7342 return Results; 7343 7344 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 7345 Sema::LookupMemberName); 7346 R.suppressDiagnostics(); 7347 7348 // We just need to include all members of the right kind turned up by the 7349 // filter, at this point. 
7350 if (S.LookupQualifiedName(R, RT->getDecl())) 7351 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 7352 NamedDecl *decl = (*I)->getUnderlyingDecl(); 7353 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 7354 Results.insert(FK); 7355 } 7356 return Results; 7357 } 7358 7359 /// Check if we could call '.c_str()' on an object. 7360 /// 7361 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 7362 /// allow the call, or if it would be ambiguous). 7363 bool Sema::hasCStrMethod(const Expr *E) { 7364 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 7365 7366 MethodSet Results = 7367 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 7368 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 7369 MI != ME; ++MI) 7370 if ((*MI)->getMinRequiredArguments() == 0) 7371 return true; 7372 return false; 7373 } 7374 7375 // Check if a (w)string was passed when a (w)char* was needed, and offer a 7376 // better diagnostic if so. AT is assumed to be valid. 7377 // Returns true when a c_str() conversion method is found. 7378 bool CheckPrintfHandler::checkForCStrMembers( 7379 const analyze_printf::ArgType &AT, const Expr *E) { 7380 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 7381 7382 MethodSet Results = 7383 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 7384 7385 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 7386 MI != ME; ++MI) { 7387 const CXXMethodDecl *Method = *MI; 7388 if (Method->getMinRequiredArguments() == 0 && 7389 AT.matchesType(S.Context, Method->getReturnType())) { 7390 // FIXME: Suggest parens if the expression needs them. 
7391 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 7392 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 7393 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 7394 return true; 7395 } 7396 } 7397 7398 return false; 7399 } 7400 7401 bool 7402 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 7403 &FS, 7404 const char *startSpecifier, 7405 unsigned specifierLen) { 7406 using namespace analyze_format_string; 7407 using namespace analyze_printf; 7408 7409 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 7410 7411 if (FS.consumesDataArgument()) { 7412 if (atFirstArg) { 7413 atFirstArg = false; 7414 usesPositionalArgs = FS.usesPositionalArg(); 7415 } 7416 else if (usesPositionalArgs != FS.usesPositionalArg()) { 7417 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 7418 startSpecifier, specifierLen); 7419 return false; 7420 } 7421 } 7422 7423 // First check if the field width, precision, and conversion specifier 7424 // have matching data arguments. 7425 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 7426 startSpecifier, specifierLen)) { 7427 return false; 7428 } 7429 7430 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 7431 startSpecifier, specifierLen)) { 7432 return false; 7433 } 7434 7435 if (!CS.consumesDataArgument()) { 7436 // FIXME: Technically specifying a precision or field width here 7437 // makes no sense. Worth issuing a warning at some point. 7438 return true; 7439 } 7440 7441 // Consume the argument. 7442 unsigned argIndex = FS.getArgIndex(); 7443 if (argIndex < NumDataArgs) { 7444 // The check to see if the argIndex is valid will come later. 7445 // We set the bit here because we may exit early from this 7446 // function if we encounter some other error. 7447 CoveredArgs.set(argIndex); 7448 } 7449 7450 // FreeBSD kernel extensions. 
7451 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 7452 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 7453 // We need at least two arguments. 7454 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 7455 return false; 7456 7457 // Claim the second argument. 7458 CoveredArgs.set(argIndex + 1); 7459 7460 // Type check the first argument (int for %b, pointer for %D) 7461 const Expr *Ex = getDataArg(argIndex); 7462 const analyze_printf::ArgType &AT = 7463 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 7464 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 7465 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 7466 EmitFormatDiagnostic( 7467 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7468 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 7469 << false << Ex->getSourceRange(), 7470 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7471 getSpecifierRange(startSpecifier, specifierLen)); 7472 7473 // Type check the second argument (char * for both %b and %D) 7474 Ex = getDataArg(argIndex + 1); 7475 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 7476 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 7477 EmitFormatDiagnostic( 7478 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7479 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 7480 << false << Ex->getSourceRange(), 7481 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7482 getSpecifierRange(startSpecifier, specifierLen)); 7483 7484 return true; 7485 } 7486 7487 // Check for using an Objective-C specific conversion specifier 7488 // in a non-ObjC literal. 7489 if (!allowsObjCArg() && CS.isObjCArg()) { 7490 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7491 specifierLen); 7492 } 7493 7494 // %P can only be used with os_log. 
7495 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 7496 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7497 specifierLen); 7498 } 7499 7500 // %n is not allowed with os_log. 7501 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 7502 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 7503 getLocationOfByte(CS.getStart()), 7504 /*IsStringLocation*/ false, 7505 getSpecifierRange(startSpecifier, specifierLen)); 7506 7507 return true; 7508 } 7509 7510 // Only scalars are allowed for os_trace. 7511 if (FSType == Sema::FST_OSTrace && 7512 (CS.getKind() == ConversionSpecifier::PArg || 7513 CS.getKind() == ConversionSpecifier::sArg || 7514 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 7515 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7516 specifierLen); 7517 } 7518 7519 // Check for use of public/private annotation outside of os_log(). 7520 if (FSType != Sema::FST_OSLog) { 7521 if (FS.isPublic().isSet()) { 7522 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7523 << "public", 7524 getLocationOfByte(FS.isPublic().getPosition()), 7525 /*IsStringLocation*/ false, 7526 getSpecifierRange(startSpecifier, specifierLen)); 7527 } 7528 if (FS.isPrivate().isSet()) { 7529 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7530 << "private", 7531 getLocationOfByte(FS.isPrivate().getPosition()), 7532 /*IsStringLocation*/ false, 7533 getSpecifierRange(startSpecifier, specifierLen)); 7534 } 7535 } 7536 7537 // Check for invalid use of field width 7538 if (!FS.hasValidFieldWidth()) { 7539 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 7540 startSpecifier, specifierLen); 7541 } 7542 7543 // Check for invalid use of precision 7544 if (!FS.hasValidPrecision()) { 7545 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 7546 startSpecifier, specifierLen); 7547 } 7548 7549 // Precision is mandatory for %P specifier. 
7550 if (CS.getKind() == ConversionSpecifier::PArg && 7551 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 7552 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 7553 getLocationOfByte(startSpecifier), 7554 /*IsStringLocation*/ false, 7555 getSpecifierRange(startSpecifier, specifierLen)); 7556 } 7557 7558 // Check each flag does not conflict with any other component. 7559 if (!FS.hasValidThousandsGroupingPrefix()) 7560 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 7561 if (!FS.hasValidLeadingZeros()) 7562 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 7563 if (!FS.hasValidPlusPrefix()) 7564 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 7565 if (!FS.hasValidSpacePrefix()) 7566 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 7567 if (!FS.hasValidAlternativeForm()) 7568 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 7569 if (!FS.hasValidLeftJustified()) 7570 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 7571 7572 // Check that flags are not ignored by another flag 7573 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 7574 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 7575 startSpecifier, specifierLen); 7576 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 7577 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 7578 startSpecifier, specifierLen); 7579 7580 // Check the length modifier is valid with the given conversion specifier. 
7581 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo())) 7582 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7583 diag::warn_format_nonsensical_length); 7584 else if (!FS.hasStandardLengthModifier()) 7585 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 7586 else if (!FS.hasStandardLengthConversionCombination()) 7587 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7588 diag::warn_format_non_standard_conversion_spec); 7589 7590 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 7591 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 7592 7593 // The remaining checks depend on the data arguments. 7594 if (HasVAListArg) 7595 return true; 7596 7597 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 7598 return false; 7599 7600 const Expr *Arg = getDataArg(argIndex); 7601 if (!Arg) 7602 return true; 7603 7604 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 7605 } 7606 7607 static bool requiresParensToAddCast(const Expr *E) { 7608 // FIXME: We should have a general way to reason about operator 7609 // precedence and whether parens are actually needed here. 7610 // Take care of a few common cases where they aren't. 
7611 const Expr *Inside = E->IgnoreImpCasts(); 7612 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 7613 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 7614 7615 switch (Inside->getStmtClass()) { 7616 case Stmt::ArraySubscriptExprClass: 7617 case Stmt::CallExprClass: 7618 case Stmt::CharacterLiteralClass: 7619 case Stmt::CXXBoolLiteralExprClass: 7620 case Stmt::DeclRefExprClass: 7621 case Stmt::FloatingLiteralClass: 7622 case Stmt::IntegerLiteralClass: 7623 case Stmt::MemberExprClass: 7624 case Stmt::ObjCArrayLiteralClass: 7625 case Stmt::ObjCBoolLiteralExprClass: 7626 case Stmt::ObjCBoxedExprClass: 7627 case Stmt::ObjCDictionaryLiteralClass: 7628 case Stmt::ObjCEncodeExprClass: 7629 case Stmt::ObjCIvarRefExprClass: 7630 case Stmt::ObjCMessageExprClass: 7631 case Stmt::ObjCPropertyRefExprClass: 7632 case Stmt::ObjCStringLiteralClass: 7633 case Stmt::ObjCSubscriptRefExprClass: 7634 case Stmt::ParenExprClass: 7635 case Stmt::StringLiteralClass: 7636 case Stmt::UnaryOperatorClass: 7637 return false; 7638 default: 7639 return true; 7640 } 7641 } 7642 7643 static std::pair<QualType, StringRef> 7644 shouldNotPrintDirectly(const ASTContext &Context, 7645 QualType IntendedTy, 7646 const Expr *E) { 7647 // Use a 'while' to peel off layers of typedefs. 7648 QualType TyTy = IntendedTy; 7649 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 7650 StringRef Name = UserTy->getDecl()->getName(); 7651 QualType CastTy = llvm::StringSwitch<QualType>(Name) 7652 .Case("CFIndex", Context.getNSIntegerType()) 7653 .Case("NSInteger", Context.getNSIntegerType()) 7654 .Case("NSUInteger", Context.getNSUIntegerType()) 7655 .Case("SInt32", Context.IntTy) 7656 .Case("UInt32", Context.UnsignedIntTy) 7657 .Default(QualType()); 7658 7659 if (!CastTy.isNull()) 7660 return std::make_pair(CastTy, Name); 7661 7662 TyTy = UserTy->desugar(); 7663 } 7664 7665 // Strip parens if necessary. 
  // Strip parentheses: any interesting typedef sugar is on the inner
  // expression.
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return shouldNotPrintDirectly(Context,
                                  PE->getSubExpr()->getType(),
                                  PE->getSubExpr());

  // If this is a conditional expression, then its result type is constructed
  // via usual arithmetic conversions and thus there might be no necessary
  // typedef sugar there. Recurse to operands to check for NSInteger &
  // Co. usage condition.
  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
    QualType TrueTy, FalseTy;
    StringRef TrueName, FalseName;

    std::tie(TrueTy, TrueName) =
      shouldNotPrintDirectly(Context,
                             CO->getTrueExpr()->getType(),
                             CO->getTrueExpr());
    std::tie(FalseTy, FalseName) =
      shouldNotPrintDirectly(Context,
                             CO->getFalseExpr()->getType(),
                             CO->getFalseExpr());

    // Report a type only when both arms agree, or when exactly one arm has
    // an opinion; a disagreement means no single culprit type can be named.
    if (TrueTy == FalseTy)
      return std::make_pair(TrueTy, TrueName);
    else if (TrueTy.isNull())
      return std::make_pair(FalseTy, FalseName);
    else if (FalseTy.isNull())
      return std::make_pair(TrueTy, TrueName);
  }

  return std::make_pair(QualType(), StringRef());
}

/// Type check a single printf data argument against its conversion
/// specifier, emitting a diagnostic (with fix-its where possible) on
/// mismatch.
///
/// \param FS the parsed printf specifier this argument corresponds to.
/// \param StartSpecifier pointer to the start of the specifier within the
///        format string; \p SpecifierLen is its length (used to build
///        source ranges for diagnostics and fix-its).
/// \param E the argument expression matched to the specifier.
/// \returns true on every path, so that checking of the remaining
///          specifiers in the format string continues.
bool
CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                                    const char *StartSpecifier,
                                    unsigned SpecifierLen,
                                    const Expr *E) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  // Now type check the data expression that matches the
  // format specifier.
  const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
  if (!AT.isValid())
    return true;

  // Look through typeof(expr) sugar to the underlying expression's type.
  QualType ExprTy = E->getType();
  while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
    ExprTy = TET->getUnderlyingExpr()->getType();
  }

  const analyze_printf::ArgType::MatchKind Match =
      AT.matchesType(S.Context, ExprTy);
  bool Pedantic = Match == analyze_printf::ArgType::NoMatchPedantic;
  if (Match == analyze_printf::ArgType::Match)
    return true;

  // Look through argument promotions for our error message's reported type.
  // This includes the integral and floating promotions, but excludes array
  // and function pointer decay; seeing that an argument intended to be a
  // string has type 'char [6]' is probably more confusing than 'char *'.
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_IntegralCast ||
        ICE->getCastKind() == CK_FloatingCast) {
      E = ICE->getSubExpr();
      ExprTy = E->getType();

      // Check if we didn't match because of an implicit cast from a 'char'
      // or 'short' to an 'int'. This is done because printf is a varargs
      // function.
      if (ICE->getType() == S.Context.IntTy ||
          ICE->getType() == S.Context.UnsignedIntTy) {
        // All further checking is done on the subexpression.
        if (AT.matchesType(S.Context, ExprTy))
          return true;
      }
    }
  } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
    // Special case for 'a', which has type 'int' in C.
    // Note, however, that we do /not/ want to treat multibyte constants like
    // 'MooV' as characters! This form is deprecated but still exists.
    if (ExprTy == S.Context.IntTy)
      if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue()))
        ExprTy = S.Context.CharTy;
  }

  // Look through enums to their underlying type.
  bool IsEnum = false;
  if (auto EnumTy = ExprTy->getAs<EnumType>()) {
    ExprTy = EnumTy->getDecl()->getIntegerType();
    IsEnum = true;
  }

  // %C in an Objective-C context prints a unichar, not a wchar_t.
  // If the argument is an integer of some kind, believe the %C and suggest
  // a cast instead of changing the conversion specifier.
  QualType IntendedTy = ExprTy;
  if (isObjCContext() &&
      FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
    if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
        !ExprTy->isCharType()) {
      // 'unichar' is defined as a typedef of unsigned short, but we should
      // prefer using the typedef if it is visible.
      IntendedTy = S.Context.UnsignedShortTy;

      // While we are here, check if the value is an IntegerLiteral that happens
      // to be within the valid range.
      if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
        const llvm::APInt &V = IL->getValue();
        if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
          return true;
      }

      LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(),
                          Sema::LookupOrdinaryName);
      if (S.LookupName(Result, S.getCurScope())) {
        NamedDecl *ND = Result.getFoundDecl();
        if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
          if (TD->getUnderlyingType() == IntendedTy)
            IntendedTy = S.Context.getTypedefType(TD);
      }
    }
  }

  // Special-case some of Darwin's platform-independence types by suggesting
  // casts to primitive types that are known to be large enough.
  bool ShouldNotPrintDirectly = false; StringRef CastTyName;
  if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
    QualType CastTy;
    std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
    if (!CastTy.isNull()) {
      // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
      // (long in ASTContext). Only complain to pedants.
      if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
          (AT.isSizeT() || AT.isPtrdiffT()) &&
          AT.matchesType(S.Context, CastTy))
        Pedantic = true;
      IntendedTy = CastTy;
      ShouldNotPrintDirectly = true;
    }
  }

  // We may be able to offer a FixItHint if it is a supported type.
  PrintfSpecifier fixedFS = FS;
  bool Success =
      fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());

  if (Success) {
    // Get the fix string from the fixed format specifier
    SmallString<16> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);

    if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
      unsigned Diag =
          Pedantic
              ? diag::warn_format_conversion_argument_type_mismatch_pedantic
              : diag::warn_format_conversion_argument_type_mismatch;
      // In this case, the specifier is wrong and should be changed to match
      // the argument.
      EmitFormatDiagnostic(S.PDiag(Diag)
                               << AT.getRepresentativeTypeName(S.Context)
                               << IntendedTy << IsEnum << E->getSourceRange(),
                           E->getBeginLoc(),
                           /*IsStringLocation*/ false, SpecRange,
                           FixItHint::CreateReplacement(SpecRange, os.str()));
    } else {
      // The canonical type for formatting this value is different from the
      // actual type of the expression. (This occurs, for example, with Darwin's
      // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
      // should be printed as 'long' for 64-bit compatibility.)
      // Rather than emitting a normal format/argument mismatch, we want to
      // add a cast to the recommended type (and correct the format string
      // if necessary).
      SmallString<16> CastBuf;
      llvm::raw_svector_ostream CastFix(CastBuf);
      CastFix << "(";
      IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
      CastFix << ")";

      SmallVector<FixItHint,4> Hints;
      if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly)
        Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));

      if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
        // If there's already a cast present, just replace it.
        SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
        Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));

      } else if (!requiresParensToAddCast(E)) {
        // If the expression has high enough precedence,
        // just write the C-style cast.
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
      } else {
        // Otherwise, add parens around the expression as well as the cast.
        CastFix << "(";
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));

        SourceLocation After = S.getLocForEndOfToken(E->getEndLoc());
        Hints.push_back(FixItHint::CreateInsertion(After, ")"));
      }

      if (ShouldNotPrintDirectly) {
        // The expression has a type that should not be printed directly.
        // We extract the name from the typedef because we don't want to show
        // the underlying type in the diagnostic.
        StringRef Name;
        if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy))
          Name = TypedefTy->getDecl()->getName();
        else
          Name = CastTyName;
        unsigned Diag = Pedantic
                            ? diag::warn_format_argument_needs_cast_pedantic
                            : diag::warn_format_argument_needs_cast;
        EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
                                           << E->getSourceRange(),
                             E->getBeginLoc(), /*IsStringLocation=*/false,
                             SpecRange, Hints);
      } else {
        // In this case, the expression could be printed using a different
        // specifier, but we've decided that the specifier is probably correct
        // and we should cast instead. Just use the normal warning message.
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
                << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
      }
    }
  } else {
    const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
                                                   SpecifierLen);
    // Since the warning for passing non-POD types to variadic functions
    // was deferred until now, we emit a warning for non-POD
    // arguments here.
    switch (S.isValidVarArgType(ExprTy)) {
    case Sema::VAK_Valid:
    case Sema::VAK_ValidInCXX11: {
      unsigned Diag =
          Pedantic
              ? diag::warn_format_conversion_argument_type_mismatch_pedantic
              : diag::warn_format_conversion_argument_type_mismatch;

      EmitFormatDiagnostic(
          S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
                        << IsEnum << CSR << E->getSourceRange(),
          E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      break;
    }
    case Sema::VAK_Undefined:
    case Sema::VAK_MSVCUndefined:
      EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string)
                               << S.getLangOpts().CPlusPlus11 << ExprTy
                               << CallType
                               << AT.getRepresentativeTypeName(S.Context) << CSR
                               << E->getSourceRange(),
                           E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      checkForCStrMembers(AT, E);
      break;

    case Sema::VAK_Invalid:
      if (ExprTy->isObjCObjectType())
        EmitFormatDiagnostic(
            S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
                << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                << AT.getRepresentativeTypeName(S.Context) << CSR
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      else
        // FIXME: If this is an initializer list, suggest removing the braces
        // or inserting a cast to the target type.
        S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
            << isa<InitListExpr>(E) << ExprTy << CallType
            << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
      break;
    }

    assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
           "format string specifier index out of range");
    CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
  }

  return true;
}

//===--- CHECK: Scanf format string checking ------------------------------===//

namespace {

/// Callback handler for the scanf format-string parser; mirrors
/// CheckPrintfHandler but for scanf-family conversion specifiers.
class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, bool hasVAListArg,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void HandleIncompleteScanList(const char *start, const char *end) override;
};

} // namespace

/// Diagnose a scanlist (e.g. "%[abc") that is missing its closing ']'.
void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/true,
                       getSpecifierRange(start, end - start));
}

bool
CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  // Delegate to the shared handler used by both printf and scanf checking.
  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

/// Check a single scanf conversion specifier: positional-argument
/// consistency, zero field widths, length modifiers, and finally the type
/// of the matching data argument. Returns false to abort parsing of the
/// rest of the format string, true to continue.
bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument. These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check if the field width is non-zero.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/true, R,
                           FixItHint::CreateRemoval(R));
    }
  }

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense. Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
  if (!Ex)
    return true;

  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);

  if (!AT.isValid()) {
    return true;
  }

  analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
  if (Match == analyze_format_string::ArgType::Match)
    return true;

  // Try to compute a corrected specifier so we can attach a fix-it.
  ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);

  unsigned Diag =
      Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
               : diag::warn_format_conversion_argument_type_mismatch;

  if (Success) {
    // Get the fix string from the fixed format specifier.
    SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
                      << Ex->getType() << false << Ex->getSourceRange(),
        Ex->getBeginLoc(),
        /*IsStringLocation*/ false,
        getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
  } else {
    // No fix-it available; emit the plain mismatch warning.
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
                             << Ex->getType() << false << Ex->getSourceRange(),
                         Ex->getBeginLoc(),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  return true;
}

/// Entry point for checking one format-string literal against a call's
/// data arguments; dispatches to the printf or scanf handler by Type.
static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
                              const Expr *OrigFormatExpr,
                              ArrayRef<const Expr *> Args,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg,
                              Sema::FormatStringType Type,
                              bool inFunctionCall,
                              Sema::VariadicCallType CallType,
                              llvm::SmallBitVector
                                  &CheckedVarArgs,
                              UncoveredArgHandler &UncoveredArg) {
  // CHECK: is the format string a wide literal?
  if (!FExpr->isAscii() && !FExpr->isUTF8()) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Scan the shorter of the declared array size (minus terminator slot) and
  // the literal's actual length.
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  const unsigned numDataArgs = Args.size() - firstDataArg;

  // Emit a warning if the string literal is truncated and does not contain an
  // embedded null character.
  if (TypeSize <= StrRef.size() &&
      StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_printf_format_string_not_null_terminated),
        FExpr->getBeginLoc(),
        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
    return;
  }

  // CHECK: empty format string?
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
        HasVAListArg, Args, format_idx, inFunctionCall, CallType,
        CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
                                                  S.getLangOpts(),
                                                  S.Context.getTargetInfo(),
                                                  Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, HasVAListArg, Args, format_idx,
                        inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
                                                 S.getLangOpts(),
                                                 S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}

/// Scan a format-string literal for an 's' conversion (see
/// analyze_format_string::ParseFormatStringHasSArg).
bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Scan only as far as the declared storage (minus terminator) allows.
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
}

//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
  switch (AbsFunction) {
  default:
    return 0;

  // Integer builtins: abs -> labs -> llabs.
  case Builtin::BI__builtin_abs:
    return Builtin::BI__builtin_labs;
  case Builtin::BI__builtin_labs:
    return Builtin::BI__builtin_llabs;
  case Builtin::BI__builtin_llabs:
    return 0;

  // Floating-point builtins: fabsf -> fabs -> fabsl.
  case Builtin::BI__builtin_fabsf:
    return Builtin::BI__builtin_fabs;
  case Builtin::BI__builtin_fabs:
    return Builtin::BI__builtin_fabsl;
  case Builtin::BI__builtin_fabsl:
    return 0;

  // Complex builtins: cabsf -> cabs -> cabsl.
  case Builtin::BI__builtin_cabsf:
    return Builtin::BI__builtin_cabs;
  case Builtin::BI__builtin_cabs:
    return Builtin::BI__builtin_cabsl;
  case Builtin::BI__builtin_cabsl:
    return 0;

  // The library functions below mirror the builtin chains above.
  case Builtin::BIabs:
    return Builtin::BIlabs;
  case Builtin::BIlabs:
    return Builtin::BIllabs;
  case Builtin::BIllabs:
    return 0;

  case Builtin::BIfabsf:
    return Builtin::BIfabs;
  case Builtin::BIfabs:
    return Builtin::BIfabsl;
  case Builtin::BIfabsl:
    return 0;

  case Builtin::BIcabsf:
    return Builtin::BIcabs;
  case Builtin::BIcabs:
    return Builtin::BIcabsl;
  case Builtin::BIcabsl:
    return 0;
  }
}

// Returns the argument type of the absolute value function.
8276 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 8277 unsigned AbsType) { 8278 if (AbsType == 0) 8279 return QualType(); 8280 8281 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 8282 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 8283 if (Error != ASTContext::GE_None) 8284 return QualType(); 8285 8286 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 8287 if (!FT) 8288 return QualType(); 8289 8290 if (FT->getNumParams() != 1) 8291 return QualType(); 8292 8293 return FT->getParamType(0); 8294 } 8295 8296 // Returns the best absolute value function, or zero, based on type and 8297 // current absolute value function. 8298 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 8299 unsigned AbsFunctionKind) { 8300 unsigned BestKind = 0; 8301 uint64_t ArgSize = Context.getTypeSize(ArgType); 8302 for (unsigned Kind = AbsFunctionKind; Kind != 0; 8303 Kind = getLargerAbsoluteValueFunction(Kind)) { 8304 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 8305 if (Context.getTypeSize(ParamType) >= ArgSize) { 8306 if (BestKind == 0) 8307 BestKind = Kind; 8308 else if (Context.hasSameType(ParamType, ArgType)) { 8309 BestKind = Kind; 8310 break; 8311 } 8312 } 8313 } 8314 return BestKind; 8315 } 8316 8317 enum AbsoluteValueKind { 8318 AVK_Integer, 8319 AVK_Floating, 8320 AVK_Complex 8321 }; 8322 8323 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 8324 if (T->isIntegralOrEnumerationType()) 8325 return AVK_Integer; 8326 if (T->isRealFloatingType()) 8327 return AVK_Floating; 8328 if (T->isAnyComplexType()) 8329 return AVK_Complex; 8330 8331 llvm_unreachable("Type not integer, floating, or complex"); 8332 } 8333 8334 // Changes the absolute value function to a different type. Preserves whether 8335 // the function is a builtin. 
static unsigned changeAbsFunction(unsigned AbsKind,
                                  AbsoluteValueKind ValueKind) {
  // Each arm returns the *smallest* member of the target family; the caller
  // (CheckAbsoluteValueFunction) widens it via getBestAbsFunction afterwards.
  switch (ValueKind) {
  case AVK_Integer:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_abs;
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIabs;
    }
  case AVK_Floating:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_fabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIfabsf;
    }
  case AVK_Complex:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
      return Builtin::BI__builtin_cabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
      return Builtin::BIcabsf;
    }
  }
  llvm_unreachable("Unable to convert function");
}

// Returns the builtin ID if FDecl is one of the recognized absolute value
// functions (builtin or library form), 0 otherwise.
static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
  const IdentifierInfo *FnInfo = FDecl->getIdentifier();
  if (!FnInfo)
    return 0;

  switch (FDecl->getBuiltinID()) {
  default:
    return 0;
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
  case Builtin::BI__builtin_cabs:
  case Builtin::BI__builtin_cabsf:
  case Builtin::BI__builtin_cabsl:
  case Builtin::BIabs:
  case Builtin::BIlabs:
  case Builtin::BIllabs:
  case Builtin::BIfabs:
  case Builtin::BIfabsf:
  case Builtin::BIfabsl:
  case Builtin::BIcabs:
  case Builtin::BIcabsf:
  case Builtin::BIcabsl:
    return FDecl->getBuiltinID();
  }
  llvm_unreachable("Unknown Builtin type");
}

// If the replacement is valid, emit a note with replacement function.
// Additionally, suggest including the proper header if not already included.
static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
                            unsigned AbsKind, QualType ArgType) {
  bool EmitHeaderHint = true;
  const char *HeaderName = nullptr;
  const char *FunctionName = nullptr;
  // In C++ (for non-complex arguments) suggest the overloaded std::abs;
  // otherwise suggest the C builtin/library function in AbsKind.
  if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
    FunctionName = "std::abs";
    if (ArgType->isIntegralOrEnumerationType()) {
      HeaderName = "cstdlib";
    } else if (ArgType->isRealFloatingType()) {
      HeaderName = "cmath";
    } else {
      llvm_unreachable("Invalid Type");
    }

    // Lookup all std::abs
    if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupQualifiedName(R, Std);

      for (const auto *I : R) {
        const FunctionDecl *FDecl = nullptr;
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);
        }
        if (!FDecl)
          continue;

        // Found std::abs(), check that they are the right ones.
        if (FDecl->getNumParams() != 1)
          continue;

        // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
          // Found a function, don't need the header hint.
          EmitHeaderHint = false;
          break;
        }
      }
    }
  } else {
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);

    if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
      LookupResult R(S, DN, Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());

      // If the name resolves to something other than the expected builtin,
      // suggesting it would be wrong; bail out without a note.
      if (R.isSingleResult()) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
        if (FD && FD->getBuiltinID() == AbsKind) {
          EmitHeaderHint = false;
        } else {
          return;
        }
      } else if (!R.empty()) {
        return;
      }
    }
  }

  S.Diag(Loc, diag::note_replace_abs_function)
      << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);

  if (!HeaderName)
    return;

  if (!EmitHeaderHint)
    return;

  S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
                                                    << FunctionName;
}

// Returns true if FDecl is a function named Str declared in namespace std.
template <std::size_t StrLen>
static bool IsStdFunction(const FunctionDecl *FDecl,
                          const char (&Str)[StrLen]) {
  if (!FDecl)
    return false;
  if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
    return false;
  if (!FDecl->isInStdNamespace())
    return false;

  return true;
}

// Warn when using the wrong abs() function.
/// Diagnose misuse of absolute value functions: calls on unsigned types,
/// pointers/arrays/functions, arguments too wide for the chosen variant,
/// and arguments of the wrong kind (integer vs. floating vs. complex).
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  // ArgType is the pre-promotion type of the argument; ParamType is the
  // type it was converted to for the call.
  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative. Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    const char *FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious, they probably
  // wanted to index into an array, dereference a pointer, call a function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind. Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used. Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}

//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
/// Diagnose std::max<unsigned T>(0, x) / std::max<unsigned T>(x, 0): the
/// zero argument is useless, and suggest removing the call entirely.
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one template argument, two function parameter std::max
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto * ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that template type argument is unsigned integer.
  const auto& TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero.
  auto IsLiteralZeroArg = [](const Expr* E) -> bool {
    const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
    if (!MTE) return false;
    const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr());
    if (!Num) return false;
    if (Num->getValue() != 0) return false;
    return true;
  };

  const Expr *FirstArg = Call->getArg(0);
  const Expr *SecondArg = Call->getArg(1);
  const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
  const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);

  // Only warn when exactly one argument is zero.
  if (IsFirstArgZero == IsSecondArgZero) return;

  SourceRange FirstRange = FirstArg->getSourceRange();
  SourceRange SecondRange = SecondArg->getSourceRange();

  SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;

  Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
      << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;

  // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
  SourceRange RemovalRange;
  if (IsFirstArgZero) {
    // Remove "0u, " — everything up to (but excluding) the second argument.
    RemovalRange = SourceRange(FirstRange.getBegin(),
                               SecondRange.getBegin().getLocWithOffset(-1));
  } else {
    // Remove ", 0u" — everything after the end of the first argument.
    RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
                               SecondRange.getEnd());
  }

  Diag(Call->getExprLoc(), diag::note_remove_max_call)
      << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
      << FixItHint::CreateRemoval(RemovalRange);
}

//===--- CHECK: Standard memory functions ---------------------------------===//

/// Takes the expression passed to the size_t parameter of functions
/// such as memcmp, strncat, etc and warns if it's a comparison.
///
/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  // Suggest closing the call before the comparison operator instead.
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FnName
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  // Or, if intentional, silence via an explicit cast to size_t.
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  return true;
}

/// Determine whether the given type is or contains a dynamic class type
/// (e.g., whether it has a vtable).
8707 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 8708 bool &IsContained) { 8709 // Look through array types while ignoring qualifiers. 8710 const Type *Ty = T->getBaseElementTypeUnsafe(); 8711 IsContained = false; 8712 8713 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 8714 RD = RD ? RD->getDefinition() : nullptr; 8715 if (!RD || RD->isInvalidDecl()) 8716 return nullptr; 8717 8718 if (RD->isDynamicClass()) 8719 return RD; 8720 8721 // Check all the fields. If any bases were dynamic, the class is dynamic. 8722 // It's impossible for a class to transitively contain itself by value, so 8723 // infinite recursion is impossible. 8724 for (auto *FD : RD->fields()) { 8725 bool SubContained; 8726 if (const CXXRecordDecl *ContainedRD = 8727 getContainedDynamicClass(FD->getType(), SubContained)) { 8728 IsContained = true; 8729 return ContainedRD; 8730 } 8731 } 8732 8733 return nullptr; 8734 } 8735 8736 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 8737 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 8738 if (Unary->getKind() == UETT_SizeOf) 8739 return Unary; 8740 return nullptr; 8741 } 8742 8743 /// If E is a sizeof expression, returns its argument expression, 8744 /// otherwise returns NULL. 8745 static const Expr *getSizeOfExprArg(const Expr *E) { 8746 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 8747 if (!SizeOf->isArgumentType()) 8748 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 8749 return nullptr; 8750 } 8751 8752 /// If E is a sizeof expression, returns its argument type. 
static QualType getSizeOfArgType(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    return SizeOf->getTypeOfArgument();
  return QualType();
}

namespace {

/// Visitor that walks a record's fields and emits a note pointing at the
/// field that makes the record non-trivial to default-initialize (used by
/// the memset/bzero memaccess checks below).
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Unwrap array types to their element type before dispatching on the
  // primitive-default-initialize kind.
  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;
    }

    Super::visitWithKind(PDIK, FT, SL);
  }

  // ARC-qualified fields are what make a C struct non-trivial here; the << 1
  // selects the "default-initialize" wording of the note.
  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  // Recurse into nested record fields.
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void visitTrivial(QualType FT, SourceLocation SL) {}

  /// Entry point: emit notes for every offending field of record type \p RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

/// Visitor that walks a record's fields and emits a note pointing at the
/// field that makes the record non-trivial to copy (used by the
/// memcpy/memmove memaccess checks below).
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Unwrap array types to their element type before dispatching on the
  // primitive-copy kind.
  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  // The << 0 selects the "copy" wording of the note.
  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  /// Entry point: emit notes for every offending field of record type \p RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

} // namespace

/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
  SizeofExpr = SizeofExpr->IgnoreParenImpCasts();

  // Recurse through '*' and '+' so that e.g. 'sizeof(x) * n' and
  // 'sizeof(a) + sizeof(b)' are still recognized as size computations.
  if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
    if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
      return false;

    return doesExprLikelyComputeSize(BO->getLHS()) ||
           doesExprLikelyComputeSize(BO->getRHS());
  }

  return getAsSizeOfExpr(SizeofExpr) != nullptr;
}

/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
///
/// \code
///   #define MACRO 0
///   foo(MACRO);
///   foo(0);
/// \endcode
///
/// This should return true for the first call to foo, but not for the second
/// (regardless of whether foo is a macro or function).
static bool isArgumentExpandedFromMacro(SourceManager &SM,
                                        SourceLocation CallLoc,
                                        SourceLocation ArgLoc) {
  if (!CallLoc.isMacroID())
    return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);

  return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
         SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
}

/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
/// last two arguments transposed.
static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
  // Only memset and bzero take a fill/size pair that can be transposed.
  if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
    return;

  // memset's size is argument 2; bzero's is argument 1.
  const Expr *SizeArg =
    Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();

  auto isLiteralZero = [](const Expr *E) {
    return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0;
  };

  // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
  // Skip the case where the zero was spelled via a macro argument, since the
  // macro may expand to different values elsewhere.
  SourceLocation CallLoc = Call->getRParenLoc();
  SourceManager &SM = S.getSourceManager();
  if (isLiteralZero(SizeArg) &&
      !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {

    SourceLocation DiagLoc = SizeArg->getExprLoc();

    // Some platforms #define bzero to __builtin_memset. See if this is the
    // case, and if so, emit a better diagnostic.
    if (BId == Builtin::BIbzero ||
        (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
                                    CallLoc, SM, S.getLangOpts()) == "bzero")) {
      S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
      S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
    } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
      S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
      S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
    }
    return;
  }

  // If the second argument to a memset is a sizeof expression and the third
  // isn't, this is also likely an error. This should catch
  // 'memset(buf, sizeof(buf), 0xff)'.
  if (BId == Builtin::BImemset &&
      doesExprLikelyComputeSize(Call->getArg(1)) &&
      !doesExprLikelyComputeSize(Call->getArg(2))) {
    SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
    S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
    S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
    return;
  }
}

/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
/// function calls.
///
/// \param Call The call expression to diagnose.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset. Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  // LastArg: one past the last pointer argument to inspect.
  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  // LenArg: index of the length argument for this builtin.
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  // Inspect each pointer argument (dest, and for copy/compare calls also src).
  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID.
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          // over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If its an address-of operator, just remove it.
          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                  << ReadableName
                                  << PointeeTy
                                  << DestTy
                                  << DSR
                                  << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                  << ActionIdx
                                  << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                  << FnName << SizeOfArgTy << ArgIdx
                                  << PointeeTy << Dest->getSourceRange()
                                  << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      PointeeTy = DestTy;
    }

    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      unsigned OperationType = 0;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || BId == Builtin::BImemcmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if(BId == Builtin::BImemmove)
          OperationType = 2;
        else if (BId == Builtin::BImemcmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(
          Dest->getExprLoc(), Dest,
          PDiag(diag::warn_dyn_class_memaccess)
              << (BId == Builtin::BImemcmp ? ArgIdx + 2 : ArgIdx)
              << FnName << IsContained << ContainedRD << OperationType
              << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
               BId != Builtin::BImemset)
      DiagRuntimeBehavior(
          Dest->getExprLoc(), Dest,
          PDiag(diag::warn_arc_object_memaccess)
              << ArgIdx << FnName << PointeeTy
              << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      // Non-trivial C structs: warn and point at the offending field(s).
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    // Common silencing note for all of the warnings above: cast to void*.
    DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::note_bad_memaccess_silence)
            << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}

// A little helper routine: ignore addition and subtraction of integer literals.
9130 // This intentionally does not ignore all integer constant expressions because 9131 // we don't want to remove sizeof(). 9132 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 9133 Ex = Ex->IgnoreParenCasts(); 9134 9135 while (true) { 9136 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 9137 if (!BO || !BO->isAdditiveOp()) 9138 break; 9139 9140 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 9141 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 9142 9143 if (isa<IntegerLiteral>(RHS)) 9144 Ex = LHS; 9145 else if (isa<IntegerLiteral>(LHS)) 9146 Ex = RHS; 9147 else 9148 break; 9149 } 9150 9151 return Ex; 9152 } 9153 9154 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 9155 ASTContext &Context) { 9156 // Only handle constant-sized or VLAs, but not flexible members. 9157 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 9158 // Only issue the FIXIT for arrays of size > 1. 9159 if (CAT->getSize().getSExtValue() <= 1) 9160 return false; 9161 } else if (!Ty->isVariableArrayType()) { 9162 return false; 9163 } 9164 return true; 9165 } 9166 9167 // Warn if the user has made the 'size' argument to strlcpy or strlcat 9168 // be the size of the source, instead of the destination. 
9169 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 9170 IdentifierInfo *FnName) { 9171 9172 // Don't crash if the user has the wrong number of arguments 9173 unsigned NumArgs = Call->getNumArgs(); 9174 if ((NumArgs != 3) && (NumArgs != 4)) 9175 return; 9176 9177 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 9178 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 9179 const Expr *CompareWithSrc = nullptr; 9180 9181 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 9182 Call->getBeginLoc(), Call->getRParenLoc())) 9183 return; 9184 9185 // Look for 'strlcpy(dst, x, sizeof(x))' 9186 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 9187 CompareWithSrc = Ex; 9188 else { 9189 // Look for 'strlcpy(dst, x, strlen(x))' 9190 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 9191 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 9192 SizeCall->getNumArgs() == 1) 9193 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 9194 } 9195 } 9196 9197 if (!CompareWithSrc) 9198 return; 9199 9200 // Determine if the argument to sizeof/strlen is equal to the source 9201 // argument. In principle there's all kinds of things you could do 9202 // here, for instance creating an == expression and evaluating it with 9203 // EvaluateAsBooleanCondition, but this uses a more direct technique: 9204 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 9205 if (!SrcArgDRE) 9206 return; 9207 9208 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 9209 if (!CompareWithSrcDRE || 9210 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 9211 return; 9212 9213 const Expr *OriginalSizeArg = Call->getArg(2); 9214 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 9215 << OriginalSizeArg->getSourceRange() << FnName; 9216 9217 // Output a FIXIT hint if the destination is an array (rather than a 9218 // pointer to an array). 
This could be enhanced to handle some 9219 // pointers if we know the actual size, like if DstArg is 'array+2' 9220 // we could say 'sizeof(array)-2'. 9221 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 9222 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 9223 return; 9224 9225 SmallString<128> sizeString; 9226 llvm::raw_svector_ostream OS(sizeString); 9227 OS << "sizeof("; 9228 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9229 OS << ")"; 9230 9231 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 9232 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 9233 OS.str()); 9234 } 9235 9236 /// Check if two expressions refer to the same declaration. 9237 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 9238 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 9239 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 9240 return D1->getDecl() == D2->getDecl(); 9241 return false; 9242 } 9243 9244 static const Expr *getStrlenExprArg(const Expr *E) { 9245 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 9246 const FunctionDecl *FD = CE->getDirectCallee(); 9247 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 9248 return nullptr; 9249 return CE->getArg(0)->IgnoreParenCasts(); 9250 } 9251 return nullptr; 9252 } 9253 9254 // Warn on anti-patterns as the 'size' argument to strncat. 9255 // The correct size argument should look like following: 9256 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 9257 void Sema::CheckStrncatArguments(const CallExpr *CE, 9258 IdentifierInfo *FnName) { 9259 // Don't crash if the user has the wrong number of arguments. 
9260 if (CE->getNumArgs() < 3) 9261 return; 9262 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 9263 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 9264 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 9265 9266 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 9267 CE->getRParenLoc())) 9268 return; 9269 9270 // Identify common expressions, which are wrongly used as the size argument 9271 // to strncat and may lead to buffer overflows. 9272 unsigned PatternType = 0; 9273 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 9274 // - sizeof(dst) 9275 if (referToTheSameDecl(SizeOfArg, DstArg)) 9276 PatternType = 1; 9277 // - sizeof(src) 9278 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 9279 PatternType = 2; 9280 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 9281 if (BE->getOpcode() == BO_Sub) { 9282 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 9283 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 9284 // - sizeof(dst) - strlen(dst) 9285 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 9286 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 9287 PatternType = 1; 9288 // - sizeof(src) - (anything) 9289 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 9290 PatternType = 2; 9291 } 9292 } 9293 9294 if (PatternType == 0) 9295 return; 9296 9297 // Generate the diagnostic. 9298 SourceLocation SL = LenArg->getBeginLoc(); 9299 SourceRange SR = LenArg->getSourceRange(); 9300 SourceManager &SM = getSourceManager(); 9301 9302 // If the function is defined as a builtin macro, do not show macro expansion. 9303 if (SM.isMacroArgExpansion(SL)) { 9304 SL = SM.getSpellingLoc(SL); 9305 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 9306 SM.getSpellingLoc(SR.getEnd())); 9307 } 9308 9309 // Check if the destination is an array (rather than a pointer to an array). 
9310 QualType DstTy = DstArg->getType(); 9311 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 9312 Context); 9313 if (!isKnownSizeArray) { 9314 if (PatternType == 1) 9315 Diag(SL, diag::warn_strncat_wrong_size) << SR; 9316 else 9317 Diag(SL, diag::warn_strncat_src_size) << SR; 9318 return; 9319 } 9320 9321 if (PatternType == 1) 9322 Diag(SL, diag::warn_strncat_large_size) << SR; 9323 else 9324 Diag(SL, diag::warn_strncat_src_size) << SR; 9325 9326 SmallString<128> sizeString; 9327 llvm::raw_svector_ostream OS(sizeString); 9328 OS << "sizeof("; 9329 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9330 OS << ") - "; 9331 OS << "strlen("; 9332 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9333 OS << ") - 1"; 9334 9335 Diag(SL, diag::note_strncat_wrong_size) 9336 << FixItHint::CreateReplacement(SR, OS.str()); 9337 } 9338 9339 void 9340 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 9341 SourceLocation ReturnLoc, 9342 bool isObjCMethod, 9343 const AttrVec *Attrs, 9344 const FunctionDecl *FD) { 9345 // Check if the return value is null but should not be. 9346 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 9347 (!isObjCMethod && isNonNullType(Context, lhsType))) && 9348 CheckNonNullExpr(*this, RetValExp)) 9349 Diag(ReturnLoc, diag::warn_null_ret) 9350 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 9351 9352 // C++11 [basic.stc.dynamic.allocation]p4: 9353 // If an allocation function declared with a non-throwing 9354 // exception-specification fails to allocate storage, it shall return 9355 // a null pointer. Any other allocation function that fails to allocate 9356 // storage shall indicate failure only by throwing an exception [...] 
9357 if (FD) { 9358 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 9359 if (Op == OO_New || Op == OO_Array_New) { 9360 const FunctionProtoType *Proto 9361 = FD->getType()->castAs<FunctionProtoType>(); 9362 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 9363 CheckNonNullExpr(*this, RetValExp)) 9364 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 9365 << FD << getLangOpts().CPlusPlus11; 9366 } 9367 } 9368 } 9369 9370 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 9371 9372 /// Check for comparisons of floating point operands using != and ==. 9373 /// Issue a warning if these are no self-comparisons, as they are not likely 9374 /// to do what the programmer intended. 9375 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) { 9376 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 9377 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 9378 9379 // Special case: check for x == x (which is OK). 9380 // Do not emit warnings for such cases. 9381 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 9382 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 9383 if (DRL->getDecl() == DRR->getDecl()) 9384 return; 9385 9386 // Special case: check for comparisons against literals that can be exactly 9387 // represented by APFloat. In such cases, do not emit a warning. This 9388 // is a heuristic: often comparison against such literals are used to 9389 // detect if a value in a variable has not changed. This clearly can 9390 // lead to false negatives. 9391 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 9392 if (FLL->isExact()) 9393 return; 9394 } else 9395 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 9396 if (FLR->isExact()) 9397 return; 9398 9399 // Check for comparisons with builtin types. 
9400 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 9401 if (CL->getBuiltinCallee()) 9402 return; 9403 9404 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 9405 if (CR->getBuiltinCallee()) 9406 return; 9407 9408 // Emit the diagnostic. 9409 Diag(Loc, diag::warn_floatingpoint_eq) 9410 << LHS->getSourceRange() << RHS->getSourceRange(); 9411 } 9412 9413 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 9414 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 9415 9416 namespace { 9417 9418 /// Structure recording the 'active' range of an integer-valued 9419 /// expression. 9420 struct IntRange { 9421 /// The number of bits active in the int. 9422 unsigned Width; 9423 9424 /// True if the int is known not to have negative values. 9425 bool NonNegative; 9426 9427 IntRange(unsigned Width, bool NonNegative) 9428 : Width(Width), NonNegative(NonNegative) {} 9429 9430 /// Returns the range of the bool type. 9431 static IntRange forBoolType() { 9432 return IntRange(1, true); 9433 } 9434 9435 /// Returns the range of an opaque value of the given integral type. 9436 static IntRange forValueOfType(ASTContext &C, QualType T) { 9437 return forValueOfCanonicalType(C, 9438 T->getCanonicalTypeInternal().getTypePtr()); 9439 } 9440 9441 /// Returns the range of an opaque value of a canonical integral type. 9442 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 9443 assert(T->isCanonicalUnqualified()); 9444 9445 if (const VectorType *VT = dyn_cast<VectorType>(T)) 9446 T = VT->getElementType().getTypePtr(); 9447 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 9448 T = CT->getElementType().getTypePtr(); 9449 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 9450 T = AT->getValueType().getTypePtr(); 9451 9452 if (!C.getLangOpts().CPlusPlus) { 9453 // For enum types in C code, use the underlying datatype. 
9454 if (const EnumType *ET = dyn_cast<EnumType>(T)) 9455 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 9456 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 9457 // For enum types in C++, use the known bit width of the enumerators. 9458 EnumDecl *Enum = ET->getDecl(); 9459 // In C++11, enums can have a fixed underlying type. Use this type to 9460 // compute the range. 9461 if (Enum->isFixed()) { 9462 return IntRange(C.getIntWidth(QualType(T, 0)), 9463 !ET->isSignedIntegerOrEnumerationType()); 9464 } 9465 9466 unsigned NumPositive = Enum->getNumPositiveBits(); 9467 unsigned NumNegative = Enum->getNumNegativeBits(); 9468 9469 if (NumNegative == 0) 9470 return IntRange(NumPositive, true/*NonNegative*/); 9471 else 9472 return IntRange(std::max(NumPositive + 1, NumNegative), 9473 false/*NonNegative*/); 9474 } 9475 9476 const BuiltinType *BT = cast<BuiltinType>(T); 9477 assert(BT->isInteger()); 9478 9479 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 9480 } 9481 9482 /// Returns the "target" range of a canonical integral type, i.e. 9483 /// the range of values expressible in the type. 9484 /// 9485 /// This matches forValueOfCanonicalType except that enums have the 9486 /// full range of their type, not the range of their enumerators. 
static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
  assert(T->isCanonicalUnqualified());

  // Strip wrapper types down to the underlying integer type.
  if (const VectorType *VT = dyn_cast<VectorType>(T))
    T = VT->getElementType().getTypePtr();
  if (const ComplexType *CT = dyn_cast<ComplexType>(T))
    T = CT->getElementType().getTypePtr();
  if (const AtomicType *AT = dyn_cast<AtomicType>(T))
    T = AT->getValueType().getTypePtr();
  // For the "target" range, an enum spans its full underlying integer
  // type, not merely the range covered by its enumerators.
  if (const EnumType *ET = dyn_cast<EnumType>(T))
    T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();

  const BuiltinType *BT = cast<BuiltinType>(T);
  assert(BT->isInteger());

  return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
}

/// Returns the supremum of two ranges: i.e. their conservative merge.
static IntRange join(IntRange L, IntRange R) {
  return IntRange(std::max(L.Width, R.Width),
                  L.NonNegative && R.NonNegative);
}

/// Returns the infimum of two ranges: i.e. their aggressive merge.
static IntRange meet(IntRange L, IntRange R) {
  return IntRange(std::min(L.Width, R.Width),
                  L.NonNegative || R.NonNegative);
}
};

} // namespace

/// Estimate the range of values the given (non-negative-or-negative)
/// integer constant may take, truncating it to at most \p MaxWidth bits.
static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
                              unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getMinSignedBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value = value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness.
  return IntRange(value.getActiveBits(), true);
}

/// Estimate the range of an evaluated constant, handling scalar integer,
/// vector, and complex-integer results; other APValue kinds are treated
/// as potentially using all \p MaxWidth bits.
static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                              unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  // For vectors, conservatively join the ranges of all elements.
  if (result.isVector()) {
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case. It would be nice if APValue
  // preserved this.
  assert(result.isLValue() || result.isAddrLabelDiff());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
}

/// Returns the type of \p E, looking through a wrapping _Atomic to the
/// underlying value type.
static QualType GetExprType(const Expr *E) {
  QualType Ty = E->getType();
  if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
    Ty = AtomicRHS->getValueType();
  return Ty;
}

/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth - the width to which the value will be truncated
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C))
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
    if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth);

    IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));

    bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
                         CE->getCastKind() == CK_BooleanToSignedIntegral;

    // Assume that non-integer casts can span the full range of the type.
    if (!isIntegerCast)
      return OutputTypeRange;

    IntRange SubRange
      = GetExprRange(C, CE->getSubExpr(),
                     std::min(MaxWidth, OutputTypeRange.Width));

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    bool CondResult;
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C, CondResult ? CO->getTrueExpr()
                                        : CO->getFalseExpr(),
                          MaxWidth);

    // Otherwise, conservatively merge.
    IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth);
    IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth);
    return IntRange::join(L, R);
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    switch (BO->getOpcode()) {
    case BO_Cmp:
      llvm_unreachable("builtin <=> should have class type");

    // Boolean-valued operations are single-bit and positive.
    case BO_LAnd:
    case BO_LOr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
      return IntRange::forBoolType();

    // The type of the assignments is the type of the LHS, so the RHS
    // is not necessarily the same type.
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // TODO: bitfields?
      return IntRange::forValueOfType(C, GetExprType(E));

    // Simple assignments just pass through the RHS, which will have
    // been coerced to the LHS type.
    case BO_Assign:
      // TODO: bitfields?
      return GetExprRange(C, BO->getRHS(), MaxWidth);

    // Operations with opaque sources are black-listed.
    case BO_PtrMemD:
    case BO_PtrMemI:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Bitwise-and uses the *infimum* of the two source ranges.
    case BO_And:
    case BO_AndAssign:
      return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth),
                            GetExprRange(C, BO->getRHS(), MaxWidth));

    // Left shift gets black-listed based on a judgement call.
    case BO_Shl:
      // ...except that we want to treat '1 << (blah)' as logically
      // positive. It's an important idiom.
      if (IntegerLiteral *I
            = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
        if (I->getValue() == 1) {
          IntRange R = IntRange::forValueOfType(C, GetExprType(E));
          return IntRange(R.Width, /*NonNegative*/ true);
        }
      }
      LLVM_FALLTHROUGH;

    case BO_ShlAssign:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Right shift by a constant can narrow its left argument.
    case BO_Shr:
    case BO_ShrAssign: {
      IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);

      // If the shift amount is a positive constant, drop the width by
      // that much.
      llvm::APSInt shift;
      if (BO->getRHS()->isIntegerConstantExpr(shift, C) &&
          shift.isNonNegative()) {
        unsigned zext = shift.getZExtValue();
        if (zext >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width -= zext;
      }

      return L;
    }

    // Comma acts as its right operand.
    case BO_Comma:
      return GetExprRange(C, BO->getRHS(), MaxWidth);

    // Black-list pointer subtractions.
    case BO_Sub:
      if (BO->getLHS()->getType()->isPointerType())
        return IntRange::forValueOfType(C, GetExprType(E));
      break;

    // The width of a division result is mostly determined by the size
    // of the LHS.
    case BO_Div: {
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth);

      // If the divisor is constant, use that.
      llvm::APSInt divisor;
      if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) {
        unsigned log2 = divisor.logBase2(); // floor(log_2(divisor))
        if (log2 >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width = std::min(L.Width - log2, MaxWidth);
        return L;
      }

      // Otherwise, just use the LHS's width.
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
      return IntRange(L.Width, L.NonNegative && R.NonNegative);
    }

    // The result of a remainder can't be larger than the result of
    // either side.
    case BO_Rem: {
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth);

      IntRange meet = IntRange::meet(L, R);
      meet.Width = std::min(meet.Width, MaxWidth);
      return meet;
    }

    // The default behavior is okay for these.
    case BO_Mul:
    case BO_Add:
    case BO_Xor:
    case BO_Or:
      break;
    }

    // The default case is to treat the operation as if it were closed
    // on the narrowest type that encompasses both operands.
    IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
    IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth);
    return IntRange::join(L, R);
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    // Boolean-valued operations are white-listed.
    case UO_LNot:
      return IntRange::forBoolType();

    // Operations with opaque sources are black-listed.
    case UO_Deref:
    case UO_AddrOf: // should be impossible
      return IntRange::forValueOfType(C, GetExprType(E));

    default:
      return GetExprRange(C, UO->getSubExpr(), MaxWidth);
    }
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return GetExprRange(C, OVE->getSourceExpr(), MaxWidth);

  // A bit-field read is bounded by the field's declared bit width.
  if (const auto *BitField = E->getSourceBitField())
    return IntRange(BitField->getBitWidthValue(C),
                    BitField->getType()->isUnsignedIntegerOrEnumerationType());

  // Fall back to the full range of the expression's type.
  return IntRange::forValueOfType(C, GetExprType(E));
}

/// Pseudo-evaluate \p E, capping the estimate at the width of E's own type.
static IntRange GetExprRange(ASTContext &C, const Expr *E) {
  return GetExprRange(C, E, C.getIntWidth(GetExprType(E)));
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
static bool IsSameFloatAfterCast(const llvm::APFloat &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  llvm::APFloat truncated = value;

  bool ignored;
  // Convert through Src and then Tgt semantics, and check that the bit
  // pattern is unchanged from the original.
  truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
  truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);

  return truncated.bitwiseIsEqual(value);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
///
/// The value might be a vector of floats (or a complex number).
static bool IsSameFloatAfterCast(const APValue &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  if (value.isFloat())
    return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);

  // For a vector, every element must survive the round-trip.
  if (value.isVector()) {
    for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
      if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
        return false;
    return true;
  }

  // Otherwise this must be a complex float; both components must match.
  assert(value.isComplexFloat());
  return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
          IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}

static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC);

/// Returns true if a comparison against this expression should be
/// suppressed: it is an enum constant reference or was expanded from a
/// macro.
static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
  // Suppress cases where we are comparing against an enum constant.
  if (const DeclRefExpr *DR =
      dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
    if (isa<EnumConstantDecl>(DR->getDecl()))
      return true;

  // Suppress cases where the '0' value is expanded from a macro.
  if (E->getBeginLoc().isMacroID())
    return true;

  return false;
}

/// Returns true if \p E is an integer expression known to yield an
/// unsigned value, either by its own type or by the type beneath its
/// implicit conversions.
static bool isKnownToHaveUnsignedValue(Expr *E) {
  return E->getType()->isIntegerType() &&
         (!E->getType()->isSignedIntegerType() ||
          !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
}

namespace {
/// The promoted range of values of a type. In general this has the
/// following structure:
///
///     |-----------| . . . |-----------|
///     ^           ^       ^           ^
///    Min       HoleMin  HoleMax      Max
///
/// ... where there is only a hole if a signed type is promoted to unsigned
/// (in which case Min and Max are the smallest and largest representable
/// values).
struct PromotedRange {
  // Min, or HoleMax if there is a hole.
  llvm::APSInt PromotedMin;
  // Max, or HoleMin if there is a hole.
  llvm::APSInt PromotedMax;

  // Build the promoted range for a value with range \p R when promoted
  // to a type of width \p BitWidth and the given signedness.
  PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
    if (R.Width == 0)
      PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
    else if (R.Width >= BitWidth && !Unsigned) {
      // Promotion made the type *narrower*. This happens when promoting
      // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
      // Treat all values of 'signed int' as being in range for now.
      PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
    } else {
      PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMin.setIsUnsigned(Unsigned);

      PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMax.setIsUnsigned(Unsigned);
    }
  }

  // Determine whether this range is contiguous (has no hole).
  bool isContiguous() const { return PromotedMin <= PromotedMax; }

  // Where a constant value is within the range.
  // These are bit flags; the named combinations below describe the
  // position of a constant relative to the range.
  enum ComparisonResult {
    LT = 0x1,
    LE = 0x2,
    GT = 0x4,
    GE = 0x8,
    EQ = 0x10,
    NE = 0x20,
    InRangeFlag = 0x40,

    Less = LE | LT | NE,
    Min = LE | InRangeFlag,
    InRange = InRangeFlag,
    Max = GE | InRangeFlag,
    Greater = GE | GT | NE,

    OnlyValue = LE | GE | EQ | InRangeFlag,
    InHole = NE
  };

  // Classify where \p Value falls relative to this promoted range.
  ComparisonResult compare(const llvm::APSInt &Value) const {
    assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
           Value.isUnsigned() == PromotedMin.isUnsigned());
    if (!isContiguous()) {
      // Discontiguous ranges only arise for signed-promoted-to-unsigned,
      // so the comparison must be unsigned; PromotedMin/Max are actually
      // HoleMax/HoleMin here (see the member comments).
      assert(Value.isUnsigned() && "discontiguous range for signed compare");
      if (Value.isMinValue()) return Min;
      if (Value.isMaxValue()) return Max;
      if (Value >= PromotedMin) return InRange;
      if (Value <= PromotedMax) return InRange;
      return InHole;
    }

    switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
    case -1: return Less;
    case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
    case 1:
      switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
      case -1: return InRange;
      case 0: return Max;
      case 1: return Greater;
      }
    }

    llvm_unreachable("impossible compare result");
  }

  // If the comparison \p Op against a constant classified as \p R always
  // folds to the same value, return a printable spelling of that value;
  // otherwise return None.
  static llvm::Optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    if (Op == BO_Cmp) {
      ComparisonResult LTFlag = LT, GTFlag = GT;
      if (ConstantOnRHS) std::swap(LTFlag, GTFlag);

      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
      return llvm::None;
    }

    ComparisonResult TrueFlag, FalseFlag;
    if (Op == BO_EQ) {
      TrueFlag = EQ;
      FalseFlag = NE;
    } else if (Op == BO_NE) {
      TrueFlag = NE;
      FalseFlag = EQ;
    } else {
      // Relational operators: pick the flag pair according to the
      // operator direction and which side the constant is on.
      if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
        TrueFlag = LT;
        FalseFlag = GE;
      } else {
        TrueFlag = GT;
        FalseFlag = LE;
      }
      if (Op == BO_GE || Op == BO_LE)
        std::swap(TrueFlag, FalseFlag);
    }
    if (R & TrueFlag)
      return StringRef("true");
    if (R & FalseFlag)
      return StringRef("false");
    return llvm::None;
  }
};
}

/// Returns true if \p E has enum type after stripping integral-promotion
/// implicit casts.
static bool HasEnumType(Expr *E) {
  // Strip off implicit integral promotions.
  while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() != CK_IntegralCast &&
        ICE->getCastKind() != CK_NoOp)
      break;
    E = ICE->getSubExpr();
  }

  return E->getType()->isEnumeralType();
}

/// Classify a comparison constant for diagnostic wording: a literal
/// 'true', a literal 'false', or anything else.
static int classifyConstantValue(Expr *Constant) {
  // The values of this enumeration are used in the diagnostics
  // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
  enum ConstantValueKind {
    Miscellaneous = 0,
    LiteralTrue,
    LiteralFalse
  };
  if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
    return BL->getValue() ? ConstantValueKind::LiteralTrue
                          : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}

/// Diagnose comparisons of an integer constant against an expression
/// whose promoted range makes the result always true or always false.
/// Returns true if a tautology was diagnosed.
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //  - If the constant is outside the range of representable values of
  //    the enumeration. In such a case, we should warn about the cast
  //    to enumeration type, not about the comparison.
  //  - If the constant is the maximum / minimum in-range value. For an
  //    enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  // TODO: Investigate using GetExprRange() to get tighter bounds
  // on the bit ranges.
  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType)
    OtherRange = IntRange::forBoolType();

  // Determine the promoted range of the other type and see if a comparison of
  // the constant against that range is tautological.
  PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(),
                                   Value.isUnsigned());
  auto Cmp = OtherPromotedRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  //   some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
    return false;

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
  if (ED)
    OS << '\'' << *ED << "' (" << Value << ")";
  else
    OS << Value;

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {
    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant)
            << OtherT << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
  } else {
    // Comparing an unsigned value against 0 gets its own diagnostic
    // wording; otherwise use the generic tautological-compare warning.
    unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
                        ? (HasEnumType(OriginalOther)
                               ? diag::warn_unsigned_enum_always_true_comparison
                               : diag::warn_unsigned_always_true_comparison)
                        : diag::warn_tautological_constant_compare;

    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
  }

  return true;
}

/// Analyze the operands of the given comparison. Implements the
/// fallback case from AnalyzeComparison.
static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}

/// Implements -Wsign-compare.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
  // The type the comparison is being performed in.
  QualType T = E->getLHS()->getType();

  // Only analyze comparison operators where both sides have been converted to
  // the same type.
  if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
    return AnalyzeImpConvsInComparison(S, E);

  // Don't analyze value-dependent comparisons directly.
  if (E->isValueDependent())
    return AnalyzeImpConvsInComparison(S, E);

  Expr *LHS = E->getLHS();
  Expr *RHS = E->getRHS();

  if (T->isIntegralType(S.Context)) {
    llvm::APSInt RHSValue;
    llvm::APSInt LHSValue;

    bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context);
    bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context);

    // We don't care about expressions whose result is a constant.
    if (IsRHSIntegralLiteral && IsLHSIntegralLiteral)
      return AnalyzeImpConvsInComparison(S, E);

    // We only care about expressions where just one side is literal
    if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) {
      // Is the constant on the RHS or LHS?
      const bool RhsConstant = IsRHSIntegralLiteral;
      Expr *Const = RhsConstant ? RHS : LHS;
      Expr *Other = RhsConstant ? LHS : RHS;
      const llvm::APSInt &Value = RhsConstant ? RHSValue : LHSValue;

      // Check whether an integer constant comparison results in a value
      // of 'true' or 'false'.
      if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
        return AnalyzeImpConvsInComparison(S, E);
    }
  }

  if (!T->hasUnsignedIntegerRepresentation()) {
    // We don't do anything special if this isn't an unsigned integral
    // comparison: we're only interested in integral comparisons, and
    // signed comparisons only happen in cases we don't care to warn about.
    return AnalyzeImpConvsInComparison(S, E);
  }

  LHS = LHS->IgnoreParenImpCasts();
  RHS = RHS->IgnoreParenImpCasts();

  if (!S.getLangOpts().CPlusPlus) {
    // Avoid warning about comparison of integers with different signs when
    // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
    // the type of `E`.
    if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
      LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
    if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
      RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
  }

  // Check to see if one of the (unmodified) operands is of different
  // signedness.
  Expr *signedOperand, *unsignedOperand;
  if (LHS->getType()->hasSignedIntegerRepresentation()) {
    assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
           "unsigned comparison between two signed integer expressions?");
    signedOperand = LHS;
    unsignedOperand = RHS;
  } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
    signedOperand = RHS;
    unsignedOperand = LHS;
  } else {
    return AnalyzeImpConvsInComparison(S, E);
  }

  // Otherwise, calculate the effective range of the signed operand.
  IntRange signedRange = GetExprRange(S.Context, signedOperand);

  // Go ahead and analyze implicit conversions in the operands. Note
  // that we skip the implicit conversions on both sides.
  AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
  AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());

  // If the signed range is non-negative, -Wsign-compare won't fire.
  if (signedRange.NonNegative)
    return;

  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with a overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (E->isEqualityOp()) {
    unsigned comparisonWidth = S.Context.getIntWidth(T);
    IntRange unsignedRange = GetExprRange(S.Context, unsignedOperand);

    // We should never be unable to prove that the unsigned operand is
    // non-negative.
    assert(unsignedRange.NonNegative && "unsigned range includes negative?");

    if (unsignedRange.Width < comparisonWidth)
      return;
  }

  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                        S.PDiag(diag::warn_mixed_sign_comparison)
                            << LHS->getType() << RHS->getType()
                            << LHS->getSourceRange() << RHS->getSourceRange());
}

/// Analyzes an attempt to assign the given value to a bitfield.
///
/// Returns true if there was something fishy about the attempt.
static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
                                      SourceLocation InitLoc) {
  assert(Bitfield->isBitField());
  if (Bitfield->isInvalidDecl())
    return false;

  // White-list bool bitfields.
  QualType BitfieldType = Bitfield->getType();
  if (BitfieldType->isBooleanType())
    return false;

  if (BitfieldType->isEnumeralType()) {
    EnumDecl *BitfieldEnumDecl = BitfieldType->getAs<EnumType>()->getDecl();
    // If the underlying enum type was not explicitly specified as an unsigned
    // type and the enum contain only positive values, MSVC++ will cause an
    // inconsistency by storing this as a signed type.
    if (S.getLangOpts().CPlusPlus11 &&
        !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
        BitfieldEnumDecl->getNumPositiveBits() > 0 &&
        BitfieldEnumDecl->getNumNegativeBits() == 0) {
      S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
          << BitfieldEnumDecl->getNameAsString();
    }
  }

  if (Bitfield->getType()->isBooleanType())
    return false;

  // Ignore value- or type-dependent expressions.
  if (Bitfield->getBitWidth()->isValueDependent() ||
      Bitfield->getBitWidth()->isTypeDependent() ||
      Init->isValueDependent() ||
      Init->isTypeDependent())
    return false;

  Expr *OriginalInit = Init->IgnoreParenImpCasts();
  unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);

  llvm::APSInt Value;
  if (!OriginalInit->EvaluateAsInt(Value, S.Context,
                                   Expr::SE_AllowSideEffects)) {
    // The RHS is not constant. If the RHS has an enum type, make sure the
    // bitfield is wide enough to hold all the values of the enum without
    // truncation.
    if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
      EnumDecl *ED = EnumTy->getDecl();
      bool SignedBitfield = BitfieldType->isSignedIntegerType();

      // Enum types are implicitly signed on Windows, so check if there are any
      // negative enumerators to see if the enum was intended to be signed or
      // not.
      bool SignedEnum = ED->getNumNegativeBits() > 0;

      // Check for surprising sign changes when assigning enum values to a
      // bitfield of different signedness. If the bitfield is signed and we
      // have exactly the right number of bits to store this unsigned enum,
      // suggest changing the enum to an unsigned type. This typically happens
      // on Windows where unfixed enums always use an underlying type of 'int'.
      unsigned DiagID = 0;
      if (SignedEnum && !SignedBitfield) {
        DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
      } else if (SignedBitfield && !SignedEnum &&
                 ED->getNumPositiveBits() == FieldWidth) {
        DiagID = diag::warn_signed_bitfield_enum_conversion;
      }

      if (DiagID) {
        S.Diag(InitLoc, DiagID) << Bitfield << ED;
        TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
        SourceRange TypeRange =
            TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
        S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
            << SignedEnum << TypeRange;
      }

      // Compute the required bitwidth. If the enum has negative values, we need
      // one more bit than the normal number of positive bits to represent the
      // sign bit.
      unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
                                                  ED->getNumNegativeBits())
                                       : ED->getNumPositiveBits();

      // Check the bitwidth.
      if (BitsNeeded > FieldWidth) {
        Expr *WidthExpr = Bitfield->getBitWidth();
        S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
            << Bitfield << ED;
        S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
            << BitsNeeded << ED << WidthExpr->getSourceRange();
      }
    }

    return false;
  }

  unsigned OriginalWidth = Value.getBitWidth();

  // For negated/complemented constants, measure the width of the value
  // as written rather than its full (sign-extended) bit width.
  if (!Value.isSigned() || Value.isNegative())
    if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
      if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
        OriginalWidth = Value.getMinSignedBits();

  if (OriginalWidth <= FieldWidth)
    return false;

  // Compute the value which the bitfield will contain.
  llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
  TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());

  // Check whether the stored value is equal to the original value.
  TruncatedValue = TruncatedValue.extend(OriginalWidth);
  if (llvm::APSInt::isSameValue(Value, TruncatedValue))
    return false;

  // Special-case bitfields of width 1: booleans are naturally 0/1, and
  // therefore don't strictly fit into a signed bitfield of width 1.
  if (FieldWidth == 1 && Value == 1)
    return false;

  std::string PrettyValue = Value.toString(10);
  std::string PrettyTrunc = TruncatedValue.toString(10);

  S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
      << PrettyValue << PrettyTrunc << OriginalInit->getType()
      << Init->getSourceRange();

  return true;
}

/// Analyze the given simple or compound assignment for warning-worthy
/// operations.
static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
  // Just recurse on the LHS.
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());

  // We want to recurse on the RHS as normal unless we're assigning to
  // a bitfield.
  if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
    if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
                                  E->getOperatorLoc())) {
      // Recurse, ignoring any implicit conversions on the RHS.
      return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
                                        E->getOperatorLoc());
    }
  }

  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Diagnose implicitly sequentially-consistent atomic assignment.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
}

/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
                            SourceLocation CContext, unsigned diag,
                            bool pruneControlFlow = false) {
  // When pruneControlFlow is set the warning is routed through
  // DiagRuntimeBehavior, which suppresses it if the expression is provably
  // unreachable (used e.g. for 64->32 truncations inside dead branches).
  if (pruneControlFlow) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(diag)
                              << SourceType << T << E->getSourceRange()
                              << SourceRange(CContext));
    return;
  }
  S.Diag(E->getExprLoc(), diag)
      << SourceType << T << E->getSourceRange() << SourceRange(CContext);
}

/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
/// Convenience overload that uses E's own type as the source type.
static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
                            SourceLocation CContext,
                            unsigned diag, bool pruneControlFlow = false) {
  DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
}

/// Diagnose an implicit cast from a floating point value to an integer value.
///
/// Selects among several diagnostics depending on whether the source is a
/// literal, whether the value is a compile-time constant, and whether the
/// integral result is exact, out of range, or collapses to zero.
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  // Inside template instantiations, route diagnostics through
  // DiagRuntimeBehavior so unreachable instantiations stay quiet.
  const bool PruneWarnings = S.inTemplateInstantiation();

  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();

  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);

  llvm::APFloat Value(0.0);
  bool IsConstant =
    E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
  if (!IsConstant) {
    // Non-constant: only the generic float-to-integer warning applies.
    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  }

  bool isExact = false;

  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);

  // The value converts without loss: literals are fine silently; otherwise
  // emit only the generic precision warning.
  if (Result == llvm::APFloat::opOK && isExact) {
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
                           PruneWarnings);
  }

  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        S, E, T, CContext,
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
        PruneWarnings);

  unsigned DiagID = 0;
  if (IsLiteral) {
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) {  // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    }
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
  } else {
    // A saturated result (max/min of the target type) gets the stronger
    // value-printing diagnostic below; anything else gets the generic one.
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    } else {  // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    }
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  }

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  SmallString<16> PrettyTargetValue;
  if (IsBool)
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(DiagID)
                              << E->getType() << T.getUnqualifiedType()
                              << PrettySourceValue << PrettyTargetValue
                              << E->getSourceRange() << SourceRange(CContext));
  } else {
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
  }
}

/// Analyze the given compound assignment for the possible losing of
/// floating-point precision.
static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
  assert(isa<CompoundAssignOperator>(E) &&
         "Must be compound assignment operation");
  // Recurse on the LHS and RHS in here
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Diagnose implicitly sequentially-consistent atomic ops on the LHS.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);

  // Now check the outermost expression
  const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
  // The computation result type is the type the operation is performed in
  // before being stored back into the LHS.
  const auto *RBT = cast<CompoundAssignOperator>(E)
                        ->getComputationResultType()
                        ->getAs<BuiltinType>();

  // The below checks assume source is floating point.
  if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;

  // If source is floating point but target is not.
  if (!ResultBT->isFloatingPoint())
    return DiagnoseFloatingImpCast(S, E, E->getRHS()->getType(),
                                   E->getExprLoc());

  // If both source and target are floating points.
  // Builtin FP kinds are ordered by increasing FP rank.
  if (ResultBT->getKind() < RBT->getKind() &&
      // We don't want to warn for system macro.
      !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
    // warn about dropping FP rank.
    DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
                    diag::warn_impcast_float_result_precision);
}

/// Pretty-print Value as the decimal value it would have after being
/// truncated to Range.Width bits with signedness taken from Range.
static std::string PrettyPrintInRange(const llvm::APSInt &Value,
                                      IntRange Range) {
  if (!Range.Width) return "0";

  llvm::APSInt ValueInRange = Value;
  ValueInRange.setIsSigned(!Range.NonNegative);
  ValueInRange = ValueInRange.trunc(Range.Width);
  return ValueInRange.toString(10);
}

/// Return true if Ex is an implicit conversion between bool and a floating
/// point type: float-to-bool when ToBool is true, bool-to-float otherwise.
static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
  if (!isa<ImplicitCastExpr>(Ex))
    return false;

  Expr *InnerE = Ex->IgnoreParenImpCasts();
  const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
  const Type *Source =
      S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
  if (Target->isDependentType())
    return false;

  const BuiltinType *FloatCandidateBT =
      dyn_cast<BuiltinType>(ToBool ? Source : Target);
  const Type *BoolCandidateType = ToBool ? Target : Source;

  return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
          FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
}

/// Warn on call arguments that undergo a float-to-bool conversion while an
/// adjacent argument undergoes the opposite bool-to-float conversion — a
/// strong hint that the argument boundaries were misplaced.
static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                             SourceLocation CC) {
  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    // Check whether a neighboring argument converts in the opposite
    // direction (bool to float).
    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
                      CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}

/// Warn when a NULL or nullptr constant is implicitly converted to a
/// non-pointer scalar type, with a fix-it suggesting the zero literal.
static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  // Bail out early if the warning is disabled at this location.
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Don't warn on functions which have return type nullptr_t.
  if (isa<CallExpr>(E))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
  const Expr::NullPointerConstantKind NullKind =
      E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
  if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
    return;

  // Return if target type is a safe conversion.
  if (T->isAnyPointerType() || T->isBlockPointerType() ||
      T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
    return;

  SourceLocation Loc = E->getSourceRange().getBegin();

  // Venture through the macro stacks to get to the source of macro arguments.
  // The new location is a better location than the complete location that was
  // passed in.
  Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
  CC = S.SourceMgr.getTopMacroCallerLoc(CC);

  // __null is usually wrapped in a macro. Go up a macro if that is the case.
  if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
        Loc, S.SourceMgr, S.getLangOpts());
    if (MacroName == "NULL")
      Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
  }

  // Only warn if the null and context location are in the same macro expansion.
  if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
    return;

  S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
      << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC)
      << FixItHint::CreateReplacement(Loc,
                                      S.getFixItZeroLiteralForType(T, Loc));
}

// Forward declarations: element checking recurses into nested literals.
static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
                                  ObjCArrayLiteral *ArrayLiteral);

static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral);

/// Check a single element within a collection literal against the
/// target element type.
///
/// ElementKind selects the diagnostic wording (array element, dictionary
/// key, or dictionary value). Recurses into nested collection literals.
static void checkObjCCollectionLiteralElement(Sema &S,
                                              QualType TargetElementType,
                                              Expr *Element,
                                              unsigned ElementKind) {
  // Skip a bitcast to 'id' or qualified 'id'.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
    if (ICE->getCastKind() == CK_BitCast &&
        ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
      Element = ICE->getSubExpr();
  }

  QualType ElementType = Element->getType();
  ExprResult ElementResult(Element);
  if (ElementType->getAs<ObjCObjectPointerType>() &&
      S.CheckSingleAssignmentConstraints(TargetElementType,
                                         ElementResult,
                                         false, false)
        != Sema::Compatible) {
    S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
        << ElementType << ElementKind << TargetElementType
        << Element->getSourceRange();
  }

  if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
    checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
  else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
    checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
}

/// Check an Objective-C array literal being converted to the given
/// target type.
10678 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 10679 ObjCArrayLiteral *ArrayLiteral) { 10680 if (!S.NSArrayDecl) 10681 return; 10682 10683 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 10684 if (!TargetObjCPtr) 10685 return; 10686 10687 if (TargetObjCPtr->isUnspecialized() || 10688 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 10689 != S.NSArrayDecl->getCanonicalDecl()) 10690 return; 10691 10692 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 10693 if (TypeArgs.size() != 1) 10694 return; 10695 10696 QualType TargetElementType = TypeArgs[0]; 10697 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 10698 checkObjCCollectionLiteralElement(S, TargetElementType, 10699 ArrayLiteral->getElement(I), 10700 0); 10701 } 10702 } 10703 10704 /// Check an Objective-C dictionary literal being converted to the given 10705 /// target type. 10706 static void 10707 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 10708 ObjCDictionaryLiteral *DictionaryLiteral) { 10709 if (!S.NSDictionaryDecl) 10710 return; 10711 10712 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 10713 if (!TargetObjCPtr) 10714 return; 10715 10716 if (TargetObjCPtr->isUnspecialized() || 10717 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 10718 != S.NSDictionaryDecl->getCanonicalDecl()) 10719 return; 10720 10721 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 10722 if (TypeArgs.size() != 2) 10723 return; 10724 10725 QualType TargetKeyType = TypeArgs[0]; 10726 QualType TargetObjectType = TypeArgs[1]; 10727 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 10728 auto Element = DictionaryLiteral->getKeyValueElement(I); 10729 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 10730 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 10731 } 10732 } 10733 10734 // Helper function to filter out cases for constant width constant conversion. 
10735 // Don't warn on char array initialization or for non-decimal values. 10736 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 10737 SourceLocation CC) { 10738 // If initializing from a constant, and the constant starts with '0', 10739 // then it is a binary, octal, or hexadecimal. Allow these constants 10740 // to fill all the bits, even if there is a sign change. 10741 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 10742 const char FirstLiteralCharacter = 10743 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 10744 if (FirstLiteralCharacter == '0') 10745 return false; 10746 } 10747 10748 // If the CC location points to a '{', and the type is char, then assume 10749 // assume it is an array initialization. 10750 if (CC.isValid() && T->isCharType()) { 10751 const char FirstContextCharacter = 10752 S.getSourceManager().getCharacterData(CC)[0]; 10753 if (FirstContextCharacter == '{') 10754 return false; 10755 } 10756 10757 return true; 10758 } 10759 10760 static void 10761 CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC, 10762 bool *ICContext = nullptr) { 10763 if (E->isTypeDependent() || E->isValueDependent()) return; 10764 10765 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 10766 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 10767 if (Source == Target) return; 10768 if (Target->isDependentType()) return; 10769 10770 // If the conversion context location is invalid don't complain. We also 10771 // don't want to emit a warning if the issue occurs from the expansion of 10772 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 10773 // delay this check as long as possible. Once we detect we are in that 10774 // scenario, we just return. 
10775 if (CC.isInvalid()) 10776 return; 10777 10778 if (Source->isAtomicType()) 10779 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 10780 10781 // Diagnose implicit casts to bool. 10782 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 10783 if (isa<StringLiteral>(E)) 10784 // Warn on string literal to bool. Checks for string literals in logical 10785 // and expressions, for instance, assert(0 && "error here"), are 10786 // prevented by a check in AnalyzeImplicitConversions(). 10787 return DiagnoseImpCast(S, E, T, CC, 10788 diag::warn_impcast_string_literal_to_bool); 10789 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 10790 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 10791 // This covers the literal expressions that evaluate to Objective-C 10792 // objects. 10793 return DiagnoseImpCast(S, E, T, CC, 10794 diag::warn_impcast_objective_c_literal_to_bool); 10795 } 10796 if (Source->isPointerType() || Source->canDecayToPointerType()) { 10797 // Warn on pointer to bool conversion that is always true. 10798 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 10799 SourceRange(CC)); 10800 } 10801 } 10802 10803 // Check implicit casts from Objective-C collection literals to specialized 10804 // collection types, e.g., NSArray<NSString *> *. 10805 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 10806 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 10807 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 10808 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 10809 10810 // Strip vector types. 10811 if (isa<VectorType>(Source)) { 10812 if (!isa<VectorType>(Target)) { 10813 if (S.SourceMgr.isInSystemMacro(CC)) 10814 return; 10815 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 10816 } 10817 10818 // If the vector cast is cast between two vectors of the same size, it is 10819 // a bitcast, not a conversion. 
10820 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 10821 return; 10822 10823 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 10824 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 10825 } 10826 if (auto VecTy = dyn_cast<VectorType>(Target)) 10827 Target = VecTy->getElementType().getTypePtr(); 10828 10829 // Strip complex types. 10830 if (isa<ComplexType>(Source)) { 10831 if (!isa<ComplexType>(Target)) { 10832 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 10833 return; 10834 10835 return DiagnoseImpCast(S, E, T, CC, 10836 S.getLangOpts().CPlusPlus 10837 ? diag::err_impcast_complex_scalar 10838 : diag::warn_impcast_complex_scalar); 10839 } 10840 10841 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 10842 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 10843 } 10844 10845 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 10846 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 10847 10848 // If the source is floating point... 10849 if (SourceBT && SourceBT->isFloatingPoint()) { 10850 // ...and the target is floating point... 10851 if (TargetBT && TargetBT->isFloatingPoint()) { 10852 // ...then warn if we're dropping FP rank. 10853 10854 // Builtin FP kinds are ordered by increasing FP rank. 10855 if (SourceBT->getKind() > TargetBT->getKind()) { 10856 // Don't warn about float constants that are precisely 10857 // representable in the target type. 10858 Expr::EvalResult result; 10859 if (E->EvaluateAsRValue(result, S.Context)) { 10860 // Value might be a float, a float vector, or a float complex. 
10861 if (IsSameFloatAfterCast(result.Val, 10862 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 10863 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 10864 return; 10865 } 10866 10867 if (S.SourceMgr.isInSystemMacro(CC)) 10868 return; 10869 10870 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 10871 } 10872 // ... or possibly if we're increasing rank, too 10873 else if (TargetBT->getKind() > SourceBT->getKind()) { 10874 if (S.SourceMgr.isInSystemMacro(CC)) 10875 return; 10876 10877 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 10878 } 10879 return; 10880 } 10881 10882 // If the target is integral, always warn. 10883 if (TargetBT && TargetBT->isInteger()) { 10884 if (S.SourceMgr.isInSystemMacro(CC)) 10885 return; 10886 10887 DiagnoseFloatingImpCast(S, E, T, CC); 10888 } 10889 10890 // Detect the case where a call result is converted from floating-point to 10891 // to bool, and the final argument to the call is converted from bool, to 10892 // discover this typo: 10893 // 10894 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 10895 // 10896 // FIXME: This is an incredibly special case; is there some more general 10897 // way to detect this class of misplaced-parentheses bug? 10898 if (Target->isBooleanType() && isa<CallExpr>(E)) { 10899 // Check last argument of function call to see if it is an 10900 // implicit cast from a type matching the type the result 10901 // is being cast to. 
10902 CallExpr *CEx = cast<CallExpr>(E); 10903 if (unsigned NumArgs = CEx->getNumArgs()) { 10904 Expr *LastA = CEx->getArg(NumArgs - 1); 10905 Expr *InnerE = LastA->IgnoreParenImpCasts(); 10906 if (isa<ImplicitCastExpr>(LastA) && 10907 InnerE->getType()->isBooleanType()) { 10908 // Warn on this floating-point to bool conversion 10909 DiagnoseImpCast(S, E, T, CC, 10910 diag::warn_impcast_floating_point_to_bool); 10911 } 10912 } 10913 } 10914 return; 10915 } 10916 10917 DiagnoseNullConversion(S, E, T, CC); 10918 10919 S.DiscardMisalignedMemberAddress(Target, E); 10920 10921 if (!Source->isIntegerType() || !Target->isIntegerType()) 10922 return; 10923 10924 // TODO: remove this early return once the false positives for constant->bool 10925 // in templates, macros, etc, are reduced or removed. 10926 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 10927 return; 10928 10929 IntRange SourceRange = GetExprRange(S.Context, E); 10930 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 10931 10932 if (SourceRange.Width > TargetRange.Width) { 10933 // If the source is a constant, use a default-on diagnostic. 10934 // TODO: this should happen for bitfield stores, too. 10935 llvm::APSInt Value(32); 10936 if (E->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects)) { 10937 if (S.SourceMgr.isInSystemMacro(CC)) 10938 return; 10939 10940 std::string PrettySourceValue = Value.toString(10); 10941 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 10942 10943 S.DiagRuntimeBehavior(E->getExprLoc(), E, 10944 S.PDiag(diag::warn_impcast_integer_precision_constant) 10945 << PrettySourceValue << PrettyTargetValue 10946 << E->getType() << T << E->getSourceRange() 10947 << clang::SourceRange(CC)); 10948 return; 10949 } 10950 10951 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
10952 if (S.SourceMgr.isInSystemMacro(CC)) 10953 return; 10954 10955 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 10956 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 10957 /* pruneControlFlow */ true); 10958 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 10959 } 10960 10961 if (TargetRange.Width > SourceRange.Width) { 10962 if (auto *UO = dyn_cast<UnaryOperator>(E)) 10963 if (UO->getOpcode() == UO_Minus) 10964 if (Source->isUnsignedIntegerType()) { 10965 if (Target->isUnsignedIntegerType()) 10966 return DiagnoseImpCast(S, E, T, CC, 10967 diag::warn_impcast_high_order_zero_bits); 10968 if (Target->isSignedIntegerType()) 10969 return DiagnoseImpCast(S, E, T, CC, 10970 diag::warn_impcast_nonnegative_result); 10971 } 10972 } 10973 10974 if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative && 10975 SourceRange.NonNegative && Source->isSignedIntegerType()) { 10976 // Warn when doing a signed to signed conversion, warn if the positive 10977 // source value is exactly the width of the target type, which will 10978 // cause a negative value to be stored. 10979 10980 llvm::APSInt Value; 10981 if (E->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects) && 10982 !S.SourceMgr.isInSystemMacro(CC)) { 10983 if (isSameWidthConstantConversion(S, E, T, CC)) { 10984 std::string PrettySourceValue = Value.toString(10); 10985 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 10986 10987 S.DiagRuntimeBehavior( 10988 E->getExprLoc(), E, 10989 S.PDiag(diag::warn_impcast_integer_precision_constant) 10990 << PrettySourceValue << PrettyTargetValue << E->getType() << T 10991 << E->getSourceRange() << clang::SourceRange(CC)); 10992 return; 10993 } 10994 } 10995 10996 // Fall through for non-constants to give a sign conversion warning. 
10997 } 10998 10999 if ((TargetRange.NonNegative && !SourceRange.NonNegative) || 11000 (!TargetRange.NonNegative && SourceRange.NonNegative && 11001 SourceRange.Width == TargetRange.Width)) { 11002 if (S.SourceMgr.isInSystemMacro(CC)) 11003 return; 11004 11005 unsigned DiagID = diag::warn_impcast_integer_sign; 11006 11007 // Traditionally, gcc has warned about this under -Wsign-compare. 11008 // We also want to warn about it in -Wconversion. 11009 // So if -Wconversion is off, use a completely identical diagnostic 11010 // in the sign-compare group. 11011 // The conditional-checking code will 11012 if (ICContext) { 11013 DiagID = diag::warn_impcast_integer_sign_conditional; 11014 *ICContext = true; 11015 } 11016 11017 return DiagnoseImpCast(S, E, T, CC, DiagID); 11018 } 11019 11020 // Diagnose conversions between different enumeration types. 11021 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 11022 // type, to give us better diagnostics. 11023 QualType SourceType = E->getType(); 11024 if (!S.getLangOpts().CPlusPlus) { 11025 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 11026 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 11027 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 11028 SourceType = S.Context.getTypeDeclType(Enum); 11029 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 11030 } 11031 } 11032 11033 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 11034 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 11035 if (SourceEnum->getDecl()->hasNameForLinkage() && 11036 TargetEnum->getDecl()->hasNameForLinkage() && 11037 SourceEnum != TargetEnum) { 11038 if (S.SourceMgr.isInSystemMacro(CC)) 11039 return; 11040 11041 return DiagnoseImpCast(S, E, SourceType, T, CC, 11042 diag::warn_impcast_different_enum_types); 11043 } 11044 } 11045 11046 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E, 11047 SourceLocation CC, QualType T); 11048 11049 static 
void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
                             SourceLocation CC, bool &ICContext) {
  E = E->IgnoreParenImpCasts();

  // A nested conditional is analyzed recursively so that both of its arms
  // are checked against the outer context type T.
  if (isa<ConditionalOperator>(E))
    return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T);

  AnalyzeImplicitConversions(S, E, CC);
  // Only diagnose when the operand's type actually differs from the context
  // type. Passing &ICContext lets the callee record that it would have
  // emitted a signedness-conversion warning, so the caller can decide
  // whether to re-check under the -Wsign-compare group instead.
  if (E->getType() != T)
    return CheckImplicitConversion(S, E, T, CC, &ICContext);
}

/// Check the true and false operands of a conditional operator against the
/// context type \p T the result is converted to, cooperating with
/// -Wconversion / -Wsign-compare (see the isIgnored dance below).
static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
                                     SourceLocation CC, QualType T) {
  AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());

  bool Suspicious = false;
  CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
  CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);

  // If -Wconversion would have warned about either of the candidates
  // for a signedness conversion to the context type...
  if (!Suspicious) return;

  // ...but it's currently ignored...
  if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
    return;

  // ...then check whether it would have warned about either of the
  // candidates for a signedness conversion to the condition type.
  if (E->getType() == T) return;

  Suspicious = false;
  CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(),
                          E->getType(), CC, &Suspicious);
  if (!Suspicious)
    CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
                            E->getType(), CC, &Suspicious);
}

/// Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
  // Nothing to do when the language has a real boolean type (the 'Bool'
  // language option); these warnings target boolean-ish uses of integers.
  if (S.getLangOpts().Bool)
    return;
  // Converting an atomic type is handled elsewhere (seq_cst warnings).
  if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
    return;
  CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}

/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression. There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE,
                                       SourceLocation CC) {
  QualType T = OrigE->getType();
  Expr *E = OrigE->IgnoreParenImpCasts();

  if (E->isTypeDependent() || E->isValueDependent())
    return;

  // For conditional operators, we analyze the arguments as if they
  // were being fed directly into the output.
  if (isa<ConditionalOperator>(E)) {
    ConditionalOperator *CO = cast<ConditionalOperator>(E);
    CheckConditionalOperator(S, CO, CC, T);
    return;
  }

  // Check implicit argument conversions for function calls.
  if (CallExpr *Call = dyn_cast<CallExpr>(E))
    CheckImplicitArgumentConversions(S, Call, CC);

  // Go ahead and check any implicit conversions we might have skipped.
  // The non-canonical typecheck is just an optimization;
  // CheckImplicitConversion will filter out dead implicit conversions.
  if (E->getType() != T)
    CheckImplicitConversion(S, E, T, CC);

  // Now continue drilling into this expression.

  if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    // The bound subexpressions in a PseudoObjectExpr are not reachable
    // as transitive children.
    // FIXME: Use a more uniform representation for this.
    for (auto *SE : POE->semantics())
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
        AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC);
  }

  // Skip past explicit casts.
  if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
    E = CE->getSubExpr()->IgnoreParenImpCasts();
    if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
      S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
    return AnalyzeImplicitConversions(S, E, CC);
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    // Do a somewhat different check with comparison operators.
    if (BO->isComparisonOp())
      return AnalyzeComparison(S, BO);

    // And with simple assignments.
    if (BO->getOpcode() == BO_Assign)
      return AnalyzeAssignment(S, BO);
    // And with compound assignments.
    if (BO->isAssignmentOp())
      return AnalyzeCompoundAssignment(S, BO);
  }

  // These break the otherwise-useful invariant below. Fortunately,
  // we don't really need to recurse into them, because any internal
  // expressions should have been analyzed already when they were
  // built into statements.
  if (isa<StmtExpr>(E)) return;

  // Don't descend into unevaluated contexts.
  if (isa<UnaryExprOrTypeTraitExpr>(E)) return;

  // Now just recurse over the expression's children. From here on the
  // "context" location for any nested conversion is this expression.
  CC = E->getExprLoc();
  BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
  for (Stmt *SubStmt : E->children()) {
    Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
    if (!ChildExpr)
      continue;

    if (IsLogicalAndOperator &&
        isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
      // Ignore checking string literals that are in logical and operators.
      // This is a common pattern for asserts.
      continue;
    AnalyzeImplicitConversions(S, ChildExpr, CC);
  }

  // Operands of && / || are used in a boolean context; warn about
  // bool-like conversions there too, again excusing string literals
  // under && (the assert pattern).
  if (BO && BO->isLogicalOp()) {
    Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());

    SubExpr = BO->getRHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
  }

  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
    if (U->getOpcode() == UO_LNot) {
      // The operand of ! is also a boolean context.
      ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
    } else if (U->getOpcode() != UO_AddrOf) {
      if (U->getSubExpr()->getType()->isAtomicType())
        S.Diag(U->getSubExpr()->getBeginLoc(),
               diag::warn_atomic_implicit_seq_cst);
    }
  }
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
  // Taking into account implicit conversions,
  // allow any integer.
  if (!E->getType()->isIntegerType()) {
    S.Diag(E->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_invalid_local_size_type);
    return true;
  }
  // Potentially emit standard warnings for implicit conversions if enabled
  // using -Wconversion.
  CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
  return false;
}

// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a reference.
11224 static bool CheckForReference(Sema &SemaRef, const Expr *E, 11225 const PartialDiagnostic &PD) { 11226 E = E->IgnoreParenImpCasts(); 11227 11228 const FunctionDecl *FD = nullptr; 11229 11230 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 11231 if (!DRE->getDecl()->getType()->isReferenceType()) 11232 return false; 11233 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 11234 if (!M->getMemberDecl()->getType()->isReferenceType()) 11235 return false; 11236 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 11237 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 11238 return false; 11239 FD = Call->getDirectCallee(); 11240 } else { 11241 return false; 11242 } 11243 11244 SemaRef.Diag(E->getExprLoc(), PD); 11245 11246 // If possible, point to location of function. 11247 if (FD) { 11248 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 11249 } 11250 11251 return true; 11252 } 11253 11254 // Returns true if the SourceLocation is expanded from any macro body. 11255 // Returns false if the SourceLocation is invalid, is from not in a macro 11256 // expansion, or is from expanded from a top-level macro argument. 11257 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 11258 if (Loc.isInvalid()) 11259 return false; 11260 11261 while (Loc.isMacroID()) { 11262 if (SM.isMacroBodyExpansion(Loc)) 11263 return true; 11264 Loc = SM.getImmediateMacroCallerLoc(Loc); 11265 } 11266 11267 return false; 11268 } 11269 11270 /// Diagnose pointers that are always non-null. 
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
/// compared to a null pointer
/// \param IsEqual True when the comparison is equal to a null pointer
/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
                                        Expr::NullPointerConstantKind NullKind,
                                        bool IsEqual, SourceRange Range) {
  if (!E)
    return;

  // Don't warn inside macros.
  if (E->getExprLoc().isMacroID()) {
    const SourceManager &SM = getSourceManager();
    if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
        IsInAnyMacroBody(SM, Range.getBegin()))
      return;
  }
  E = E->IgnoreImpCasts();

  const bool IsCompare = NullKind != Expr::NPCK_NotNull;

  // 'this' can never be null.
  if (isa<CXXThisExpr>(E)) {
    unsigned DiagID = IsCompare ? diag::warn_this_null_compare
                                : diag::warn_this_bool_conversion;
    Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
    return;
  }

  bool IsAddressOf = false;

  if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() != UO_AddrOf)
      return;
    IsAddressOf = true;
    E = UO->getSubExpr();
  }

  // &x where x is a reference: the reference-specific diagnostic (emitted by
  // CheckForReference) takes precedence over the generic ones below.
  if (IsAddressOf) {
    unsigned DiagID = IsCompare
                          ? diag::warn_address_of_reference_null_compare
                          : diag::warn_address_of_reference_bool_conversion;
    PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
                                         << IsEqual;
    if (CheckForReference(*this, E, PD)) {
      return;
    }
  }

  // Shared diagnostic emitter for the nonnull-attribute cases below; IsParam
  // distinguishes a nonnull parameter from a returns_nonnull call.
  auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
    bool IsParam = isa<NonNullAttr>(NonnullAttr);
    std::string Str;
    llvm::raw_string_ostream S(Str);
    E->printPretty(S, nullptr, getPrintingPolicy());
    unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
                                : diag::warn_cast_nonnull_to_bool;
    Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
                                  << E->getSourceRange() << Range << IsEqual;
    Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
  };

  // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
  if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
    if (auto *Callee = Call->getDirectCallee()) {
      if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }
    }
  }

  // Expect to find a single Decl. Skip anything more complicated.
  ValueDecl *D = nullptr;
  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
    D = R->getDecl();
  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    D = M->getMemberDecl();
  }

  // Weak Decls can be null.
  if (!D || D->isWeak())
    return;

  // Check for parameter decl with nonnull attribute, but only when the
  // parameter has not been reassigned in the current function (in which
  // case it might legitimately be null by now).
  if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
    if (getCurFunction() &&
        !getCurFunction()->ModifiedNonNullParams.count(PV)) {
      if (const Attr *A = PV->getAttr<NonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }

      // The attribute may also live on the function, either covering all
      // parameters (no args) or naming specific parameter indices.
      if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
        auto ParamIter = llvm::find(FD->parameters(), PV);
        assert(ParamIter != FD->param_end());
        unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);

        for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
          if (!NonNull->args_size()) {
            ComplainAboutNonnullParamOrCall(NonNull);
            return;
          }

          for (const ParamIdx &ArgNo : NonNull->args()) {
            if (ArgNo.getASTIndex() == ParamNo) {
              ComplainAboutNonnullParamOrCall(NonNull);
              return;
            }
          }
        }
      }
    }
  }

  QualType T = D->getType();
  const bool IsArray = T->isArrayType();
  const bool IsFunction = T->isFunctionType();

  // Address of function is used to silence the function warning.
  if (IsAddressOf && IsFunction) {
    return;
  }

  // Found nothing.
  if (!IsAddressOf && !IsFunction && !IsArray)
    return;

  // Pretty print the expression for the diagnostic.
  std::string Str;
  llvm::raw_string_ostream S(Str);
  E->printPretty(S, nullptr, getPrintingPolicy());

  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
                              : diag::warn_impcast_pointer_to_bool;
  enum {
    AddressOf,
    FunctionPointer,
    ArrayPointer
  } DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getBeginLoc(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here. If there is a null constant, only suggest
    // for a pointer return type. If the null is 0, then suggest if the return
    // type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else {
        return;
      }
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}

/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression). Implements -Wconversion
/// and -Wsign-compare.
///
/// \param CC the "context" location of the implicit conversion, i.e.
/// the location of the syntactic entity requiring the implicit
/// conversion
void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
  // Don't diagnose in unevaluated contexts.
  if (isUnevaluatedContext())
    return;

  // Don't diagnose for value- or type-dependent expressions.
  if (E->isTypeDependent() || E->isValueDependent())
    return;

  // Check for array bounds violations in cases where the check isn't triggered
  // elsewhere for other Expr types (like BinaryOperators), e.g. when an
  // ArraySubscriptExpr is on the RHS of a variable initialization.
  CheckArrayAccess(E);

  // This is not the right CC for (e.g.) a variable initialization.
  AnalyzeImplicitConversions(*this, E, CC);
}

/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
  // Public wrapper around the file-local helper of the same name.
  ::CheckBoolLikeConversion(*this, E, CC);
}

/// Diagnose when the expression is an integer constant expression and its
/// evaluation results in integer overflow.
void Sema::CheckForIntOverflow (Expr *E) {
  // Use a work list to deal with nested struct initializers.
  SmallVector<Expr *, 2> Exprs(1, E);

  do {
    Expr *OriginalE = Exprs.pop_back_val();
    // NOTE: this inner 'E' deliberately shadows the parameter; it is the
    // current work-list item with parentheses and casts stripped.
    Expr *E = OriginalE->IgnoreParenCasts();

    if (isa<BinaryOperator>(E)) {
      E->EvaluateForOverflow(Context);
      continue;
    }

    // Init lists, calls, and ObjC messages are containers: queue their
    // subexpressions instead of evaluating them directly. Boxed
    // expressions are checked on the stripped form.
    if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
      Exprs.append(InitList->inits().begin(), InitList->inits().end());
    else if (isa<ObjCBoxedExpr>(OriginalE))
      E->EvaluateForOverflow(Context);
    else if (auto Call = dyn_cast<CallExpr>(E))
      Exprs.append(Call->arg_begin(), Call->arg_end());
    else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
      Exprs.append(Message->arg_begin(), Message->arg_end());
  } while (!Exprs.empty());
}

namespace {

/// Visitor for expressions which looks for unsequenced operations on the
/// same object.
class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
  using Base = EvaluatedExprVisitor<SequenceChecker>;

  /// A tree of sequenced regions within an expression. Two regions are
  /// unsequenced if one is an ancestor or a descendent of the other. When we
  /// finish processing an expression with sequencing, such as a comma
  /// expression, we fold its tree nodes into its parent, since they are
  /// unsequenced with respect to nodes we will visit later.
  class SequenceTree {
    struct Value {
      explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
      // Packed into one word: the index of the parent region, plus a flag
      // recording whether this region has been folded into its parent.
      unsigned Parent : 31;
      unsigned Merged : 1;
    };
    SmallVector<Value, 8> Values;

  public:
    /// A region within an expression which may be sequenced with respect
    /// to some other region.
    class Seq {
      friend class SequenceTree;

      // Index of this region's Value in SequenceTree::Values; 0 is the root.
      unsigned Index = 0;

      explicit Seq(unsigned N) : Index(N) {}

    public:
      Seq() = default;
    };

    SequenceTree() { Values.push_back(Value(0)); }
    Seq root() const { return Seq(0); }

    /// Create a new sequence of operations, which is an unsequenced
    /// subset of \p Parent. This sequence of operations is sequenced with
    /// respect to other children of \p Parent.
    Seq allocate(Seq Parent) {
      Values.push_back(Value(Parent.Index));
      return Seq(Values.size() - 1);
    }

    /// Merge a sequence of operations into its parent.
    void merge(Seq S) {
      Values[S.Index].Merged = true;
    }

    /// Determine whether two operations are unsequenced. This operation
    /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
    /// should have been merged into its parent as appropriate.
    bool isUnsequenced(Seq Cur, Seq Old) {
      // Walk from Cur towards the root looking for Old's representative.
      // Children are always allocated after their parents, so parent
      // indices only decrease; once C drops below Target we can stop.
      unsigned C = representative(Cur.Index);
      unsigned Target = representative(Old.Index);
      while (C >= Target) {
        if (C == Target)
          return true;
        C = Values[C].Parent;
      }
      return false;
    }

  private:
    /// Pick a representative for a sequence.
    unsigned representative(unsigned K) {
      if (Values[K].Merged)
        // Perform path compression as we go.
        return Values[K].Parent = representative(Values[K].Parent);
      return K;
    }
  };

  /// An object for which we can track unsequenced uses.
11590 using Object = NamedDecl *; 11591 11592 /// Different flavors of object usage which we track. We only track the 11593 /// least-sequenced usage of each kind. 11594 enum UsageKind { 11595 /// A read of an object. Multiple unsequenced reads are OK. 11596 UK_Use, 11597 11598 /// A modification of an object which is sequenced before the value 11599 /// computation of the expression, such as ++n in C++. 11600 UK_ModAsValue, 11601 11602 /// A modification of an object which is not sequenced before the value 11603 /// computation of the expression, such as n++. 11604 UK_ModAsSideEffect, 11605 11606 UK_Count = UK_ModAsSideEffect + 1 11607 }; 11608 11609 struct Usage { 11610 Expr *Use = nullptr; 11611 SequenceTree::Seq Seq; 11612 11613 Usage() = default; 11614 }; 11615 11616 struct UsageInfo { 11617 Usage Uses[UK_Count]; 11618 11619 /// Have we issued a diagnostic for this variable already? 11620 bool Diagnosed = false; 11621 11622 UsageInfo() = default; 11623 }; 11624 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 11625 11626 Sema &SemaRef; 11627 11628 /// Sequenced regions within the expression. 11629 SequenceTree Tree; 11630 11631 /// Declaration modifications and references which we have seen. 11632 UsageInfoMap UsageMap; 11633 11634 /// The region we are currently within. 11635 SequenceTree::Seq Region; 11636 11637 /// Filled in with declarations which were modified as a side-effect 11638 /// (that is, post-increment operations). 11639 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 11640 11641 /// Expressions to check later. We defer checking these to reduce 11642 /// stack usage. 11643 SmallVectorImpl<Expr *> &WorkList; 11644 11645 /// RAII object wrapping the visitation of a sequenced subexpression of an 11646 /// expression. 
At the end of this process, the side-effects of the evaluation 11647 /// become sequenced with respect to the value computation of the result, so 11648 /// we downgrade any UK_ModAsSideEffect within the evaluation to 11649 /// UK_ModAsValue. 11650 struct SequencedSubexpression { 11651 SequencedSubexpression(SequenceChecker &Self) 11652 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 11653 Self.ModAsSideEffect = &ModAsSideEffect; 11654 } 11655 11656 ~SequencedSubexpression() { 11657 for (auto &M : llvm::reverse(ModAsSideEffect)) { 11658 UsageInfo &U = Self.UsageMap[M.first]; 11659 auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect]; 11660 Self.addUsage(U, M.first, SideEffectUsage.Use, UK_ModAsValue); 11661 SideEffectUsage = M.second; 11662 } 11663 Self.ModAsSideEffect = OldModAsSideEffect; 11664 } 11665 11666 SequenceChecker &Self; 11667 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 11668 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 11669 }; 11670 11671 /// RAII object wrapping the visitation of a subexpression which we might 11672 /// choose to evaluate as a constant. If any subexpression is evaluated and 11673 /// found to be non-constant, this allows us to suppress the evaluation of 11674 /// the outer expression. 
11675 class EvaluationTracker { 11676 public: 11677 EvaluationTracker(SequenceChecker &Self) 11678 : Self(Self), Prev(Self.EvalTracker) { 11679 Self.EvalTracker = this; 11680 } 11681 11682 ~EvaluationTracker() { 11683 Self.EvalTracker = Prev; 11684 if (Prev) 11685 Prev->EvalOK &= EvalOK; 11686 } 11687 11688 bool evaluate(const Expr *E, bool &Result) { 11689 if (!EvalOK || E->isValueDependent()) 11690 return false; 11691 EvalOK = E->EvaluateAsBooleanCondition(Result, Self.SemaRef.Context); 11692 return EvalOK; 11693 } 11694 11695 private: 11696 SequenceChecker &Self; 11697 EvaluationTracker *Prev; 11698 bool EvalOK = true; 11699 } *EvalTracker = nullptr; 11700 11701 /// Find the object which is produced by the specified expression, 11702 /// if any. 11703 Object getObject(Expr *E, bool Mod) const { 11704 E = E->IgnoreParenCasts(); 11705 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 11706 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 11707 return getObject(UO->getSubExpr(), Mod); 11708 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 11709 if (BO->getOpcode() == BO_Comma) 11710 return getObject(BO->getRHS(), Mod); 11711 if (Mod && BO->isAssignmentOp()) 11712 return getObject(BO->getLHS(), Mod); 11713 } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 11714 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 11715 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 11716 return ME->getMemberDecl(); 11717 } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 11718 // FIXME: If this is a reference, map through to its value. 11719 return DRE->getDecl(); 11720 return nullptr; 11721 } 11722 11723 /// Note that an object was modified or used by an expression. 
11724 void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) { 11725 Usage &U = UI.Uses[UK]; 11726 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) { 11727 if (UK == UK_ModAsSideEffect && ModAsSideEffect) 11728 ModAsSideEffect->push_back(std::make_pair(O, U)); 11729 U.Use = Ref; 11730 U.Seq = Region; 11731 } 11732 } 11733 11734 /// Check whether a modification or use conflicts with a prior usage. 11735 void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind, 11736 bool IsModMod) { 11737 if (UI.Diagnosed) 11738 return; 11739 11740 const Usage &U = UI.Uses[OtherKind]; 11741 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) 11742 return; 11743 11744 Expr *Mod = U.Use; 11745 Expr *ModOrUse = Ref; 11746 if (OtherKind == UK_Use) 11747 std::swap(Mod, ModOrUse); 11748 11749 SemaRef.Diag(Mod->getExprLoc(), 11750 IsModMod ? diag::warn_unsequenced_mod_mod 11751 : diag::warn_unsequenced_mod_use) 11752 << O << SourceRange(ModOrUse->getExprLoc()); 11753 UI.Diagnosed = true; 11754 } 11755 11756 void notePreUse(Object O, Expr *Use) { 11757 UsageInfo &U = UsageMap[O]; 11758 // Uses conflict with other modifications. 11759 checkUsage(O, U, Use, UK_ModAsValue, false); 11760 } 11761 11762 void notePostUse(Object O, Expr *Use) { 11763 UsageInfo &U = UsageMap[O]; 11764 checkUsage(O, U, Use, UK_ModAsSideEffect, false); 11765 addUsage(U, O, Use, UK_Use); 11766 } 11767 11768 void notePreMod(Object O, Expr *Mod) { 11769 UsageInfo &U = UsageMap[O]; 11770 // Modifications conflict with other modifications and with uses. 
11771 checkUsage(O, U, Mod, UK_ModAsValue, true); 11772 checkUsage(O, U, Mod, UK_Use, false); 11773 } 11774 11775 void notePostMod(Object O, Expr *Use, UsageKind UK) { 11776 UsageInfo &U = UsageMap[O]; 11777 checkUsage(O, U, Use, UK_ModAsSideEffect, true); 11778 addUsage(U, O, Use, UK); 11779 } 11780 11781 public: 11782 SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList) 11783 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 11784 Visit(E); 11785 } 11786 11787 void VisitStmt(Stmt *S) { 11788 // Skip all statements which aren't expressions for now. 11789 } 11790 11791 void VisitExpr(Expr *E) { 11792 // By default, just recurse to evaluated subexpressions. 11793 Base::VisitStmt(E); 11794 } 11795 11796 void VisitCastExpr(CastExpr *E) { 11797 Object O = Object(); 11798 if (E->getCastKind() == CK_LValueToRValue) 11799 O = getObject(E->getSubExpr(), false); 11800 11801 if (O) 11802 notePreUse(O, E); 11803 VisitExpr(E); 11804 if (O) 11805 notePostUse(O, E); 11806 } 11807 11808 void VisitBinComma(BinaryOperator *BO) { 11809 // C++11 [expr.comma]p1: 11810 // Every value computation and side effect associated with the left 11811 // expression is sequenced before every value computation and side 11812 // effect associated with the right expression. 11813 SequenceTree::Seq LHS = Tree.allocate(Region); 11814 SequenceTree::Seq RHS = Tree.allocate(Region); 11815 SequenceTree::Seq OldRegion = Region; 11816 11817 { 11818 SequencedSubexpression SeqLHS(*this); 11819 Region = LHS; 11820 Visit(BO->getLHS()); 11821 } 11822 11823 Region = RHS; 11824 Visit(BO->getRHS()); 11825 11826 Region = OldRegion; 11827 11828 // Forget that LHS and RHS are sequenced. They are both unsequenced 11829 // with respect to other stuff. 
11830 Tree.merge(LHS); 11831 Tree.merge(RHS); 11832 } 11833 11834 void VisitBinAssign(BinaryOperator *BO) { 11835 // The modification is sequenced after the value computation of the LHS 11836 // and RHS, so check it before inspecting the operands and update the 11837 // map afterwards. 11838 Object O = getObject(BO->getLHS(), true); 11839 if (!O) 11840 return VisitExpr(BO); 11841 11842 notePreMod(O, BO); 11843 11844 // C++11 [expr.ass]p7: 11845 // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated 11846 // only once. 11847 // 11848 // Therefore, for a compound assignment operator, O is considered used 11849 // everywhere except within the evaluation of E1 itself. 11850 if (isa<CompoundAssignOperator>(BO)) 11851 notePreUse(O, BO); 11852 11853 Visit(BO->getLHS()); 11854 11855 if (isa<CompoundAssignOperator>(BO)) 11856 notePostUse(O, BO); 11857 11858 Visit(BO->getRHS()); 11859 11860 // C++11 [expr.ass]p1: 11861 // the assignment is sequenced [...] before the value computation of the 11862 // assignment expression. 11863 // C11 6.5.16/3 has no such rule. 11864 notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 11865 : UK_ModAsSideEffect); 11866 } 11867 11868 void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) { 11869 VisitBinAssign(CAO); 11870 } 11871 11872 void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 11873 void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 11874 void VisitUnaryPreIncDec(UnaryOperator *UO) { 11875 Object O = getObject(UO->getSubExpr(), true); 11876 if (!O) 11877 return VisitExpr(UO); 11878 11879 notePreMod(O, UO); 11880 Visit(UO->getSubExpr()); 11881 // C++11 [expr.pre.incr]p1: 11882 // the expression ++x is equivalent to x+=1 11883 notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 11884 : UK_ModAsSideEffect); 11885 } 11886 11887 void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 11888 void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 11889 void VisitUnaryPostIncDec(UnaryOperator *UO) { 11890 Object O = getObject(UO->getSubExpr(), true); 11891 if (!O) 11892 return VisitExpr(UO); 11893 11894 notePreMod(O, UO); 11895 Visit(UO->getSubExpr()); 11896 notePostMod(O, UO, UK_ModAsSideEffect); 11897 } 11898 11899 /// Don't visit the RHS of '&&' or '||' if it might not be evaluated. 11900 void VisitBinLOr(BinaryOperator *BO) { 11901 // The side-effects of the LHS of an '&&' are sequenced before the 11902 // value computation of the RHS, and hence before the value computation 11903 // of the '&&' itself, unless the LHS evaluates to zero. We treat them 11904 // as if they were unconditionally sequenced. 11905 EvaluationTracker Eval(*this); 11906 { 11907 SequencedSubexpression Sequenced(*this); 11908 Visit(BO->getLHS()); 11909 } 11910 11911 bool Result; 11912 if (Eval.evaluate(BO->getLHS(), Result)) { 11913 if (!Result) 11914 Visit(BO->getRHS()); 11915 } else { 11916 // Check for unsequenced operations in the RHS, treating it as an 11917 // entirely separate evaluation. 11918 // 11919 // FIXME: If there are operations in the RHS which are unsequenced 11920 // with respect to operations outside the RHS, and those operations 11921 // are unconditionally evaluated, diagnose them. 11922 WorkList.push_back(BO->getRHS()); 11923 } 11924 } 11925 void VisitBinLAnd(BinaryOperator *BO) { 11926 EvaluationTracker Eval(*this); 11927 { 11928 SequencedSubexpression Sequenced(*this); 11929 Visit(BO->getLHS()); 11930 } 11931 11932 bool Result; 11933 if (Eval.evaluate(BO->getLHS(), Result)) { 11934 if (Result) 11935 Visit(BO->getRHS()); 11936 } else { 11937 WorkList.push_back(BO->getRHS()); 11938 } 11939 } 11940 11941 // Only visit the condition, unless we can be sure which subexpression will 11942 // be chosen. 
11943 void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) { 11944 EvaluationTracker Eval(*this); 11945 { 11946 SequencedSubexpression Sequenced(*this); 11947 Visit(CO->getCond()); 11948 } 11949 11950 bool Result; 11951 if (Eval.evaluate(CO->getCond(), Result)) 11952 Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr()); 11953 else { 11954 WorkList.push_back(CO->getTrueExpr()); 11955 WorkList.push_back(CO->getFalseExpr()); 11956 } 11957 } 11958 11959 void VisitCallExpr(CallExpr *CE) { 11960 // C++11 [intro.execution]p15: 11961 // When calling a function [...], every value computation and side effect 11962 // associated with any argument expression, or with the postfix expression 11963 // designating the called function, is sequenced before execution of every 11964 // expression or statement in the body of the function [and thus before 11965 // the value computation of its result]. 11966 SequencedSubexpression Sequenced(*this); 11967 Base::VisitCallExpr(CE); 11968 11969 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 11970 } 11971 11972 void VisitCXXConstructExpr(CXXConstructExpr *CCE) { 11973 // This is a call, so all subexpressions are sequenced before the result. 11974 SequencedSubexpression Sequenced(*this); 11975 11976 if (!CCE->isListInitialization()) 11977 return VisitExpr(CCE); 11978 11979 // In C++11, list initializations are sequenced. 11980 SmallVector<SequenceTree::Seq, 32> Elts; 11981 SequenceTree::Seq Parent = Region; 11982 for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(), 11983 E = CCE->arg_end(); 11984 I != E; ++I) { 11985 Region = Tree.allocate(Parent); 11986 Elts.push_back(Region); 11987 Visit(*I); 11988 } 11989 11990 // Forget that the initializers are sequenced. 
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }

  // Braced-init lists have sequenced element initializers in C++11; in
  // earlier modes they are treated like any other expression.
  void VisitInitListExpr(InitListExpr *ILE) {
    if (!SemaRef.getLangOpts().CPlusPlus11)
      return VisitExpr(ILE);

    // In C++11, list initializations are sequenced.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
      Expr *E = ILE->getInit(I);
      if (!E) continue;
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(E);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }
};

} // namespace

/// Diagnose unsequenced modifications (C11 6.5p2, C++11 [intro.execution]).
/// The checker may queue conditionally-evaluated subexpressions (e.g. the RHS
/// of && / ||) onto WorkList; each is then checked as a separate evaluation.
void Sema::CheckUnsequencedOperations(Expr *E) {
  SmallVector<Expr *, 8> WorkList;
  WorkList.push_back(E);
  while (!WorkList.empty()) {
    Expr *Item = WorkList.pop_back_val();
    // SequenceChecker runs in its constructor and may push more work.
    SequenceChecker(*this, Item, WorkList);
  }
}

/// Run the whole-expression checks that apply once an expression is complete:
/// implicit conversion warnings, unsequenced-operation checks (skipped while
/// instantiation-dependent), integer overflow (skipped for constexpr /
/// value-dependent expressions), and pending misaligned-member diagnostics.
void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
                              bool IsConstexpr) {
  CheckImplicitConversions(E, CheckLoc);
  if (!E->isInstantiationDependent())
    CheckUnsequencedOperations(E);
  if (!IsConstexpr && !E->isValueDependent())
    CheckForIntOverflow(E);
  DiagnoseMisalignedMembers();
}

/// Warn when a bit-field initializer truncates the stored value.
void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
                                       FieldDecl *BitField,
                                       Expr *Init) {
  // Result deliberately ignored: we only want the diagnostic side effect.
  (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
}

/// Recursively diagnose use of the [*] array size modifier in a function
/// *definition* (only allowed in declarations); walks through pointers,
/// references, parens, and element types of variably-modified types.
static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
                                         SourceLocation Loc) {
  if (!PType->isVariablyModifiedType())
    return;
  if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
    diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ReferenceTy =
          dyn_cast<ReferenceType>(PType)) {
    diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
    diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
    return;
  }

  const ArrayType *AT = S.Context.getAsArrayType(PType);
  if (!AT)
    return;

  // Recurse into the element type unless this level is itself a [*] array.
  if (AT->getSizeModifier() != ArrayType::Star) {
    diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
    return;
  }

  S.Diag(Loc, diag::err_array_star_in_function_definition);
}

/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function. This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                    bool CheckParameterNames) {
  // Returns true if any parameter was marked invalid.
  bool HasInvalidParm = false;
  for (ParmVarDecl *Param : Parameters) {
    // C99 6.7.5.3p4: the parameters in a parameter type list in a
    // function declarator that is part of a function definition of
    // that function shall not have incomplete type.
    //
    // This is also C++ [dcl.fct]p6.
    if (!Param->isInvalidDecl() &&
        RequireCompleteType(Param->getLocation(), Param->getType(),
                            diag::err_typecheck_decl_incomplete_type)) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
    }

    // C99 6.9.1p5: If the declarator includes a parameter type list, the
    // declaration of each parameter shall include an identifier.
    // Unnamed parameters are an error in a C function definition (but fine
    // in C++, and fine for implicitly-generated parameters).
    if (CheckParameterNames &&
        Param->getIdentifier() == nullptr &&
        !Param->isImplicit() &&
        !getLangOpts().CPlusPlus)
      Diag(Param->getLocation(), diag::err_parameter_name_omitted);

    // C99 6.7.5.3p12:
    //   If the function declarator is not part of a definition of that
    //   function, parameters may have incomplete type and may use the [*]
    //   notation in their sequences of declarator specifiers to specify
    //   variable length array types.
    QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point the '[*]' if source-location
    // information is added for it.
    diagnoseArrayStarInParamType(*this, PType, Param->getLocation());

    // If the parameter is a c++ class type and it has to be destructed in the
    // callee function, declare the destructor so that it can be called by the
    // callee function. Do not perform any direct access check on the dtor here.
    if (!Param->isInvalidDecl()) {
      if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
        if (!ClassDecl->isInvalidDecl() &&
            !ClassDecl->hasIrrelevantDestructor() &&
            !ClassDecl->isDependentContext() &&
            ClassDecl->isParamDestroyedInCallee()) {
          CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
          MarkFunctionReferenced(Param->getLocation(), Destructor);
          DiagnoseUseOfDecl(Destructor, Param->getLocation());
        }
      }
    }

    // Parameters with the pass_object_size attribute only need to be marked
    // constant at function definitions. Because we lack information about
    // whether we're on a declaration or definition when we're instantiating the
    // attribute, we need to check for constness here.
    if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
      if (!Param->getType().isConstQualified())
        Diag(Param->getLocation(), diag::err_attribute_pointers_only)
            << Attr->getSpelling() << 1;
  }

  return HasInvalidParm;
}

/// A helper function to get the alignment of a Decl referred to by DeclRefExpr
/// or MemberExpr.
///
/// Falls back to \p TypeAlign when \p E is neither a DeclRefExpr nor a
/// MemberExpr; a declaration's actual alignment can differ from its type's
/// (e.g. via alignment attributes).
static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign,
                              ASTContext &Context) {
  if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
    return Context.getDeclAlign(DRE->getDecl());

  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return Context.getDeclAlign(ME->getMemberDecl());

  return TypeAlign;
}

/// CheckCastAlign - Implements -Wcast-align, which warns when a
/// pointer cast increases the alignment requirements.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
  // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast_align (as is the default).
  if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
    return;

  // Ignore dependent types.
  if (T->isDependentType() || Op->getType()->isDependentType())
    return;

  // Require that the destination be a pointer type.
  const PointerType *DestPtr = T->getAs<PointerType>();
  if (!DestPtr) return;

  // If the destination has alignment 1, we're done.
  QualType DestPointee = DestPtr->getPointeeType();
  if (DestPointee->isIncompleteType()) return;
  CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
  if (DestAlign.isOne()) return;

  // Require that the source be a pointer type.
  const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
  if (!SrcPtr) return;
  QualType SrcPointee = SrcPtr->getPointeeType();

  // Whitelist casts from cv void*. We already implicitly
  // whitelisted casts to cv void*, since they have alignment 1.
  // Also whitelist casts involving incomplete types, which implicitly
  // includes 'void'.
  if (SrcPointee->isIncompleteType()) return;

  CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee);

  // The underlying declaration may carry stronger alignment than its type
  // suggests; account for &decl and array-decay sources.
  if (auto *CE = dyn_cast<CastExpr>(Op)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay)
      SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context);
  } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) {
    if (UO->getOpcode() == UO_AddrOf)
      SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context);
  }

  // Only warn when the cast *increases* the alignment requirement.
  if (SrcAlign >= DestAlign) return;

  Diag(TRange.getBegin(), diag::warn_cast_align)
    << Op->getType() << T
    << static_cast<unsigned>(SrcAlign.getQuantity())
    << static_cast<unsigned>(DestAlign.getQuantity())
    << TRange << Op->getSourceRange();
}

/// Check whether this array fits the idiom of a size-one tail padded
/// array member of a struct.
///
/// We avoid emitting out-of-bounds access warnings for such arrays as they are
/// commonly used to emulate flexible arrays in C89 code.
static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size,
                                    const NamedDecl *ND) {
  if (Size != 1 || !ND) return false;

  const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
  if (!FD) return false;

  // Don't consider sizes resulting from macro expansions or template argument
  // substitution to form C89 tail-padded arrays.

  TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
  while (TInfo) {
    TypeLoc TL = TInfo->getTypeLoc();
    // Look through typedefs.
    if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) {
      const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
      TInfo = TDL->getTypeSourceInfo();
      continue;
    }
    // Require the '1' to be a plain literal written outside a macro.
    if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
      const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
      if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
        return false;
    }
    break;
  }

  // The idiom only applies to the last field of a non-union record (and for
  // C++, only to standard-layout classes).
  const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
  if (!RD) return false;
  if (RD->isUnion()) return false;
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!CRD->isStandardLayout()) return false;
  }

  // See if this is the last field decl in the record.
  const Decl *D = FD;
  while ((D = D->getNextDeclInContext()))
    if (isa<FieldDecl>(D))
      return false;
  return true;
}

/// Warn about out-of-bounds constant indexes into constant-size arrays.
/// \p ASE is the subscript expression when this came from array indexing
/// (null for raw pointer arithmetic); \p AllowOnePastEnd permits the
/// one-past-the-end address; \p IndexNegated means the caller already
/// negated the index (e.g. for 'p - n').
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                            const ArraySubscriptExpr *ASE,
                            bool AllowOnePastEnd, bool IndexNegated) {
  IndexExpr = IndexExpr->IgnoreParenImpCasts();
  if (IndexExpr->isValueDependent())
    return;

  // Element type as seen by the pointer arithmetic (before stripping casts).
  const Type *EffectiveType =
      BaseExpr->getType()->getPointeeOrArrayElementType();
  BaseExpr = BaseExpr->IgnoreParenCasts();
  const ConstantArrayType *ArrayTy =
      Context.getAsConstantArrayType(BaseExpr->getType());
  if (!ArrayTy)
    return;

  llvm::APSInt index;
  if (!IndexExpr->EvaluateAsInt(index, Context, Expr::SE_AllowSideEffects))
    return;
  if (IndexNegated)
    index = -index;

  const NamedDecl *ND = nullptr;
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
    ND = DRE->getDecl();
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
    ND = ME->getMemberDecl();

  if (index.isUnsigned() || !index.isNegative()) {
    llvm::APInt size = ArrayTy->getSize();
    if (!size.isStrictlyPositive())
      return;

    const Type *BaseType = BaseExpr->getType()->getPointeeOrArrayElementType();
    if (BaseType != EffectiveType) {
      // Make sure we're comparing apples to apples when comparing index to size
      uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
      uint64_t array_typesize = Context.getTypeSize(BaseType);
      // Handle ptrarith_typesize being zero, such as when casting to void*
      if (!ptrarith_typesize) ptrarith_typesize = 1;
      if (ptrarith_typesize != array_typesize) {
        // There's a cast to a different size type involved
        uint64_t ratio = array_typesize / ptrarith_typesize;
        // TODO: Be smarter about handling cases where array_typesize is not a
        // multiple of ptrarith_typesize
        if (ptrarith_typesize * ratio == array_typesize)
          size *= llvm::APInt(size.getBitWidth(), ratio);
      }
    }

    // Widen whichever of index/size is narrower so the comparison below is
    // done at a common bit width (both are non-negative here).
    if (size.getBitWidth() > index.getBitWidth())
      index = index.zext(size.getBitWidth());
    else if (size.getBitWidth() < index.getBitWidth())
      size = size.zext(index.getBitWidth());

    // For array subscripting the index must be less than size, but for pointer
    // arithmetic also allow the index (offset) to be equal to size since
    // computing the next address after the end of the array is legal and
    // commonly done e.g. in C++ iterators and range-based for loops.
    if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
      return;

    // Also don't warn for arrays of size 1 which are members of some
    // structure. These are often used to approximate flexible arrays in C89
    // code.
    if (IsTailPaddedMemberArray(*this, size, ND))
      return;

    // Suppress the warning if the subscript expression (as identified by the
    // ']' location) and the index expression are both from macro expansions
    // within a system header.
    if (ASE) {
      SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
          ASE->getRBracketLoc());
      if (SourceMgr.isInSystemHeader(RBracketLoc)) {
        SourceLocation IndexLoc =
            SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
        if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
          return;
      }
    }

    // Pick the subscript-specific diagnostic when this came from [].
    unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds;
    if (ASE)
      DiagID = diag::warn_array_index_exceeds_bounds;

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << index.toString(10, true)
                                      << size.toString(10, true)
                                      << (unsigned)size.getLimitedValue(~0U)
                                      << IndexExpr->getSourceRange());
  } else {
    // Negative index: always out of bounds for subscripting; for pointer
    // arithmetic, report the magnitude of the negative offset.
    unsigned DiagID = diag::warn_array_index_precedes_bounds;
    if (!ASE) {
      DiagID = diag::warn_ptr_arith_precedes_bounds;
      if (index.isNegative()) index = -index;
    }

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << index.toString(10, true)
                                      << IndexExpr->getSourceRange());
  }

  if (!ND) {
    // Try harder to find a NamedDecl to point at in the note.
    // Peel off nested subscripts to reach the underlying array variable/member.
    while (const ArraySubscriptExpr *ASE =
               dyn_cast<ArraySubscriptExpr>(BaseExpr))
      BaseExpr = ASE->getBase()->IgnoreParenCasts();
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
      ND = DRE->getDecl();
    if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
      ND = ME->getMemberDecl();
  }

  if (ND)
    DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                        PDiag(diag::note_array_index_out_of_bounds)
                            << ND->getDeclName());
}

/// Walk an expression and run the constant-index bounds check on every array
/// access found inside it. AllowOnePastEnd tracks &/* nesting: taking an
/// address permits the one-past-the-end element, dereferencing revokes it.
void Sema::CheckArrayAccess(const Expr *expr) {
  int AllowOnePastEnd = 0;
  while (expr) {
    expr = expr->IgnoreParenImpCasts();
    switch (expr->getStmtClass()) {
      case Stmt::ArraySubscriptExprClass: {
        const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
        CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
                         AllowOnePastEnd > 0);
        // Keep walking into the base for multi-dimensional accesses.
        expr = ASE->getBase();
        break;
      }
      case Stmt::MemberExprClass: {
        expr = cast<MemberExpr>(expr)->getBase();
        break;
      }
      case Stmt::OMPArraySectionExprClass: {
        const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
        if (ASE->getLowerBound())
          CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
                           /*ASE=*/nullptr, AllowOnePastEnd > 0);
        return;
      }
      case Stmt::UnaryOperatorClass: {
        // Only unwrap the * and & unary operators
        const UnaryOperator *UO = cast<UnaryOperator>(expr);
        expr = UO->getSubExpr();
        switch (UO->getOpcode()) {
          case UO_AddrOf:
            AllowOnePastEnd++;
            break;
          case UO_Deref:
            AllowOnePastEnd--;
            break;
          default:
            return;
        }
        break;
      }
      case Stmt::ConditionalOperatorClass: {
        const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
        if (const Expr *lhs = cond->getLHS())
          CheckArrayAccess(lhs);
        if (const Expr *rhs = cond->getRHS())
          CheckArrayAccess(rhs);
        return;
      }
      case Stmt::CXXOperatorCallExprClass: {
        // Check each argument of an overloaded operator call independently.
        const auto *OCE = cast<CXXOperatorCallExpr>(expr);
        for (const auto *Arg : OCE->arguments())
          CheckArrayAccess(Arg);
        return;
      }
      default:
        return;
    }
  }
}

//===--- CHECK: Objective-C retain cycles ----------------------------------//

namespace {

// Describes the variable that strongly owns the receiver of a message (or the
// assigned-to entity), i.e. the thing a captured block would form a retain
// cycle with.
struct RetainCycleOwner {
  VarDecl *Variable = nullptr;  // the strongly-owning variable, if found
  SourceRange Range;            // source range to highlight in the note
  SourceLocation Loc;           // location for the ownership note
  bool Indirect = false;        // ownership is through an ivar/property

  RetainCycleOwner() = default;

  void setLocsFrom(Expr *e) {
    Loc = e->getExprLoc();
    Range = e->getSourceRange();
  }
};

} // namespace

/// Consider whether capturing the given variable can possibly lead to
/// a retain cycle.
static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
  // In ARC, it's captured strongly iff the variable has __strong
  // lifetime. In MRR, it's captured strongly if the variable is
  // __block and has an appropriate type.
  if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
    return false;

  owner.Variable = var;
  if (ref)
    owner.setLocsFrom(ref);
  return true;
}

/// Walk \p e looking for a variable that strongly owns it (directly, or
/// through strong ivars / retaining properties). Returns true and fills in
/// \p owner when one is found.
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      // Only look through value-preserving casts.
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      ObjCIvarDecl *ivar = ref->getDecl();
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                          ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      // The property must retain its value (or be backed by a strong ivar)
      // for it to establish ownership.
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
          return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      // Continue the walk on the receiver expression.
      e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
                              ->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}

namespace {

// Searches a block body for an expression that captures Variable; also
// notices 'Variable = 0/nil' assignments inside the block, which break the
// would-be cycle.
struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
  ASTContext &Context;
  VarDecl *Variable;
  Expr *Capturer = nullptr;       // first expression found capturing Variable
  bool VarWillBeReased = false;   // Variable is reassigned to zero in the block

  FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
      : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
        Context(Context), Variable(variable) {}

  void VisitDeclRefExpr(DeclRefExpr *ref) {
    if (ref->getDecl() == Variable && !Capturer)
      Capturer = ref;
  }

  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
    if (Capturer) return;
    Visit(ref->getBase());
    if (Capturer && ref->isFreeIvar())
      Capturer = ref;
  }

  void VisitBlockExpr(BlockExpr *block) {
    // Look inside nested blocks
    if (block->getBlockDecl()->capturesVariable(Variable))
      Visit(block->getBlockDecl()->getBody());
  }

  void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
    if (Capturer) return;
    if (OVE->getSourceExpr())
      Visit(OVE->getSourceExpr());
  }

  // Record whether the block reassigns Variable to a literal zero/nil, which
  // means the capture cannot keep the cycle alive.
  void VisitBinaryOperator(BinaryOperator *BinOp) {
    if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
      return;
    Expr *LHS = BinOp->getLHS();
    if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
      if (DRE->getDecl() != Variable)
        return;
      if (Expr *RHS = BinOp->getRHS()) {
        RHS = RHS->IgnoreParenCasts();
        llvm::APSInt Value;
        VarWillBeReased =
            (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0);
      }
    }
  }
};

} // namespace

/// Check whether the given argument is a block which captures a
/// variable.
static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  e = e->IgnoreParenCasts();

  // Look through [^{...} copy] and Block_copy(^{...}).
  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
    Selector Cmd = ME->getSelector();
    if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
      e = ME->getInstanceReceiver();
      if (!e)
        return nullptr;
      e = e->IgnoreParenCasts();
    }
  } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
    if (CE->getNumArgs() == 1) {
      FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
      if (Fn) {
        const IdentifierInfo *FnI = Fn->getIdentifier();
        if (FnI && FnI->isStr("_Block_copy")) {
          e = CE->getArg(0)->IgnoreParenCasts();
        }
      }
    }
  }

  BlockExpr *block = dyn_cast<BlockExpr>(e);
  if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
    return nullptr;

  FindCaptureVisitor visitor(S.Context, owner.Variable);
  visitor.Visit(block->getBlockDecl()->getBody());
  // A capture that is later zeroed out inside the block is not a cycle.
  return visitor.VarWillBeReased ?
      nullptr : visitor.Capturer;
}

// Emit the retain-cycle warning plus a note pointing at the owner.
static void diagnoseRetainCycle(Sema &S, Expr *capturer,
                                RetainCycleOwner &owner) {
  assert(capturer);
  assert(owner.Variable && owner.Loc.isValid());

  S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
    << owner.Variable << capturer->getSourceRange();
  S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
    << owner.Indirect << owner.Range;
}

/// Check for a keyword selector that starts with the word 'add' or
/// 'set'.
static bool isSetterLikeSelector(Selector sel) {
  if (sel.isUnarySelector()) return false;

  // Skip any leading underscores, then require a 'set'/'add' prefix followed
  // by a non-lowercase character (so e.g. 'settle' doesn't match).
  StringRef str = sel.getNameForSlot(0);
  while (!str.empty() && str.front() == '_') str = str.substr(1);
  if (str.startswith("set"))
    str = str.substr(3);
  else if (str.startswith("add")) {
    // Specially whitelist 'addOperationWithBlock:'.
    if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
      return false;
    str = str.substr(3);
  }
  else
    return false;

  if (str.empty()) return true;
  return !isLowercase(str.front());
}

/// If \p Message is a mutating NSMutableArray message, return the index of
/// the argument that is stored into the collection; otherwise None.
static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
                                                    ObjCMessageExpr *Message) {
  bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
                                            Message->getReceiverInterface(),
                                            NSAPI::ClassId_NSMutableArray);
  if (!IsMutableArray) {
    return None;
  }

  Selector Sel = Message->getSelector();

  Optional<NSAPI::NSArrayMethodKind> MKOpt =
    S.NSAPIObj->getNSArrayMethodKind(Sel);
  if (!MKOpt) {
    return None;
  }

  NSAPI::NSArrayMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableArr_addObject:
    case NSAPI::NSMutableArr_insertObjectAtIndex:
    case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
      return 0;
    case NSAPI::NSMutableArr_replaceObjectAtIndex:
      return 1;
12698 default: 12699 return None; 12700 } 12701 12702 return None; 12703 } 12704 12705 static 12706 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 12707 ObjCMessageExpr *Message) { 12708 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 12709 Message->getReceiverInterface(), 12710 NSAPI::ClassId_NSMutableDictionary); 12711 if (!IsMutableDictionary) { 12712 return None; 12713 } 12714 12715 Selector Sel = Message->getSelector(); 12716 12717 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 12718 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 12719 if (!MKOpt) { 12720 return None; 12721 } 12722 12723 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 12724 12725 switch (MK) { 12726 case NSAPI::NSMutableDict_setObjectForKey: 12727 case NSAPI::NSMutableDict_setValueForKey: 12728 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 12729 return 0; 12730 12731 default: 12732 return None; 12733 } 12734 12735 return None; 12736 } 12737 12738 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 12739 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 12740 Message->getReceiverInterface(), 12741 NSAPI::ClassId_NSMutableSet); 12742 12743 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 12744 Message->getReceiverInterface(), 12745 NSAPI::ClassId_NSMutableOrderedSet); 12746 if (!IsMutableSet && !IsMutableOrderedSet) { 12747 return None; 12748 } 12749 12750 Selector Sel = Message->getSelector(); 12751 12752 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 12753 if (!MKOpt) { 12754 return None; 12755 } 12756 12757 NSAPI::NSSetMethodKind MK = *MKOpt; 12758 12759 switch (MK) { 12760 case NSAPI::NSMutableSet_addObject: 12761 case NSAPI::NSOrderedSet_setObjectAtIndex: 12762 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 12763 case NSAPI::NSOrderedSet_insertObjectAtIndex: 12764 return 0; 12765 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 12766 return 1; 12767 } 12768 12769 return None; 
}

/// Warn when an Objective-C mutable container is asked to store itself
/// (e.g. [array addObject:array]), which creates a retain cycle and usually
/// infinite recursion in -description/-hash.
void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  if (!Message->isInstanceMessage()) {
    return;
  }

  Optional<int> ArgOpt;

  // Find which argument (if any) is inserted into the receiver container.
  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    // [super addObject:self] — receiver and argument are the same object.
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    // Match receiver and argument when both are the same variable, or both
    // are the same ivar.
    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}

/// Check a message send to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
      return;
  } else {
    // Messages to 'super' are owned by the current method's self.
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    owner.Variable = getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(*this, capturer, owner);
    }
  }
}

/// Check a property assign to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
  RetainCycleOwner owner;
  if (!findRetainCycleOwner(*this, receiver, owner))
    return;

  if (Expr *capturer = findCapturingExpr(*this, argument, owner))
    diagnoseRetainCycle(*this, capturer, owner);
}

/// Check a variable initializer (e.g. __strong block ivar/local) for a
/// self-capturing block that would form a retain cycle.
void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
  RetainCycleOwner Owner;
  if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
    return;

  // Because we don't have an expression for the variable, we have to set the
  // location explicitly here.
  Owner.Loc = Var->getLocation();
  Owner.Range = Var->getSourceRange();

  if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
    diagnoseRetainCycle(*this, Capturer, Owner);
}

/// Warn when an Objective-C object literal is assigned into a weak (or
/// unsafe) location, since the literal may be deallocated immediately.
/// Returns true if a diagnostic was emitted.
static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
                                     Expr *RHS, bool isProperty) {
  // Check if RHS is an Objective-C object literal, which also can get
  // immediately zapped in a weak reference. Note that we explicitly
  // allow ObjCStringLiterals, since those are designed to never really die.
  RHS = RHS->IgnoreParenImpCasts();

  // This enum needs to match with the 'select' in
  // warn_objc_arc_literal_assign (off-by-1).
  Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
  if (Kind == Sema::LK_String || Kind == Sema::LK_None)
    return false;

  S.Diag(Loc, diag::warn_arc_literal_assign)
    << (unsigned) Kind
    << (isProperty ? 0 : 1)
    << RHS->getSourceRange();

  return true;
}

/// Warn about assigning a +1 (ARC-consumed) object or a literal into a weak
/// or unsafe-unretained destination. Returns true if a diagnostic was
/// emitted.
static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
                                    Qualifiers::ObjCLifetime LT,
                                    Expr *RHS, bool isProperty) {
  // Strip off any implicit cast added to get to the one ARC-specific.
12916 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 12917 if (cast->getCastKind() == CK_ARCConsumeObject) { 12918 S.Diag(Loc, diag::warn_arc_retained_assign) 12919 << (LT == Qualifiers::OCL_ExplicitNone) 12920 << (isProperty ? 0 : 1) 12921 << RHS->getSourceRange(); 12922 return true; 12923 } 12924 RHS = cast->getSubExpr(); 12925 } 12926 12927 if (LT == Qualifiers::OCL_Weak && 12928 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 12929 return true; 12930 12931 return false; 12932 } 12933 12934 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 12935 QualType LHS, Expr *RHS) { 12936 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 12937 12938 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 12939 return false; 12940 12941 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 12942 return true; 12943 12944 return false; 12945 } 12946 12947 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 12948 Expr *LHS, Expr *RHS) { 12949 QualType LHSType; 12950 // PropertyRef on LHS type need be directly obtained from 12951 // its declaration as it has a PseudoType. 12952 ObjCPropertyRefExpr *PRE 12953 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 12954 if (PRE && !PRE->isImplicitProperty()) { 12955 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 12956 if (PD) 12957 LHSType = PD->getType(); 12958 } 12959 12960 if (LHSType.isNull()) 12961 LHSType = LHS->getType(); 12962 12963 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 12964 12965 if (LT == Qualifiers::OCL_Weak) { 12966 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 12967 getCurFunction()->markSafeWeakUse(LHS); 12968 } 12969 12970 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 12971 return; 12972 12973 // FIXME. Check for other life times. 
12974 if (LT != Qualifiers::OCL_None) 12975 return; 12976 12977 if (PRE) { 12978 if (PRE->isImplicitProperty()) 12979 return; 12980 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 12981 if (!PD) 12982 return; 12983 12984 unsigned Attributes = PD->getPropertyAttributes(); 12985 if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) { 12986 // when 'assign' attribute was not explicitly specified 12987 // by user, ignore it and rely on property type itself 12988 // for lifetime info. 12989 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 12990 if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) && 12991 LHSType->isObjCRetainableType()) 12992 return; 12993 12994 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 12995 if (cast->getCastKind() == CK_ARCConsumeObject) { 12996 Diag(Loc, diag::warn_arc_retained_property_assign) 12997 << RHS->getSourceRange(); 12998 return; 12999 } 13000 RHS = cast->getSubExpr(); 13001 } 13002 } 13003 else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) { 13004 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 13005 return; 13006 } 13007 } 13008 } 13009 13010 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 13011 13012 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 13013 SourceLocation StmtLoc, 13014 const NullStmt *Body) { 13015 // Do not warn if the body is a macro that expands to nothing, e.g: 13016 // 13017 // #define CALL(x) 13018 // if (condition) 13019 // CALL(0); 13020 if (Body->hasLeadingEmptyMacro()) 13021 return false; 13022 13023 // Get line numbers of statement and body. 
13024 bool StmtLineInvalid; 13025 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 13026 &StmtLineInvalid); 13027 if (StmtLineInvalid) 13028 return false; 13029 13030 bool BodyLineInvalid; 13031 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 13032 &BodyLineInvalid); 13033 if (BodyLineInvalid) 13034 return false; 13035 13036 // Warn if null statement and body are on the same line. 13037 if (StmtLine != BodyLine) 13038 return false; 13039 13040 return true; 13041 } 13042 13043 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 13044 const Stmt *Body, 13045 unsigned DiagID) { 13046 // Since this is a syntactic check, don't emit diagnostic for template 13047 // instantiations, this just adds noise. 13048 if (CurrentInstantiationScope) 13049 return; 13050 13051 // The body should be a null statement. 13052 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 13053 if (!NBody) 13054 return; 13055 13056 // Do the usual checks. 13057 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 13058 return; 13059 13060 Diag(NBody->getSemiLoc(), DiagID); 13061 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 13062 } 13063 13064 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 13065 const Stmt *PossibleBody) { 13066 assert(!CurrentInstantiationScope); // Ensured by caller 13067 13068 SourceLocation StmtLoc; 13069 const Stmt *Body; 13070 unsigned DiagID; 13071 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 13072 StmtLoc = FS->getRParenLoc(); 13073 Body = FS->getBody(); 13074 DiagID = diag::warn_empty_for_body; 13075 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 13076 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 13077 Body = WS->getBody(); 13078 DiagID = diag::warn_empty_while_body; 13079 } else 13080 return; // Neither `for' nor `while'. 13081 13082 // The body should be a null statement. 
13083 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 13084 if (!NBody) 13085 return; 13086 13087 // Skip expensive checks if diagnostic is disabled. 13088 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 13089 return; 13090 13091 // Do the usual checks. 13092 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 13093 return; 13094 13095 // `for(...);' and `while(...);' are popular idioms, so in order to keep 13096 // noise level low, emit diagnostics only if for/while is followed by a 13097 // CompoundStmt, e.g.: 13098 // for (int i = 0; i < n; i++); 13099 // { 13100 // a(i); 13101 // } 13102 // or if for/while is followed by a statement with more indentation 13103 // than for/while itself: 13104 // for (int i = 0; i < n; i++); 13105 // a(i); 13106 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 13107 if (!ProbableTypo) { 13108 bool BodyColInvalid; 13109 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 13110 PossibleBody->getBeginLoc(), &BodyColInvalid); 13111 if (BodyColInvalid) 13112 return; 13113 13114 bool StmtColInvalid; 13115 unsigned StmtCol = 13116 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 13117 if (StmtColInvalid) 13118 return; 13119 13120 if (BodyCol > StmtCol) 13121 ProbableTypo = true; 13122 } 13123 13124 if (ProbableTypo) { 13125 Diag(NBody->getSemiLoc(), DiagID); 13126 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 13127 } 13128 } 13129 13130 //===--- CHECK: Warn on self move with std::move. -------------------------===// 13131 13132 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 13133 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 13134 SourceLocation OpLoc) { 13135 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 13136 return; 13137 13138 if (inTemplateInstantiation()) 13139 return; 13140 13141 // Strip parens and casts away. 
13142 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 13143 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 13144 13145 // Check for a call expression 13146 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 13147 if (!CE || CE->getNumArgs() != 1) 13148 return; 13149 13150 // Check for a call to std::move 13151 if (!CE->isCallToStdMove()) 13152 return; 13153 13154 // Get argument from std::move 13155 RHSExpr = CE->getArg(0); 13156 13157 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 13158 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 13159 13160 // Two DeclRefExpr's, check that the decls are the same. 13161 if (LHSDeclRef && RHSDeclRef) { 13162 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 13163 return; 13164 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 13165 RHSDeclRef->getDecl()->getCanonicalDecl()) 13166 return; 13167 13168 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 13169 << LHSExpr->getSourceRange() 13170 << RHSExpr->getSourceRange(); 13171 return; 13172 } 13173 13174 // Member variables require a different approach to check for self moves. 13175 // MemberExpr's are the same if every nested MemberExpr refers to the same 13176 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 13177 // the base Expr's are CXXThisExpr's. 
13178 const Expr *LHSBase = LHSExpr; 13179 const Expr *RHSBase = RHSExpr; 13180 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 13181 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 13182 if (!LHSME || !RHSME) 13183 return; 13184 13185 while (LHSME && RHSME) { 13186 if (LHSME->getMemberDecl()->getCanonicalDecl() != 13187 RHSME->getMemberDecl()->getCanonicalDecl()) 13188 return; 13189 13190 LHSBase = LHSME->getBase(); 13191 RHSBase = RHSME->getBase(); 13192 LHSME = dyn_cast<MemberExpr>(LHSBase); 13193 RHSME = dyn_cast<MemberExpr>(RHSBase); 13194 } 13195 13196 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 13197 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 13198 if (LHSDeclRef && RHSDeclRef) { 13199 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 13200 return; 13201 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 13202 RHSDeclRef->getDecl()->getCanonicalDecl()) 13203 return; 13204 13205 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 13206 << LHSExpr->getSourceRange() 13207 << RHSExpr->getSourceRange(); 13208 return; 13209 } 13210 13211 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 13212 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 13213 << LHSExpr->getSourceRange() 13214 << RHSExpr->getSourceRange(); 13215 } 13216 13217 //===--- Layout compatibility ----------------------------------------------// 13218 13219 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 13220 13221 /// Check if two enumeration types are layout-compatible. 13222 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 13223 // C++11 [dcl.enum] p8: 13224 // Two enumeration types are layout-compatible if they have the same 13225 // underlying type. 13226 return ED1->isComplete() && ED2->isComplete() && 13227 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 13228 } 13229 13230 /// Check if two fields are layout-compatible. 
13231 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 13232 FieldDecl *Field2) { 13233 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 13234 return false; 13235 13236 if (Field1->isBitField() != Field2->isBitField()) 13237 return false; 13238 13239 if (Field1->isBitField()) { 13240 // Make sure that the bit-fields are the same length. 13241 unsigned Bits1 = Field1->getBitWidthValue(C); 13242 unsigned Bits2 = Field2->getBitWidthValue(C); 13243 13244 if (Bits1 != Bits2) 13245 return false; 13246 } 13247 13248 return true; 13249 } 13250 13251 /// Check if two standard-layout structs are layout-compatible. 13252 /// (C++11 [class.mem] p17) 13253 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 13254 RecordDecl *RD2) { 13255 // If both records are C++ classes, check that base classes match. 13256 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 13257 // If one of records is a CXXRecordDecl we are in C++ mode, 13258 // thus the other one is a CXXRecordDecl, too. 13259 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 13260 // Check number of base classes. 13261 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 13262 return false; 13263 13264 // Check the base classes. 13265 for (CXXRecordDecl::base_class_const_iterator 13266 Base1 = D1CXX->bases_begin(), 13267 BaseEnd1 = D1CXX->bases_end(), 13268 Base2 = D2CXX->bases_begin(); 13269 Base1 != BaseEnd1; 13270 ++Base1, ++Base2) { 13271 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 13272 return false; 13273 } 13274 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 13275 // If only RD2 is a C++ class, it should have zero base classes. 13276 if (D2CXX->getNumBases() > 0) 13277 return false; 13278 } 13279 13280 // Check the fields. 
13281 RecordDecl::field_iterator Field2 = RD2->field_begin(), 13282 Field2End = RD2->field_end(), 13283 Field1 = RD1->field_begin(), 13284 Field1End = RD1->field_end(); 13285 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 13286 if (!isLayoutCompatible(C, *Field1, *Field2)) 13287 return false; 13288 } 13289 if (Field1 != Field1End || Field2 != Field2End) 13290 return false; 13291 13292 return true; 13293 } 13294 13295 /// Check if two standard-layout unions are layout-compatible. 13296 /// (C++11 [class.mem] p18) 13297 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 13298 RecordDecl *RD2) { 13299 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 13300 for (auto *Field2 : RD2->fields()) 13301 UnmatchedFields.insert(Field2); 13302 13303 for (auto *Field1 : RD1->fields()) { 13304 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 13305 I = UnmatchedFields.begin(), 13306 E = UnmatchedFields.end(); 13307 13308 for ( ; I != E; ++I) { 13309 if (isLayoutCompatible(C, Field1, *I)) { 13310 bool Result = UnmatchedFields.erase(*I); 13311 (void) Result; 13312 assert(Result); 13313 break; 13314 } 13315 } 13316 if (I == E) 13317 return false; 13318 } 13319 13320 return UnmatchedFields.empty(); 13321 } 13322 13323 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 13324 RecordDecl *RD2) { 13325 if (RD1->isUnion() != RD2->isUnion()) 13326 return false; 13327 13328 if (RD1->isUnion()) 13329 return isLayoutCompatibleUnion(C, RD1, RD2); 13330 else 13331 return isLayoutCompatibleStruct(C, RD1, RD2); 13332 } 13333 13334 /// Check if two types are layout-compatible in C++11 sense. 13335 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 13336 if (T1.isNull() || T2.isNull()) 13337 return false; 13338 13339 // C++11 [basic.types] p11: 13340 // If two types T1 and T2 are the same type, then T1 and T2 are 13341 // layout-compatible types. 
13342 if (C.hasSameType(T1, T2)) 13343 return true; 13344 13345 T1 = T1.getCanonicalType().getUnqualifiedType(); 13346 T2 = T2.getCanonicalType().getUnqualifiedType(); 13347 13348 const Type::TypeClass TC1 = T1->getTypeClass(); 13349 const Type::TypeClass TC2 = T2->getTypeClass(); 13350 13351 if (TC1 != TC2) 13352 return false; 13353 13354 if (TC1 == Type::Enum) { 13355 return isLayoutCompatible(C, 13356 cast<EnumType>(T1)->getDecl(), 13357 cast<EnumType>(T2)->getDecl()); 13358 } else if (TC1 == Type::Record) { 13359 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 13360 return false; 13361 13362 return isLayoutCompatible(C, 13363 cast<RecordType>(T1)->getDecl(), 13364 cast<RecordType>(T2)->getDecl()); 13365 } 13366 13367 return false; 13368 } 13369 13370 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 13371 13372 /// Given a type tag expression find the type tag itself. 13373 /// 13374 /// \param TypeExpr Type tag expression, as it appears in user's code. 13375 /// 13376 /// \param VD Declaration of an identifier that appears in a type tag. 13377 /// 13378 /// \param MagicValue Type tag magic value. 
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue) {
  // Iteratively peel wrappers (parens, casts, &/* operators, constant
  // conditionals, comma operators) until we reach either a DeclRefExpr
  // (identifier tag, returned via *VD) or an IntegerLiteral (magic value,
  // returned via *MagicValue).
  while(true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      // Look through address-of and dereference; they don't change the tag.
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      // Magic values wider than 64 bits cannot be represented.
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      } else
        return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      // Only follow a conditional whose condition folds to a constant.
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      // For a comma expression the tag is the right-hand side.
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}

/// Retrieve the C type corresponding to type tag TypeExpr.
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
///        kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind,
    const Expr *TypeExpr, const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue,
                         Sema::TypeTagData> *MagicValues,
    bool &FoundWrongKind,
    Sema::TypeTagData &TypeInfo) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue))
    return false;

  if (VD) {
    // Identifier tag: the type information comes from the
    // type_tag_for_datatype attribute on the declaration.
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  // Integer tag: look it up among the registered magic values.
  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

/// Register (ArgumentKind, MagicValue) -> Type mapping used by
/// CheckArgumentWithTypeTag; the map is lazily allocated.
void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

/// Returns true if T1 and T2 are the "same" character type modulo the
/// target's plain-char signedness (e.g. 'signed char' vs plain 'char' on a
/// signed-char target).
static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar  && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar  && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}

/// Check a call site against an argument_with_type_tag / pointer_with_type_tag
/// attribute: the argument at ArgumentIdx must have the type associated with
/// the type tag passed at TypeTagIdx.
void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(),
                        FoundWrongKind, TypeInfo)) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(Context,
                                             Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  // For pointer attributes the registered type describes the pointee.
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    // Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else
    // Layout-compatible mode: accept any layout-compatible type, comparing
    // pointees for pointer attributes.
    if (IsPointerAttr)
      mismatch = !isLayoutCompatible(Context,
                                     ArgumentType->getPointeeType(),
                                     RequiredType->getPointeeType());
    else
      mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

/// Record a use of a packed member whose address was taken; the diagnostic
/// is deferred until DiagnoseMisalignedMembers (so later casts can retract
/// it via DiscardMisalignedMemberAddress).
void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

/// Flush all pending -Waddress-of-packed-member diagnostics.
void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    // Prefer the typedef name for anonymous records.
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

/// Retract a pending packed-member diagnostic when the address is converted
/// to a type (integer, or pointer with low-enough alignment requirement)
/// for which the misalignment is harmless.
void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto MA = std::find(MisalignedMembers.begin(), MisalignedMembers.end(),
                          MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

/// If E is a member access whose effective alignment was reduced by a
/// 'packed' attribute somewhere in the access chain, invoke Action with the
/// culprit field and the reduced alignment.
void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->getAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
       I++) {
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
  }

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset it but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit of this.
    // Intuitively, watching the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type) but some packed attribute in that chain has reduced the alignment.
    // It may happen that another packed structure increases it again. But if
    // we are here such increase has not been enough. So pointing the first
    // FieldDecl that either is packed or else its RecordDecl is,
    // seems reasonable.
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

/// Entry point for -Waddress-of-packed-member on the RHS of an assignment or
/// initialization: queue a deferred diagnostic for each offending access.
void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}