1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements extra semantic analysis beyond what is enforced 10 // by the C type system. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "clang/AST/APValue.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/Attr.h" 17 #include "clang/AST/AttrIterator.h" 18 #include "clang/AST/CharUnits.h" 19 #include "clang/AST/Decl.h" 20 #include "clang/AST/DeclBase.h" 21 #include "clang/AST/DeclCXX.h" 22 #include "clang/AST/DeclObjC.h" 23 #include "clang/AST/DeclarationName.h" 24 #include "clang/AST/EvaluatedExprVisitor.h" 25 #include "clang/AST/Expr.h" 26 #include "clang/AST/ExprCXX.h" 27 #include "clang/AST/ExprObjC.h" 28 #include "clang/AST/ExprOpenMP.h" 29 #include "clang/AST/FormatString.h" 30 #include "clang/AST/NSAPI.h" 31 #include "clang/AST/NonTrivialTypeVisitor.h" 32 #include "clang/AST/OperationKinds.h" 33 #include "clang/AST/RecordLayout.h" 34 #include "clang/AST/Stmt.h" 35 #include "clang/AST/TemplateBase.h" 36 #include "clang/AST/Type.h" 37 #include "clang/AST/TypeLoc.h" 38 #include "clang/AST/UnresolvedSet.h" 39 #include "clang/Basic/AddressSpaces.h" 40 #include "clang/Basic/CharInfo.h" 41 #include "clang/Basic/Diagnostic.h" 42 #include "clang/Basic/IdentifierTable.h" 43 #include "clang/Basic/LLVM.h" 44 #include "clang/Basic/LangOptions.h" 45 #include "clang/Basic/OpenCLOptions.h" 46 #include "clang/Basic/OperatorKinds.h" 47 #include "clang/Basic/PartialDiagnostic.h" 48 #include "clang/Basic/SourceLocation.h" 49 #include "clang/Basic/SourceManager.h" 50 #include "clang/Basic/Specifiers.h" 51 #include 
"clang/Basic/SyncScope.h" 52 #include "clang/Basic/TargetBuiltins.h" 53 #include "clang/Basic/TargetCXXABI.h" 54 #include "clang/Basic/TargetInfo.h" 55 #include "clang/Basic/TypeTraits.h" 56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. 57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/None.h" 71 #include "llvm/ADT/Optional.h" 72 #include "llvm/ADT/STLExtras.h" 73 #include "llvm/ADT/SmallBitVector.h" 74 #include "llvm/ADT/SmallPtrSet.h" 75 #include "llvm/ADT/SmallString.h" 76 #include "llvm/ADT/SmallVector.h" 77 #include "llvm/ADT/StringRef.h" 78 #include "llvm/ADT/StringSet.h" 79 #include "llvm/ADT/StringSwitch.h" 80 #include "llvm/ADT/Triple.h" 81 #include "llvm/Support/AtomicOrdering.h" 82 #include "llvm/Support/Casting.h" 83 #include "llvm/Support/Compiler.h" 84 #include "llvm/Support/ConvertUTF.h" 85 #include "llvm/Support/ErrorHandling.h" 86 #include "llvm/Support/Format.h" 87 #include "llvm/Support/Locale.h" 88 #include "llvm/Support/MathExtras.h" 89 #include "llvm/Support/SaveAndRestore.h" 90 #include "llvm/Support/raw_ostream.h" 91 #include <algorithm> 92 #include <bitset> 93 #include <cassert> 94 #include <cctype> 95 #include <cstddef> 96 #include <cstdint> 97 #include <functional> 98 #include <limits> 99 #include <string> 100 #include <tuple> 101 #include <utility> 102 103 using namespace clang; 104 using namespace sema; 105 106 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 107 unsigned ByteNo) const { 108 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 
109 Context.getTargetInfo()); 110 } 111 112 /// Checks that a call expression's argument count is at least the desired 113 /// number. This is useful when doing custom type-checking on a variadic 114 /// function. Returns true on error. 115 static bool checkArgCountAtLeast(Sema &S, CallExpr *Call, 116 unsigned MinArgCount) { 117 unsigned ArgCount = Call->getNumArgs(); 118 if (ArgCount >= MinArgCount) 119 return false; 120 121 return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args) 122 << 0 /*function call*/ << MinArgCount << ArgCount 123 << Call->getSourceRange(); 124 } 125 126 /// Checks that a call expression's argument count is the desired number. 127 /// This is useful when doing custom type-checking. Returns true on error. 128 static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) { 129 unsigned ArgCount = Call->getNumArgs(); 130 if (ArgCount == DesiredArgCount) 131 return false; 132 133 if (checkArgCountAtLeast(S, Call, DesiredArgCount)) 134 return true; 135 assert(ArgCount > DesiredArgCount && "should have diagnosed this"); 136 137 // Highlight all the excess arguments. 138 SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(), 139 Call->getArg(ArgCount - 1)->getEndLoc()); 140 141 return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args) 142 << 0 /*function call*/ << DesiredArgCount << ArgCount 143 << Call->getArg(1)->getSourceRange(); 144 } 145 146 /// Check that the first argument to __builtin_annotation is an integer 147 /// and the second argument is a non-wide string literal. 148 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 149 if (checkArgCount(S, TheCall, 2)) 150 return true; 151 152 // First argument should be an integer. 
153 Expr *ValArg = TheCall->getArg(0); 154 QualType Ty = ValArg->getType(); 155 if (!Ty->isIntegerType()) { 156 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 157 << ValArg->getSourceRange(); 158 return true; 159 } 160 161 // Second argument should be a constant string. 162 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 163 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 164 if (!Literal || !Literal->isAscii()) { 165 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 166 << StrArg->getSourceRange(); 167 return true; 168 } 169 170 TheCall->setType(Ty); 171 return false; 172 } 173 174 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 175 // We need at least one argument. 176 if (TheCall->getNumArgs() < 1) { 177 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 178 << 0 << 1 << TheCall->getNumArgs() 179 << TheCall->getCallee()->getSourceRange(); 180 return true; 181 } 182 183 // All arguments should be wide string literals. 184 for (Expr *Arg : TheCall->arguments()) { 185 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); 186 if (!Literal || !Literal->isWide()) { 187 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str) 188 << Arg->getSourceRange(); 189 return true; 190 } 191 } 192 193 return false; 194 } 195 196 /// Check that the argument to __builtin_addressof is a glvalue, and set the 197 /// result type to the corresponding pointer type. 198 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { 199 if (checkArgCount(S, TheCall, 1)) 200 return true; 201 202 ExprResult Arg(TheCall->getArg(0)); 203 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc()); 204 if (ResultType.isNull()) 205 return true; 206 207 TheCall->setArg(0, Arg.get()); 208 TheCall->setType(ResultType); 209 return false; 210 } 211 212 /// Check that the argument to __builtin_function_start is a function. 
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  // The operand must be a constant reference to a function declaration.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  // Succeed only if taking the function's address is actually permitted
  // (this also emits the diagnostic when it is not, via Complain=true).
  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  // Plain (non-enum, non-bool) integers only; enums and bool are rejected
  // even though they satisfy isIntegerType().
  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  // Largest representable power of two: one bit below the sign/top bit of
  // the source type's width.
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      // Aligning to 1 is a no-op; warn but do not error.
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  // Materialize both operands as if passed to a parameter of the checked
  // type, so later phases see fully converted arguments.
  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

/// Check __builtin_{add,sub,mul}_overflow: two integer operands and a
/// pointer to a non-const integer to receive the result.
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

namespace {
/// Synthesizes the sequence of print-function calls that implement
/// __builtin_dump_struct. Each generated expression is collected in Actions
/// and finally wrapped into a PseudoObjectExpr by buildWrapper().
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  SourceLocation Loc = TheCall->getBeginLoc();
  // All synthesized expressions, in evaluation order.
  SmallVector<Expr *, 32> Actions;
  // Tracks whether any diagnostic fired while building the calls.
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    // Keep type names stable/readable in the generated format strings.
    Policy.AnonymousTagLocations = false;
  }

  // Wrap an expression so it can be referenced repeatedly without being
  // re-evaluated; the wrapper is also recorded as an action.
  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  Expr *getStringLiteral(llvm::StringRef Str) {
    Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  // Build a call to the user-supplied print function (argument 1 of the
  // builtin), forwarding any extra builtin arguments, then the format
  // string, then Exprs. Returns true on error.
  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    SmallVector<Expr *, 8> Args;
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
    Ctx.PointOfInstantiation = Loc;
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();
    S.pushCodeSynthesisContext(Ctx);

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    S.popCodeSynthesisContext();
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  // Returns nullptr (no indent argument needed) at depth zero.
  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    llvm::SmallString<32> Indent;
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  Expr *getTypeString(QualType T) {
    return getStringLiteral(T.getAsString(Policy));
  }

  // Append a printf conversion specifier suitable for a value of type T to
  // Str. Returns false if no specifier could be determined.
  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    analyze_printf::PrintfSpecifier Specifier;
    if (Specifier.fixType(T, S.getLangOpts(), S.Context,
                          /*IsObjCLiteral=*/false)) {
      // We were able to guess how to format this.
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  // Print "<indent><type name>" then the record's value. Used for the
  // outermost record and for base-class subobjects.
  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        // Cast the record pointer/lvalue to the base type so field lookup
        // below sees the base subobject.
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        ExprResult BasePtr = S.BuildCStyleCastExpr(
            Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    // Dump each field.
    for (auto *D : RD->decls()) {
      // IndirectFieldDecls let us reach fields of anonymous structs/unions.
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
        continue;

      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        QualType SizeT = S.Context.getSizeType();
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      ExprResult Field =
          IFD ? S.BuildAnonymousStructUnionMemberReference(
                    CXXScopeSpec(), Loc, IFD,
                    DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
              : S.BuildFieldReferenceExpr(
                    RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
                    DeclAccessPair::make(FD, AS_public),
                    DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
                        : callPrintFunction("}\n");
  }

  // Bundle all recorded actions into a PseudoObjectExpr keyed on the
  // original call, and propagate its type/value kind to TheCall.
  Expr *buildWrapper() {
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
                                             PseudoObjectExpr::NoResult);
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};
} // namespace

/// Type-check __builtin_dump_struct and expand it into the sequence of
/// print calls built by BuiltinDumpStructGenerator.
static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
  if (checkArgCountAtLeast(S, TheCall, 2))
    return ExprError();

  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
  if (PtrArgResult.isInvalid())
    return ExprError();
  TheCall->setArg(0, PtrArgResult.get());

  // First argument should be a pointer to a struct.
  QualType PtrArgType = PtrArgResult.get()->getType();
  if (!PtrArgType->isPointerType() ||
      !PtrArgType->getPointeeType()->isRecordType()) {
    S.Diag(PtrArgResult.get()->getBeginLoc(),
           diag::err_expected_struct_pointer_argument)
        << 1 << TheCall->getDirectCallee() << PtrArgType;
    return ExprError();
  }
  const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();

  // Second argument is a callable, but we can't fully validate it until we try
  // calling it.
  QualType FnArgType = TheCall->getArg(1)->getType();
  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
      !FnArgType->isBlockPointerType() &&
      !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
    auto *BT = FnArgType->getAs<BuiltinType>();
    switch (BT ? BT->getKind() : BuiltinType::Void) {
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::BuiltinFn:
      // This might be a callable.
      break;

    default:
      S.Diag(TheCall->getArg(1)->getBeginLoc(),
             diag::err_expected_callable_argument)
          << 2 << TheCall->getDirectCallee() << FnArgType;
      return ExprError();
    }
  }

  BuiltinDumpStructGenerator Generator(S, TheCall);

  // Wrap parentheses around the given pointer. This is not necessary for
  // correct code generation, but it means that when we pretty-print the call
  // arguments in our diagnostics we will produce '(&s)->n' instead of the
  // incorrect '&s->n'.
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}

/// Check __builtin_call_with_static_chain(call, chain): the first argument
/// must be a plain (non-block, non-builtin, non-pseudo-destructor) call and
/// the second a pointer; the builtin then takes on the call's result type.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Retype the builtin itself as "ReturnTy (*)(ReturnTy, ChainTy)" so the
  // call node is well-typed, then forward the inner call's result
  // type/value/object kind to the builtin call.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

/// Reports scanf destination buffers that are smaller than the constant
/// field width the format string will write into them.
class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    // %s and %[...] append a terminating NUL; %c does not.
    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    // Only constant field widths give a knowable write size.
    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

/// Walks a printf format string accumulating a lower bound on the number of
/// bytes the formatted output will occupy (including the trailing NUL).
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  // Start with the literal text up to the first embedded NUL (or the whole
  // string), plus one for the terminator.
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    // '+' or ' ' flags force a sign/space character.
    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    // The specifier's own characters were counted as literal text in the
    // constructor; remove them now that they've been replaced.
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  // Constant field widths contribute to the minimum output size; all other
  // forms (star, argument-supplied) contribute zero.
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
949 switch (FW.getHowSpecified()) { 950 case analyze_format_string::OptionalAmount::NotSpecified: 951 switch (FS.getConversionSpecifier().getKind()) { 952 default: 953 break; 954 case analyze_format_string::ConversionSpecifier::dArg: // %d 955 case analyze_format_string::ConversionSpecifier::DArg: // %D 956 case analyze_format_string::ConversionSpecifier::iArg: // %i 957 Precision = 1; 958 break; 959 case analyze_format_string::ConversionSpecifier::oArg: // %d 960 case analyze_format_string::ConversionSpecifier::OArg: // %D 961 case analyze_format_string::ConversionSpecifier::uArg: // %d 962 case analyze_format_string::ConversionSpecifier::UArg: // %D 963 case analyze_format_string::ConversionSpecifier::xArg: // %d 964 case analyze_format_string::ConversionSpecifier::XArg: // %D 965 Precision = 1; 966 break; 967 case analyze_format_string::ConversionSpecifier::fArg: // %f 968 case analyze_format_string::ConversionSpecifier::FArg: // %F 969 case analyze_format_string::ConversionSpecifier::eArg: // %e 970 case analyze_format_string::ConversionSpecifier::EArg: // %E 971 case analyze_format_string::ConversionSpecifier::gArg: // %g 972 case analyze_format_string::ConversionSpecifier::GArg: // %G 973 Precision = 6; 974 break; 975 case analyze_format_string::ConversionSpecifier::pArg: // %d 976 Precision = 1; 977 break; 978 } 979 break; 980 case analyze_format_string::OptionalAmount::Constant: 981 Precision = FW.getConstantAmount(); 982 break; 983 default: 984 break; 985 } 986 return Precision; 987 } 988 }; 989 990 } // namespace 991 992 void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, 993 CallExpr *TheCall) { 994 if (TheCall->isValueDependent() || TheCall->isTypeDependent() || 995 isConstantEvaluated()) 996 return; 997 998 bool UseDABAttr = false; 999 const FunctionDecl *UseDecl = FD; 1000 1001 const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>(); 1002 if (DABAttr) { 1003 UseDecl = DABAttr->getFunction(); 1004 assert(UseDecl && "Missing FunctionDecl 
in DiagnoseAsBuiltin attribute!"); 1005 UseDABAttr = true; 1006 } 1007 1008 unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true); 1009 1010 if (!BuiltinID) 1011 return; 1012 1013 const TargetInfo &TI = getASTContext().getTargetInfo(); 1014 unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType()); 1015 1016 auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> { 1017 // If we refer to a diagnose_as_builtin attribute, we need to change the 1018 // argument index to refer to the arguments of the called function. Unless 1019 // the index is out of bounds, which presumably means it's a variadic 1020 // function. 1021 if (!UseDABAttr) 1022 return Index; 1023 unsigned DABIndices = DABAttr->argIndices_size(); 1024 unsigned NewIndex = Index < DABIndices 1025 ? DABAttr->argIndices_begin()[Index] 1026 : Index - DABIndices + FD->getNumParams(); 1027 if (NewIndex >= TheCall->getNumArgs()) 1028 return llvm::None; 1029 return NewIndex; 1030 }; 1031 1032 auto ComputeExplicitObjectSizeArgument = 1033 [&](unsigned Index) -> Optional<llvm::APSInt> { 1034 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1035 if (!IndexOptional) 1036 return llvm::None; 1037 unsigned NewIndex = IndexOptional.getValue(); 1038 Expr::EvalResult Result; 1039 Expr *SizeArg = TheCall->getArg(NewIndex); 1040 if (!SizeArg->EvaluateAsInt(Result, getASTContext())) 1041 return llvm::None; 1042 llvm::APSInt Integer = Result.Val.getInt(); 1043 Integer.setIsUnsigned(true); 1044 return Integer; 1045 }; 1046 1047 auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 1048 // If the parameter has a pass_object_size attribute, then we should use its 1049 // (potentially) more strict checking mode. Otherwise, conservatively assume 1050 // type 0. 1051 int BOSType = 0; 1052 // This check can fail for variadic functions. 
1053 if (Index < FD->getNumParams()) { 1054 if (const auto *POS = 1055 FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>()) 1056 BOSType = POS->getType(); 1057 } 1058 1059 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1060 if (!IndexOptional) 1061 return llvm::None; 1062 unsigned NewIndex = IndexOptional.getValue(); 1063 1064 const Expr *ObjArg = TheCall->getArg(NewIndex); 1065 uint64_t Result; 1066 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType)) 1067 return llvm::None; 1068 1069 // Get the object size in the target's size_t width. 1070 return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth); 1071 }; 1072 1073 auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 1074 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1075 if (!IndexOptional) 1076 return llvm::None; 1077 unsigned NewIndex = IndexOptional.getValue(); 1078 1079 const Expr *ObjArg = TheCall->getArg(NewIndex); 1080 uint64_t Result; 1081 if (!ObjArg->tryEvaluateStrLen(Result, getASTContext())) 1082 return llvm::None; 1083 // Add 1 for null byte. 1084 return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth); 1085 }; 1086 1087 Optional<llvm::APSInt> SourceSize; 1088 Optional<llvm::APSInt> DestinationSize; 1089 unsigned DiagID = 0; 1090 bool IsChkVariant = false; 1091 1092 auto GetFunctionName = [&]() { 1093 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID); 1094 // Skim off the details of whichever builtin was called to produce a better 1095 // diagnostic, as it's unlikely that the user wrote the __builtin 1096 // explicitly. 
1097 if (IsChkVariant) { 1098 FunctionName = FunctionName.drop_front(std::strlen("__builtin___")); 1099 FunctionName = FunctionName.drop_back(std::strlen("_chk")); 1100 } else if (FunctionName.startswith("__builtin_")) { 1101 FunctionName = FunctionName.drop_front(std::strlen("__builtin_")); 1102 } 1103 return FunctionName; 1104 }; 1105 1106 switch (BuiltinID) { 1107 default: 1108 return; 1109 case Builtin::BI__builtin_strcpy: 1110 case Builtin::BIstrcpy: { 1111 DiagID = diag::warn_fortify_strlen_overflow; 1112 SourceSize = ComputeStrLenArgument(1); 1113 DestinationSize = ComputeSizeArgument(0); 1114 break; 1115 } 1116 1117 case Builtin::BI__builtin___strcpy_chk: { 1118 DiagID = diag::warn_fortify_strlen_overflow; 1119 SourceSize = ComputeStrLenArgument(1); 1120 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1121 IsChkVariant = true; 1122 break; 1123 } 1124 1125 case Builtin::BIscanf: 1126 case Builtin::BIfscanf: 1127 case Builtin::BIsscanf: { 1128 unsigned FormatIndex = 1; 1129 unsigned DataIndex = 2; 1130 if (BuiltinID == Builtin::BIscanf) { 1131 FormatIndex = 0; 1132 DataIndex = 1; 1133 } 1134 1135 const auto *FormatExpr = 1136 TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1137 1138 const auto *Format = dyn_cast<StringLiteral>(FormatExpr); 1139 if (!Format) 1140 return; 1141 1142 if (!Format->isAscii() && !Format->isUTF8()) 1143 return; 1144 1145 auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize, 1146 unsigned SourceSize) { 1147 DiagID = diag::warn_fortify_scanf_overflow; 1148 unsigned Index = ArgIndex + DataIndex; 1149 StringRef FunctionName = GetFunctionName(); 1150 DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall, 1151 PDiag(DiagID) << FunctionName << (Index + 1) 1152 << DestSize << SourceSize); 1153 }; 1154 1155 StringRef FormatStrRef = Format->getString(); 1156 auto ShiftedComputeSizeArgument = [&](unsigned Index) { 1157 return ComputeSizeArgument(Index + DataIndex); 1158 }; 1159 ScanfDiagnosticFormatHandler 
H(ShiftedComputeSizeArgument, Diagnose); 1160 const char *FormatBytes = FormatStrRef.data(); 1161 const ConstantArrayType *T = 1162 Context.getAsConstantArrayType(Format->getType()); 1163 assert(T && "String literal not of constant array type!"); 1164 size_t TypeSize = T->getSize().getZExtValue(); 1165 1166 // In case there's a null byte somewhere. 1167 size_t StrLen = 1168 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1169 1170 analyze_format_string::ParseScanfString(H, FormatBytes, 1171 FormatBytes + StrLen, getLangOpts(), 1172 Context.getTargetInfo()); 1173 1174 // Unlike the other cases, in this one we have already issued the diagnostic 1175 // here, so no need to continue (because unlike the other cases, here the 1176 // diagnostic refers to the argument number). 1177 return; 1178 } 1179 1180 case Builtin::BIsprintf: 1181 case Builtin::BI__builtin___sprintf_chk: { 1182 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3; 1183 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1184 1185 if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) { 1186 1187 if (!Format->isAscii() && !Format->isUTF8()) 1188 return; 1189 1190 StringRef FormatStrRef = Format->getString(); 1191 EstimateSizeFormatHandler H(FormatStrRef); 1192 const char *FormatBytes = FormatStrRef.data(); 1193 const ConstantArrayType *T = 1194 Context.getAsConstantArrayType(Format->getType()); 1195 assert(T && "String literal not of constant array type!"); 1196 size_t TypeSize = T->getSize().getZExtValue(); 1197 1198 // In case there's a null byte somewhere. 
1199 size_t StrLen = 1200 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1201 if (!analyze_format_string::ParsePrintfString( 1202 H, FormatBytes, FormatBytes + StrLen, getLangOpts(), 1203 Context.getTargetInfo(), false)) { 1204 DiagID = diag::warn_fortify_source_format_overflow; 1205 SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound()) 1206 .extOrTrunc(SizeTypeWidth); 1207 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) { 1208 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1209 IsChkVariant = true; 1210 } else { 1211 DestinationSize = ComputeSizeArgument(0); 1212 } 1213 break; 1214 } 1215 } 1216 return; 1217 } 1218 case Builtin::BI__builtin___memcpy_chk: 1219 case Builtin::BI__builtin___memmove_chk: 1220 case Builtin::BI__builtin___memset_chk: 1221 case Builtin::BI__builtin___strlcat_chk: 1222 case Builtin::BI__builtin___strlcpy_chk: 1223 case Builtin::BI__builtin___strncat_chk: 1224 case Builtin::BI__builtin___strncpy_chk: 1225 case Builtin::BI__builtin___stpncpy_chk: 1226 case Builtin::BI__builtin___memccpy_chk: 1227 case Builtin::BI__builtin___mempcpy_chk: { 1228 DiagID = diag::warn_builtin_chk_overflow; 1229 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2); 1230 DestinationSize = 1231 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1232 IsChkVariant = true; 1233 break; 1234 } 1235 1236 case Builtin::BI__builtin___snprintf_chk: 1237 case Builtin::BI__builtin___vsnprintf_chk: { 1238 DiagID = diag::warn_builtin_chk_overflow; 1239 SourceSize = ComputeExplicitObjectSizeArgument(1); 1240 DestinationSize = ComputeExplicitObjectSizeArgument(3); 1241 IsChkVariant = true; 1242 break; 1243 } 1244 1245 case Builtin::BIstrncat: 1246 case Builtin::BI__builtin_strncat: 1247 case Builtin::BIstrncpy: 1248 case Builtin::BI__builtin_strncpy: 1249 case Builtin::BIstpncpy: 1250 case Builtin::BI__builtin_stpncpy: { 1251 // Whether these functions overflow depends on the runtime strlen of the 1252 // 
string, not just the buffer size, so emitting the "always overflow" 1253 // diagnostic isn't quite right. We should still diagnose passing a buffer 1254 // size larger than the destination buffer though; this is a runtime abort 1255 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise. 1256 DiagID = diag::warn_fortify_source_size_mismatch; 1257 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1258 DestinationSize = ComputeSizeArgument(0); 1259 break; 1260 } 1261 1262 case Builtin::BImemcpy: 1263 case Builtin::BI__builtin_memcpy: 1264 case Builtin::BImemmove: 1265 case Builtin::BI__builtin_memmove: 1266 case Builtin::BImemset: 1267 case Builtin::BI__builtin_memset: 1268 case Builtin::BImempcpy: 1269 case Builtin::BI__builtin_mempcpy: { 1270 DiagID = diag::warn_fortify_source_overflow; 1271 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1272 DestinationSize = ComputeSizeArgument(0); 1273 break; 1274 } 1275 case Builtin::BIsnprintf: 1276 case Builtin::BI__builtin_snprintf: 1277 case Builtin::BIvsnprintf: 1278 case Builtin::BI__builtin_vsnprintf: { 1279 DiagID = diag::warn_fortify_source_size_mismatch; 1280 SourceSize = ComputeExplicitObjectSizeArgument(1); 1281 DestinationSize = ComputeSizeArgument(0); 1282 break; 1283 } 1284 } 1285 1286 if (!SourceSize || !DestinationSize || 1287 llvm::APSInt::compareValues(SourceSize.getValue(), 1288 DestinationSize.getValue()) <= 0) 1289 return; 1290 1291 StringRef FunctionName = GetFunctionName(); 1292 1293 SmallString<16> DestinationStr; 1294 SmallString<16> SourceStr; 1295 DestinationSize->toString(DestinationStr, /*Radix=*/10); 1296 SourceSize->toString(SourceStr, /*Radix=*/10); 1297 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 1298 PDiag(DiagID) 1299 << FunctionName << DestinationStr << SourceStr); 1300 } 1301 1302 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, 1303 Scope::ScopeFlags NeededScopeFlags, 1304 unsigned DiagID) { 1305 // 
Scopes aren't available during instantiation. Fortunately, builtin 1306 // functions cannot be template args so they cannot be formed through template 1307 // instantiation. Therefore checking once during the parse is sufficient. 1308 if (SemaRef.inTemplateInstantiation()) 1309 return false; 1310 1311 Scope *S = SemaRef.getCurScope(); 1312 while (S && !S->isSEHExceptScope()) 1313 S = S->getParent(); 1314 if (!S || !(S->getFlags() & NeededScopeFlags)) { 1315 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 1316 SemaRef.Diag(TheCall->getExprLoc(), DiagID) 1317 << DRE->getDecl()->getIdentifier(); 1318 return true; 1319 } 1320 1321 return false; 1322 } 1323 1324 static inline bool isBlockPointer(Expr *Arg) { 1325 return Arg->getType()->isBlockPointerType(); 1326 } 1327 1328 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local 1329 /// void*, which is a requirement of device side enqueue. 1330 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) { 1331 const BlockPointerType *BPT = 1332 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1333 ArrayRef<QualType> Params = 1334 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes(); 1335 unsigned ArgCounter = 0; 1336 bool IllegalParams = false; 1337 // Iterate through the block parameters until either one is found that is not 1338 // a local void*, or the block is valid. 1339 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end(); 1340 I != E; ++I, ++ArgCounter) { 1341 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() || 1342 (*I)->getPointeeType().getQualifiers().getAddressSpace() != 1343 LangAS::opencl_local) { 1344 // Get the location of the error. If a block literal has been passed 1345 // (BlockExpr) then we can point straight to the offending argument, 1346 // else we just point to the variable reference. 
1347 SourceLocation ErrorLoc; 1348 if (isa<BlockExpr>(BlockArg)) { 1349 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl(); 1350 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc(); 1351 } else if (isa<DeclRefExpr>(BlockArg)) { 1352 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc(); 1353 } 1354 S.Diag(ErrorLoc, 1355 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args); 1356 IllegalParams = true; 1357 } 1358 } 1359 1360 return IllegalParams; 1361 } 1362 1363 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) { 1364 // OpenCL device can support extension but not the feature as extension 1365 // requires subgroup independent forward progress, but subgroup independent 1366 // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature. 1367 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) && 1368 !S.getOpenCLOptions().isSupported("__opencl_c_subgroups", 1369 S.getLangOpts())) { 1370 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension) 1371 << 1 << Call->getDirectCallee() 1372 << "cl_khr_subgroups or __opencl_c_subgroups"; 1373 return true; 1374 } 1375 return false; 1376 } 1377 1378 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) { 1379 if (checkArgCount(S, TheCall, 2)) 1380 return true; 1381 1382 if (checkOpenCLSubgroupExt(S, TheCall)) 1383 return true; 1384 1385 // First argument is an ndrange_t type. 
1386 Expr *NDRangeArg = TheCall->getArg(0); 1387 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 1388 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1389 << TheCall->getDirectCallee() << "'ndrange_t'"; 1390 return true; 1391 } 1392 1393 Expr *BlockArg = TheCall->getArg(1); 1394 if (!isBlockPointer(BlockArg)) { 1395 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1396 << TheCall->getDirectCallee() << "block"; 1397 return true; 1398 } 1399 return checkOpenCLBlockArgs(S, BlockArg); 1400 } 1401 1402 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the 1403 /// get_kernel_work_group_size 1404 /// and get_kernel_preferred_work_group_size_multiple builtin functions. 1405 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) { 1406 if (checkArgCount(S, TheCall, 1)) 1407 return true; 1408 1409 Expr *BlockArg = TheCall->getArg(0); 1410 if (!isBlockPointer(BlockArg)) { 1411 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1412 << TheCall->getDirectCallee() << "block"; 1413 return true; 1414 } 1415 return checkOpenCLBlockArgs(S, BlockArg); 1416 } 1417 1418 /// Diagnose integer type and any valid implicit conversion to it. 1419 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, 1420 const QualType &IntType); 1421 1422 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall, 1423 unsigned Start, unsigned End) { 1424 bool IllegalParams = false; 1425 for (unsigned I = Start; I <= End; ++I) 1426 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I), 1427 S.Context.getSizeType()); 1428 return IllegalParams; 1429 } 1430 1431 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all 1432 /// 'local void*' parameter of passed block. 
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  // Every overload takes at least (queue, flags, ndrange, block).
  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // Check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // We have a block type, check the prototype: this form requires a block
    // taking no parameters.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() >
        0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // We can have block + varargs: one size argument per block parameter.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // Last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // Check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // Check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected, give generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Checks the first argument of a pipe builtin: it must have pipe type and an
/// access qualifier compatible with the read/write direction of the builtin.
/// Returns true on error.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    // Read builtins: no qualifier (defaults to read_only) or read_only.
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    // Write builtins: an explicit write_only qualifier is required.
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since return type of reserve_read/write_pipe built-in function is
  // reserve_id_t, which is not defined in the builtin def file, we used int
  // as return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
1806 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 1807 CallExpr *Call) { 1808 if (checkArgCount(S, Call, 1)) 1809 return true; 1810 1811 auto RT = Call->getArg(0)->getType(); 1812 if (!RT->isPointerType() || RT->getPointeeType() 1813 .getAddressSpace() == LangAS::opencl_constant) { 1814 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 1815 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 1816 return true; 1817 } 1818 1819 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 1820 S.Diag(Call->getArg(0)->getBeginLoc(), 1821 diag::warn_opencl_generic_address_space_arg) 1822 << Call->getDirectCallee()->getNameInfo().getAsString() 1823 << Call->getArg(0)->getSourceRange(); 1824 } 1825 1826 RT = RT->getPointeeType(); 1827 auto Qual = RT.getQualifiers(); 1828 switch (BuiltinID) { 1829 case Builtin::BIto_global: 1830 Qual.setAddressSpace(LangAS::opencl_global); 1831 break; 1832 case Builtin::BIto_local: 1833 Qual.setAddressSpace(LangAS::opencl_local); 1834 break; 1835 case Builtin::BIto_private: 1836 Qual.setAddressSpace(LangAS::opencl_private); 1837 break; 1838 default: 1839 llvm_unreachable("Invalid builtin function"); 1840 } 1841 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 1842 RT.getUnqualifiedType(), Qual))); 1843 1844 return false; 1845 } 1846 1847 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { 1848 if (checkArgCount(S, TheCall, 1)) 1849 return ExprError(); 1850 1851 // Compute __builtin_launder's parameter type from the argument. 1852 // The parameter type is: 1853 // * The type of the argument if it's not an array or function type, 1854 // Otherwise, 1855 // * The decayed argument type. 
1856 QualType ParamTy = [&]() { 1857 QualType ArgTy = TheCall->getArg(0)->getType(); 1858 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe()) 1859 return S.Context.getPointerType(Ty->getElementType()); 1860 if (ArgTy->isFunctionType()) { 1861 return S.Context.getPointerType(ArgTy); 1862 } 1863 return ArgTy; 1864 }(); 1865 1866 TheCall->setType(ParamTy); 1867 1868 auto DiagSelect = [&]() -> llvm::Optional<unsigned> { 1869 if (!ParamTy->isPointerType()) 1870 return 0; 1871 if (ParamTy->isFunctionPointerType()) 1872 return 1; 1873 if (ParamTy->isVoidPointerType()) 1874 return 2; 1875 return llvm::Optional<unsigned>{}; 1876 }(); 1877 if (DiagSelect.hasValue()) { 1878 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) 1879 << DiagSelect.getValue() << TheCall->getSourceRange(); 1880 return ExprError(); 1881 } 1882 1883 // We either have an incomplete class type, or we have a class template 1884 // whose instantiation has not been forced. Example: 1885 // 1886 // template <class T> struct Foo { T value; }; 1887 // Foo<int> *p = nullptr; 1888 // auto *d = __builtin_launder(p); 1889 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 1890 diag::err_incomplete_type)) 1891 return ExprError(); 1892 1893 assert(ParamTy->getPointeeType()->isObjectType() && 1894 "Unhandled non-object pointer case"); 1895 1896 InitializedEntity Entity = 1897 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 1898 ExprResult Arg = 1899 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 1900 if (Arg.isInvalid()) 1901 return ExprError(); 1902 TheCall->setArg(0, Arg.get()); 1903 1904 return TheCall; 1905 } 1906 1907 // Emit an error and return true if the current object format type is in the 1908 // list of unsupported types. 
static bool CheckBuiltinTargetNotInUnsupported(
    Sema &S, unsigned BuiltinID, CallExpr *TheCall,
    ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
  llvm::Triple::ObjectFormatType CurObjFormat =
      S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
  if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
        << TheCall->getSourceRange();
    return true;
  }
  return false;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                              ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  // A supported architecture means no error (false).
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

// Dispatch a target-specific builtin call to the checker for the
// architecture of \p TI's triple. Architectures with no case here need no
// extra checking.
bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  // ICEArguments is a bitmask: bit N set means argument N must be an ICE.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  // The loop ends once every required bit has been cleared.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    // If we don't have enough arguments, continue so we can issue better
    // diagnostic in checkArgCount(...)
    // (note: 'return true' converts to an invalid ExprResult here).
    if (ArgNo < TheCall->getNumArgs() &&
        SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
    // on z/OS) and for XCOFF (i.e., on AIX). Emit an unsupported-target error.
    if (CheckBuiltinTargetNotInUnsupported(
            *this, BuiltinID, TheCall,
            {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
      return ExprError();
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    // MS __va_start has a dedicated form on ARM/AArch64.
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetInSupported(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
                                      {llvm::Triple::x86_64, llvm::Triple::arm,
                                       llvm::Triple::thumb,
                                       llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
  case Builtin::BI__builtin_alloca_with_align_uninitialized:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    LLVM_FALLTHROUGH;
  case Builtin::BI__builtin_alloca:
  case Builtin::BI__builtin_alloca_uninitialized:
    // Always warn on any use of alloca (-Walloca).
    Diag(TheCall->getBeginLoc(), diag::warn_alloca)
        << TheCall->getDirectCallee();
    break;
  case Builtin::BI__arithmetic_fence:
    if (SemaBuiltinArithmeticFence(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    // The 'type' argument must be a constant in [0, 3].
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_complex:
    if (SemaBuiltinComplex(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  // All of the __sync_* builtins (and their explicitly-sized variants) share
  // the overload resolution done in SemaBuiltinAtomicOverloaded below.
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
  case Builtin::BI__builtin_memcpy_inline: {
    if (checkArgCount(*this, TheCall, 3))
      return ExprError();
    // Decay arrays/functions on the pointer arguments, keeping the converted
    // expressions in the call.
    auto ArgArrayConversionFailed = [&](unsigned Arg) {
      ExprResult ArgExpr =
          DefaultFunctionArrayLvalueConversion(TheCall->getArg(Arg));
      if (ArgExpr.isInvalid())
        return true;
      TheCall->setArg(Arg, ArgExpr.get());
      return false;
    };

    if (ArgArrayConversionFailed(0) || ArgArrayConversionFailed(1))
      return true;
    clang::Expr *SizeOp = TheCall->getArg(2);
    // We warn about copying to or from `nullptr` pointers when `size` is
    // greater than 0. When `size` is value dependent we cannot evaluate its
    // value so we bail out.
    if (SizeOp->isValueDependent())
      break;
    if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
      CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
      CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
    }
    break;
  }
// Expand one case per atomic builtin in Builtins.def; each forwards to the
// shared atomic-operation overload checking.
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_function_start:
    if (SemaBuiltinFunctionStart(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    // On failure, still resolve any delayed typos in the call.
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct:
    return SemaBuiltinDumpStruct(*this, TheCall);
  case Builtin::BI__builtin_expect_with_probability: {
    // We first want to ensure we are called with 3 arguments
    if (checkArgCount(*this, TheCall, 3))
      return ExprError();
    // then check probability is constant float in range [0.0, 1.0]
    const Expr *ProbArg = TheCall->getArg(2);
    SmallVector<PartialDiagnosticAt, 8> Notes;
    Expr::EvalResult Eval;
    Eval.Diag = &Notes;
    if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
        !Eval.Val.isFloat()) {
      Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
          << ProbArg->getSourceRange();
      // Replay the notes gathered during constant evaluation.
      for (const PartialDiagnosticAt &PDiag : Notes)
        Diag(PDiag.first, PDiag.second);
      return ExprError();
    }
    // Convert to double before the range comparison.
    llvm::APFloat Probability = Eval.Val.getFloat();
    bool LoseInfo = false;
    Probability.convert(llvm::APFloat::IEEEdouble(),
                        llvm::RoundingMode::Dynamic, &LoseInfo);
    if (!(Probability >= llvm::APFloat(0.0) &&
          Probability <= llvm::APFloat(1.0))) {
      Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
          << ProbArg->getSourceRange();
      return ExprError();
    }
    break;
  }
  case Builtin::BI__builtin_preserve_access_index:
    if (SemaBuiltinPreserveAI(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BIforward:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
  case Builtin::BIas_const: {
    // These are all expected to be of the form
    // T &/&&/* f(U &/&&)
    // where T and U only differ in qualification.
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();
    QualType Param = FDecl->getParamDecl(0)->getType();
    QualType Result = FDecl->getReturnType();
    bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
                          BuiltinID == Builtin::BI__addressof;
    if (!(Param->isReferenceType() &&
          (ReturnsPointer ? Result->isPointerType()
                          : Result->isReferenceType()) &&
          Context.hasSameUnqualifiedType(Param->getPointeeType(),
                                         Result->getPointeeType()))) {
      Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
          << FDecl;
      return ExprError();
    }
    break;
  }
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  // The sub_group variants additionally require the subgroups extension.
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
    Cleanup.setExprNeedsCleanups(true);
    LLVM_FALLTHROUGH;
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_frame_address:
  case Builtin::BI__builtin_return_address: {
    // The level argument must be a constant in [0, 0xFFFF].
    if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
      return ExprError();

    // -Wframe-address warning if non-zero passed to builtin
    // return/frame address.
    Expr::EvalResult Result;
    if (!TheCall->getArg(0)->isValueDependent() &&
        TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
        Result.Val.getInt() != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
          << ((BuiltinID == Builtin::BI__builtin_return_address)
                  ? "__builtin_return_address"
                  : "__builtin_frame_address")
          << TheCall->getSourceRange();
    break;
  }

  // __builtin_elementwise_abs restricts the element type to signed integers or
  // floating point types only.
  case Builtin::BI__builtin_elementwise_abs: {
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return ExprError();

    // The element type is the argument type itself for scalars, or the
    // vector element type for vector arguments.
    QualType ArgTy = TheCall->getArg(0)->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();
    if (EltTy->isUnsignedIntegerType()) {
      Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_builtin_invalid_arg_type)
          << 1 << /* signed integer or float ty*/ 3 << ArgTy;
      return ExprError();
    }
    break;
  }

  // These builtins restrict the element type to floating point
  // types only.
  case Builtin::BI__builtin_elementwise_ceil:
  case Builtin::BI__builtin_elementwise_floor:
  case Builtin::BI__builtin_elementwise_roundeven:
  case Builtin::BI__builtin_elementwise_trunc: {
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return ExprError();

    QualType ArgTy = TheCall->getArg(0)->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();
    if (!EltTy->isFloatingType()) {
      Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_builtin_invalid_arg_type)
          << 1 << /* float ty*/ 5 << ArgTy;

      return ExprError();
    }
    break;
  }

  // These builtins restrict the element type to integer
  // types only.
  case Builtin::BI__builtin_elementwise_add_sat:
  case Builtin::BI__builtin_elementwise_sub_sat: {
    if (SemaBuiltinElementwiseMath(TheCall))
      return ExprError();

    // Scalar arguments use their own type; vector arguments use the element
    // type.
    const Expr *Arg = TheCall->getArg(0);
    QualType ArgTy = Arg->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();

    if (!EltTy->isIntegerType()) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* integer ty */ 6 << ArgTy;
      return ExprError();
    }
    break;
  }

  case Builtin::BI__builtin_elementwise_min:
  case Builtin::BI__builtin_elementwise_max:
    if (SemaBuiltinElementwiseMath(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_reduce_max:
  case Builtin::BI__builtin_reduce_min: {
    if (PrepareBuiltinReduceMathOneArgCall(TheCall))
      return ExprError();

    const Expr *Arg = TheCall->getArg(0);
    const auto *TyA = Arg->getType()->getAs<VectorType>();
    if (!TyA) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* vector ty*/ 4 << Arg->getType();
      return ExprError();
    }

    // A reduction yields a scalar of the vector's element type.
    TheCall->setType(TyA->getElementType());
    break;
  }

  // These builtins support vectors of integers only.
  // TODO: ADD/MUL should support floating-point types.
  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_xor:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_and: {
    if (PrepareBuiltinReduceMathOneArgCall(TheCall))
      return ExprError();

    const Expr *Arg = TheCall->getArg(0);
    const auto *TyA = Arg->getType()->getAs<VectorType>();
    if (!TyA || !TyA->getElementType()->isIntegerType()) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* vector of integers */ 6 << Arg->getType();
      return ExprError();
    }
    // A reduction yields a scalar of the vector's element type.
    TheCall->setType(TyA->getElementType());
    break;
  }

  case Builtin::BI__builtin_matrix_transpose:
    return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);

  case Builtin::BI__builtin_matrix_column_major_load:
    return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);

  case Builtin::BI__builtin_matrix_column_major_store:
    return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);

  case Builtin::BI__builtin_get_device_side_mangled_name: {
    // The single argument must name a function or variable carrying one of
    // the CUDA/HIP device-side attributes.
    auto Check = [](CallExpr *TheCall) {
      if (TheCall->getNumArgs() != 1)
        return false;
      auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
      if (!DRE)
        return false;
      auto *D = DRE->getDecl();
      if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
        return false;
      return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
             D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
    };
    if (!Check(TheCall)) {
      Diag(TheCall->getBeginLoc(),
           diag::err_hip_invalid_args_builtin_mangled_name);
      return ExprError();
    }
  }
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
      // An "aux" builtin belongs to the auxiliary target (e.g. the host
      // target during device compilation), so validate it against that
      // target's TargetInfo and its own builtin ID.
      assert(Context.getAuxTargetInfo() &&
             "Aux Target Builtin, but not an aux target?");

      if (CheckTSBuiltinFunctionCall(
              *Context.getAuxTargetInfo(),
              Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
        return ExprError();
    } else {
      if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
                                     TheCall))
        return ExprError();
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
// When 'shift' is true, returns the maximum shift amount for the element
// type; otherwise returns the maximum lane index. 'ForceQuad' treats the
// type as a 128-bit (quad) vector regardless of the type code's Q bit.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    // 8-bit elements: shifts take 0..7; lane count is 8 (16 when quad).
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  case NeonTypeFlags::BFloat16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags.
/// This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    // Whether 64-bit elements map to 'long' or 'long long' depends on the
    // target's int64_t type (IsInt64Long).
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
                                : Context.LongLongTy;
  case NeonTypeFlags::Poly8:
    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Poly16:
    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Poly64:
    if (IsInt64Long)
      return Context.UnsignedLongTy;
    else
      return Context.UnsignedLongLongTy;
  case NeonTypeFlags::Poly128:
    // No corresponding scalar element type; fall through to unreachable.
    break;
  case NeonTypeFlags::Float16:
    return Context.HalfTy;
  case NeonTypeFlags::Float32:
    return Context.FloatTy;
  case NeonTypeFlags::Float64:
    return Context.DoubleTy;
  case NeonTypeFlags::BFloat16:
    return Context.BFloat16Ty;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // Range check SVE intrinsics that take immediate values.
  // Each entry is (argument number, check kind, element size in bits); the
  // table is filled in per-builtin by the generated .inc file below.
  SmallVector<std::tuple<int,int,int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  // Perform all the immediate checks for this builtin call.
  bool HasError = false;
  for (auto &I : ImmChecks) {
    int ArgNum, CheckTy, ElementSizeInBits;
    std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;

    typedef bool(*OptionSetCheckFnTy)(int64_t Value);

    // Function that checks whether the operand (ArgNum) is an immediate
    // that is one of the predefined values.
    auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
                                   int ErrDiag) -> bool {
      // We can't check the value of a dependent argument.
      Expr *Arg = TheCall->getArg(ArgNum);
      if (Arg->isTypeDependent() || Arg->isValueDependent())
        return false;

      // Check constant-ness first.
      llvm::APSInt Imm;
      if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
        return true;

      if (!CheckImm(Imm.getSExtValue()))
        return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
      return false;
    };

    switch ((SVETypeFlags::ImmCheckType)CheckTy) {
    case SVETypeFlags::ImmCheck0_31:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_13:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_16:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_7:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckExtract:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (2048 / ElementSizeInBits) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRight:
      // A right shift by the full element width is a valid encoding.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRightNarrow:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
                                      ElementSizeInBits / 2))
        HasError = true;
      break;
    case
SVETypeFlags::ImmCheckShiftLeft:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      ElementSizeInBits - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndex:
      // Lane index into a 128-bit vector segment.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (1 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexCompRotate:
      // Index addresses a pair of elements (complex number), halving range.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (2 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexDot:
      // Index addresses a quadruple of elements (dot product group).
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (4 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRot90_270:
      if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
                              diag::err_rotation_argument_to_cadd))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRotAll90:
      if (CheckImmediateInSet(
              [](int64_t V) {
                return V == 0 || V == 90 || V == 180 || V == 270;
              },
              diag::err_rotation_argument_to_cmla))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_1:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_2:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_3:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
        HasError = true;
      break;
    }
  }

  return HasError;
}

bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  uint64_t mask = 0;
  unsigned TV = 0;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  // The generated overload-check code sets 'mask' (the set of valid NEON
  // type codes), and 'PtrArgNum'/'HasConstPtr' for builtins with a pointer
  // argument that must be type-checked below.
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    // On AArch64 targets, poly types are unsigned; NEON 'int64' maps to
    // 'long' or 'long long' depending on the target's int64_t.
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  // The .inc entries encode the valid range as [l, u+l] for argument i.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // All MVE immediate checks are generated from the builtin definitions;
  // each case returns directly.
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}

bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  bool Err = false;
  // Generated immediate checks set Err on failure.
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  // CDE builtins checked above take a coprocessor number as their first
  // argument; it must name a coprocessor configured for CDE.
  return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
}

bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
                                        const Expr *CoprocArg, bool WantCDE) {
  if (isConstantEvaluated())
    return false;

  // We can't check the value of a dependent argument.
  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
    return false;

  // The coprocessor number has already been validated as an integer
  // constant expression by earlier range checks; extract its value.
  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
  int64_t CoprocNo = CoprocNoAP.getExtValue();
  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");

  // Only coprocessors 0..7 can be configured for CDE; diagnose a mismatch
  // between what the builtin wants (WantCDE) and the target configuration.
  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));

  if (IsCDECoproc != WantCDE)
    return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
           << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();

  return false;
}

bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(IsLdrex ?
0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ARC-managed pointees cannot be accessed with exclusive semantics.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // ldrex/ldaex yield the loaded value directly.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}

bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // Exclusive load/store builtins: limited to 64-bit wide accesses on
  // 32-bit ARM (cf. 128 for AArch64).
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Delegate NEON/MVE/CDE builtins to their dedicated checkers.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    // Signed saturate: bit position is 1-based.
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    // Generic coprocessor instructions: the coprocessor number must be a
    // 4-bit immediate naming a non-CDE coprocessor.
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
           CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
                                        /*WantCDE*/ false);
  }
}

bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // Exclusive load/store builtins: up to 128-bit wide on AArch64.
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return
CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (BuiltinID == AArch64::BI__break)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  // The valid range is [l, u+l] for argument i.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

// Returns true if Arg is a plausible first argument for
// __builtin_preserve_field_info: a record field access, or an array element
// access that the BPF backend may later classify as a field access.
static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
  if (Arg->getType()->getAsPlaceholderType())
    return false;

  // The first argument needs to be a record field access.
  // If it is an array element access, we delay decision
  // to BPF backend to check whether the access is a
  // field access or not.
  return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
          isa<MemberExpr>(Arg->IgnoreParens()) ||
          isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
}

// Returns true when EltTy is exactly the element type of VectorTy;
// otherwise emits a call-argument-type diagnostic and returns false.
static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
                            QualType VectorTy, QualType EltTy) {
  QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
  if (!Context.hasSameType(VectorEltTy, EltTy)) {
    S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
        << Call->getSourceRange() << VectorEltTy << EltTy;
    return false;
  }
  return true;
}

// Returns true if Arg is a valid first argument for
// __builtin_preserve_type_info (see the accepted forms below).
static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
  // format:
  //   1. __builtin_preserve_type_info(*(<type> *)0, flag);
  //   2. <type> var;
  //      __builtin_preserve_type_info(var, flag);
  if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
      !isa<UnaryOperator>(Arg->IgnoreParens()))
    return false;

  // Typedef type.
  if (ArgType->getAs<TypedefType>())
    return true;

  // Record type or Enum type.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  // Only named records/enums are accepted; anonymous ones are rejected.
  if (const auto *RT = Ty->getAs<RecordType>()) {
    if (!RT->getDecl()->getDeclName().isEmpty())
      return true;
  } else if (const auto *ET = Ty->getAs<EnumType>()) {
    if (!ET->getDecl()->getDeclName().isEmpty())
      return true;
  }

  return false;
}

// Returns true if Arg matches the required shape for
// __builtin_preserve_enum_value's first argument (see format below).
static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
  // format:
  //   __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
  //                                 flag);
  const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
  if (!UO)
    return false;

  const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
  if (!CE)
    return false;
  if (CE->getCastKind() != CK_IntegralToPointer &&
      CE->getCastKind() != CK_NullToPointer)
    return false;

  // The integer must be from an EnumConstantDecl.
  const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
  if (!DR)
    return false;

  const EnumConstantDecl *Enumerator =
      dyn_cast<EnumConstantDecl>(DR->getDecl());
  if (!Enumerator)
    return false;

  // The type must be EnumType.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  const auto *ET = Ty->getAs<EnumType>();
  if (!ET)
    return false;

  // The enum value must be supported.
  return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
}

bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
                                       CallExpr *TheCall) {
  assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
          BuiltinID == BPF::BI__builtin_btf_type_id ||
          BuiltinID == BPF::BI__builtin_preserve_type_info ||
          BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
         "unexpected BPF builtin");

  // All four builtins take exactly two arguments.
  if (checkArgCount(*this, TheCall, 2))
    return true;

  // The second argument needs to be a constant int
  Expr *Arg = TheCall->getArg(1);
  Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
  diag::kind kind;
  if (!Value) {
    // Pick the builtin-specific "flag must be constant" diagnostic.
    if (BuiltinID == BPF::BI__builtin_preserve_field_info)
      kind = diag::err_preserve_field_info_not_const;
    else if (BuiltinID == BPF::BI__builtin_btf_type_id)
      kind = diag::err_btf_type_id_not_const;
    else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
      kind = diag::err_preserve_type_info_not_const;
    else
      kind = diag::err_preserve_enum_value_not_const;
    Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
    return true;
  }

  // The first argument
  Arg = TheCall->getArg(0);
  bool InvalidArg = false;
  bool ReturnUnsignedInt = true;
  if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
    if (!isValidBPFPreserveFieldInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_field_info_not_field;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
    if (!isValidBPFPreserveTypeInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_type_info_invalid;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
    if (!isValidBPFPreserveEnumValueArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_enum_value_invalid;
    }
    // preserve_enum_value returns 'unsigned long', not 'unsigned int'.
    ReturnUnsignedInt = false;
  } else if (BuiltinID ==
BPF::BI__builtin_btf_type_id) { 3386 ReturnUnsignedInt = false; 3387 } 3388 3389 if (InvalidArg) { 3390 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3391 return true; 3392 } 3393 3394 if (ReturnUnsignedInt) 3395 TheCall->setType(Context.UnsignedIntTy); 3396 else 3397 TheCall->setType(Context.UnsignedLongTy); 3398 return false; 3399 } 3400 3401 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3402 struct ArgInfo { 3403 uint8_t OpNum; 3404 bool IsSigned; 3405 uint8_t BitWidth; 3406 uint8_t Align; 3407 }; 3408 struct BuiltinInfo { 3409 unsigned BuiltinID; 3410 ArgInfo Infos[2]; 3411 }; 3412 3413 static BuiltinInfo Infos[] = { 3414 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3415 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3416 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3417 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3418 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3419 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3420 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3421 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3422 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3423 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3424 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3425 3426 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3427 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3428 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3429 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3430 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3431 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3432 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3433 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3434 { 
Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3435 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3436 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3437 3438 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3439 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3440 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3441 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3442 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3443 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3444 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3445 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3446 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3447 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3448 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3449 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3450 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3451 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3452 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3453 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3454 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3455 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3456 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3457 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3458 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3459 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3460 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3461 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3462 { 
Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3463 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3464 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3465 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3466 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3467 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3468 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3469 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3470 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3471 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3472 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3473 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3474 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3475 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3476 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3477 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3478 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3479 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3480 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3481 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3482 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3483 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3484 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3485 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3486 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3487 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3488 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, 
false, 6, 0 }} }, 3489 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3490 {{ 1, false, 6, 0 }} }, 3491 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3492 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3493 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3494 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3495 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3496 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3497 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3498 {{ 1, false, 5, 0 }} }, 3499 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3500 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3501 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3502 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3503 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3504 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3505 { 2, false, 5, 0 }} }, 3506 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 3507 { 2, false, 6, 0 }} }, 3508 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3509 { 3, false, 5, 0 }} }, 3510 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3511 { 3, false, 6, 0 }} }, 3512 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3513 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3514 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3515 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3516 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3517 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3518 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3519 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 
2, false, 5, 0 }} }, 3520 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3521 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3522 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3523 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3524 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3525 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3526 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 3527 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3528 {{ 2, false, 4, 0 }, 3529 { 3, false, 5, 0 }} }, 3530 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3531 {{ 2, false, 4, 0 }, 3532 { 3, false, 5, 0 }} }, 3533 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3534 {{ 2, false, 4, 0 }, 3535 { 3, false, 5, 0 }} }, 3536 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3537 {{ 2, false, 4, 0 }, 3538 { 3, false, 5, 0 }} }, 3539 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3540 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3541 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3542 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3543 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3544 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3545 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3546 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3547 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3548 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3549 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3550 { 2, false, 5, 0 }} }, 3551 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3552 { 2, false, 6, 0 }} }, 3553 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 
0 }} }, 3554 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3555 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3556 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3557 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3558 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3559 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3560 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3561 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3562 {{ 1, false, 4, 0 }} }, 3563 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3564 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3565 {{ 1, false, 4, 0 }} }, 3566 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3567 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3568 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3569 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3570 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3571 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3572 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3573 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3574 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3575 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3576 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3577 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3578 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3579 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3580 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3581 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} 
}, 3582 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3583 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3584 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3585 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3586 {{ 3, false, 1, 0 }} }, 3587 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3588 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3589 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3590 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3591 {{ 3, false, 1, 0 }} }, 3592 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3593 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3594 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3595 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3596 {{ 3, false, 1, 0 }} }, 3597 }; 3598 3599 // Use a dynamically initialized static to sort the table exactly once on 3600 // first run. 3601 static const bool SortOnce = 3602 (llvm::sort(Infos, 3603 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3604 return LHS.BuiltinID < RHS.BuiltinID; 3605 }), 3606 true); 3607 (void)SortOnce; 3608 3609 const BuiltinInfo *F = llvm::partition_point( 3610 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3611 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3612 return false; 3613 3614 bool Error = false; 3615 3616 for (const ArgInfo &A : F->Infos) { 3617 // Ignore empty ArgInfo elements. 3618 if (A.BitWidth == 0) 3619 continue; 3620 3621 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3622 int32_t Max = (1 << (A.IsSigned ? 
                                 A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      // A non-zero Align means the immediate is scaled: it must lie in the
      // widened range [Min*2^Align, Max*2^Align] and be a multiple of 2^Align.
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
      Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}

// Entry point for Hexagon target-specific builtin checking. All Hexagon
// checking currently consists of validating immediate arguments against the
// table above. Returns true if an error was diagnosed.
bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}

// Entry point for MIPS target-specific builtin checking: first verify the
// required CPU feature is enabled, then validate any constant arguments.
// Returns true if an error was diagnosed.
bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}

// Diagnose MIPS builtins whose required target feature (dsp, dspr2, msa) is
// not enabled. NOTE(review): the range comparisons below assume the builtin
// IDs for each ISA extension form a contiguous run in the enumeration order
// of BuiltinsMips.def — verify if that file is reordered.
bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
                               CallExpr *TheCall) {

  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_lwx) {
    if (!TI.hasFeature("dsp"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
  }

  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
    if (!TI.hasFeature("dspr2"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_mips_builtin_requires_dspr2);
  }

  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_msa_xori_b) {
    if (!TI.hasFeature("msa"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
  }

  return false;
}

// CheckMipsBuiltinArgument - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // i = index of the immediate argument, [l, u] = its inclusive valid range,
  // m = required multiple for memory-offset immediates (0 means no
  // multiple-of constraint applies).
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  // (ldi_b accepts [-128, 255], covering both the signed and the unsigned
  // reading of an 8-bit immediate.)
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  // Memory-offset immediates must be both in range and a multiple of the
  // element size m.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}

/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  switch (*Str++) {
  case 'V':
    // 'V': a 16-byte AltiVec vector of unsigned char.
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorType::VectorKind::AltiVecVector);
  case 'i': {
    // 'i' followed by a decimal number: an int argument whose constant-value
    // constraint is reported to the caller through Mask.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  case 'W': {
    // 'W' followed by a bit-size: one of the PPC MMA vector types from
    // PPCTypes.def, optionally followed by '*' (pointer) and/or 'C' (const)
    // suffixes, applied left to right.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    switch (size) {
#define PPC_VECTOR_TYPE(typeName, Id, size) \
  case size: Type = Context.Id##Ty; break;
#include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        // Not a suffix; put the character back for the next decode.
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  default:
    // Anything else is a generic builtin type descriptor; defer to the
    // common decoder (back up so it sees the character we consumed).
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}

// Returns true for PPC builtins that are only available on 64-bit targets.
static bool isPPC_64Builtin(unsigned BuiltinID) {
  // These builtins only work on PPC 64bit targets.
  switch (BuiltinID) {
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
  case PPC::BI__builtin_bpermd:
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
  case PPC::BI__builtin_ppc_ldarx:
  case PPC::BI__builtin_ppc_stdcx:
  case PPC::BI__builtin_ppc_tdw:
  case PPC::BI__builtin_ppc_trapd:
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_mulhd:
  case PPC::BI__builtin_ppc_mulhdu:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
  case PPC::BI__builtin_ppc_insert_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_addex:
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_ppc_compare_and_swaplp:
  case PPC::BI__builtin_ppc_fetch_and_addlp:
  case PPC::BI__builtin_ppc_fetch_and_andlp:
  case PPC::BI__builtin_ppc_fetch_and_orlp:
  case PPC::BI__builtin_ppc_fetch_and_swaplp:
    return true;
  }
  return false;
}

// If the target does not have FeatureToCheck enabled, emit DiagID (with the
// optional DiagArg) at the call site and return true; otherwise return false.
static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
                             StringRef FeatureToCheck, unsigned DiagID,
                             StringRef DiagArg = "") {
  if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
    return false;

  if (DiagArg.empty())
    S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
  else
    S.Diag(TheCall->getBeginLoc(), DiagID)
        << DiagArg << TheCall->getSourceRange();

  return true;
}

/// Checks that the argument is one contiguous run of 1s with any number of 0s
/// on either side. The 1s are allowed to wrap from LSB to MSB, so 0x000FFF0,
/// 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, since all 1s
/// are not contiguous. Like the other Sema checks, this returns true when an
/// error has been diagnosed (i.e. the value is *not* such a run).
bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;
  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
  // (~Result).isShiftedMask() accepts runs that wrap from LSB to MSB.
  if (Result.isShiftedMask() || (~Result).isShiftedMask())
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_contiguous_bit_field)
         << ArgNum << Arg->getSourceRange();
}

// Target-specific checking for PPC builtin calls: reject 64-bit-only builtins
// on 32-bit targets, verify required ISA features, and validate immediate
// arguments. Returns true if an error was diagnosed.
bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0;
  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
  llvm::APSInt Result;

  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
    return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
           << TheCall->getSourceRange();

  switch (BuiltinID) {
  default: return false;
  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
  case PPC::BI__builtin_altivec_crypto_vshasigmad:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case PPC::BI__builtin_altivec_dss:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  case PPC::BI__builtin_tbegin:
  case PPC::BI__builtin_tend:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tabortwc:
  case PPC::BI__builtin_tabortdc:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tabortwci:
  case PPC::BI__builtin_tabortdci:
    return SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm) ||
           (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
            SemaBuiltinConstantArgRange(TheCall, 2, 0, 31));
  case PPC::BI__builtin_tabort:
  case PPC::BI__builtin_tcheck:
  case PPC::BI__builtin_treclaim:
  case PPC::BI__builtin_trechkpt:
  case PPC::BI__builtin_tendall:
  case PPC::BI__builtin_tresume:
  case PPC::BI__builtin_tsuspend:
  case PPC::BI__builtin_get_texasr:
  case PPC::BI__builtin_get_texasru:
  case PPC::BI__builtin_get_tfhar:
  case PPC::BI__builtin_get_tfiar:
  case PPC::BI__builtin_set_texasr:
  case PPC::BI__builtin_set_texasru:
  case PPC::BI__builtin_set_tfhar:
  case PPC::BI__builtin_set_tfiar:
  case PPC::BI__builtin_ttest:
    return SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
  // __builtin_(un)pack_longdouble are available only if long double uses IBM
  // extended double representation.
  case PPC::BI__builtin_unpack_longdouble:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1))
      return true;
    LLVM_FALLTHROUGH;
  case PPC::BI__builtin_pack_longdouble:
    if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
      return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
             << "ibmlongdouble";
    return false;
  case PPC::BI__builtin_altivec_dst:
  case PPC::BI__builtin_altivec_dstt:
  case PPC::BI__builtin_altivec_dstst:
  case PPC::BI__builtin_altivec_dststt:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  case PPC::BI__builtin_vsx_xxpermdi:
  case PPC::BI__builtin_vsx_xxsldwi:
    return SemaBuiltinVSX(TheCall);
  case PPC::BI__builtin_divwe:
  case PPC::BI__builtin_divweu:
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
    return SemaFeatureCheck(*this, TheCall, "extdiv",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_bpermd:
    return SemaFeatureCheck(*this, TheCall, "bpermd",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_unpack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_pack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
    return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions",
                            diag::err_ppc_builtin_only_on_arch, "10");
  case PPC::BI__builtin_altivec_vgnb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
  case PPC::BI__builtin_altivec_vec_replace_elt:
  case PPC::BI__builtin_altivec_vec_replace_unaligned: {
    // The byte-index range depends on the element width: 32-bit elements
    // allow offsets 0-12, 64-bit elements 0-8.
    QualType VecTy = TheCall->getArg(0)->getType();
    QualType EltTy = TheCall->getArg(1)->getType();
    unsigned Width = Context.getIntWidth(EltTy);
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
           !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
  }
  case PPC::BI__builtin_vsx_xxeval:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
  case PPC::BI__builtin_altivec_vsldbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_altivec_vsrdbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_vsx_xxpermx:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
  case PPC::BI__builtin_ppc_tw:
  case PPC::BI__builtin_ppc_tdw:
    return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_cmprb:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
  // be a constant that represents a contiguous bit field.
  case PPC::BI__builtin_ppc_rlwnm:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_rlwimi:
  case PPC::BI__builtin_ppc_rldimi:
    return SemaBuiltinConstantArg(TheCall, 2, Result) ||
           SemaValueIsRunOfOnes(TheCall, 3);
  case PPC::BI__builtin_ppc_extract_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_insert_exp:
    return SemaFeatureCheck(*this, TheCall, "power9-vector",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_addex: {
    if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                         diag::err_ppc_builtin_only_on_arch, "9") ||
        SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
      return true;
    // Output warning for reserved values 1 to 3.
    // Safe to dereference here: the range check above guarantees the
    // argument is an integer constant expression.
    int ArgValue =
        TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
    if (ArgValue != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
          << ArgValue;
    return false;
  }
  case PPC::BI__builtin_ppc_mtfsb0:
  case PPC::BI__builtin_ppc_mtfsb1:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_ppc_mtfsf:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
  case PPC::BI__builtin_ppc_mtfsfi:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case PPC::BI__builtin_ppc_alignx:
    return SemaBuiltinConstantArgPower2(TheCall, 0);
  case PPC::BI__builtin_ppc_rdlam:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_icbt:
  case PPC::BI__builtin_ppc_sthcx:
  case PPC::BI__builtin_ppc_stbcx:
  case PPC::BI__builtin_ppc_lharx:
  case PPC::BI__builtin_ppc_lbarx:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8");
  case PPC::BI__builtin_vsx_ldrmb:
  case PPC::BI__builtin_vsx_strmb:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case PPC::BI__builtin_altivec_vcntmbb:
  case PPC::BI__builtin_altivec_vcntmbh:
  case PPC::BI__builtin_altivec_vcntmbw:
  case PPC::BI__builtin_altivec_vcntmbd:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_darn_32:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_vsx_xxgenpcvbm:
  case PPC::BI__builtin_vsx_xxgenpcvhm:
  case PPC::BI__builtin_vsx_xxgenpcvwm:
  case PPC::BI__builtin_vsx_xxgenpcvdm:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
  case PPC::BI__builtin_ppc_compare_exp_uo:
  case PPC::BI__builtin_ppc_compare_exp_lt:
  case PPC::BI__builtin_ppc_compare_exp_gt:
  case PPC::BI__builtin_ppc_compare_exp_eq:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx);
  case PPC::BI__builtin_ppc_test_data_class: {
    // Check if the first argument of the __builtin_ppc_test_data_class call is
    // valid. The argument must be either a 'float' or a 'double'.
    QualType ArgType = TheCall->getArg(0)->getType();
    if (ArgType != QualType(Context.FloatTy) &&
        ArgType != QualType(Context.DoubleTy))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_ppc_invalid_test_data_class_type);
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
  }
  case PPC::BI__builtin_ppc_maxfe:
  case PPC::BI__builtin_ppc_minfe:
  case PPC::BI__builtin_ppc_maxfl:
  case PPC::BI__builtin_ppc_minfl:
  case PPC::BI__builtin_ppc_maxfs:
  case PPC::BI__builtin_ppc_minfs: {
    // maxfe/minfe operate on 128-bit long double, which AIX does not support.
    if (Context.getTargetInfo().getTriple().isOSAIX() &&
        (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
         BuiltinID == PPC::BI__builtin_ppc_minfe))
      return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
             << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
             << false << Context.getTargetInfo().getTriple().str();
    // Argument type should be exact.
    QualType ArgType = QualType(Context.LongDoubleTy);
    if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
        BuiltinID == PPC::BI__builtin_ppc_minfl)
      ArgType = QualType(Context.DoubleTy);
    else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
             BuiltinID == PPC::BI__builtin_ppc_minfs)
      ArgType = QualType(Context.FloatTy);
    for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
      if (TheCall->getArg(I)->getType() != ArgType)
        return Diag(TheCall->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
    return false;
  }
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
    return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions",
                            diag::err_ppc_builtin_only_on_arch, "7");
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  // NOTE(review): every case above returns directly and the default returns
  // false, so this is only reachable if a future case breaks out of the
  // switch with i/l/u set (the pattern used by the other targets).
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
4245 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) { 4246 if (Type->isPointerType() || Type->isArrayType()) 4247 return false; 4248 4249 QualType CoreType = Type.getCanonicalType().getUnqualifiedType(); 4250 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty 4251 if (false 4252 #include "clang/Basic/PPCTypes.def" 4253 ) { 4254 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); 4255 return true; 4256 } 4257 return false; 4258 } 4259 4260 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 4261 CallExpr *TheCall) { 4262 // position of memory order and scope arguments in the builtin 4263 unsigned OrderIndex, ScopeIndex; 4264 switch (BuiltinID) { 4265 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 4266 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 4267 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 4268 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 4269 OrderIndex = 2; 4270 ScopeIndex = 3; 4271 break; 4272 case AMDGPU::BI__builtin_amdgcn_fence: 4273 OrderIndex = 0; 4274 ScopeIndex = 1; 4275 break; 4276 default: 4277 return false; 4278 } 4279 4280 ExprResult Arg = TheCall->getArg(OrderIndex); 4281 auto ArgExpr = Arg.get(); 4282 Expr::EvalResult ArgResult; 4283 4284 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 4285 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 4286 << ArgExpr->getType(); 4287 auto Ord = ArgResult.Val.getInt().getZExtValue(); 4288 4289 // Check validity of memory ordering as per C11 / C++11's memody model. 4290 // Only fence needs check. Atomic dec/inc allow all memory orders. 
4291 if (!llvm::isValidAtomicOrderingCABI(Ord)) 4292 return Diag(ArgExpr->getBeginLoc(), 4293 diag::warn_atomic_op_has_invalid_memory_order) 4294 << ArgExpr->getSourceRange(); 4295 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 4296 case llvm::AtomicOrderingCABI::relaxed: 4297 case llvm::AtomicOrderingCABI::consume: 4298 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 4299 return Diag(ArgExpr->getBeginLoc(), 4300 diag::warn_atomic_op_has_invalid_memory_order) 4301 << ArgExpr->getSourceRange(); 4302 break; 4303 case llvm::AtomicOrderingCABI::acquire: 4304 case llvm::AtomicOrderingCABI::release: 4305 case llvm::AtomicOrderingCABI::acq_rel: 4306 case llvm::AtomicOrderingCABI::seq_cst: 4307 break; 4308 } 4309 4310 Arg = TheCall->getArg(ScopeIndex); 4311 ArgExpr = Arg.get(); 4312 Expr::EvalResult ArgResult1; 4313 // Check that sync scope is a constant literal 4314 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 4315 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 4316 << ArgExpr->getType(); 4317 4318 return false; 4319 } 4320 4321 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 4322 llvm::APSInt Result; 4323 4324 // We can't check the value of a dependent argument. 4325 Expr *Arg = TheCall->getArg(ArgNum); 4326 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4327 return false; 4328 4329 // Check constant-ness first. 4330 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4331 return true; 4332 4333 int64_t Val = Result.getSExtValue(); 4334 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 4335 return false; 4336 4337 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 4338 << Arg->getSourceRange(); 4339 } 4340 4341 static bool isRISCV32Builtin(unsigned BuiltinID) { 4342 // These builtins only work on riscv32 targets. 
4343 switch (BuiltinID) { 4344 case RISCV::BI__builtin_riscv_zip_32: 4345 case RISCV::BI__builtin_riscv_unzip_32: 4346 case RISCV::BI__builtin_riscv_aes32dsi_32: 4347 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4348 case RISCV::BI__builtin_riscv_aes32esi_32: 4349 case RISCV::BI__builtin_riscv_aes32esmi_32: 4350 case RISCV::BI__builtin_riscv_sha512sig0h_32: 4351 case RISCV::BI__builtin_riscv_sha512sig0l_32: 4352 case RISCV::BI__builtin_riscv_sha512sig1h_32: 4353 case RISCV::BI__builtin_riscv_sha512sig1l_32: 4354 case RISCV::BI__builtin_riscv_sha512sum0r_32: 4355 case RISCV::BI__builtin_riscv_sha512sum1r_32: 4356 return true; 4357 } 4358 4359 return false; 4360 } 4361 4362 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 4363 unsigned BuiltinID, 4364 CallExpr *TheCall) { 4365 // CodeGenFunction can also detect this, but this gives a better error 4366 // message. 4367 bool FeatureMissing = false; 4368 SmallVector<StringRef> ReqFeatures; 4369 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 4370 Features.split(ReqFeatures, ','); 4371 4372 // Check for 32-bit only builtins on a 64-bit target. 4373 const llvm::Triple &TT = TI.getTriple(); 4374 if (TT.getArch() != llvm::Triple::riscv32 && isRISCV32Builtin(BuiltinID)) 4375 return Diag(TheCall->getCallee()->getBeginLoc(), 4376 diag::err_32_bit_builtin_64_bit_tgt); 4377 4378 // Check if each required feature is included 4379 for (StringRef F : ReqFeatures) { 4380 SmallVector<StringRef> ReqOpFeatures; 4381 F.split(ReqOpFeatures, '|'); 4382 bool HasFeature = false; 4383 for (StringRef OF : ReqOpFeatures) { 4384 if (TI.hasFeature(OF)) { 4385 HasFeature = true; 4386 continue; 4387 } 4388 } 4389 4390 if (!HasFeature) { 4391 std::string FeatureStrs; 4392 for (StringRef OF : ReqOpFeatures) { 4393 // If the feature is 64bit, alter the string so it will print better in 4394 // the diagnostic. 
4395 if (OF == "64bit") 4396 OF = "RV64"; 4397 4398 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 4399 OF.consume_front("experimental-"); 4400 std::string FeatureStr = OF.str(); 4401 FeatureStr[0] = std::toupper(FeatureStr[0]); 4402 // Combine strings. 4403 FeatureStrs += FeatureStrs == "" ? "" : ", "; 4404 FeatureStrs += "'"; 4405 FeatureStrs += FeatureStr; 4406 FeatureStrs += "'"; 4407 } 4408 // Error message 4409 FeatureMissing = true; 4410 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4411 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4412 } 4413 } 4414 4415 if (FeatureMissing) 4416 return true; 4417 4418 switch (BuiltinID) { 4419 case RISCVVector::BI__builtin_rvv_vsetvli: 4420 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4421 CheckRISCVLMUL(TheCall, 2); 4422 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4423 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4424 CheckRISCVLMUL(TheCall, 1); 4425 case RISCVVector::BI__builtin_rvv_vget_v: { 4426 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4427 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4428 TheCall->getType().getCanonicalType().getTypePtr())); 4429 ASTContext::BuiltinVectorTypeInfo VecInfo = 4430 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4431 TheCall->getArg(0)->getType().getCanonicalType().getTypePtr())); 4432 unsigned MaxIndex = 4433 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) / 4434 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors); 4435 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4436 } 4437 case RISCVVector::BI__builtin_rvv_vset_v: { 4438 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4439 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4440 TheCall->getType().getCanonicalType().getTypePtr())); 4441 ASTContext::BuiltinVectorTypeInfo VecInfo = 4442 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4443 TheCall->getArg(2)->getType().getCanonicalType().getTypePtr())); 4444 
unsigned MaxIndex = 4445 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) / 4446 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors); 4447 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4448 } 4449 // Check if byteselect is in [0, 3] 4450 case RISCV::BI__builtin_riscv_aes32dsi_32: 4451 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4452 case RISCV::BI__builtin_riscv_aes32esi_32: 4453 case RISCV::BI__builtin_riscv_aes32esmi_32: 4454 case RISCV::BI__builtin_riscv_sm4ks: 4455 case RISCV::BI__builtin_riscv_sm4ed: 4456 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4457 // Check if rnum is in [0, 10] 4458 case RISCV::BI__builtin_riscv_aes64ks1i_64: 4459 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10); 4460 } 4461 4462 return false; 4463 } 4464 4465 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 4466 CallExpr *TheCall) { 4467 if (BuiltinID == SystemZ::BI__builtin_tabort) { 4468 Expr *Arg = TheCall->getArg(0); 4469 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 4470 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 4471 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 4472 << Arg->getSourceRange(); 4473 } 4474 4475 // For intrinsics which take an immediate value as part of the instruction, 4476 // range check them here. 
4477 unsigned i = 0, l = 0, u = 0; 4478 switch (BuiltinID) { 4479 default: return false; 4480 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 4481 case SystemZ::BI__builtin_s390_verimb: 4482 case SystemZ::BI__builtin_s390_verimh: 4483 case SystemZ::BI__builtin_s390_verimf: 4484 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 4485 case SystemZ::BI__builtin_s390_vfaeb: 4486 case SystemZ::BI__builtin_s390_vfaeh: 4487 case SystemZ::BI__builtin_s390_vfaef: 4488 case SystemZ::BI__builtin_s390_vfaebs: 4489 case SystemZ::BI__builtin_s390_vfaehs: 4490 case SystemZ::BI__builtin_s390_vfaefs: 4491 case SystemZ::BI__builtin_s390_vfaezb: 4492 case SystemZ::BI__builtin_s390_vfaezh: 4493 case SystemZ::BI__builtin_s390_vfaezf: 4494 case SystemZ::BI__builtin_s390_vfaezbs: 4495 case SystemZ::BI__builtin_s390_vfaezhs: 4496 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 4497 case SystemZ::BI__builtin_s390_vfisb: 4498 case SystemZ::BI__builtin_s390_vfidb: 4499 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 4500 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4501 case SystemZ::BI__builtin_s390_vftcisb: 4502 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 4503 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 4504 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 4505 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 4506 case SystemZ::BI__builtin_s390_vstrcb: 4507 case SystemZ::BI__builtin_s390_vstrch: 4508 case SystemZ::BI__builtin_s390_vstrcf: 4509 case SystemZ::BI__builtin_s390_vstrczb: 4510 case SystemZ::BI__builtin_s390_vstrczh: 4511 case SystemZ::BI__builtin_s390_vstrczf: 4512 case SystemZ::BI__builtin_s390_vstrcbs: 4513 case SystemZ::BI__builtin_s390_vstrchs: 4514 case SystemZ::BI__builtin_s390_vstrcfs: 4515 case SystemZ::BI__builtin_s390_vstrczbs: 4516 case SystemZ::BI__builtin_s390_vstrczhs: 4517 case SystemZ::BI__builtin_s390_vstrczfs: 
i = 3; l = 0; u = 15; break; 4518 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 4519 case SystemZ::BI__builtin_s390_vfminsb: 4520 case SystemZ::BI__builtin_s390_vfmaxsb: 4521 case SystemZ::BI__builtin_s390_vfmindb: 4522 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 4523 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 4524 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 4525 case SystemZ::BI__builtin_s390_vclfnhs: 4526 case SystemZ::BI__builtin_s390_vclfnls: 4527 case SystemZ::BI__builtin_s390_vcfn: 4528 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 4529 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 4530 } 4531 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4532 } 4533 4534 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 4535 /// This checks that the target supports __builtin_cpu_supports and 4536 /// that the string argument is constant and valid. 4537 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 4538 CallExpr *TheCall) { 4539 Expr *Arg = TheCall->getArg(0); 4540 4541 // Check if the argument is a string literal. 4542 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4543 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4544 << Arg->getSourceRange(); 4545 4546 // Check the contents of the string. 4547 StringRef Feature = 4548 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4549 if (!TI.validateCpuSupports(Feature)) 4550 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 4551 << Arg->getSourceRange(); 4552 return false; 4553 } 4554 4555 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 4556 /// This checks that the target supports __builtin_cpu_is and 4557 /// that the string argument is constant and valid. 
4558 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4559 Expr *Arg = TheCall->getArg(0); 4560 4561 // Check if the argument is a string literal. 4562 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4563 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4564 << Arg->getSourceRange(); 4565 4566 // Check the contents of the string. 4567 StringRef Feature = 4568 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4569 if (!TI.validateCpuIs(Feature)) 4570 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4571 << Arg->getSourceRange(); 4572 return false; 4573 } 4574 4575 // Check if the rounding mode is legal. 4576 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4577 // Indicates if this instruction has rounding control or just SAE. 4578 bool HasRC = false; 4579 4580 unsigned ArgNum = 0; 4581 switch (BuiltinID) { 4582 default: 4583 return false; 4584 case X86::BI__builtin_ia32_vcvttsd2si32: 4585 case X86::BI__builtin_ia32_vcvttsd2si64: 4586 case X86::BI__builtin_ia32_vcvttsd2usi32: 4587 case X86::BI__builtin_ia32_vcvttsd2usi64: 4588 case X86::BI__builtin_ia32_vcvttss2si32: 4589 case X86::BI__builtin_ia32_vcvttss2si64: 4590 case X86::BI__builtin_ia32_vcvttss2usi32: 4591 case X86::BI__builtin_ia32_vcvttss2usi64: 4592 case X86::BI__builtin_ia32_vcvttsh2si32: 4593 case X86::BI__builtin_ia32_vcvttsh2si64: 4594 case X86::BI__builtin_ia32_vcvttsh2usi32: 4595 case X86::BI__builtin_ia32_vcvttsh2usi64: 4596 ArgNum = 1; 4597 break; 4598 case X86::BI__builtin_ia32_maxpd512: 4599 case X86::BI__builtin_ia32_maxps512: 4600 case X86::BI__builtin_ia32_minpd512: 4601 case X86::BI__builtin_ia32_minps512: 4602 case X86::BI__builtin_ia32_maxph512: 4603 case X86::BI__builtin_ia32_minph512: 4604 ArgNum = 2; 4605 break; 4606 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4607 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4608 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4609 
case X86::BI__builtin_ia32_cvttpd2dq512_mask: 4610 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 4611 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 4612 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 4613 case X86::BI__builtin_ia32_cvttps2dq512_mask: 4614 case X86::BI__builtin_ia32_cvttps2qq512_mask: 4615 case X86::BI__builtin_ia32_cvttps2udq512_mask: 4616 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 4617 case X86::BI__builtin_ia32_vcvttph2w512_mask: 4618 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 4619 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 4620 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 4621 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 4622 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 4623 case X86::BI__builtin_ia32_exp2pd_mask: 4624 case X86::BI__builtin_ia32_exp2ps_mask: 4625 case X86::BI__builtin_ia32_getexppd512_mask: 4626 case X86::BI__builtin_ia32_getexpps512_mask: 4627 case X86::BI__builtin_ia32_getexpph512_mask: 4628 case X86::BI__builtin_ia32_rcp28pd_mask: 4629 case X86::BI__builtin_ia32_rcp28ps_mask: 4630 case X86::BI__builtin_ia32_rsqrt28pd_mask: 4631 case X86::BI__builtin_ia32_rsqrt28ps_mask: 4632 case X86::BI__builtin_ia32_vcomisd: 4633 case X86::BI__builtin_ia32_vcomiss: 4634 case X86::BI__builtin_ia32_vcomish: 4635 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 4636 ArgNum = 3; 4637 break; 4638 case X86::BI__builtin_ia32_cmppd512_mask: 4639 case X86::BI__builtin_ia32_cmpps512_mask: 4640 case X86::BI__builtin_ia32_cmpsd_mask: 4641 case X86::BI__builtin_ia32_cmpss_mask: 4642 case X86::BI__builtin_ia32_cmpsh_mask: 4643 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 4644 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 4645 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 4646 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4647 case X86::BI__builtin_ia32_getexpss128_round_mask: 4648 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4649 case X86::BI__builtin_ia32_getmantpd512_mask: 4650 case 
X86::BI__builtin_ia32_getmantps512_mask: 4651 case X86::BI__builtin_ia32_getmantph512_mask: 4652 case X86::BI__builtin_ia32_maxsd_round_mask: 4653 case X86::BI__builtin_ia32_maxss_round_mask: 4654 case X86::BI__builtin_ia32_maxsh_round_mask: 4655 case X86::BI__builtin_ia32_minsd_round_mask: 4656 case X86::BI__builtin_ia32_minss_round_mask: 4657 case X86::BI__builtin_ia32_minsh_round_mask: 4658 case X86::BI__builtin_ia32_rcp28sd_round_mask: 4659 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4660 case X86::BI__builtin_ia32_reducepd512_mask: 4661 case X86::BI__builtin_ia32_reduceps512_mask: 4662 case X86::BI__builtin_ia32_reduceph512_mask: 4663 case X86::BI__builtin_ia32_rndscalepd_mask: 4664 case X86::BI__builtin_ia32_rndscaleps_mask: 4665 case X86::BI__builtin_ia32_rndscaleph_mask: 4666 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4667 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4668 ArgNum = 4; 4669 break; 4670 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4671 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4672 case X86::BI__builtin_ia32_fixupimmps512_mask: 4673 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4674 case X86::BI__builtin_ia32_fixupimmsd_mask: 4675 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4676 case X86::BI__builtin_ia32_fixupimmss_mask: 4677 case X86::BI__builtin_ia32_fixupimmss_maskz: 4678 case X86::BI__builtin_ia32_getmantsd_round_mask: 4679 case X86::BI__builtin_ia32_getmantss_round_mask: 4680 case X86::BI__builtin_ia32_getmantsh_round_mask: 4681 case X86::BI__builtin_ia32_rangepd512_mask: 4682 case X86::BI__builtin_ia32_rangeps512_mask: 4683 case X86::BI__builtin_ia32_rangesd128_round_mask: 4684 case X86::BI__builtin_ia32_rangess128_round_mask: 4685 case X86::BI__builtin_ia32_reducesd_mask: 4686 case X86::BI__builtin_ia32_reducess_mask: 4687 case X86::BI__builtin_ia32_reducesh_mask: 4688 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4689 case X86::BI__builtin_ia32_rndscaless_round_mask: 4690 case 
X86::BI__builtin_ia32_rndscalesh_round_mask: 4691 ArgNum = 5; 4692 break; 4693 case X86::BI__builtin_ia32_vcvtsd2si64: 4694 case X86::BI__builtin_ia32_vcvtsd2si32: 4695 case X86::BI__builtin_ia32_vcvtsd2usi32: 4696 case X86::BI__builtin_ia32_vcvtsd2usi64: 4697 case X86::BI__builtin_ia32_vcvtss2si32: 4698 case X86::BI__builtin_ia32_vcvtss2si64: 4699 case X86::BI__builtin_ia32_vcvtss2usi32: 4700 case X86::BI__builtin_ia32_vcvtss2usi64: 4701 case X86::BI__builtin_ia32_vcvtsh2si32: 4702 case X86::BI__builtin_ia32_vcvtsh2si64: 4703 case X86::BI__builtin_ia32_vcvtsh2usi32: 4704 case X86::BI__builtin_ia32_vcvtsh2usi64: 4705 case X86::BI__builtin_ia32_sqrtpd512: 4706 case X86::BI__builtin_ia32_sqrtps512: 4707 case X86::BI__builtin_ia32_sqrtph512: 4708 ArgNum = 1; 4709 HasRC = true; 4710 break; 4711 case X86::BI__builtin_ia32_addph512: 4712 case X86::BI__builtin_ia32_divph512: 4713 case X86::BI__builtin_ia32_mulph512: 4714 case X86::BI__builtin_ia32_subph512: 4715 case X86::BI__builtin_ia32_addpd512: 4716 case X86::BI__builtin_ia32_addps512: 4717 case X86::BI__builtin_ia32_divpd512: 4718 case X86::BI__builtin_ia32_divps512: 4719 case X86::BI__builtin_ia32_mulpd512: 4720 case X86::BI__builtin_ia32_mulps512: 4721 case X86::BI__builtin_ia32_subpd512: 4722 case X86::BI__builtin_ia32_subps512: 4723 case X86::BI__builtin_ia32_cvtsi2sd64: 4724 case X86::BI__builtin_ia32_cvtsi2ss32: 4725 case X86::BI__builtin_ia32_cvtsi2ss64: 4726 case X86::BI__builtin_ia32_cvtusi2sd64: 4727 case X86::BI__builtin_ia32_cvtusi2ss32: 4728 case X86::BI__builtin_ia32_cvtusi2ss64: 4729 case X86::BI__builtin_ia32_vcvtusi2sh: 4730 case X86::BI__builtin_ia32_vcvtusi642sh: 4731 case X86::BI__builtin_ia32_vcvtsi2sh: 4732 case X86::BI__builtin_ia32_vcvtsi642sh: 4733 ArgNum = 2; 4734 HasRC = true; 4735 break; 4736 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4737 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4738 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4739 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 
4740 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 4741 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4742 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4743 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4744 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4745 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4746 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4747 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4748 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4749 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4750 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4751 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4752 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4753 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4754 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4755 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4756 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4757 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4758 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4759 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4760 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4761 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4762 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4763 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4764 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4765 ArgNum = 3; 4766 HasRC = true; 4767 break; 4768 case X86::BI__builtin_ia32_addsh_round_mask: 4769 case X86::BI__builtin_ia32_addss_round_mask: 4770 case X86::BI__builtin_ia32_addsd_round_mask: 4771 case X86::BI__builtin_ia32_divsh_round_mask: 4772 case X86::BI__builtin_ia32_divss_round_mask: 4773 case X86::BI__builtin_ia32_divsd_round_mask: 4774 case X86::BI__builtin_ia32_mulsh_round_mask: 4775 case X86::BI__builtin_ia32_mulss_round_mask: 4776 case X86::BI__builtin_ia32_mulsd_round_mask: 4777 case X86::BI__builtin_ia32_subsh_round_mask: 4778 case X86::BI__builtin_ia32_subss_round_mask: 4779 case X86::BI__builtin_ia32_subsd_round_mask: 4780 case X86::BI__builtin_ia32_scalefph512_mask: 4781 case 
X86::BI__builtin_ia32_scalefpd512_mask: 4782 case X86::BI__builtin_ia32_scalefps512_mask: 4783 case X86::BI__builtin_ia32_scalefsd_round_mask: 4784 case X86::BI__builtin_ia32_scalefss_round_mask: 4785 case X86::BI__builtin_ia32_scalefsh_round_mask: 4786 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4787 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4788 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4789 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4790 case X86::BI__builtin_ia32_sqrtss_round_mask: 4791 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4792 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4793 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4794 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4795 case X86::BI__builtin_ia32_vfmaddss3_mask: 4796 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4797 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4798 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4799 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4800 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4801 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4802 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4803 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4804 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4805 case X86::BI__builtin_ia32_vfmaddps512_mask: 4806 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4807 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4808 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4809 case X86::BI__builtin_ia32_vfmaddph512_mask: 4810 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4811 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4812 case X86::BI__builtin_ia32_vfmsubph512_mask3: 4813 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 4814 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 4815 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 4816 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 4817 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 4818 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 4819 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 4820 case 
X86::BI__builtin_ia32_vfmsubaddps512_mask3: 4821 case X86::BI__builtin_ia32_vfmaddsubph512_mask: 4822 case X86::BI__builtin_ia32_vfmaddsubph512_maskz: 4823 case X86::BI__builtin_ia32_vfmaddsubph512_mask3: 4824 case X86::BI__builtin_ia32_vfmsubaddph512_mask3: 4825 case X86::BI__builtin_ia32_vfmaddcsh_mask: 4826 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: 4827 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: 4828 case X86::BI__builtin_ia32_vfmaddcph512_mask: 4829 case X86::BI__builtin_ia32_vfmaddcph512_maskz: 4830 case X86::BI__builtin_ia32_vfmaddcph512_mask3: 4831 case X86::BI__builtin_ia32_vfcmaddcsh_mask: 4832 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: 4833 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: 4834 case X86::BI__builtin_ia32_vfcmaddcph512_mask: 4835 case X86::BI__builtin_ia32_vfcmaddcph512_maskz: 4836 case X86::BI__builtin_ia32_vfcmaddcph512_mask3: 4837 case X86::BI__builtin_ia32_vfmulcsh_mask: 4838 case X86::BI__builtin_ia32_vfmulcph512_mask: 4839 case X86::BI__builtin_ia32_vfcmulcsh_mask: 4840 case X86::BI__builtin_ia32_vfcmulcph512_mask: 4841 ArgNum = 4; 4842 HasRC = true; 4843 break; 4844 } 4845 4846 llvm::APSInt Result; 4847 4848 // We can't check the value of a dependent argument. 4849 Expr *Arg = TheCall->getArg(ArgNum); 4850 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4851 return false; 4852 4853 // Check constant-ness first. 4854 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4855 return true; 4856 4857 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 4858 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 4859 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 4860 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  // Index of the scale immediate within the builtin's argument list. Builtins
  // not listed below have no scale operand and need no checking.
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Hardware gather/scatter only supports scale factors of 1, 2, 4 or 8.
  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

// Valid AMX tile register numbers: tmm0 through tmm7.
enum { TileRegLow = 0, TileRegHigh = 7 };

// Check that each of the given arguments is a constant in the valid tile
// register range [TileRegLow, TileRegHigh].
bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

// Check that no tile register is named more than once in the given arguments.
bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // There are TileRegHigh + 1 tile registers, so one bit per register in a
  // bitset is enough to track which ones have already been used.
4989 std::bitset<TileRegHigh + 1> ArgValues; 4990 for (int ArgNum : ArgNums) { 4991 Expr *Arg = TheCall->getArg(ArgNum); 4992 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4993 continue; 4994 4995 llvm::APSInt Result; 4996 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4997 return true; 4998 int ArgExtValue = Result.getExtValue(); 4999 assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) && 5000 "Incorrect tile register num."); 5001 if (ArgValues.test(ArgExtValue)) 5002 return Diag(TheCall->getBeginLoc(), 5003 diag::err_x86_builtin_tile_arg_duplicate) 5004 << TheCall->getArg(ArgNum)->getSourceRange(); 5005 ArgValues.set(ArgExtValue); 5006 } 5007 return false; 5008 } 5009 5010 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, 5011 ArrayRef<int> ArgNums) { 5012 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) || 5013 CheckX86BuiltinTileDuplicate(TheCall, ArgNums); 5014 } 5015 5016 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) { 5017 switch (BuiltinID) { 5018 default: 5019 return false; 5020 case X86::BI__builtin_ia32_tileloadd64: 5021 case X86::BI__builtin_ia32_tileloaddt164: 5022 case X86::BI__builtin_ia32_tilestored64: 5023 case X86::BI__builtin_ia32_tilezero: 5024 return CheckX86BuiltinTileArgumentsRange(TheCall, 0); 5025 case X86::BI__builtin_ia32_tdpbssd: 5026 case X86::BI__builtin_ia32_tdpbsud: 5027 case X86::BI__builtin_ia32_tdpbusd: 5028 case X86::BI__builtin_ia32_tdpbuud: 5029 case X86::BI__builtin_ia32_tdpbf16ps: 5030 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2}); 5031 } 5032 } 5033 static bool isX86_32Builtin(unsigned BuiltinID) { 5034 // These builtins only work on x86-32 targets. 
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure it's valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure it's
  // valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i is the index of the immediate operand, [l, u] its inclusive valid range.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
    i = 2; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_vpermil2pd:
  case X86::BI__builtin_ia32_vpermil2pd256:
  case X86::BI__builtin_ia32_vpermil2ps:
  case X86::BI__builtin_ia32_vpermil2ps256:
    i = 3; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_cmpb128_mask:
  case X86::BI__builtin_ia32_cmpw128_mask:
  case X86::BI__builtin_ia32_cmpd128_mask:
  case X86::BI__builtin_ia32_cmpq128_mask:
  case X86::BI__builtin_ia32_cmpb256_mask:
  case X86::BI__builtin_ia32_cmpw256_mask:
  case X86::BI__builtin_ia32_cmpd256_mask:
  case X86::BI__builtin_ia32_cmpq256_mask:
  case X86::BI__builtin_ia32_cmpb512_mask:
  case X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask:
  case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask:
  case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask:
  case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask:
  case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask:
  case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask:
  case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask:
  case X86::BI__builtin_ia32_ucmpq512_mask:
  case X86::BI__builtin_ia32_vpcomub:
  case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud:
  case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb:
  case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd:
  case X86::BI__builtin_ia32_vpcomq:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
    i = 2; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_roundps:
  case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256:
  case X86::BI__builtin_ia32_roundpd256:
  case X86::BI__builtin_ia32_getmantpd128_mask:
  case X86::BI__builtin_ia32_getmantpd256_mask:
  case X86::BI__builtin_ia32_getmantps128_mask:
  case X86::BI__builtin_ia32_getmantps256_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph128_mask:
  case X86::BI__builtin_ia32_getmantph256_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
    i = 1; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_roundss:
  case X86::BI__builtin_ia32_roundsd:
  case X86::BI__builtin_ia32_rangepd128_mask:
  case X86::BI__builtin_ia32_rangepd256_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps128_mask:
  case X86::BI__builtin_ia32_rangeps256_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
    i = 2; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_vec_ext_v32qi:
    i = 1; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_vec_set_v32qi:
    i = 2; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf512:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512:
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512:
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512:
  case X86::BI__builtin_ia32_vcvtps2ph:
  case X86::BI__builtin_ia32_vcvtps2ph_mask:
  case X86::BI__builtin_ia32_vcvtps2ph256:
  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
  case X86::BI__builtin_ia32_rndscaleps_128_mask:
  case X86::BI__builtin_ia32_rndscalepd_128_mask:
  case X86::BI__builtin_ia32_rndscaleps_256_mask:
  case X86::BI__builtin_ia32_rndscalepd_256_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_reducepd128_mask:
  case X86::BI__builtin_ia32_reducepd256_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps128_mask:
  case X86::BI__builtin_ia32_reduceps256_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph128_mask:
  case X86::BI__builtin_ia32_reduceph256_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask:
  case X86::BI__builtin_ia32_fpclassph128_mask:
  case X86::BI__builtin_ia32_fpclassph256_mask:
  case X86::BI__builtin_ia32_fpclassph512_mask:
  case X86::BI__builtin_ia32_fpclasssd_mask:
  case X86::BI__builtin_ia32_fpclassss_mask:
  case X86::BI__builtin_ia32_fpclasssh_mask:
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift:
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift:
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi:
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi:
    i = 1; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256:
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendd256:
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512:
  case X86::BI__builtin_ia32_alignq512:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512:
  case X86::BI__builtin_ia32_dbpsadbw128:
  case X86::BI__builtin_ia32_dbpsadbw256:
  case X86::BI__builtin_ia32_dbpsadbw512:
  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    i = 2; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't need to necessarily
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
5427 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, 5428 FormatStringInfo *FSI) { 5429 FSI->HasVAListArg = Format->getFirstArg() == 0; 5430 FSI->FormatIdx = Format->getFormatIdx() - 1; 5431 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; 5432 5433 // The way the format attribute works in GCC, the implicit this argument 5434 // of member functions is counted. However, it doesn't appear in our own 5435 // lists, so decrement format_idx in that case. 5436 if (IsCXXMember) { 5437 if(FSI->FormatIdx == 0) 5438 return false; 5439 --FSI->FormatIdx; 5440 if (FSI->FirstDataArg != 0) 5441 --FSI->FirstDataArg; 5442 } 5443 return true; 5444 } 5445 5446 /// Checks if a the given expression evaluates to null. 5447 /// 5448 /// Returns true if the value evaluates to null. 5449 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { 5450 // If the expression has non-null type, it doesn't evaluate to null. 5451 if (auto nullability 5452 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { 5453 if (*nullability == NullabilityKind::NonNull) 5454 return false; 5455 } 5456 5457 // As a special case, transparent unions initialized with zero are 5458 // considered null for the purposes of the nonnull attribute. 
5459 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 5460 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 5461 if (const CompoundLiteralExpr *CLE = 5462 dyn_cast<CompoundLiteralExpr>(Expr)) 5463 if (const InitListExpr *ILE = 5464 dyn_cast<InitListExpr>(CLE->getInitializer())) 5465 Expr = ILE->getInit(0); 5466 } 5467 5468 bool Result; 5469 return (!Expr->isValueDependent() && 5470 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 5471 !Result); 5472 } 5473 5474 static void CheckNonNullArgument(Sema &S, 5475 const Expr *ArgExpr, 5476 SourceLocation CallSiteLoc) { 5477 if (CheckNonNullExpr(S, ArgExpr)) 5478 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 5479 S.PDiag(diag::warn_null_arg) 5480 << ArgExpr->getSourceRange()); 5481 } 5482 5483 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 5484 FormatStringInfo FSI; 5485 if ((GetFormatStringType(Format) == FST_NSString) && 5486 getFormatStringInfo(Format, false, &FSI)) { 5487 Idx = FSI.FormatIdx; 5488 return true; 5489 } 5490 return false; 5491 } 5492 5493 /// Diagnose use of %s directive in an NSString which is being passed 5494 /// as formatting string to formatting method. 
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  // CFString-formatting functions take the format string at a fixed argument
  // index; otherwise consult the declaration's format attributes.
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  }
  else
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
          dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
      << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
      << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(ASTContext &ctx, QualType type) {
  if (auto nullability = type->getNullability(ctx))
    return *nullability == NullabilityKind::NonNull;

  return false;
}

static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        // Lazily size the bit vector only when some argument is nonnull.
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() ||
          isNonNullType(S.Context, PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(S.Context, paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
  }
}

/// Warn if a pointer or reference argument passed to a function points to an
/// object that is less aligned than the parameter. This can happen when
/// creating a typedef with a lower alignment than the original type and then
/// calling functions defined in terms of the original type.
void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
                             StringRef ParamName, QualType ArgTy,
                             QualType ParamTy) {

  // If a function accepts a pointer or reference type
  if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
    return;

  // If the parameter is a pointer type, get the pointee type for the
  // argument too. If the parameter is a reference type, don't try to get
  // the pointee type for the argument.
  if (ParamTy->isPointerType())
    ArgTy = ArgTy->getPointeeType();

  // Remove reference or pointer
  ParamTy = ParamTy->getPointeeType();

  // Find expected alignment, and the actual alignment of the passed object.
  // getTypeAlignInChars requires complete types
  if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
      ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
      ArgTy->isUndeducedType())
    return;

  CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
  CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);

  // If the argument is less aligned than the parameter, there is a
  // potential alignment issue.
  if (ArgAlign < ParamAlign)
    Diag(Loc, diag::warn_param_mismatched_alignment)
        << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
        << ParamName << (FDecl != nullptr) << FDecl;
}

/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Determine the declared parameter count, preferring the prototype, then
    // the function declaration, then the ObjC method declaration.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    // Only the trailing (variadic) arguments need the POD check.
    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        QualType ParamTy = Proto->getParamType(ArgIdx);
        QualType ArgTy = Arg->getType();
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }
  }

  // Sanity-check the argument named by alloc_align: it must be a power of two
  // and no greater than the maximum supported alignment.
  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}

/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
                                ArrayRef<const Expr *> Args,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  VariadicCallType CallType =
      Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;

  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
                    Context.getPointerType(Ctor->getThisObjectType()));

  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}

/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  // Calls through an overloaded operator on a class object represent the
  // object as the first argument rather than as an implicit object argument.
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall) {
    // If this is a call to a member operator, hide the first argument
    // from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction)
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or -> is
    // used.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType());
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl =
        Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());

    // Verify the object argument satisfies the alignment the method declares
    // for 'this'.
    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  // Enforce TCB except for builtin calls, which are always allowed.
  if (FDecl->getBuiltinID() == 0)
    CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions. A kind of 0 means this is
  // not a recognized memory/string function, so no further checks apply.
  switch (CMId) {
  case 0:
    return false;
  case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}

/// CheckObjCMethodCall - Check an Objective-C method call for the checks
/// performed by checkCall (format strings, non-null arguments, variadic
/// argument validity, ...) plus TCB enforcement.
bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
                               ArrayRef<const Expr *> Args) {
  VariadicCallType CallType =
      Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;

  checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
            /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
            CallType);

  CheckTCBEnforcement(lbrac, Method);

  return false;
}

/// CheckPointerCall - Check a call through a block pointer or function
/// pointer named by a variable or field. Returns false (no hard error is
/// produced here); declarations that are not variables or fields, or whose
/// type is not a callable pointer type, are silently ignored.
bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                            const FunctionProtoType *Proto) {
  QualType Ty;
  if (const auto *V = dyn_cast<VarDecl>(NDecl))
    Ty = V->getType().getNonReferenceType();
  else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
    Ty = F->getType().getNonReferenceType();
  else
    return false;

  if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
      !Ty->isFunctionProtoType())
    return false;

  // Select the variadic-call flavor from the callee's type; a non-variadic
  // prototype (or no prototype) means variadic checking does not apply.
  VariadicCallType CallType;
  if (!Proto || !Proto->isVariadic()) {
    CallType = VariadicDoesNotApply;
  } else if (Ty->isBlockPointerType()) {
    CallType = VariadicBlock;
  } else { // Ty->isFunctionPointerType()
    CallType = VariadicFunction;
  }

  checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
            llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
/// such as function pointers returned from functions.
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
                                                  TheCall->getCallee());
  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
            llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Returns true if \p Ordering is a valid C ABI memory ordering for the
/// atomic operation \p Op: loads reject release/acq_rel, stores reject
/// consume/acquire/acq_rel, and all other operations accept any valid
/// ordering. Must not be called for init operations, which take no ordering.
static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
  if (!llvm::isValidAtomicOrderingCABI(Ordering))
    return false;

  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("There is no ordering argument for an init");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
    return OrderingCABI != llvm::AtomicOrderingCABI::release &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
           OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  default:
    return true;
  }
}

/// Semantic checking for the overloaded atomic builtins (__c11_atomic_*,
/// __opencl_atomic_*, __hip_atomic_*, __atomic_*): unpacks the call and
/// delegates to BuildAtomicExpr with the arguments in AST order.
ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                         AtomicExpr::AtomicOp Op) {
  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
  return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
                         DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
                         Op);
}

/// Type-check an atomic builtin call and build the AtomicExpr AST node.
/// Classifies \p Op into one of the argument "forms" below, verifies arity,
/// the pointer operand, value-type constraints, and memory ordering / scope
/// constants, converts each argument to its expected parameter type, and
/// permutes the arguments into AtomicExpr's canonical sub-expression order.
ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                                 SourceLocation RParenLoc, MultiExprArg Args,
                                 AtomicExpr::AtomicOp Op,
                                 AtomicArgumentOrder ArgOrder) {
  // All the non-OpenCL operations take one of the following forms.
  // The OpenCL operations take the __c11 forms with one extra argument for
  // synchronization scope.
  enum {
    // C    __c11_atomic_init(A *, C)
    Init,

    // C    __c11_atomic_load(A *, int)
    Load,

    // void __atomic_load(A *, CP, int)
    LoadCopy,

    // void __atomic_store(A *, CP, int)
    Copy,

    // C    __c11_atomic_add(A *, M, int)
    Arithmetic,

    // C    __atomic_exchange_n(A *, CP, int)
    Xchg,

    // void __atomic_exchange(A *, C *, CP, int)
    GNUXchg,

    // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
    C11CmpXchg,

    // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
    GNUCmpXchg
  } Form = Init;

  // Per-form tables: total argument count, and how many of those are "value"
  // operands (as opposed to ordering/weak-flag arguments). Indexed by Form.
  const unsigned NumForm = GNUCmpXchg + 1;
  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
  // where:
  //   C is an appropriate type,
  //   A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
  //   CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
  //   M is C if C is an integer, and ptrdiff_t if C is a pointer, and
  //   the int parameters are for orderings.

  static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
      && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
    "need to update code for modified forms");
  static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
                    AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
                        AtomicExpr::AO__atomic_load,
                "need to update code for modified C11 atomics");
  // These range checks rely on the AtomicOp enumerators being laid out in
  // contiguous per-dialect groups (guarded by the static_asserts above).
  bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
                  Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
  bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load &&
               Op <= AtomicExpr::AO__hip_atomic_fetch_max;
  bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
               Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
               IsOpenCL;
  bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
             Op == AtomicExpr::AO__atomic_store_n ||
             Op == AtomicExpr::AO__atomic_exchange_n ||
             Op == AtomicExpr::AO__atomic_compare_exchange_n;
  bool IsAddSub = false;

  // Map the specific builtin onto its argument form.
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    Form = Init;
    break;

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    Form = Load;
    break;

  case AtomicExpr::AO__atomic_load:
    Form = LoadCopy;
    break;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    Form = Copy;
    break;
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
    IsAddSub = true;
    Form = Arithmetic;
    break;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Form = Arithmetic;
    break;
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_max:
    Form = Arithmetic;
    break;

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
    Form = Xchg;
    break;

  case AtomicExpr::AO__atomic_exchange:
    Form = GNUXchg;
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    Form = C11CmpXchg;
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    Form = GNUCmpXchg;
    break;
  }

  // OpenCL and HIP variants carry one extra trailing synchronization-scope
  // argument (except init, which has none).
  unsigned AdjustedNumArgs = NumArgs[Form];
  if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init)
    ++AdjustedNumArgs;
  // Check we have the right number of arguments.
  if (Args.size() < AdjustedNumArgs) {
    Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
        << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
        << ExprRange;
    return ExprError();
  } else if (Args.size() > AdjustedNumArgs) {
    Diag(Args[AdjustedNumArgs]->getBeginLoc(),
         diag::err_typecheck_call_too_many_args)
        << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
        << ExprRange;
    return ExprError();
  }

  // Inspect the first argument of the atomic operation.
  Expr *Ptr = Args[0];
  ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
  if (ConvertedPtr.isInvalid())
    return ExprError();

  Ptr = ConvertedPtr.get();
  const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  // For a __c11 builtin, this should be a pointer to an _Atomic type.
  QualType AtomTy = pointerType->getPointeeType(); // 'A'
  QualType ValType = AtomTy; // 'C'
  if (IsC11) {
    if (!AtomTy->isAtomicType()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    // Writable operand required unless the operation only loads; a const
    // or __constant-address-space atomic cannot be modified.
    if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
        AtomTy.getAddressSpace() == LangAS::opencl_constant) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
          << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
          << Ptr->getSourceRange();
      return ExprError();
    }
    ValType = AtomTy->castAs<AtomicType>()->getValueType();
  } else if (Form != Load && Form != LoadCopy) {
    if (ValType.isConstQualified()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
  }

  // For an arithmetic operation, the implied arithmetic must be well-formed.
  if (Form == Arithmetic) {
    // GCC does not enforce these rules for GNU atomics, but we do to help catch
    // trivial type errors.
    auto IsAllowedValueType = [&](QualType ValType) {
      if (ValType->isIntegerType())
        return true;
      if (ValType->isPointerType())
        return true;
      if (!ValType->isFloatingType())
        return false;
      // LLVM Parser does not allow atomicrmw with x86_fp80 type.
      if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
          &Context.getTargetInfo().getLongDoubleFormat() ==
              &llvm::APFloat::x87DoubleExtended())
        return false;
      return true;
    };
    // add/sub permit integer, pointer, and (most) floating types; the other
    // arithmetic/bitwise operations require an integer value type.
    if (IsAddSub && !IsAllowedValueType(ValType)) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    if (!IsAddSub && !ValType->isIntegerType()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    if (IsC11 && ValType->isPointerType() &&
        RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
                            diag::err_incomplete_type)) {
      return ExprError();
    }
  } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
    // For __atomic_*_n operations, the value type must be a scalar integral or
    // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
    Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
        << IsC11 << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
      !AtomTy->isScalarType()) {
    // For GNU atomics, require a trivially-copyable type. This is not part of
    // the GNU atomics specification but we enforce it for consistency with
    // other atomics which generally all require a trivially-copyable type. This
    // is because atomics just copy bits.
    Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  // Reject ARC-managed lifetimes; atomics copy raw bits and cannot honor
  // ownership semantics.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    // FIXME: Can this happen? By this point, ValType should be known
    // to be trivially copyable.
    Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
        << ValType << Ptr->getSourceRange();
    return ExprError();
  }

  // All atomic operations have an overload which takes a pointer to a volatile
  // 'A'.  We shouldn't let the volatile-ness of the pointee-type inject itself
  // into the result or the other operands. Similarly atomic_load takes a
  // pointer to a const 'A'.
  ValType.removeLocalVolatile();
  ValType.removeLocalConst();
  QualType ResultType = ValType;
  if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
      Form == Init)
    ResultType = Context.VoidTy;
  else if (Form == C11CmpXchg || Form == GNUCmpXchg)
    ResultType = Context.BoolTy;

  // The type of a parameter passed 'by value'. In the GNU atomics, such
  // arguments are actually passed as pointers.
  QualType ByValType = ValType; // 'CP'
  bool IsPassedByAddress = false;
  if (!IsC11 && !IsHIP && !IsN) {
    ByValType = Ptr->getType();
    IsPassedByAddress = true;
  }

  // Reorder the incoming arguments (which may be in AST/source order) into
  // the canonical API order [ptr, vals..., weak, orders...] used below.
  SmallVector<Expr *, 5> APIOrderedArgs;
  if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
    APIOrderedArgs.push_back(Args[0]);
    switch (Form) {
    case Init:
    case Load:
      APIOrderedArgs.push_back(Args[1]); // Val1/Order
      break;
    case LoadCopy:
    case Copy:
    case Arithmetic:
    case Xchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[1]); // Order
      break;
    case GNUXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[3]); // Val2
      APIOrderedArgs.push_back(Args[1]); // Order
      break;
    case C11CmpXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[4]); // Val2
      APIOrderedArgs.push_back(Args[1]); // Order
      APIOrderedArgs.push_back(Args[3]); // OrderFail
      break;
    case GNUCmpXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[4]); // Val2
      APIOrderedArgs.push_back(Args[5]); // Weak
      APIOrderedArgs.push_back(Args[1]); // Order
      APIOrderedArgs.push_back(Args[3]); // OrderFail
      break;
    }
  } else
    APIOrderedArgs.append(Args.begin(), Args.end());

  // The first argument's non-CV pointer type is used to deduce the type of
  // subsequent arguments, except for:
  //  - weak flag (always converted to bool)
  //  - memory order (always converted to int)
  //  - scope (always converted to int)
  for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
    QualType Ty;
    if (i < NumVals[Form] + 1) {
      switch (i) {
      case 0:
        // The first argument is always a pointer. It has a fixed type.
        // It is always dereferenced, a nullptr is undefined.
        CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        // Nothing else to do: we already know all we want about this pointer.
        continue;
      case 1:
        // The second argument is the non-atomic operand. For arithmetic, this
        // is always passed by value, and for a compare_exchange it is always
        // passed by address. For the rest, GNU uses by-address and C11 uses
        // by-value.
        assert(Form != Load);
        if (Form == Arithmetic && ValType->isPointerType())
          Ty = Context.getPointerDiffType();
        else if (Form == Init || Form == Arithmetic)
          Ty = ValType;
        else if (Form == Copy || Form == Xchg) {
          if (IsPassedByAddress) {
            // The value pointer is always dereferenced, a nullptr is undefined.
            CheckNonNullArgument(*this, APIOrderedArgs[i],
                                 ExprRange.getBegin());
          }
          Ty = ByValType;
        } else {
          Expr *ValArg = APIOrderedArgs[i];
          // The value pointer is always dereferenced, a nullptr is undefined.
          CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
          LangAS AS = LangAS::Default;
          // Keep address space of non-atomic pointer type.
          if (const PointerType *PtrTy =
                  ValArg->getType()->getAs<PointerType>()) {
            AS = PtrTy->getPointeeType().getAddressSpace();
          }
          Ty = Context.getPointerType(
              Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
        }
        break;
      case 2:
        // The third argument to compare_exchange / GNU exchange is the desired
        // value, either by-value (for the C11 and *_n variant) or as a pointer.
        if (IsPassedByAddress)
          CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        Ty = ByValType;
        break;
      case 3:
        // The fourth argument to GNU compare_exchange is a 'weak' flag.
        Ty = Context.BoolTy;
        break;
      }
    } else {
      // The order(s) and scope are always converted to int.
      Ty = Context.IntTy;
    }

    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Ty, false);
    ExprResult Arg = APIOrderedArgs[i];
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      // Note: returning 'true' constructs an invalid (error) ExprResult.
      return true;
    APIOrderedArgs[i] = Arg.get();
  }

  // Permute the arguments into a 'consistent' order.
  SmallVector<Expr*, 5> SubExprs;
  SubExprs.push_back(Ptr);
  switch (Form) {
  case Init:
    // Note, AtomicExpr::getVal1() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case Load:
    SubExprs.push_back(APIOrderedArgs[1]); // Order
    break;
  case LoadCopy:
  case Copy:
  case Arithmetic:
  case Xchg:
    SubExprs.push_back(APIOrderedArgs[2]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case GNUXchg:
    // Note, AtomicExpr::getVal2() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case C11CmpXchg:
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case GNUCmpXchg:
    SubExprs.push_back(APIOrderedArgs[4]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    SubExprs.push_back(APIOrderedArgs[3]); // Weak
    break;
  }

  // If the success ordering is a constant, warn when it is invalid for this
  // operation (e.g. a release ordering on a load).
  if (SubExprs.size() >= 2 && Form != Init) {
    if (Optional<llvm::APSInt> Result =
            SubExprs[1]->getIntegerConstantExpr(Context))
      if (!isValidOrderingForOp(Result->getSExtValue(), Op))
        Diag(SubExprs[1]->getBeginLoc(),
             diag::warn_atomic_op_has_invalid_memory_order)
            << SubExprs[1]->getSourceRange();
  }

  // For operations with a synchronization scope (OpenCL/HIP), validate a
  // constant scope value against the operation's scope model.
  if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
    auto *Scope = Args[Args.size() - 1];
    if (Optional<llvm::APSInt> Result =
            Scope->getIntegerConstantExpr(Context)) {
      if (!ScopeModel->isValid(Result->getZExtValue()))
        Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
            << Scope->getSourceRange();
    }
    SubExprs.push_back(Scope);
  }

  AtomicExpr *AE = new (Context)
      AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);

  if ((Op == AtomicExpr::AO__c11_atomic_load ||
       Op == AtomicExpr::AO__c11_atomic_store ||
       Op == AtomicExpr::AO__opencl_atomic_load ||
       Op == AtomicExpr::AO__hip_atomic_load ||
       Op == AtomicExpr::AO__opencl_atomic_store ||
       Op == AtomicExpr::AO__hip_atomic_store) &&
      Context.AtomicUsesUnsupportedLibcall(AE))
    Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
        << ((Op == AtomicExpr::AO__c11_atomic_load ||
             Op == AtomicExpr::AO__opencl_atomic_load ||
             Op == AtomicExpr::AO__hip_atomic_load)
                ? 0
                : 1);

  // _BitInt types are not permitted as atomic operands.
  if (ValType->isBitIntType()) {
    Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
    return ExprError();
  }

  return AE;
}

/// checkBuiltinArgument - Given a call to a builtin function, perform
/// normal type-checking on the given argument, updating the call in
/// place.  This is useful when a builtin function requires custom
/// type-checking for some of its arguments but not necessarily all of
/// them.
///
/// Returns true on error.
6488 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6489 FunctionDecl *Fn = E->getDirectCallee(); 6490 assert(Fn && "builtin call without direct callee!"); 6491 6492 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6493 InitializedEntity Entity = 6494 InitializedEntity::InitializeParameter(S.Context, Param); 6495 6496 ExprResult Arg = E->getArg(ArgIndex); 6497 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6498 if (Arg.isInvalid()) 6499 return true; 6500 6501 E->setArg(ArgIndex, Arg.get()); 6502 return false; 6503 } 6504 6505 /// We have a call to a function like __sync_fetch_and_add, which is an 6506 /// overloaded function based on the pointer type of its first argument. 6507 /// The main BuildCallExpr routines have already promoted the types of 6508 /// arguments because all of these calls are prototyped as void(...). 6509 /// 6510 /// This function goes through and does final semantic checking for these 6511 /// builtins, as well as generating any warnings. 6512 ExprResult 6513 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6514 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6515 Expr *Callee = TheCall->getCallee(); 6516 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6517 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6518 6519 // Ensure that we have at least one argument to do type inference from. 6520 if (TheCall->getNumArgs() < 1) { 6521 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6522 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6523 return ExprError(); 6524 } 6525 6526 // Inspect the first argument of the atomic builtin. This should always be 6527 // a pointer type, whose element is an integral scalar or pointer type. 6528 // Because it is a pointer type, we don't have to worry about any implicit 6529 // casts here. 6530 // FIXME: We don't allow floating point scalars as input. 
6531 Expr *FirstArg = TheCall->getArg(0); 6532 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6533 if (FirstArgResult.isInvalid()) 6534 return ExprError(); 6535 FirstArg = FirstArgResult.get(); 6536 TheCall->setArg(0, FirstArg); 6537 6538 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6539 if (!pointerType) { 6540 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6541 << FirstArg->getType() << FirstArg->getSourceRange(); 6542 return ExprError(); 6543 } 6544 6545 QualType ValType = pointerType->getPointeeType(); 6546 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6547 !ValType->isBlockPointerType()) { 6548 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6549 << FirstArg->getType() << FirstArg->getSourceRange(); 6550 return ExprError(); 6551 } 6552 6553 if (ValType.isConstQualified()) { 6554 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6555 << FirstArg->getType() << FirstArg->getSourceRange(); 6556 return ExprError(); 6557 } 6558 6559 switch (ValType.getObjCLifetime()) { 6560 case Qualifiers::OCL_None: 6561 case Qualifiers::OCL_ExplicitNone: 6562 // okay 6563 break; 6564 6565 case Qualifiers::OCL_Weak: 6566 case Qualifiers::OCL_Strong: 6567 case Qualifiers::OCL_Autoreleasing: 6568 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6569 << ValType << FirstArg->getSourceRange(); 6570 return ExprError(); 6571 } 6572 6573 // Strip any qualifiers off ValType. 6574 ValType = ValType.getUnqualifiedType(); 6575 6576 // The majority of builtins return a value, but a few have special return 6577 // types, so allow them to override appropriately below. 6578 QualType ResultType = ValType; 6579 6580 // We need to figure out which concrete builtin this maps onto. For example, 6581 // __sync_fetch_and_add with a 2 byte object turns into 6582 // __sync_fetch_and_add_2. 
6583 #define BUILTIN_ROW(x) \ 6584 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6585 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6586 6587 static const unsigned BuiltinIndices[][5] = { 6588 BUILTIN_ROW(__sync_fetch_and_add), 6589 BUILTIN_ROW(__sync_fetch_and_sub), 6590 BUILTIN_ROW(__sync_fetch_and_or), 6591 BUILTIN_ROW(__sync_fetch_and_and), 6592 BUILTIN_ROW(__sync_fetch_and_xor), 6593 BUILTIN_ROW(__sync_fetch_and_nand), 6594 6595 BUILTIN_ROW(__sync_add_and_fetch), 6596 BUILTIN_ROW(__sync_sub_and_fetch), 6597 BUILTIN_ROW(__sync_and_and_fetch), 6598 BUILTIN_ROW(__sync_or_and_fetch), 6599 BUILTIN_ROW(__sync_xor_and_fetch), 6600 BUILTIN_ROW(__sync_nand_and_fetch), 6601 6602 BUILTIN_ROW(__sync_val_compare_and_swap), 6603 BUILTIN_ROW(__sync_bool_compare_and_swap), 6604 BUILTIN_ROW(__sync_lock_test_and_set), 6605 BUILTIN_ROW(__sync_lock_release), 6606 BUILTIN_ROW(__sync_swap) 6607 }; 6608 #undef BUILTIN_ROW 6609 6610 // Determine the index of the size. 6611 unsigned SizeIndex; 6612 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6613 case 1: SizeIndex = 0; break; 6614 case 2: SizeIndex = 1; break; 6615 case 4: SizeIndex = 2; break; 6616 case 8: SizeIndex = 3; break; 6617 case 16: SizeIndex = 4; break; 6618 default: 6619 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6620 << FirstArg->getType() << FirstArg->getSourceRange(); 6621 return ExprError(); 6622 } 6623 6624 // Each of these builtins has one pointer argument, followed by some number of 6625 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 6626 // that we ignore. Find out which row of BuiltinIndices to read from as well 6627 // as the number of fixed args. 
6628 unsigned BuiltinID = FDecl->getBuiltinID(); 6629 unsigned BuiltinIndex, NumFixed = 1; 6630 bool WarnAboutSemanticsChange = false; 6631 switch (BuiltinID) { 6632 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6633 case Builtin::BI__sync_fetch_and_add: 6634 case Builtin::BI__sync_fetch_and_add_1: 6635 case Builtin::BI__sync_fetch_and_add_2: 6636 case Builtin::BI__sync_fetch_and_add_4: 6637 case Builtin::BI__sync_fetch_and_add_8: 6638 case Builtin::BI__sync_fetch_and_add_16: 6639 BuiltinIndex = 0; 6640 break; 6641 6642 case Builtin::BI__sync_fetch_and_sub: 6643 case Builtin::BI__sync_fetch_and_sub_1: 6644 case Builtin::BI__sync_fetch_and_sub_2: 6645 case Builtin::BI__sync_fetch_and_sub_4: 6646 case Builtin::BI__sync_fetch_and_sub_8: 6647 case Builtin::BI__sync_fetch_and_sub_16: 6648 BuiltinIndex = 1; 6649 break; 6650 6651 case Builtin::BI__sync_fetch_and_or: 6652 case Builtin::BI__sync_fetch_and_or_1: 6653 case Builtin::BI__sync_fetch_and_or_2: 6654 case Builtin::BI__sync_fetch_and_or_4: 6655 case Builtin::BI__sync_fetch_and_or_8: 6656 case Builtin::BI__sync_fetch_and_or_16: 6657 BuiltinIndex = 2; 6658 break; 6659 6660 case Builtin::BI__sync_fetch_and_and: 6661 case Builtin::BI__sync_fetch_and_and_1: 6662 case Builtin::BI__sync_fetch_and_and_2: 6663 case Builtin::BI__sync_fetch_and_and_4: 6664 case Builtin::BI__sync_fetch_and_and_8: 6665 case Builtin::BI__sync_fetch_and_and_16: 6666 BuiltinIndex = 3; 6667 break; 6668 6669 case Builtin::BI__sync_fetch_and_xor: 6670 case Builtin::BI__sync_fetch_and_xor_1: 6671 case Builtin::BI__sync_fetch_and_xor_2: 6672 case Builtin::BI__sync_fetch_and_xor_4: 6673 case Builtin::BI__sync_fetch_and_xor_8: 6674 case Builtin::BI__sync_fetch_and_xor_16: 6675 BuiltinIndex = 4; 6676 break; 6677 6678 case Builtin::BI__sync_fetch_and_nand: 6679 case Builtin::BI__sync_fetch_and_nand_1: 6680 case Builtin::BI__sync_fetch_and_nand_2: 6681 case Builtin::BI__sync_fetch_and_nand_4: 6682 case Builtin::BI__sync_fetch_and_nand_8: 
6683 case Builtin::BI__sync_fetch_and_nand_16: 6684 BuiltinIndex = 5; 6685 WarnAboutSemanticsChange = true; 6686 break; 6687 6688 case Builtin::BI__sync_add_and_fetch: 6689 case Builtin::BI__sync_add_and_fetch_1: 6690 case Builtin::BI__sync_add_and_fetch_2: 6691 case Builtin::BI__sync_add_and_fetch_4: 6692 case Builtin::BI__sync_add_and_fetch_8: 6693 case Builtin::BI__sync_add_and_fetch_16: 6694 BuiltinIndex = 6; 6695 break; 6696 6697 case Builtin::BI__sync_sub_and_fetch: 6698 case Builtin::BI__sync_sub_and_fetch_1: 6699 case Builtin::BI__sync_sub_and_fetch_2: 6700 case Builtin::BI__sync_sub_and_fetch_4: 6701 case Builtin::BI__sync_sub_and_fetch_8: 6702 case Builtin::BI__sync_sub_and_fetch_16: 6703 BuiltinIndex = 7; 6704 break; 6705 6706 case Builtin::BI__sync_and_and_fetch: 6707 case Builtin::BI__sync_and_and_fetch_1: 6708 case Builtin::BI__sync_and_and_fetch_2: 6709 case Builtin::BI__sync_and_and_fetch_4: 6710 case Builtin::BI__sync_and_and_fetch_8: 6711 case Builtin::BI__sync_and_and_fetch_16: 6712 BuiltinIndex = 8; 6713 break; 6714 6715 case Builtin::BI__sync_or_and_fetch: 6716 case Builtin::BI__sync_or_and_fetch_1: 6717 case Builtin::BI__sync_or_and_fetch_2: 6718 case Builtin::BI__sync_or_and_fetch_4: 6719 case Builtin::BI__sync_or_and_fetch_8: 6720 case Builtin::BI__sync_or_and_fetch_16: 6721 BuiltinIndex = 9; 6722 break; 6723 6724 case Builtin::BI__sync_xor_and_fetch: 6725 case Builtin::BI__sync_xor_and_fetch_1: 6726 case Builtin::BI__sync_xor_and_fetch_2: 6727 case Builtin::BI__sync_xor_and_fetch_4: 6728 case Builtin::BI__sync_xor_and_fetch_8: 6729 case Builtin::BI__sync_xor_and_fetch_16: 6730 BuiltinIndex = 10; 6731 break; 6732 6733 case Builtin::BI__sync_nand_and_fetch: 6734 case Builtin::BI__sync_nand_and_fetch_1: 6735 case Builtin::BI__sync_nand_and_fetch_2: 6736 case Builtin::BI__sync_nand_and_fetch_4: 6737 case Builtin::BI__sync_nand_and_fetch_8: 6738 case Builtin::BI__sync_nand_and_fetch_16: 6739 BuiltinIndex = 11; 6740 WarnAboutSemanticsChange = 
true; 6741 break; 6742 6743 case Builtin::BI__sync_val_compare_and_swap: 6744 case Builtin::BI__sync_val_compare_and_swap_1: 6745 case Builtin::BI__sync_val_compare_and_swap_2: 6746 case Builtin::BI__sync_val_compare_and_swap_4: 6747 case Builtin::BI__sync_val_compare_and_swap_8: 6748 case Builtin::BI__sync_val_compare_and_swap_16: 6749 BuiltinIndex = 12; 6750 NumFixed = 2; 6751 break; 6752 6753 case Builtin::BI__sync_bool_compare_and_swap: 6754 case Builtin::BI__sync_bool_compare_and_swap_1: 6755 case Builtin::BI__sync_bool_compare_and_swap_2: 6756 case Builtin::BI__sync_bool_compare_and_swap_4: 6757 case Builtin::BI__sync_bool_compare_and_swap_8: 6758 case Builtin::BI__sync_bool_compare_and_swap_16: 6759 BuiltinIndex = 13; 6760 NumFixed = 2; 6761 ResultType = Context.BoolTy; 6762 break; 6763 6764 case Builtin::BI__sync_lock_test_and_set: 6765 case Builtin::BI__sync_lock_test_and_set_1: 6766 case Builtin::BI__sync_lock_test_and_set_2: 6767 case Builtin::BI__sync_lock_test_and_set_4: 6768 case Builtin::BI__sync_lock_test_and_set_8: 6769 case Builtin::BI__sync_lock_test_and_set_16: 6770 BuiltinIndex = 14; 6771 break; 6772 6773 case Builtin::BI__sync_lock_release: 6774 case Builtin::BI__sync_lock_release_1: 6775 case Builtin::BI__sync_lock_release_2: 6776 case Builtin::BI__sync_lock_release_4: 6777 case Builtin::BI__sync_lock_release_8: 6778 case Builtin::BI__sync_lock_release_16: 6779 BuiltinIndex = 15; 6780 NumFixed = 0; 6781 ResultType = Context.VoidTy; 6782 break; 6783 6784 case Builtin::BI__sync_swap: 6785 case Builtin::BI__sync_swap_1: 6786 case Builtin::BI__sync_swap_2: 6787 case Builtin::BI__sync_swap_4: 6788 case Builtin::BI__sync_swap_8: 6789 case Builtin::BI__sync_swap_16: 6790 BuiltinIndex = 16; 6791 break; 6792 } 6793 6794 // Now that we know how many fixed arguments we expect, first check that we 6795 // have at least that many. 
6796 if (TheCall->getNumArgs() < 1+NumFixed) { 6797 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6798 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6799 << Callee->getSourceRange(); 6800 return ExprError(); 6801 } 6802 6803 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6804 << Callee->getSourceRange(); 6805 6806 if (WarnAboutSemanticsChange) { 6807 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6808 << Callee->getSourceRange(); 6809 } 6810 6811 // Get the decl for the concrete builtin from this, we can tell what the 6812 // concrete integer type we should convert to is. 6813 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6814 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6815 FunctionDecl *NewBuiltinDecl; 6816 if (NewBuiltinID == BuiltinID) 6817 NewBuiltinDecl = FDecl; 6818 else { 6819 // Perform builtin lookup to avoid redeclaring it. 6820 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6821 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6822 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6823 assert(Res.getFoundDecl()); 6824 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6825 if (!NewBuiltinDecl) 6826 return ExprError(); 6827 } 6828 6829 // The first argument --- the pointer --- has a fixed type; we 6830 // deduce the types of the rest of the arguments accordingly. Walk 6831 // the remaining arguments, converting them to the deduced value type. 6832 for (unsigned i = 0; i != NumFixed; ++i) { 6833 ExprResult Arg = TheCall->getArg(i+1); 6834 6835 // GCC does an implicit conversion to the pointer or integer ValType. This 6836 // can fail in some cases (1i -> int**), check for this error case now. 6837 // Initialize the argument. 
// Change the result type of the call to match the original value type. This
// is arbitrary, but the codegen for these builtins is designed to handle it
// gracefully.
6874 const auto *BitIntValType = ValType->getAs<BitIntType>(); 6875 if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) { 6876 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 6877 return ExprError(); 6878 } 6879 6880 return TheCallResult; 6881 } 6882 6883 /// SemaBuiltinNontemporalOverloaded - We have a call to 6884 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 6885 /// overloaded function based on the pointer type of its last argument. 6886 /// 6887 /// This function goes through and does final semantic checking for these 6888 /// builtins. 6889 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 6890 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 6891 DeclRefExpr *DRE = 6892 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6893 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6894 unsigned BuiltinID = FDecl->getBuiltinID(); 6895 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 6896 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 6897 "Unexpected nontemporal load/store builtin!"); 6898 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 6899 unsigned numArgs = isStore ? 2 : 1; 6900 6901 // Ensure that we have the proper number of arguments. 6902 if (checkArgCount(*this, TheCall, numArgs)) 6903 return ExprError(); 6904 6905 // Inspect the last argument of the nontemporal builtin. This should always 6906 // be a pointer type, from which we imply the type of the memory access. 6907 // Because it is a pointer type, we don't have to worry about any implicit 6908 // casts here. 
6909 Expr *PointerArg = TheCall->getArg(numArgs - 1); 6910 ExprResult PointerArgResult = 6911 DefaultFunctionArrayLvalueConversion(PointerArg); 6912 6913 if (PointerArgResult.isInvalid()) 6914 return ExprError(); 6915 PointerArg = PointerArgResult.get(); 6916 TheCall->setArg(numArgs - 1, PointerArg); 6917 6918 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 6919 if (!pointerType) { 6920 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 6921 << PointerArg->getType() << PointerArg->getSourceRange(); 6922 return ExprError(); 6923 } 6924 6925 QualType ValType = pointerType->getPointeeType(); 6926 6927 // Strip any qualifiers off ValType. 6928 ValType = ValType.getUnqualifiedType(); 6929 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6930 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6931 !ValType->isVectorType()) { 6932 Diag(DRE->getBeginLoc(), 6933 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6934 << PointerArg->getType() << PointerArg->getSourceRange(); 6935 return ExprError(); 6936 } 6937 6938 if (!isStore) { 6939 TheCall->setType(ValType); 6940 return TheCallResult; 6941 } 6942 6943 ExprResult ValArg = TheCall->getArg(0); 6944 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6945 Context, ValType, /*consume*/ false); 6946 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 6947 if (ValArg.isInvalid()) 6948 return ExprError(); 6949 6950 TheCall->setArg(0, ValArg.get()); 6951 TheCall->setType(Context.VoidTy); 6952 return TheCallResult; 6953 } 6954 6955 /// CheckObjCString - Checks that the argument to the builtin 6956 /// CFString constructor is correct 6957 /// Note: It might also make sense to do the UTF-16 conversion here (would 6958 /// simplify the backend). 
bool Sema::CheckObjCString(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);

  // The argument must be a plain (narrow, unprefixed) string literal.
  if (!Literal || !Literal->isAscii()) {
    Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
        << Arg->getSourceRange();
    return true;
  }

  // If the literal has bytes outside 7-bit ASCII (or embedded NULs), verify
  // it converts cleanly to UTF-16.  A failure is only a warning: the constant
  // can still be emitted, just possibly truncated.
  if (Literal->containsNonAsciiOrNull()) {
    StringRef String = Literal->getString();
    unsigned NumBytes = String.size();
    // A UTF-16 result never needs more code units than the UTF-8 input has
    // bytes, so NumBytes is a safe output-buffer size.
    SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
    const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
    llvm::UTF16 *ToPtr = &ToBuf[0];

    llvm::ConversionResult Result =
        llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
                                 ToPtr + NumBytes, llvm::strictConversion);
    // Check for conversion failure.
    if (Result != llvm::conversionOK)
      Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
          << Arg->getSourceRange();
  }
  return false;
}

/// CheckOSLogFormatStringArg - Checks that the format string argument to the
/// os_log() and os_trace() functions is correct, and converts it to
/// const char *.
6989 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 6990 Arg = Arg->IgnoreParenCasts(); 6991 auto *Literal = dyn_cast<StringLiteral>(Arg); 6992 if (!Literal) { 6993 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 6994 Literal = ObjcLiteral->getString(); 6995 } 6996 } 6997 6998 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 6999 return ExprError( 7000 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 7001 << Arg->getSourceRange()); 7002 } 7003 7004 ExprResult Result(Literal); 7005 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 7006 InitializedEntity Entity = 7007 InitializedEntity::InitializeParameter(Context, ResultTy, false); 7008 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 7009 return Result; 7010 } 7011 7012 /// Check that the user is calling the appropriate va_start builtin for the 7013 /// target and calling convention. 7014 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 7015 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 7016 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 7017 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 7018 TT.getArch() == llvm::Triple::aarch64_32); 7019 bool IsWindows = TT.isOSWindows(); 7020 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 7021 if (IsX64 || IsAArch64) { 7022 CallingConv CC = CC_C; 7023 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 7024 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 7025 if (IsMSVAStart) { 7026 // Don't allow this in System V ABI functions. 7027 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 7028 return S.Diag(Fn->getBeginLoc(), 7029 diag::err_ms_va_start_used_in_sysv_function); 7030 } else { 7031 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 7032 // On x64 Windows, don't allow this in System V ABI functions. 
7033 // (Yes, that means there's no corresponding way to support variadic 7034 // System V ABI functions on Windows.) 7035 if ((IsWindows && CC == CC_X86_64SysV) || 7036 (!IsWindows && CC == CC_Win64)) 7037 return S.Diag(Fn->getBeginLoc(), 7038 diag::err_va_start_used_in_wrong_abi_function) 7039 << !IsWindows; 7040 } 7041 return false; 7042 } 7043 7044 if (IsMSVAStart) 7045 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 7046 return false; 7047 } 7048 7049 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 7050 ParmVarDecl **LastParam = nullptr) { 7051 // Determine whether the current function, block, or obj-c method is variadic 7052 // and get its parameter list. 7053 bool IsVariadic = false; 7054 ArrayRef<ParmVarDecl *> Params; 7055 DeclContext *Caller = S.CurContext; 7056 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 7057 IsVariadic = Block->isVariadic(); 7058 Params = Block->parameters(); 7059 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 7060 IsVariadic = FD->isVariadic(); 7061 Params = FD->parameters(); 7062 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 7063 IsVariadic = MD->isVariadic(); 7064 // FIXME: This isn't correct for methods (results in bogus warning). 7065 Params = MD->parameters(); 7066 } else if (isa<CapturedDecl>(Caller)) { 7067 // We don't support va_start in a CapturedDecl. 7068 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 7069 return true; 7070 } else { 7071 // This must be some other declcontext that parses exprs. 7072 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 7073 return true; 7074 } 7075 7076 if (!IsVariadic) { 7077 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 7078 return true; 7079 } 7080 7081 if (LastParam) 7082 *LastParam = Params.empty() ? nullptr : Params.back(); 7083 7084 return false; 7085 } 7086 7087 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 7088 /// for validity. 
Emit an error and return true on failure; return false 7089 /// on success. 7090 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 7091 Expr *Fn = TheCall->getCallee(); 7092 7093 if (checkVAStartABI(*this, BuiltinID, Fn)) 7094 return true; 7095 7096 if (checkArgCount(*this, TheCall, 2)) 7097 return true; 7098 7099 // Type-check the first argument normally. 7100 if (checkBuiltinArgument(*this, TheCall, 0)) 7101 return true; 7102 7103 // Check that the current function is variadic, and get its last parameter. 7104 ParmVarDecl *LastParam; 7105 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 7106 return true; 7107 7108 // Verify that the second argument to the builtin is the last argument of the 7109 // current function or method. 7110 bool SecondArgIsLastNamedArgument = false; 7111 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 7112 7113 // These are valid if SecondArgIsLastNamedArgument is false after the next 7114 // block. 7115 QualType Type; 7116 SourceLocation ParamLoc; 7117 bool IsCRegister = false; 7118 7119 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 7120 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 7121 SecondArgIsLastNamedArgument = PV == LastParam; 7122 7123 Type = PV->getType(); 7124 ParamLoc = PV->getLocation(); 7125 IsCRegister = 7126 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 7127 } 7128 } 7129 7130 if (!SecondArgIsLastNamedArgument) 7131 Diag(TheCall->getArg(1)->getBeginLoc(), 7132 diag::warn_second_arg_of_va_start_not_last_named_param); 7133 else if (IsCRegister || Type->isReferenceType() || 7134 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 7135 // Promotable integers are UB, but enumerations need a bit of 7136 // extra checking to see what their promotable type actually is. 
7137 if (!Type->isPromotableIntegerType()) 7138 return false; 7139 if (!Type->isEnumeralType()) 7140 return true; 7141 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 7142 return !(ED && 7143 Context.typesAreCompatible(ED->getPromotionType(), Type)); 7144 }()) { 7145 unsigned Reason = 0; 7146 if (Type->isReferenceType()) Reason = 1; 7147 else if (IsCRegister) Reason = 2; 7148 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 7149 Diag(ParamLoc, diag::note_parameter_type) << Type; 7150 } 7151 7152 TheCall->setType(Context.VoidTy); 7153 return false; 7154 } 7155 7156 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 7157 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 7158 const LangOptions &LO = getLangOpts(); 7159 7160 if (LO.CPlusPlus) 7161 return Arg->getType() 7162 .getCanonicalType() 7163 .getTypePtr() 7164 ->getPointeeType() 7165 .withoutLocalFastQualifiers() == Context.CharTy; 7166 7167 // In C, allow aliasing through `char *`, this is required for AArch64 at 7168 // least. 7169 return true; 7170 }; 7171 7172 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 7173 // const char *named_addr); 7174 7175 Expr *Func = Call->getCallee(); 7176 7177 if (Call->getNumArgs() < 3) 7178 return Diag(Call->getEndLoc(), 7179 diag::err_typecheck_call_too_few_args_at_least) 7180 << 0 /*function call*/ << 3 << Call->getNumArgs(); 7181 7182 // Type-check the first argument normally. 7183 if (checkBuiltinArgument(*this, Call, 0)) 7184 return true; 7185 7186 // Check that the current function is variadic. 
7187 if (checkVAStartIsInVariadicFunction(*this, Func)) 7188 return true; 7189 7190 // __va_start on Windows does not validate the parameter qualifiers 7191 7192 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 7193 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 7194 7195 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 7196 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 7197 7198 const QualType &ConstCharPtrTy = 7199 Context.getPointerType(Context.CharTy.withConst()); 7200 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1)) 7201 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 7202 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 7203 << 0 /* qualifier difference */ 7204 << 3 /* parameter mismatch */ 7205 << 2 << Arg1->getType() << ConstCharPtrTy; 7206 7207 const QualType SizeTy = Context.getSizeType(); 7208 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 7209 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 7210 << Arg2->getType() << SizeTy << 1 /* different class */ 7211 << 0 /* qualifier difference */ 7212 << 3 /* parameter mismatch */ 7213 << 3 << Arg2->getType() << SizeTy; 7214 7215 return false; 7216 } 7217 7218 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 7219 /// friends. This is declared to take (...), so we have to check everything. 7220 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 7221 if (checkArgCount(*this, TheCall, 2)) 7222 return true; 7223 7224 ExprResult OrigArg0 = TheCall->getArg(0); 7225 ExprResult OrigArg1 = TheCall->getArg(1); 7226 7227 // Do standard promotions between the two arguments, returning their common 7228 // type. 
/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends.  This is declared to take (...), so we have
/// to check everything.  We expect the last argument to be a floating point
/// value.
// Usual Unary Conversions will convert half to float, which we want for
// machines that use fp16 conversion intrinsics. Else, we want to leave the
// type how it is, but do normal L->Rvalue conversions.
7317 if (!T->isRealFloatingType()) { 7318 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 7319 << Arg->getType() << Arg->getSourceRange(); 7320 } 7321 7322 ExprResult Converted = DefaultLvalueConversion(Arg); 7323 if (Converted.isInvalid()) 7324 return true; 7325 TheCall->setArg(I, Converted.get()); 7326 } 7327 7328 if (Dependent) { 7329 TheCall->setType(Context.DependentTy); 7330 return false; 7331 } 7332 7333 Expr *Real = TheCall->getArg(0); 7334 Expr *Imag = TheCall->getArg(1); 7335 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 7336 return Diag(Real->getBeginLoc(), 7337 diag::err_typecheck_call_different_arg_types) 7338 << Real->getType() << Imag->getType() 7339 << Real->getSourceRange() << Imag->getSourceRange(); 7340 } 7341 7342 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 7343 // don't allow this builtin to form those types either. 7344 // FIXME: Should we allow these types? 7345 if (Real->getType()->isFloat16Type()) 7346 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 7347 << "_Float16"; 7348 if (Real->getType()->isHalfType()) 7349 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 7350 << "half"; 7351 7352 TheCall->setType(Context.getComplexType(Real->getType())); 7353 return false; 7354 } 7355 7356 // Customized Sema Checking for VSX builtins that have the following signature: 7357 // vector [...] builtinName(vector [...], vector [...], const int); 7358 // Which takes the same type of vectors (any legal vector type) for the first 7359 // two arguments and takes compile time constant for the third argument. 
// Example builtins are :
// vector double vec_xxpermdi(vector double, vector double, int);
// vector short vec_xxsldwi(vector short, vector short, int);
bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
  // All of the VSX builtins handled here take exactly three arguments.
  unsigned ExpectedNumArgs = 3;
  if (checkArgCount(*this, TheCall, ExpectedNumArgs))
    return true;

  // Check the third argument is a compile time constant
  if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
    return Diag(TheCall->getBeginLoc(),
                diag::err_vsx_builtin_nonconstant_argument)
           << 3 /* argument index */ << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(2)->getBeginLoc(),
                          TheCall->getArg(2)->getEndLoc());

  QualType Arg1Ty = TheCall->getArg(0)->getType();
  QualType Arg2Ty = TheCall->getArg(1)->getType();

  // Check the type of argument 1 and argument 2 are vectors.
  // Dependent types are deferred until instantiation.
  SourceLocation BuiltinLoc = TheCall->getBeginLoc();
  if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
      (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // Check the first two arguments are the same type.
  if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // When default clang type checking is turned off and the customized type
  // checking is used, the returning type of the function must be explicitly
  // set. Otherwise it is _Bool by default.
  TheCall->setType(Arg1Ty);

  return false;
}

/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
7406 // This is declared to take (...), so we have to check everything. 7407 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 7408 if (TheCall->getNumArgs() < 2) 7409 return ExprError(Diag(TheCall->getEndLoc(), 7410 diag::err_typecheck_call_too_few_args_at_least) 7411 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 7412 << TheCall->getSourceRange()); 7413 7414 // Determine which of the following types of shufflevector we're checking: 7415 // 1) unary, vector mask: (lhs, mask) 7416 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 7417 QualType resType = TheCall->getArg(0)->getType(); 7418 unsigned numElements = 0; 7419 7420 if (!TheCall->getArg(0)->isTypeDependent() && 7421 !TheCall->getArg(1)->isTypeDependent()) { 7422 QualType LHSType = TheCall->getArg(0)->getType(); 7423 QualType RHSType = TheCall->getArg(1)->getType(); 7424 7425 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 7426 return ExprError( 7427 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 7428 << TheCall->getDirectCallee() 7429 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7430 TheCall->getArg(1)->getEndLoc())); 7431 7432 numElements = LHSType->castAs<VectorType>()->getNumElements(); 7433 unsigned numResElements = TheCall->getNumArgs() - 2; 7434 7435 // Check to see if we have a call with 2 vector arguments, the unary shuffle 7436 // with mask. If so, verify that RHS is an integer vector type with the 7437 // same number of elts as lhs. 
7438 if (TheCall->getNumArgs() == 2) { 7439 if (!RHSType->hasIntegerRepresentation() || 7440 RHSType->castAs<VectorType>()->getNumElements() != numElements) 7441 return ExprError(Diag(TheCall->getBeginLoc(), 7442 diag::err_vec_builtin_incompatible_vector) 7443 << TheCall->getDirectCallee() 7444 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 7445 TheCall->getArg(1)->getEndLoc())); 7446 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 7447 return ExprError(Diag(TheCall->getBeginLoc(), 7448 diag::err_vec_builtin_incompatible_vector) 7449 << TheCall->getDirectCallee() 7450 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7451 TheCall->getArg(1)->getEndLoc())); 7452 } else if (numElements != numResElements) { 7453 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 7454 resType = Context.getVectorType(eltType, numResElements, 7455 VectorType::GenericVector); 7456 } 7457 } 7458 7459 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 7460 if (TheCall->getArg(i)->isTypeDependent() || 7461 TheCall->getArg(i)->isValueDependent()) 7462 continue; 7463 7464 Optional<llvm::APSInt> Result; 7465 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 7466 return ExprError(Diag(TheCall->getBeginLoc(), 7467 diag::err_shufflevector_nonconstant_argument) 7468 << TheCall->getArg(i)->getSourceRange()); 7469 7470 // Allow -1 which will be translated to undef in the IR. 
7471 if (Result->isSigned() && Result->isAllOnes()) 7472 continue; 7473 7474 if (Result->getActiveBits() > 64 || 7475 Result->getZExtValue() >= numElements * 2) 7476 return ExprError(Diag(TheCall->getBeginLoc(), 7477 diag::err_shufflevector_argument_too_large) 7478 << TheCall->getArg(i)->getSourceRange()); 7479 } 7480 7481 SmallVector<Expr*, 32> exprs; 7482 7483 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 7484 exprs.push_back(TheCall->getArg(i)); 7485 TheCall->setArg(i, nullptr); 7486 } 7487 7488 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 7489 TheCall->getCallee()->getBeginLoc(), 7490 TheCall->getRParenLoc()); 7491 } 7492 7493 /// SemaConvertVectorExpr - Handle __builtin_convertvector 7494 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 7495 SourceLocation BuiltinLoc, 7496 SourceLocation RParenLoc) { 7497 ExprValueKind VK = VK_PRValue; 7498 ExprObjectKind OK = OK_Ordinary; 7499 QualType DstTy = TInfo->getType(); 7500 QualType SrcTy = E->getType(); 7501 7502 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 7503 return ExprError(Diag(BuiltinLoc, 7504 diag::err_convertvector_non_vector) 7505 << E->getSourceRange()); 7506 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 7507 return ExprError(Diag(BuiltinLoc, 7508 diag::err_convertvector_non_vector_type)); 7509 7510 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 7511 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 7512 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 7513 if (SrcElts != DstElts) 7514 return ExprError(Diag(BuiltinLoc, 7515 diag::err_convertvector_incompatible_vector) 7516 << E->getSourceRange()); 7517 } 7518 7519 return new (Context) 7520 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 7521 } 7522 7523 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 7524 // This is declared to take (const void*, ...) and can take two 7525 // optional constant int args. 
7526 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7527 unsigned NumArgs = TheCall->getNumArgs(); 7528 7529 if (NumArgs > 3) 7530 return Diag(TheCall->getEndLoc(), 7531 diag::err_typecheck_call_too_many_args_at_most) 7532 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7533 7534 // Argument 0 is checked for us and the remaining arguments must be 7535 // constant integers. 7536 for (unsigned i = 1; i != NumArgs; ++i) 7537 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7538 return true; 7539 7540 return false; 7541 } 7542 7543 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7544 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7545 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7546 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7547 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7548 if (checkArgCount(*this, TheCall, 1)) 7549 return true; 7550 Expr *Arg = TheCall->getArg(0); 7551 if (Arg->isInstantiationDependent()) 7552 return false; 7553 7554 QualType ArgTy = Arg->getType(); 7555 if (!ArgTy->hasFloatingRepresentation()) 7556 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7557 << ArgTy; 7558 if (Arg->isLValue()) { 7559 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7560 TheCall->setArg(0, FirstArg.get()); 7561 } 7562 TheCall->setType(TheCall->getArg(0)->getType()); 7563 return false; 7564 } 7565 7566 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7567 // __assume does not evaluate its arguments, and should warn if its argument 7568 // has side effects. 
7569 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7570 Expr *Arg = TheCall->getArg(0); 7571 if (Arg->isInstantiationDependent()) return false; 7572 7573 if (Arg->HasSideEffects(Context)) 7574 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7575 << Arg->getSourceRange() 7576 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7577 7578 return false; 7579 } 7580 7581 /// Handle __builtin_alloca_with_align. This is declared 7582 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7583 /// than 8. 7584 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7585 // The alignment must be a constant integer. 7586 Expr *Arg = TheCall->getArg(1); 7587 7588 // We can't check the value of a dependent argument. 7589 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7590 if (const auto *UE = 7591 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7592 if (UE->getKind() == UETT_AlignOf || 7593 UE->getKind() == UETT_PreferredAlignOf) 7594 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7595 << Arg->getSourceRange(); 7596 7597 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7598 7599 if (!Result.isPowerOf2()) 7600 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7601 << Arg->getSourceRange(); 7602 7603 if (Result < Context.getCharWidth()) 7604 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7605 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7606 7607 if (Result > std::numeric_limits<int32_t>::max()) 7608 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7609 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7610 } 7611 7612 return false; 7613 } 7614 7615 /// Handle __builtin_assume_aligned. This is declared 7616 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs > 3)
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();

  // The alignment must be a constant integer.
  Expr *Arg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, 1, Result))
      return true;

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << Arg->getSourceRange();

    // An over-large alignment only warns; the call is still accepted.
    if (Result > Sema::MaximumAlignment)
      Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
          << Arg->getSourceRange() << Sema::MaximumAlignment;
  }

  // The optional third argument (the offset) is converted as if initializing
  // a size_t parameter.
  if (NumArgs > 2) {
    ExprResult Arg(TheCall->getArg(2));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                 Context.getSizeType(), false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());
  }

  return false;
}

/// Check a call to one of the __builtin_os_log_format* builtins: validates
/// the buffer argument, the format string, and each variadic data argument,
/// then sets the call's result type.
bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  // The size variant takes only the format string; the formatting variant
  // also takes the destination buffer.
  unsigned NumArgs = TheCall->getNumArgs();
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << TheCall->getSourceRange();
  }
  // At most 0xff data arguments beyond the required ones.
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << TheCall->getSourceRange();
  }
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    // Each data argument's size must fit in the one-byte size field.
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}

/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression. On success the value is returned in
/// \p Result; returns true (after diagnosing) if it is not constant.
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                                  llvm::APSInt &Result) {
  Expr *Arg = TheCall->getArg(ArgNum);
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Dependent arguments can't be evaluated yet; defer to instantiation.
  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;

  Optional<llvm::APSInt> R;
  if (!(R = Arg->getIntegerConstantExpr(Context)))
    return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
           << FDecl->getDeclName() << Arg->getSourceRange();
  Result = *R;
  return false;
}

/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                       int Low, int High, bool RangeIsError) {
  if (isConstantEvaluated())
    return false;
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << toString(Result, 10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << toString(Result, 10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}

/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of
/// CallExpr TheCall is a constant expression that is a multiple of Num.
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}

/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
/// constant expression representing a power of 2.
bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
  // and only if x is a power of 2.
  if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
         << Arg->getSourceRange();
}

/// Returns true if \p Value is a byte value (0..0xFF) shifted left by a
/// multiple of 8 bits. Negative values never qualify.
static bool IsShiftedByte(llvm::APSInt Value) {
  if (Value.isNegative())
    return false;

  // Check if it's a shifted byte, by shifting it down
  while (true) {
    // If the value fits in the bottom byte, the check passes.
    if (Value < 0x100)
      return true;

    // Otherwise, if the value has _any_ bits in the bottom byte, the check
    // fails.
    if ((Value & 0xFF) != 0)
      return false;

    // If the bottom 8 bits are all 0, but something above that is nonzero,
    // then shifting the value right by 8 bits won't affect whether it's a
    // shifted byte or not. So do that, and go round again.
    Value >>= 8;
  }
}

/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
/// a constant expression representing an arbitrary byte value shifted left by
/// a multiple of 8 bits.
bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
                                             unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  if (IsShiftedByte(Result))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
         << Arg->getSourceRange();
}

/// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
/// TheCall is a constant expression representing either a shifted byte value,
/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
/// Arm MVE intrinsics.
bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
                                                   int ArgNum,
                                                   unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  // Check to see if it's in either of the required forms.
  if (IsShiftedByte(Result) ||
      (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_shifted_byte_or_xxff)
         << Arg->getSourceRange();
}

/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
  // __builtin_arm_irg(pointer, mask): both operands are checked and the
  // result takes the pointer's type.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    ExprResult SecArg = DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_addg(pointer, imm): pointer plus a small immediate tag
  // offset.
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be an constant in range [0,15]
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  // __builtin_arm_gmi(pointer, mask): result is an int.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // __builtin_arm_ldg / __builtin_arm_stg(pointer): only ldg produces a
  // value (of the pointer's type).
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_subp(ptrA, ptrB): pointer difference, result long long.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&] (Expr *E) -> bool {
      return E->isNullPointerConstant(
          Context, Expr::NPC_ValueDependentIsNotNull); };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
               << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);

    TheCall->setArg(0, ArgExprA.get());
    TheCall->setArg(1, ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}

/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // On 32-bit ARM the first field names a coprocessor ("cp"/"p" prefix)
      // and the third (and, for five-field forms, fourth) name CRn/CRm
      // ("c" prefix); strip the prefixes before the numeric range check.
      ValidString &= Fields[0].startswith_insensitive("cp") ||
                     Fields[0].startswith_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].startswith_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].startswith_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].startswith_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i=0; i<Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // If the register name is one of those that appear in the condition below
    // and the special register builtin being used is one of the write builtins,
    // then we require that the argument provided for writing to the register
    // is an integer constant expression. This is because it will be lowered to
    // an MSR (immediate) instruction, so we need to know the immediate at
    // compile time.
    if (TheCall->getNumArgs() != 2)
      return false;

    std::string RegLower = Reg.lower();
    if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
        RegLower != "pan" && RegLower != "uao")
      return false;

    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  return false;
}

/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
/// Emit an error and return true on failure; return false on success.
/// TypeStr is a string containing the type descriptor of the value returned by
/// the builtin and the descriptors of the expected type of the arguments.
bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
                                 const char *TypeStr) {

  assert((TypeStr[0] != '\0') &&
         "Invalid types in PPC MMA builtin declaration");

  switch (BuiltinID) {
  default:
    // This function is called in CheckPPCBuiltinFunctionCall where the
    // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here
    // we are isolating the pair vector memop builtins that can be used with mma
    // off so the default case is every builtin that requires mma and paired
    // vector memops.
    if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
                         diag::err_ppc_builtin_only_on_arch, "10") ||
        SemaFeatureCheck(*this, TheCall, "mma",
                         diag::err_ppc_builtin_only_on_arch, "10"))
      return true;
    break;
  case PPC::BI__builtin_vsx_lxvp:
  case PPC::BI__builtin_vsx_stxvp:
  case PPC::BI__builtin_vsx_assemble_pair:
  case PPC::BI__builtin_vsx_disassemble_pair:
    if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
                         diag::err_ppc_builtin_only_on_arch, "10"))
      return true;
    break;
  }

  unsigned Mask = 0;
  unsigned ArgNum = 0;

  // The first type in TypeStr is the type of the value returned by the
  // builtin. So we first read that type and change the type of TheCall.
  QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
  TheCall->setType(type);

  // Walk the remaining descriptors, matching each against the corresponding
  // call argument.
  while (*TypeStr != '\0') {
    Mask = 0;
    QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    // Too few arguments supplied: stop here; the count mismatch is diagnosed
    // after the loop once all descriptors have been consumed.
    if (ArgNum >= TheCall->getNumArgs()) {
      ArgNum++;
      break;
    }

    Expr *Arg = TheCall->getArg(ArgNum);
    QualType PassedType = Arg->getType();
    QualType StrippedRVType = PassedType.getCanonicalType();

    // Strip Restrict/Volatile qualifiers.
    if (StrippedRVType.isRestrictQualified() ||
        StrippedRVType.isVolatileQualified())
      StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();

    // The only case where the argument type and expected type are allowed to
    // mismatch is if the argument type is a non-void pointer (or array) and
    // expected type is a void pointer.
    if (StrippedRVType != ExpectedType)
      if (!(ExpectedType->isVoidPointerType() &&
            (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
        return Diag(Arg->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << PassedType << ExpectedType << 1 << 0 << 0;

    // If the value of the Mask is not 0, we have a constraint in the size of
    // the integer argument so here we ensure the argument is a constant that
    // is in the valid range.
    if (Mask != 0 &&
        SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
      return true;

    ArgNum++;
  }

  // In case we exited early from the previous loop, there are other types to
  // read from TypeStr. So we need to read them all to ensure we have the right
  // number of arguments in TheCall and if it is not the case, to display a
  // better error message.
  while (*TypeStr != '\0') {
    (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    ArgNum++;
  }
  if (checkArgCount(*this, TheCall, ArgNum))
    return true;

  return false;
}

/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
  // Only valid on targets with SjLj lowering support.
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());

  Expr *Arg = TheCall->getArg(1);
  llvm::APSInt Result;

  // TODO: This is less than ideal. Overload this to take a value.
  if (SemaBuiltinConstantArg(TheCall, 1, Result))
    return true;

  // The second argument must be the constant 1.
  if (Result != 1)
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
           << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());

  return false;
}

/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
/// This checks that the target supports __builtin_setjmp.
bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
  return false;
}

namespace {

// Tracks, across multiple candidate format strings, the highest data argument
// index that no string covers, together with the strings to attach to the
// diagnostic.
class UncoveredArgHandler {
  enum { Unknown = -1, AllCovered = -2 };

  // Highest uncovered argument index seen so far, or one of the sentinel
  // values above.
  signed FirstUncoveredArg = Unknown;
  SmallVector<const Expr *, 4> DiagnosticExprs;

public:
  UncoveredArgHandler() = default;

  bool hasUncoveredArg() const {
    return (FirstUncoveredArg >= 0);
  }

  unsigned getUncoveredArg() const {
    assert(hasUncoveredArg() && "no uncovered argument");
    return FirstUncoveredArg;
  }

  void setAllCovered() {
    // A string has been found with all arguments covered, so clear out
    // the diagnostics.
    DiagnosticExprs.clear();
    FirstUncoveredArg = AllCovered;
  }

  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
    assert(NewFirstUncoveredArg >= 0 && "Outside range");

    // Don't update if a previous string covers all arguments.
    if (FirstUncoveredArg == AllCovered)
      return;

    // UncoveredArgHandler tracks the highest uncovered argument index
    // and with it all the strings that match this index.
    if (NewFirstUncoveredArg == FirstUncoveredArg)
      DiagnosticExprs.push_back(StrExpr);
    else if (NewFirstUncoveredArg > FirstUncoveredArg) {
      DiagnosticExprs.clear();
      DiagnosticExprs.push_back(StrExpr);
      FirstUncoveredArg = NewFirstUncoveredArg;
    }
  }

  void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
};

enum StringLiteralCheckType {
  SLCT_NotALiteral,
  SLCT_UncheckedLiteral,
  SLCT_CheckedLiteral
};

} // namespace

// Accumulate Addend into Offset for BinOpKind (BO_Add or BO_Sub), widening
// the operands as needed so that intermediate overflow is detected and
// retried at double the bit width rather than silently wrapping.
static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
                       BinaryOperatorKind BinOpKind,
                       bool AddendIsRight) {
  unsigned BitWidth = Offset.getBitWidth();
  unsigned AddendBitWidth = Addend.getBitWidth();
  // There might be negative interim results.
  if (Addend.isUnsigned()) {
    Addend = Addend.zext(++AddendBitWidth);
    Addend.setIsSigned(true);
  }
  // Adjust the bit width of the APSInts.
  if (AddendBitWidth > BitWidth) {
    Offset = Offset.sext(AddendBitWidth);
    BitWidth = AddendBitWidth;
  } else if (BitWidth > AddendBitWidth) {
    Addend = Addend.sext(BitWidth);
  }

  bool Ov = false;
  llvm::APSInt ResOffset = Offset;
  if (BinOpKind == BO_Add)
    ResOffset = Offset.sadd_ov(Addend, Ov);
  else {
    assert(AddendIsRight && BinOpKind == BO_Sub &&
           "operator must be add or sub with addend on the right");
    ResOffset = Offset.ssub_ov(Addend, Ov);
  }

  // We add an offset to a pointer here so we should support an offset as big as
  // possible.
  if (Ov) {
    assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
           "index (intermediate) result too big");
    Offset = Offset.sext(2 * BitWidth);
    sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
    return;
  }

  Offset = ResOffset;
}

namespace {

// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
class FormatStringLiteral {
  const StringLiteral *FExpr;
  int64_t Offset;

public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  // The format string starting at Offset.
  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  bool isAscii() const { return FExpr->isAscii(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  // Byte numbers are relative to the offsetted string; shift them before
  // delegating to the underlying literal.
  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

SourceLocation getBeginLoc() const LLVM_READONLY { 8431 return FExpr->getBeginLoc().getLocWithOffset(Offset); 8432 } 8433 8434 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 8435 }; 8436 8437 } // namespace 8438 8439 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8440 const Expr *OrigFormatExpr, 8441 ArrayRef<const Expr *> Args, 8442 bool HasVAListArg, unsigned format_idx, 8443 unsigned firstDataArg, 8444 Sema::FormatStringType Type, 8445 bool inFunctionCall, 8446 Sema::VariadicCallType CallType, 8447 llvm::SmallBitVector &CheckedVarArgs, 8448 UncoveredArgHandler &UncoveredArg, 8449 bool IgnoreStringsWithoutSpecifiers); 8450 8451 // Determine if an expression is a string literal or constant string. 8452 // If this function returns false on the arguments to a function expecting a 8453 // format string, we will usually need to emit a warning. 8454 // True string literals are then checked by CheckFormatString. 8455 static StringLiteralCheckType 8456 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 8457 bool HasVAListArg, unsigned format_idx, 8458 unsigned firstDataArg, Sema::FormatStringType Type, 8459 Sema::VariadicCallType CallType, bool InFunctionCall, 8460 llvm::SmallBitVector &CheckedVarArgs, 8461 UncoveredArgHandler &UncoveredArg, 8462 llvm::APSInt Offset, 8463 bool IgnoreStringsWithoutSpecifiers = false) { 8464 if (S.isConstantEvaluated()) 8465 return SLCT_NotALiteral; 8466 tryAgain: 8467 assert(Offset.isSigned() && "invalid offset"); 8468 8469 if (E->isTypeDependent() || E->isValueDependent()) 8470 return SLCT_NotALiteral; 8471 8472 E = E->IgnoreParenCasts(); 8473 8474 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 8475 // Technically -Wformat-nonliteral does not warn about this case. 8476 // The behavior of printf and friends in this case is implementation 8477 // dependent. 
Ideally if the format string cannot be null then 8478 // it should have a 'nonnull' attribute in the function prototype. 8479 return SLCT_UncheckedLiteral; 8480 8481 switch (E->getStmtClass()) { 8482 case Stmt::BinaryConditionalOperatorClass: 8483 case Stmt::ConditionalOperatorClass: { 8484 // The expression is a literal if both sub-expressions were, and it was 8485 // completely checked only if both sub-expressions were checked. 8486 const AbstractConditionalOperator *C = 8487 cast<AbstractConditionalOperator>(E); 8488 8489 // Determine whether it is necessary to check both sub-expressions, for 8490 // example, because the condition expression is a constant that can be 8491 // evaluated at compile time. 8492 bool CheckLeft = true, CheckRight = true; 8493 8494 bool Cond; 8495 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 8496 S.isConstantEvaluated())) { 8497 if (Cond) 8498 CheckRight = false; 8499 else 8500 CheckLeft = false; 8501 } 8502 8503 // We need to maintain the offsets for the right and the left hand side 8504 // separately to check if every possible indexed expression is a valid 8505 // string literal. They might have different offsets for different string 8506 // literals in the end. 8507 StringLiteralCheckType Left; 8508 if (!CheckLeft) 8509 Left = SLCT_UncheckedLiteral; 8510 else { 8511 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 8512 HasVAListArg, format_idx, firstDataArg, 8513 Type, CallType, InFunctionCall, 8514 CheckedVarArgs, UncoveredArg, Offset, 8515 IgnoreStringsWithoutSpecifiers); 8516 if (Left == SLCT_NotALiteral || !CheckRight) { 8517 return Left; 8518 } 8519 } 8520 8521 StringLiteralCheckType Right = checkFormatStringExpr( 8522 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 8523 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8524 IgnoreStringsWithoutSpecifiers); 8525 8526 return (CheckLeft && Left < Right) ? 
Left : Right; 8527 } 8528 8529 case Stmt::ImplicitCastExprClass: 8530 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 8531 goto tryAgain; 8532 8533 case Stmt::OpaqueValueExprClass: 8534 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 8535 E = src; 8536 goto tryAgain; 8537 } 8538 return SLCT_NotALiteral; 8539 8540 case Stmt::PredefinedExprClass: 8541 // While __func__, etc., are technically not string literals, they 8542 // cannot contain format specifiers and thus are not a security 8543 // liability. 8544 return SLCT_UncheckedLiteral; 8545 8546 case Stmt::DeclRefExprClass: { 8547 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 8548 8549 // As an exception, do not flag errors for variables binding to 8550 // const string literals. 8551 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 8552 bool isConstant = false; 8553 QualType T = DR->getType(); 8554 8555 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 8556 isConstant = AT->getElementType().isConstant(S.Context); 8557 } else if (const PointerType *PT = T->getAs<PointerType>()) { 8558 isConstant = T.isConstant(S.Context) && 8559 PT->getPointeeType().isConstant(S.Context); 8560 } else if (T->isObjCObjectPointerType()) { 8561 // In ObjC, there is usually no "const ObjectPointer" type, 8562 // so don't check if the pointee type is constant. 
8563 isConstant = T.isConstant(S.Context); 8564 } 8565 8566 if (isConstant) { 8567 if (const Expr *Init = VD->getAnyInitializer()) { 8568 // Look through initializers like const char c[] = { "foo" } 8569 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 8570 if (InitList->isStringLiteralInit()) 8571 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 8572 } 8573 return checkFormatStringExpr(S, Init, Args, 8574 HasVAListArg, format_idx, 8575 firstDataArg, Type, CallType, 8576 /*InFunctionCall*/ false, CheckedVarArgs, 8577 UncoveredArg, Offset); 8578 } 8579 } 8580 8581 // For vprintf* functions (i.e., HasVAListArg==true), we add a 8582 // special check to see if the format string is a function parameter 8583 // of the function calling the printf function. If the function 8584 // has an attribute indicating it is a printf-like function, then we 8585 // should suppress warnings concerning non-literals being used in a call 8586 // to a vprintf function. For example: 8587 // 8588 // void 8589 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 8590 // va_list ap; 8591 // va_start(ap, fmt); 8592 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 8593 // ... 8594 // } 8595 if (HasVAListArg) { 8596 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 8597 if (const Decl *D = dyn_cast<Decl>(PV->getDeclContext())) { 8598 int PVIndex = PV->getFunctionScopeIndex() + 1; 8599 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 8600 // adjust for implicit parameter 8601 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) 8602 if (MD->isInstance()) 8603 ++PVIndex; 8604 // We also check if the formats are compatible. 8605 // We can't pass a 'scanf' string to a 'printf' function. 
8606 if (PVIndex == PVFormat->getFormatIdx() && 8607 Type == S.GetFormatStringType(PVFormat)) 8608 return SLCT_UncheckedLiteral; 8609 } 8610 } 8611 } 8612 } 8613 } 8614 8615 return SLCT_NotALiteral; 8616 } 8617 8618 case Stmt::CallExprClass: 8619 case Stmt::CXXMemberCallExprClass: { 8620 const CallExpr *CE = cast<CallExpr>(E); 8621 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 8622 bool IsFirst = true; 8623 StringLiteralCheckType CommonResult; 8624 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 8625 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 8626 StringLiteralCheckType Result = checkFormatStringExpr( 8627 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8628 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8629 IgnoreStringsWithoutSpecifiers); 8630 if (IsFirst) { 8631 CommonResult = Result; 8632 IsFirst = false; 8633 } 8634 } 8635 if (!IsFirst) 8636 return CommonResult; 8637 8638 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 8639 unsigned BuiltinID = FD->getBuiltinID(); 8640 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 8641 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 8642 const Expr *Arg = CE->getArg(0); 8643 return checkFormatStringExpr(S, Arg, Args, 8644 HasVAListArg, format_idx, 8645 firstDataArg, Type, CallType, 8646 InFunctionCall, CheckedVarArgs, 8647 UncoveredArg, Offset, 8648 IgnoreStringsWithoutSpecifiers); 8649 } 8650 } 8651 } 8652 8653 return SLCT_NotALiteral; 8654 } 8655 case Stmt::ObjCMessageExprClass: { 8656 const auto *ME = cast<ObjCMessageExpr>(E); 8657 if (const auto *MD = ME->getMethodDecl()) { 8658 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 8659 // As a special case heuristic, if we're using the method -[NSBundle 8660 // localizedStringForKey:value:table:], ignore any key strings that lack 8661 // format specifiers. 
The idea is that if the key doesn't have any 8662 // format specifiers then its probably just a key to map to the 8663 // localized strings. If it does have format specifiers though, then its 8664 // likely that the text of the key is the format string in the 8665 // programmer's language, and should be checked. 8666 const ObjCInterfaceDecl *IFace; 8667 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 8668 IFace->getIdentifier()->isStr("NSBundle") && 8669 MD->getSelector().isKeywordSelector( 8670 {"localizedStringForKey", "value", "table"})) { 8671 IgnoreStringsWithoutSpecifiers = true; 8672 } 8673 8674 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 8675 return checkFormatStringExpr( 8676 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8677 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8678 IgnoreStringsWithoutSpecifiers); 8679 } 8680 } 8681 8682 return SLCT_NotALiteral; 8683 } 8684 case Stmt::ObjCStringLiteralClass: 8685 case Stmt::StringLiteralClass: { 8686 const StringLiteral *StrE = nullptr; 8687 8688 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 8689 StrE = ObjCFExpr->getString(); 8690 else 8691 StrE = cast<StringLiteral>(E); 8692 8693 if (StrE) { 8694 if (Offset.isNegative() || Offset > StrE->getLength()) { 8695 // TODO: It would be better to have an explicit warning for out of 8696 // bounds literals. 8697 return SLCT_NotALiteral; 8698 } 8699 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 8700 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 8701 firstDataArg, Type, InFunctionCall, CallType, 8702 CheckedVarArgs, UncoveredArg, 8703 IgnoreStringsWithoutSpecifiers); 8704 return SLCT_CheckedLiteral; 8705 } 8706 8707 return SLCT_NotALiteral; 8708 } 8709 case Stmt::BinaryOperatorClass: { 8710 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 8711 8712 // A string literal + an int offset is still a string literal. 
8713 if (BinOp->isAdditiveOp()) { 8714 Expr::EvalResult LResult, RResult; 8715 8716 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 8717 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8718 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 8719 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8720 8721 if (LIsInt != RIsInt) { 8722 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 8723 8724 if (LIsInt) { 8725 if (BinOpKind == BO_Add) { 8726 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8727 E = BinOp->getRHS(); 8728 goto tryAgain; 8729 } 8730 } else { 8731 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8732 E = BinOp->getLHS(); 8733 goto tryAgain; 8734 } 8735 } 8736 } 8737 8738 return SLCT_NotALiteral; 8739 } 8740 case Stmt::UnaryOperatorClass: { 8741 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8742 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8743 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8744 Expr::EvalResult IndexResult; 8745 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8746 Expr::SE_NoSideEffects, 8747 S.isConstantEvaluated())) { 8748 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8749 /*RHS is int*/ true); 8750 E = ASE->getBase(); 8751 goto tryAgain; 8752 } 8753 } 8754 8755 return SLCT_NotALiteral; 8756 } 8757 8758 default: 8759 return SLCT_NotALiteral; 8760 } 8761 } 8762 8763 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8764 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8765 .Case("scanf", FST_Scanf) 8766 .Cases("printf", "printf0", FST_Printf) 8767 .Cases("NSString", "CFString", FST_NSString) 8768 .Case("strftime", FST_Strftime) 8769 .Case("strfmon", FST_Strfmon) 8770 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8771 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8772 .Case("os_trace", FST_OSLog) 8773 .Case("os_log", FST_OSLog) 8774 .Default(FST_Unknown); 8775 } 8776 
8777 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8778 /// functions) for correct use of format strings. 8779 /// Returns true if a format string has been fully checked. 8780 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8781 ArrayRef<const Expr *> Args, 8782 bool IsCXXMember, 8783 VariadicCallType CallType, 8784 SourceLocation Loc, SourceRange Range, 8785 llvm::SmallBitVector &CheckedVarArgs) { 8786 FormatStringInfo FSI; 8787 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 8788 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 8789 FSI.FirstDataArg, GetFormatStringType(Format), 8790 CallType, Loc, Range, CheckedVarArgs); 8791 return false; 8792 } 8793 8794 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8795 bool HasVAListArg, unsigned format_idx, 8796 unsigned firstDataArg, FormatStringType Type, 8797 VariadicCallType CallType, 8798 SourceLocation Loc, SourceRange Range, 8799 llvm::SmallBitVector &CheckedVarArgs) { 8800 // CHECK: printf/scanf-like function is called with no format string. 8801 if (format_idx >= Args.size()) { 8802 Diag(Loc, diag::warn_missing_format_string) << Range; 8803 return false; 8804 } 8805 8806 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8807 8808 // CHECK: format string is not a string literal. 8809 // 8810 // Dynamically generated format strings are difficult to 8811 // automatically vet at compile time. Requiring that format strings 8812 // are string literals: (1) permits the checking of format strings by 8813 // the compiler and thereby (2) can practically remove the source of 8814 // many format string exploits. 8815 8816 // Format string can be either ObjC string (e.g. @"%d") or 8817 // C string (e.g. "%d") 8818 // ObjC string uses the same format specifiers as C string, so we can use 8819 // the same format string checking logic for both ObjC and C strings. 
8820 UncoveredArgHandler UncoveredArg; 8821 StringLiteralCheckType CT = 8822 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 8823 format_idx, firstDataArg, Type, CallType, 8824 /*IsFunctionCall*/ true, CheckedVarArgs, 8825 UncoveredArg, 8826 /*no string offset*/ llvm::APSInt(64, false) = 0); 8827 8828 // Generate a diagnostic where an uncovered argument is detected. 8829 if (UncoveredArg.hasUncoveredArg()) { 8830 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8831 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8832 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8833 } 8834 8835 if (CT != SLCT_NotALiteral) 8836 // Literal format string found, check done! 8837 return CT == SLCT_CheckedLiteral; 8838 8839 // Strftime is particular as it always uses a single 'time' argument, 8840 // so it is safe to pass a non-literal string. 8841 if (Type == FST_Strftime) 8842 return false; 8843 8844 // Do not emit diag when the string param is a macro expansion and the 8845 // format is either NSString or CFString. This is a hack to prevent 8846 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8847 // which are usually used in place of NS and CF string literals. 8848 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8849 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8850 return false; 8851 8852 // If there are no arguments specified, warn with -Wformat-security, otherwise 8853 // warn only with -Wformat-nonliteral. 
8854 if (Args.size() == firstDataArg) { 8855 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8856 << OrigFormatExpr->getSourceRange(); 8857 switch (Type) { 8858 default: 8859 break; 8860 case FST_Kprintf: 8861 case FST_FreeBSDKPrintf: 8862 case FST_Printf: 8863 Diag(FormatLoc, diag::note_format_security_fixit) 8864 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8865 break; 8866 case FST_NSString: 8867 Diag(FormatLoc, diag::note_format_security_fixit) 8868 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8869 break; 8870 } 8871 } else { 8872 Diag(FormatLoc, diag::warn_format_nonliteral) 8873 << OrigFormatExpr->getSourceRange(); 8874 } 8875 return false; 8876 } 8877 8878 namespace { 8879 8880 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8881 protected: 8882 Sema &S; 8883 const FormatStringLiteral *FExpr; 8884 const Expr *OrigFormatExpr; 8885 const Sema::FormatStringType FSType; 8886 const unsigned FirstDataArg; 8887 const unsigned NumDataArgs; 8888 const char *Beg; // Start of format string. 
8889 const bool HasVAListArg; 8890 ArrayRef<const Expr *> Args; 8891 unsigned FormatIdx; 8892 llvm::SmallBitVector CoveredArgs; 8893 bool usesPositionalArgs = false; 8894 bool atFirstArg = true; 8895 bool inFunctionCall; 8896 Sema::VariadicCallType CallType; 8897 llvm::SmallBitVector &CheckedVarArgs; 8898 UncoveredArgHandler &UncoveredArg; 8899 8900 public: 8901 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8902 const Expr *origFormatExpr, 8903 const Sema::FormatStringType type, unsigned firstDataArg, 8904 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8905 ArrayRef<const Expr *> Args, unsigned formatIdx, 8906 bool inFunctionCall, Sema::VariadicCallType callType, 8907 llvm::SmallBitVector &CheckedVarArgs, 8908 UncoveredArgHandler &UncoveredArg) 8909 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8910 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8911 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8912 inFunctionCall(inFunctionCall), CallType(callType), 8913 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8914 CoveredArgs.resize(numDataArgs); 8915 CoveredArgs.reset(); 8916 } 8917 8918 void DoneProcessing(); 8919 8920 void HandleIncompleteSpecifier(const char *startSpecifier, 8921 unsigned specifierLen) override; 8922 8923 void HandleInvalidLengthModifier( 8924 const analyze_format_string::FormatSpecifier &FS, 8925 const analyze_format_string::ConversionSpecifier &CS, 8926 const char *startSpecifier, unsigned specifierLen, 8927 unsigned DiagID); 8928 8929 void HandleNonStandardLengthModifier( 8930 const analyze_format_string::FormatSpecifier &FS, 8931 const char *startSpecifier, unsigned specifierLen); 8932 8933 void HandleNonStandardConversionSpecifier( 8934 const analyze_format_string::ConversionSpecifier &CS, 8935 const char *startSpecifier, unsigned specifierLen); 8936 8937 void HandlePosition(const char *startPos, unsigned posLen) override; 8938 8939 void 
HandleInvalidPosition(const char *startSpecifier, 8940 unsigned specifierLen, 8941 analyze_format_string::PositionContext p) override; 8942 8943 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8944 8945 void HandleNullChar(const char *nullCharacter) override; 8946 8947 template <typename Range> 8948 static void 8949 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8950 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8951 bool IsStringLocation, Range StringRange, 8952 ArrayRef<FixItHint> Fixit = None); 8953 8954 protected: 8955 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8956 const char *startSpec, 8957 unsigned specifierLen, 8958 const char *csStart, unsigned csLen); 8959 8960 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8961 const char *startSpec, 8962 unsigned specifierLen); 8963 8964 SourceRange getFormatStringRange(); 8965 CharSourceRange getSpecifierRange(const char *startSpecifier, 8966 unsigned specifierLen); 8967 SourceLocation getLocationOfByte(const char *x); 8968 8969 const Expr *getDataArg(unsigned i) const; 8970 8971 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8972 const analyze_format_string::ConversionSpecifier &CS, 8973 const char *startSpecifier, unsigned specifierLen, 8974 unsigned argIndex); 8975 8976 template <typename Range> 8977 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8978 bool IsStringLocation, Range StringRange, 8979 ArrayRef<FixItHint> Fixit = None); 8980 }; 8981 8982 } // namespace 8983 8984 SourceRange CheckFormatHandler::getFormatStringRange() { 8985 return OrigFormatExpr->getSourceRange(); 8986 } 8987 8988 CharSourceRange CheckFormatHandler:: 8989 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8990 SourceLocation Start = getLocationOfByte(startSpecifier); 8991 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8992 8993 
// Advance the end SourceLocation by one due to half-open ranges. 8994 End = End.getLocWithOffset(1); 8995 8996 return CharSourceRange::getCharRange(Start, End); 8997 } 8998 8999 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 9000 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 9001 S.getLangOpts(), S.Context.getTargetInfo()); 9002 } 9003 9004 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 9005 unsigned specifierLen){ 9006 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 9007 getLocationOfByte(startSpecifier), 9008 /*IsStringLocation*/true, 9009 getSpecifierRange(startSpecifier, specifierLen)); 9010 } 9011 9012 void CheckFormatHandler::HandleInvalidLengthModifier( 9013 const analyze_format_string::FormatSpecifier &FS, 9014 const analyze_format_string::ConversionSpecifier &CS, 9015 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 9016 using namespace analyze_format_string; 9017 9018 const LengthModifier &LM = FS.getLengthModifier(); 9019 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9020 9021 // See if we know how to fix this length modifier. 
9022 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9023 if (FixedLM) { 9024 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9025 getLocationOfByte(LM.getStart()), 9026 /*IsStringLocation*/true, 9027 getSpecifierRange(startSpecifier, specifierLen)); 9028 9029 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9030 << FixedLM->toString() 9031 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9032 9033 } else { 9034 FixItHint Hint; 9035 if (DiagID == diag::warn_format_nonsensical_length) 9036 Hint = FixItHint::CreateRemoval(LMRange); 9037 9038 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9039 getLocationOfByte(LM.getStart()), 9040 /*IsStringLocation*/true, 9041 getSpecifierRange(startSpecifier, specifierLen), 9042 Hint); 9043 } 9044 } 9045 9046 void CheckFormatHandler::HandleNonStandardLengthModifier( 9047 const analyze_format_string::FormatSpecifier &FS, 9048 const char *startSpecifier, unsigned specifierLen) { 9049 using namespace analyze_format_string; 9050 9051 const LengthModifier &LM = FS.getLengthModifier(); 9052 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9053 9054 // See if we know how to fix this length modifier. 
9055 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9056 if (FixedLM) { 9057 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9058 << LM.toString() << 0, 9059 getLocationOfByte(LM.getStart()), 9060 /*IsStringLocation*/true, 9061 getSpecifierRange(startSpecifier, specifierLen)); 9062 9063 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9064 << FixedLM->toString() 9065 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9066 9067 } else { 9068 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9069 << LM.toString() << 0, 9070 getLocationOfByte(LM.getStart()), 9071 /*IsStringLocation*/true, 9072 getSpecifierRange(startSpecifier, specifierLen)); 9073 } 9074 } 9075 9076 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 9077 const analyze_format_string::ConversionSpecifier &CS, 9078 const char *startSpecifier, unsigned specifierLen) { 9079 using namespace analyze_format_string; 9080 9081 // See if we know how to fix this conversion specifier. 
9082 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 9083 if (FixedCS) { 9084 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9085 << CS.toString() << /*conversion specifier*/1, 9086 getLocationOfByte(CS.getStart()), 9087 /*IsStringLocation*/true, 9088 getSpecifierRange(startSpecifier, specifierLen)); 9089 9090 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 9091 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 9092 << FixedCS->toString() 9093 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 9094 } else { 9095 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9096 << CS.toString() << /*conversion specifier*/1, 9097 getLocationOfByte(CS.getStart()), 9098 /*IsStringLocation*/true, 9099 getSpecifierRange(startSpecifier, specifierLen)); 9100 } 9101 } 9102 9103 void CheckFormatHandler::HandlePosition(const char *startPos, 9104 unsigned posLen) { 9105 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 9106 getLocationOfByte(startPos), 9107 /*IsStringLocation*/true, 9108 getSpecifierRange(startPos, posLen)); 9109 } 9110 9111 void 9112 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 9113 analyze_format_string::PositionContext p) { 9114 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 9115 << (unsigned) p, 9116 getLocationOfByte(startPos), /*IsStringLocation*/true, 9117 getSpecifierRange(startPos, posLen)); 9118 } 9119 9120 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 9121 unsigned posLen) { 9122 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 9123 getLocationOfByte(startPos), 9124 /*IsStringLocation*/true, 9125 getSpecifierRange(startPos, posLen)); 9126 } 9127 9128 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 9129 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 9130 // The presence of a null character is 
likely an error. 9131 EmitFormatDiagnostic( 9132 S.PDiag(diag::warn_printf_format_string_contains_null_char), 9133 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 9134 getFormatStringRange()); 9135 } 9136 } 9137 9138 // Note that this may return NULL if there was an error parsing or building 9139 // one of the argument expressions. 9140 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 9141 return Args[FirstDataArg + i]; 9142 } 9143 9144 void CheckFormatHandler::DoneProcessing() { 9145 // Does the number of data arguments exceed the number of 9146 // format conversions in the format string? 9147 if (!HasVAListArg) { 9148 // Find any arguments that weren't covered. 9149 CoveredArgs.flip(); 9150 signed notCoveredArg = CoveredArgs.find_first(); 9151 if (notCoveredArg >= 0) { 9152 assert((unsigned)notCoveredArg < NumDataArgs); 9153 UncoveredArg.Update(notCoveredArg, OrigFormatExpr); 9154 } else { 9155 UncoveredArg.setAllCovered(); 9156 } 9157 } 9158 } 9159 9160 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall, 9161 const Expr *ArgExpr) { 9162 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 && 9163 "Invalid state"); 9164 9165 if (!ArgExpr) 9166 return; 9167 9168 SourceLocation Loc = ArgExpr->getBeginLoc(); 9169 9170 if (S.getSourceManager().isInSystemMacro(Loc)) 9171 return; 9172 9173 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used); 9174 for (auto E : DiagnosticExprs) 9175 PDiag << E->getSourceRange(); 9176 9177 CheckFormatHandler::EmitFormatDiagnostic( 9178 S, IsFunctionCall, DiagnosticExprs[0], 9179 PDiag, Loc, /*IsStringLocation*/false, 9180 DiagnosticExprs[0]->getSourceRange()); 9181 } 9182 9183 bool 9184 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex, 9185 SourceLocation Loc, 9186 const char *startSpec, 9187 unsigned specifierLen, 9188 const char *csStart, 9189 unsigned csLen) { 9190 bool keepGoing = true; 9191 if (argIndex < NumDataArgs) { 9192 // Consider the argument 
coverered, even though the specifier doesn't 9193 // make sense. 9194 CoveredArgs.set(argIndex); 9195 } 9196 else { 9197 // If argIndex exceeds the number of data arguments we 9198 // don't issue a warning because that is just a cascade of warnings (and 9199 // they may have intended '%%' anyway). We don't want to continue processing 9200 // the format string after this point, however, as we will like just get 9201 // gibberish when trying to match arguments. 9202 keepGoing = false; 9203 } 9204 9205 StringRef Specifier(csStart, csLen); 9206 9207 // If the specifier in non-printable, it could be the first byte of a UTF-8 9208 // sequence. In that case, print the UTF-8 code point. If not, print the byte 9209 // hex value. 9210 std::string CodePointStr; 9211 if (!llvm::sys::locale::isPrint(*csStart)) { 9212 llvm::UTF32 CodePoint; 9213 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 9214 const llvm::UTF8 *E = 9215 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 9216 llvm::ConversionResult Result = 9217 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 9218 9219 if (Result != llvm::conversionOK) { 9220 unsigned char FirstChar = *csStart; 9221 CodePoint = (llvm::UTF32)FirstChar; 9222 } 9223 9224 llvm::raw_string_ostream OS(CodePointStr); 9225 if (CodePoint < 256) 9226 OS << "\\x" << llvm::format("%02x", CodePoint); 9227 else if (CodePoint <= 0xFFFF) 9228 OS << "\\u" << llvm::format("%04x", CodePoint); 9229 else 9230 OS << "\\U" << llvm::format("%08x", CodePoint); 9231 OS.flush(); 9232 Specifier = CodePointStr; 9233 } 9234 9235 EmitFormatDiagnostic( 9236 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 9237 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 9238 9239 return keepGoing; 9240 } 9241 9242 void 9243 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 9244 const char *startSpec, 9245 unsigned specifierLen) { 9246 EmitFormatDiagnostic( 9247 
S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 9248 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 9249 } 9250 9251 bool 9252 CheckFormatHandler::CheckNumArgs( 9253 const analyze_format_string::FormatSpecifier &FS, 9254 const analyze_format_string::ConversionSpecifier &CS, 9255 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 9256 9257 if (argIndex >= NumDataArgs) { 9258 PartialDiagnostic PDiag = FS.usesPositionalArg() 9259 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 9260 << (argIndex+1) << NumDataArgs) 9261 : S.PDiag(diag::warn_printf_insufficient_data_args); 9262 EmitFormatDiagnostic( 9263 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 9264 getSpecifierRange(startSpecifier, specifierLen)); 9265 9266 // Since more arguments than conversion tokens are given, by extension 9267 // all arguments are covered, so mark this as so. 9268 UncoveredArg.setAllCovered(); 9269 return false; 9270 } 9271 return true; 9272 } 9273 9274 template<typename Range> 9275 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 9276 SourceLocation Loc, 9277 bool IsStringLocation, 9278 Range StringRange, 9279 ArrayRef<FixItHint> FixIt) { 9280 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 9281 Loc, IsStringLocation, StringRange, FixIt); 9282 } 9283 9284 /// If the format string is not within the function call, emit a note 9285 /// so that the function call and string are in diagnostic messages. 9286 /// 9287 /// \param InFunctionCall if true, the format string is within the function 9288 /// call and only one diagnostic message will be produced. Otherwise, an 9289 /// extra note will be emitted pointing to location of the format string. 9290 /// 9291 /// \param ArgumentExpr the expression that is passed as the format string 9292 /// argument in the function call. Used for getting locations when two 9293 /// diagnostics are emitted. 
///
/// \param PDiag the callee should already have provided any strings for the
/// diagnostic message. This function only adds locations and fixits
/// to diagnostics.
///
/// \param Loc primary location for diagnostic. If two diagnostics are
/// required, one will be at Loc and a new SourceLocation will be created for
/// the other one.
///
/// \param IsStringLocation if true, Loc points to the format string and will
/// be used for the note. Otherwise, Loc points to the argument list and will
/// be used with PDiag.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    // One diagnostic: point directly at the offending location.
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    // Two diagnostics: the primary one on the call's format argument, plus
    // a note at the place where the format string itself is defined.
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
      << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
      S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
             diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}

//===--- CHECK: Printf format string checking ------------------------------===//

namespace {

/// Handler that checks printf-style format strings (printf, NSString
/// formats, os_log, os_trace, ...) one conversion specifier at a time,
/// validating each against its corresponding data argument.
class CheckPrintfHandler : public CheckFormatHandler {
public:
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     bool hasVAListArg, ArrayRef<const Expr *> Args,
                     unsigned formatIdx, bool inFunctionCall,
                     Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
  bool allowsObjCArg() const {
    return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
           FSType == Sema::FST_OSTrace;
  }

  bool HandleInvalidPrintfConversionSpecifier(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const char *startSpecifier,
                                      unsigned specifierLen) override;

  void handleInvalidMaskType(StringRef MaskType) override;

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *startSpecifier, unsigned specifierLen,
                             const TargetInfo &Target) override;
  bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                       const char *StartSpecifier,
                       unsigned SpecifierLen,
                       const Expr *E);

  // Width/precision and flag checking helpers.
  bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
                    const char *startSpecifier, unsigned specifierLen);
  void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
                           const analyze_printf::OptionalAmount &Amt,
                           unsigned type,
                           const char *startSpecifier, unsigned specifierLen);
  void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                  const analyze_printf::OptionalFlag &flag,
                  const char *startSpecifier, unsigned specifierLen);
  void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
                         const analyze_printf::OptionalFlag &ignoredFlag,
                         const analyze_printf::OptionalFlag &flag,
                         const char *startSpecifier, unsigned specifierLen);
  bool checkForCStrMembers(const analyze_printf::ArgType &AT,
                           const Expr *E);

  // Objective-C '%[...]' modifier-flag diagnostics.
  void HandleEmptyObjCModifierFlag(const char *startFlag,
                                   unsigned flagLen) override;

  void HandleInvalidObjCModifierFlag(const char *startFlag,
                                     unsigned flagLen) override;

  void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
                                            const char *flagsEnd,
                                            const char *conversionPosition)
                                            override;
};

} // namespace

bool
CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
  S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
}

/// Type-check the data argument consumed by a '*' field width or precision.
/// \p k selects the diagnostic wording: 0 for a field width, 1 for a
/// precision. Returns false if checking of this specifier should stop.
bool CheckPrintfHandler::HandleAmount(
    const analyze_format_string::OptionalAmount &Amt,
    unsigned k, const char *startSpecifier,
    unsigned specifierLen) {
  if (Amt.hasDataArgument()) {
    // With a va_list argument there are no expressions to check against.
    if (!HasVAListArg) {
      unsigned argIndex = Amt.getArgIndex();
      if (argIndex >= NumDataArgs) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
                               << k,
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking. We will just emit
        // spurious errors.
        return false;
      }

      // Type check the data argument. It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case. GCC also
      // doesn't emit a warning for that case.
      CoveredArgs.set(argIndex);
      const Expr *Arg = getDataArg(argIndex);
      if (!Arg)
        return false;

      QualType T = Arg->getType();

      const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
      assert(AT.isValid());

      if (!AT.matchesType(S.Context, T)) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
                               << k << AT.getRepresentativeTypeName(S.Context)
                               << T << Arg->getSourceRange(),
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking. We will just emit
        // spurious errors.
        return false;
      }
    }
  }
  return true;
}

/// Diagnose a field width or precision that makes no sense for the given
/// conversion specifier, offering a removal fixit when the amount is a
/// constant embedded in the format string.
void CheckPrintfHandler::HandleInvalidAmount(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const analyze_printf::OptionalAmount &Amt,
                                      unsigned type,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  // Only constant amounts occupy removable characters in the string;
  // '*' amounts get no fixit.
  FixItHint fixit =
    Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
      ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
                                 Amt.getConstantLength()))
      : FixItHint();

  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
                         << type << CS.toString(),
                       getLocationOfByte(Amt.getStart()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       fixit);
}

void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                                    const analyze_printf::OptionalFlag &flag,
                                    const char *startSpecifier,
                                    unsigned specifierLen) {
  // Warn about pointless flag with a fixit removal.
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
                         << flag.toString() << CS.toString(),
                       getLocationOfByte(flag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(flag.getPosition(), 1)));
}

void CheckPrintfHandler::HandleIgnoredFlag(
                                const analyze_printf::PrintfSpecifier &FS,
                                const analyze_printf::OptionalFlag &ignoredFlag,
                                const analyze_printf::OptionalFlag &flag,
                                const char *startSpecifier,
                                unsigned specifierLen) {
  // Warn about ignored flag with a fixit removal.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
                         << ignoredFlag.toString() << flag.toString(),
                       getLocationOfByte(ignoredFlag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(ignoredFlag.getPosition(), 1)));
}

void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
                                                     unsigned flagLen) {
  // Warn about an empty flag.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
                       getLocationOfByte(startFlag),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startFlag, flagLen));
}

void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
                                                       unsigned flagLen) {
  // Warn about an invalid flag.
  auto Range = getSpecifierRange(startFlag, flagLen);
  StringRef flag(startFlag, flagLen);
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
                      getLocationOfByte(startFlag),
                      /*IsStringLocation*/true,
                      Range, FixItHint::CreateRemoval(Range));
}

void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
    const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
    // Warn about using '[...]' without a '@' conversion.
    auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
    auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
    EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
                         getLocationOfByte(conversionPosition),
                         /*IsStringLocation*/true,
                         Range, FixItHint::CreateRemoval(Range));
}

// Determines if the specified type is a C++ class or struct containing
// a member with the specified name and kind (e.g. a CXXMethodDecl named
// "c_str()").
template<typename MemberKind>
static llvm::SmallPtrSet<MemberKind*, 1>
CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  llvm::SmallPtrSet<MemberKind*, 1> Results;

  // Non-record types and records without a visible definition can't have
  // the member we're looking for.
  if (!RT)
    return Results;
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD || !RD->getDefinition())
    return Results;

  LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
                 Sema::LookupMemberName);
  R.suppressDiagnostics();

  // We just need to include all members of the right kind turned up by the
  // filter, at this point.
  if (S.LookupQualifiedName(R, RT->getDecl()))
    for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
      NamedDecl *decl = (*I)->getUnderlyingDecl();
      if (MemberKind *FK = dyn_cast<MemberKind>(decl))
        Results.insert(FK);
    }
  return Results;
}

/// Check if we could call '.c_str()' on an object.
///
/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
/// allow the call, or if it would be ambiguous).
bool Sema::hasCStrMethod(const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
  // Any 'c_str' overload callable with zero arguments counts.
  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI)
    if ((*MI)->getMinRequiredArguments() == 0)
      return true;
  return false;
}

// Check if a (w)string was passed when a (w)char* was needed, and offer a
// better diagnostic if so. AT is assumed to be valid.
// Returns true when a c_str() conversion method is found.
bool CheckPrintfHandler::checkForCStrMembers(
    const analyze_printf::ArgType &AT, const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());

  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI) {
    const CXXMethodDecl *Method = *MI;
    // The method must be callable with no arguments and must return the
    // (w)char* type the specifier expects.
    if (Method->getMinRequiredArguments() == 0 &&
        AT.matchesType(S.Context, Method->getReturnType())) {
      // FIXME: Suggest parens if the expression needs them.
      SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
      S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
          << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
      return true;
    }
  }

  return false;
}

/// Check a single printf conversion specifier against the data argument(s)
/// it consumes. Returns false to stop processing the format string.
bool CheckPrintfHandler::HandlePrintfSpecifier(
    const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
    unsigned specifierLen, const TargetInfo &Target) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();

  if (FS.consumesDataArgument()) {
    // Remember whether the first specifier was positional; mixing positional
    // and non-positional specifiers afterwards is diagnosed.
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // First check if the field width, precision, and conversion specifier
  // have matching data arguments.
  if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!CS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense. Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // FreeBSD kernel extensions.
  if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
      CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
    // %b and %D consume two data arguments each.
    // We need at least two arguments.
    if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
      return false;

    // Claim the second argument.
    CoveredArgs.set(argIndex + 1);

    // Type check the first argument (int for %b, pointer for %D)
    const Expr *Ex = getDataArg(argIndex);
    const analyze_printf::ArgType &AT =
      (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
        ArgType(S.Context.IntTy) : ArgType::CPointerTy;
    if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    // Type check the second argument (char * for both %b and %D)
    Ex = getDataArg(argIndex + 1);
    const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
    if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Check for using an Objective-C specific conversion specifier
  // in a non-ObjC literal.
  if (!allowsObjCArg() && CS.isObjCArg()) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %P can only be used with os_log.
  if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %n is not allowed with os_log.
  if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Only scalars are allowed for os_trace.
  if (FSType == Sema::FST_OSTrace &&
      (CS.getKind() == ConversionSpecifier::PArg ||
       CS.getKind() == ConversionSpecifier::sArg ||
       CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // Check for use of public/private annotation outside of os_log().
  if (FSType != Sema::FST_OSLog) {
    if (FS.isPublic().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "public",
                           getLocationOfByte(FS.isPublic().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
    if (FS.isPrivate().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "private",
                           getLocationOfByte(FS.isPrivate().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
  }

  // %n is not supported on Android or Fuchsia targets.
  const llvm::Triple &Triple = Target.getTriple();
  if (CS.getKind() == ConversionSpecifier::nArg &&
      (Triple.isAndroid() || Triple.isOSFuchsia())) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check for invalid use of field width
  if (!FS.hasValidFieldWidth()) {
    HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
                        startSpecifier, specifierLen);
  }

  // Check for invalid use of precision
  if (!FS.hasValidPrecision()) {
    HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
                        startSpecifier, specifierLen);
  }

  // Precision is mandatory for %P specifier.
  if (CS.getKind() == ConversionSpecifier::PArg &&
      FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
                         getLocationOfByte(startSpecifier),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check each flag does not conflict with any other component.
  if (!FS.hasValidThousandsGroupingPrefix())
    HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
  if (!FS.hasValidLeadingZeros())
    HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
  if (!FS.hasValidPlusPrefix())
    HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidSpacePrefix())
    HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidAlternativeForm())
    HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
  if (!FS.hasValidLeftJustified())
    HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);

  // Check that flags are not ignored by another flag
  if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
    HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
                      startSpecifier, specifierLen);
  if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
    HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
                      startSpecifier, specifierLen);

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  const Expr *Arg = getDataArg(argIndex);
  if (!Arg)
    return true;

  return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
}

/// Return true if adding a C-style cast in front of \p E would require
/// wrapping E in parentheses.
static bool requiresParensToAddCast(const Expr *E) {
  // FIXME: We should have a general way to reason about operator
  // precedence and whether parens are actually needed here.
  // Take care of a few common cases where they aren't.
  const Expr *Inside = E->IgnoreImpCasts();
  // Use the syntactic form of pseudo-object expressions (e.g. ObjC property
  // accesses) when classifying.
  if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
    Inside = POE->getSyntacticForm()->IgnoreImpCasts();

  // These expression forms bind tighter than a C-style cast, so no parens
  // are needed; anything else conservatively gets them.
  switch (Inside->getStmtClass()) {
  case Stmt::ArraySubscriptExprClass:
  case Stmt::CallExprClass:
  case Stmt::CharacterLiteralClass:
  case Stmt::CXXBoolLiteralExprClass:
  case Stmt::DeclRefExprClass:
  case Stmt::FloatingLiteralClass:
  case Stmt::IntegerLiteralClass:
  case Stmt::MemberExprClass:
  case Stmt::ObjCArrayLiteralClass:
  case Stmt::ObjCBoolLiteralExprClass:
  case Stmt::ObjCBoxedExprClass:
  case Stmt::ObjCDictionaryLiteralClass:
  case Stmt::ObjCEncodeExprClass:
  case Stmt::ObjCIvarRefExprClass:
  case Stmt::ObjCMessageExprClass:
  case Stmt::ObjCPropertyRefExprClass:
  case Stmt::ObjCStringLiteralClass:
  case Stmt::ObjCSubscriptRefExprClass:
  case Stmt::ParenExprClass:
  case Stmt::StringLiteralClass:
  case Stmt::UnaryOperatorClass:
    return false;
  default:
    return true;
  }
}

/// If \p IntendedTy is (or is sugared by) one of Darwin's
/// platform-independence typedefs (CFIndex, NSInteger, NSUInteger, SInt32,
/// UInt32), return the primitive type it should be printed as together with
/// the typedef's name; otherwise return a null QualType/empty name.
static std::pair<QualType, StringRef>
shouldNotPrintDirectly(const ASTContext &Context,
                       QualType IntendedTy,
                       const Expr *E) {
  // Use a 'while' to peel off layers of typedefs.
  QualType TyTy = IntendedTy;
  while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
    StringRef Name = UserTy->getDecl()->getName();
    QualType CastTy = llvm::StringSwitch<QualType>(Name)
      .Case("CFIndex", Context.getNSIntegerType())
      .Case("NSInteger", Context.getNSIntegerType())
      .Case("NSUInteger", Context.getNSUIntegerType())
      .Case("SInt32", Context.IntTy)
      .Case("UInt32", Context.UnsignedIntTy)
      .Default(QualType());

    if (!CastTy.isNull())
      return std::make_pair(CastTy, Name);

    TyTy = UserTy->desugar();
  }

  // Strip parens if necessary.
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return shouldNotPrintDirectly(Context,
                                  PE->getSubExpr()->getType(),
                                  PE->getSubExpr());

  // If this is a conditional expression, then its result type is constructed
  // via usual arithmetic conversions and thus there might be no necessary
  // typedef sugar there. Recurse to operands to check for NSInteger &
  // Co. usage condition.
  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
    QualType TrueTy, FalseTy;
    StringRef TrueName, FalseName;

    std::tie(TrueTy, TrueName) =
      shouldNotPrintDirectly(Context,
                             CO->getTrueExpr()->getType(),
                             CO->getTrueExpr());
    std::tie(FalseTy, FalseName) =
      shouldNotPrintDirectly(Context,
                             CO->getFalseExpr()->getType(),
                             CO->getFalseExpr());

    // Report a type only when the two arms agree (or one arm has no
    // interesting typedef at all).
    if (TrueTy == FalseTy)
      return std::make_pair(TrueTy, TrueName);
    else if (TrueTy.isNull())
      return std::make_pair(FalseTy, FalseName);
    else if (FalseTy.isNull())
      return std::make_pair(TrueTy, TrueName);
  }

  return std::make_pair(QualType(), StringRef());
}

/// Return true if \p ICE is an implicit argument promotion of an arithmetic
/// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
/// type do not count.
static bool
isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
  QualType From = ICE->getSubExpr()->getType();
  QualType To = ICE->getType();
  // It's an integer promotion if the destination type is the promoted
  // source type.
  if (ICE->getCastKind() == CK_IntegralCast &&
      From->isPromotableIntegerType() &&
      S.Context.getPromotedIntegerType(From) == To)
    return true;
  // Look through vector types, since we do default argument promotion for
  // those in OpenCL.
9950 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9951 From = VecTy->getElementType(); 9952 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9953 To = VecTy->getElementType(); 9954 // It's a floating promotion if the source type is a lower rank. 9955 return ICE->getCastKind() == CK_FloatingCast && 9956 S.Context.getFloatingTypeOrder(From, To) < 0; 9957 } 9958 9959 bool 9960 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9961 const char *StartSpecifier, 9962 unsigned SpecifierLen, 9963 const Expr *E) { 9964 using namespace analyze_format_string; 9965 using namespace analyze_printf; 9966 9967 // Now type check the data expression that matches the 9968 // format specifier. 9969 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9970 if (!AT.isValid()) 9971 return true; 9972 9973 QualType ExprTy = E->getType(); 9974 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9975 ExprTy = TET->getUnderlyingExpr()->getType(); 9976 } 9977 9978 // Diagnose attempts to print a boolean value as a character. Unlike other 9979 // -Wformat diagnostics, this is fine from a type perspective, but it still 9980 // doesn't make sense. 9981 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9982 E->isKnownToHaveBooleanValue()) { 9983 const CharSourceRange &CSR = 9984 getSpecifierRange(StartSpecifier, SpecifierLen); 9985 SmallString<4> FSString; 9986 llvm::raw_svector_ostream os(FSString); 9987 FS.toString(os); 9988 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9989 << FSString, 9990 E->getExprLoc(), false, CSR); 9991 return true; 9992 } 9993 9994 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9995 if (Match == analyze_printf::ArgType::Match) 9996 return true; 9997 9998 // Look through argument promotions for our error message's reported type. 
9999 // This includes the integral and floating promotions, but excludes array 10000 // and function pointer decay (seeing that an argument intended to be a 10001 // string has type 'char [6]' is probably more confusing than 'char *') and 10002 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 10003 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10004 if (isArithmeticArgumentPromotion(S, ICE)) { 10005 E = ICE->getSubExpr(); 10006 ExprTy = E->getType(); 10007 10008 // Check if we didn't match because of an implicit cast from a 'char' 10009 // or 'short' to an 'int'. This is done because printf is a varargs 10010 // function. 10011 if (ICE->getType() == S.Context.IntTy || 10012 ICE->getType() == S.Context.UnsignedIntTy) { 10013 // All further checking is done on the subexpression 10014 const analyze_printf::ArgType::MatchKind ImplicitMatch = 10015 AT.matchesType(S.Context, ExprTy); 10016 if (ImplicitMatch == analyze_printf::ArgType::Match) 10017 return true; 10018 if (ImplicitMatch == ArgType::NoMatchPedantic || 10019 ImplicitMatch == ArgType::NoMatchTypeConfusion) 10020 Match = ImplicitMatch; 10021 } 10022 } 10023 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 10024 // Special case for 'a', which has type 'int' in C. 10025 // Note, however, that we do /not/ want to treat multibyte constants like 10026 // 'MooV' as characters! This form is deprecated but still exists. In 10027 // addition, don't treat expressions as of type 'char' if one byte length 10028 // modifier is provided. 10029 if (ExprTy == S.Context.IntTy && 10030 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 10031 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 10032 ExprTy = S.Context.CharTy; 10033 } 10034 10035 // Look through enums to their underlying type. 
10036 bool IsEnum = false; 10037 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 10038 ExprTy = EnumTy->getDecl()->getIntegerType(); 10039 IsEnum = true; 10040 } 10041 10042 // %C in an Objective-C context prints a unichar, not a wchar_t. 10043 // If the argument is an integer of some kind, believe the %C and suggest 10044 // a cast instead of changing the conversion specifier. 10045 QualType IntendedTy = ExprTy; 10046 if (isObjCContext() && 10047 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 10048 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 10049 !ExprTy->isCharType()) { 10050 // 'unichar' is defined as a typedef of unsigned short, but we should 10051 // prefer using the typedef if it is visible. 10052 IntendedTy = S.Context.UnsignedShortTy; 10053 10054 // While we are here, check if the value is an IntegerLiteral that happens 10055 // to be within the valid range. 10056 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 10057 const llvm::APInt &V = IL->getValue(); 10058 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 10059 return true; 10060 } 10061 10062 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 10063 Sema::LookupOrdinaryName); 10064 if (S.LookupName(Result, S.getCurScope())) { 10065 NamedDecl *ND = Result.getFoundDecl(); 10066 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 10067 if (TD->getUnderlyingType() == IntendedTy) 10068 IntendedTy = S.Context.getTypedefType(TD); 10069 } 10070 } 10071 } 10072 10073 // Special-case some of Darwin's platform-independence types by suggesting 10074 // casts to primitive types that are known to be large enough. 
10075 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 10076 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 10077 QualType CastTy; 10078 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 10079 if (!CastTy.isNull()) { 10080 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 10081 // (long in ASTContext). Only complain to pedants. 10082 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 10083 (AT.isSizeT() || AT.isPtrdiffT()) && 10084 AT.matchesType(S.Context, CastTy)) 10085 Match = ArgType::NoMatchPedantic; 10086 IntendedTy = CastTy; 10087 ShouldNotPrintDirectly = true; 10088 } 10089 } 10090 10091 // We may be able to offer a FixItHint if it is a supported type. 10092 PrintfSpecifier fixedFS = FS; 10093 bool Success = 10094 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 10095 10096 if (Success) { 10097 // Get the fix string from the fixed format specifier 10098 SmallString<16> buf; 10099 llvm::raw_svector_ostream os(buf); 10100 fixedFS.toString(os); 10101 10102 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 10103 10104 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 10105 unsigned Diag; 10106 switch (Match) { 10107 case ArgType::Match: llvm_unreachable("expected non-matching"); 10108 case ArgType::NoMatchPedantic: 10109 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10110 break; 10111 case ArgType::NoMatchTypeConfusion: 10112 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10113 break; 10114 case ArgType::NoMatch: 10115 Diag = diag::warn_format_conversion_argument_type_mismatch; 10116 break; 10117 } 10118 10119 // In this case, the specifier is wrong and should be changed to match 10120 // the argument. 
10121 EmitFormatDiagnostic(S.PDiag(Diag) 10122 << AT.getRepresentativeTypeName(S.Context) 10123 << IntendedTy << IsEnum << E->getSourceRange(), 10124 E->getBeginLoc(), 10125 /*IsStringLocation*/ false, SpecRange, 10126 FixItHint::CreateReplacement(SpecRange, os.str())); 10127 } else { 10128 // The canonical type for formatting this value is different from the 10129 // actual type of the expression. (This occurs, for example, with Darwin's 10130 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 10131 // should be printed as 'long' for 64-bit compatibility.) 10132 // Rather than emitting a normal format/argument mismatch, we want to 10133 // add a cast to the recommended type (and correct the format string 10134 // if necessary). 10135 SmallString<16> CastBuf; 10136 llvm::raw_svector_ostream CastFix(CastBuf); 10137 CastFix << "("; 10138 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 10139 CastFix << ")"; 10140 10141 SmallVector<FixItHint,4> Hints; 10142 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 10143 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 10144 10145 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 10146 // If there's already a cast present, just replace it. 10147 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 10148 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 10149 10150 } else if (!requiresParensToAddCast(E)) { 10151 // If the expression has high enough precedence, 10152 // just write the C-style cast. 10153 Hints.push_back( 10154 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10155 } else { 10156 // Otherwise, add parens around the expression as well as the cast. 
10157 CastFix << "("; 10158 Hints.push_back( 10159 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10160 10161 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 10162 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 10163 } 10164 10165 if (ShouldNotPrintDirectly) { 10166 // The expression has a type that should not be printed directly. 10167 // We extract the name from the typedef because we don't want to show 10168 // the underlying type in the diagnostic. 10169 StringRef Name; 10170 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 10171 Name = TypedefTy->getDecl()->getName(); 10172 else 10173 Name = CastTyName; 10174 unsigned Diag = Match == ArgType::NoMatchPedantic 10175 ? diag::warn_format_argument_needs_cast_pedantic 10176 : diag::warn_format_argument_needs_cast; 10177 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 10178 << E->getSourceRange(), 10179 E->getBeginLoc(), /*IsStringLocation=*/false, 10180 SpecRange, Hints); 10181 } else { 10182 // In this case, the expression could be printed using a different 10183 // specifier, but we've decided that the specifier is probably correct 10184 // and we should cast instead. Just use the normal warning message. 10185 EmitFormatDiagnostic( 10186 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10187 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 10188 << E->getSourceRange(), 10189 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 10190 } 10191 } 10192 } else { 10193 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 10194 SpecifierLen); 10195 // Since the warning for passing non-POD types to variadic functions 10196 // was deferred until now, we emit a warning for non-POD 10197 // arguments here. 
10198 switch (S.isValidVarArgType(ExprTy)) { 10199 case Sema::VAK_Valid: 10200 case Sema::VAK_ValidInCXX11: { 10201 unsigned Diag; 10202 switch (Match) { 10203 case ArgType::Match: llvm_unreachable("expected non-matching"); 10204 case ArgType::NoMatchPedantic: 10205 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10206 break; 10207 case ArgType::NoMatchTypeConfusion: 10208 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10209 break; 10210 case ArgType::NoMatch: 10211 Diag = diag::warn_format_conversion_argument_type_mismatch; 10212 break; 10213 } 10214 10215 EmitFormatDiagnostic( 10216 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 10217 << IsEnum << CSR << E->getSourceRange(), 10218 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10219 break; 10220 } 10221 case Sema::VAK_Undefined: 10222 case Sema::VAK_MSVCUndefined: 10223 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 10224 << S.getLangOpts().CPlusPlus11 << ExprTy 10225 << CallType 10226 << AT.getRepresentativeTypeName(S.Context) << CSR 10227 << E->getSourceRange(), 10228 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10229 checkForCStrMembers(AT, E); 10230 break; 10231 10232 case Sema::VAK_Invalid: 10233 if (ExprTy->isObjCObjectType()) 10234 EmitFormatDiagnostic( 10235 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 10236 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 10237 << AT.getRepresentativeTypeName(S.Context) << CSR 10238 << E->getSourceRange(), 10239 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10240 else 10241 // FIXME: If this is an initializer list, suggest removing the braces 10242 // or inserting a cast to the target type. 
10243 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 10244 << isa<InitListExpr>(E) << ExprTy << CallType 10245 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 10246 break; 10247 } 10248 10249 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 10250 "format string specifier index out of range"); 10251 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 10252 } 10253 10254 return true; 10255 } 10256 10257 //===--- CHECK: Scanf format string checking ------------------------------===// 10258 10259 namespace { 10260 10261 class CheckScanfHandler : public CheckFormatHandler { 10262 public: 10263 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 10264 const Expr *origFormatExpr, Sema::FormatStringType type, 10265 unsigned firstDataArg, unsigned numDataArgs, 10266 const char *beg, bool hasVAListArg, 10267 ArrayRef<const Expr *> Args, unsigned formatIdx, 10268 bool inFunctionCall, Sema::VariadicCallType CallType, 10269 llvm::SmallBitVector &CheckedVarArgs, 10270 UncoveredArgHandler &UncoveredArg) 10271 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 10272 numDataArgs, beg, hasVAListArg, Args, formatIdx, 10273 inFunctionCall, CallType, CheckedVarArgs, 10274 UncoveredArg) {} 10275 10276 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 10277 const char *startSpecifier, 10278 unsigned specifierLen) override; 10279 10280 bool HandleInvalidScanfConversionSpecifier( 10281 const analyze_scanf::ScanfSpecifier &FS, 10282 const char *startSpecifier, 10283 unsigned specifierLen) override; 10284 10285 void HandleIncompleteScanList(const char *start, const char *end) override; 10286 }; 10287 10288 } // namespace 10289 10290 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 10291 const char *end) { 10292 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 10293 getLocationOfByte(end), /*IsStringLocation*/true, 10294 getSpecifierRange(start, end - 
start)); 10295 } 10296 10297 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 10298 const analyze_scanf::ScanfSpecifier &FS, 10299 const char *startSpecifier, 10300 unsigned specifierLen) { 10301 const analyze_scanf::ScanfConversionSpecifier &CS = 10302 FS.getConversionSpecifier(); 10303 10304 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 10305 getLocationOfByte(CS.getStart()), 10306 startSpecifier, specifierLen, 10307 CS.getStart(), CS.getLength()); 10308 } 10309 10310 bool CheckScanfHandler::HandleScanfSpecifier( 10311 const analyze_scanf::ScanfSpecifier &FS, 10312 const char *startSpecifier, 10313 unsigned specifierLen) { 10314 using namespace analyze_scanf; 10315 using namespace analyze_format_string; 10316 10317 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 10318 10319 // Handle case where '%' and '*' don't consume an argument. These shouldn't 10320 // be used to decide if we are using positional arguments consistently. 10321 if (FS.consumesDataArgument()) { 10322 if (atFirstArg) { 10323 atFirstArg = false; 10324 usesPositionalArgs = FS.usesPositionalArg(); 10325 } 10326 else if (usesPositionalArgs != FS.usesPositionalArg()) { 10327 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 10328 startSpecifier, specifierLen); 10329 return false; 10330 } 10331 } 10332 10333 // Check if the field with is non-zero. 10334 const OptionalAmount &Amt = FS.getFieldWidth(); 10335 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 10336 if (Amt.getConstantAmount() == 0) { 10337 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 10338 Amt.getConstantLength()); 10339 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 10340 getLocationOfByte(Amt.getStart()), 10341 /*IsStringLocation*/true, R, 10342 FixItHint::CreateRemoval(R)); 10343 } 10344 } 10345 10346 if (!FS.consumesDataArgument()) { 10347 // FIXME: Technically specifying a precision or field width here 10348 // makes no sense. 
Worth issuing a warning at some point. 10349 return true; 10350 } 10351 10352 // Consume the argument. 10353 unsigned argIndex = FS.getArgIndex(); 10354 if (argIndex < NumDataArgs) { 10355 // The check to see if the argIndex is valid will come later. 10356 // We set the bit here because we may exit early from this 10357 // function if we encounter some other error. 10358 CoveredArgs.set(argIndex); 10359 } 10360 10361 // Check the length modifier is valid with the given conversion specifier. 10362 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 10363 S.getLangOpts())) 10364 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10365 diag::warn_format_nonsensical_length); 10366 else if (!FS.hasStandardLengthModifier()) 10367 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 10368 else if (!FS.hasStandardLengthConversionCombination()) 10369 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10370 diag::warn_format_non_standard_conversion_spec); 10371 10372 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 10373 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 10374 10375 // The remaining checks depend on the data arguments. 10376 if (HasVAListArg) 10377 return true; 10378 10379 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 10380 return false; 10381 10382 // Check that the argument type matches the format specifier. 
10383 const Expr *Ex = getDataArg(argIndex); 10384 if (!Ex) 10385 return true; 10386 10387 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 10388 10389 if (!AT.isValid()) { 10390 return true; 10391 } 10392 10393 analyze_format_string::ArgType::MatchKind Match = 10394 AT.matchesType(S.Context, Ex->getType()); 10395 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 10396 if (Match == analyze_format_string::ArgType::Match) 10397 return true; 10398 10399 ScanfSpecifier fixedFS = FS; 10400 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 10401 S.getLangOpts(), S.Context); 10402 10403 unsigned Diag = 10404 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 10405 : diag::warn_format_conversion_argument_type_mismatch; 10406 10407 if (Success) { 10408 // Get the fix string from the fixed format specifier. 10409 SmallString<128> buf; 10410 llvm::raw_svector_ostream os(buf); 10411 fixedFS.toString(os); 10412 10413 EmitFormatDiagnostic( 10414 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 10415 << Ex->getType() << false << Ex->getSourceRange(), 10416 Ex->getBeginLoc(), 10417 /*IsStringLocation*/ false, 10418 getSpecifierRange(startSpecifier, specifierLen), 10419 FixItHint::CreateReplacement( 10420 getSpecifierRange(startSpecifier, specifierLen), os.str())); 10421 } else { 10422 EmitFormatDiagnostic(S.PDiag(Diag) 10423 << AT.getRepresentativeTypeName(S.Context) 10424 << Ex->getType() << false << Ex->getSourceRange(), 10425 Ex->getBeginLoc(), 10426 /*IsStringLocation*/ false, 10427 getSpecifierRange(startSpecifier, specifierLen)); 10428 } 10429 10430 return true; 10431 } 10432 10433 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 10434 const Expr *OrigFormatExpr, 10435 ArrayRef<const Expr *> Args, 10436 bool HasVAListArg, unsigned format_idx, 10437 unsigned firstDataArg, 10438 Sema::FormatStringType Type, 10439 bool inFunctionCall, 10440 
Sema::VariadicCallType CallType, 10441 llvm::SmallBitVector &CheckedVarArgs, 10442 UncoveredArgHandler &UncoveredArg, 10443 bool IgnoreStringsWithoutSpecifiers) { 10444 // CHECK: is the format string a wide literal? 10445 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 10446 CheckFormatHandler::EmitFormatDiagnostic( 10447 S, inFunctionCall, Args[format_idx], 10448 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 10449 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10450 return; 10451 } 10452 10453 // Str - The format string. NOTE: this is NOT null-terminated! 10454 StringRef StrRef = FExpr->getString(); 10455 const char *Str = StrRef.data(); 10456 // Account for cases where the string literal is truncated in a declaration. 10457 const ConstantArrayType *T = 10458 S.Context.getAsConstantArrayType(FExpr->getType()); 10459 assert(T && "String literal not of constant array type!"); 10460 size_t TypeSize = T->getSize().getZExtValue(); 10461 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10462 const unsigned numDataArgs = Args.size() - firstDataArg; 10463 10464 if (IgnoreStringsWithoutSpecifiers && 10465 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 10466 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10467 return; 10468 10469 // Emit a warning if the string literal is truncated and does not contain an 10470 // embedded null character. 10471 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 10472 CheckFormatHandler::EmitFormatDiagnostic( 10473 S, inFunctionCall, Args[format_idx], 10474 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 10475 FExpr->getBeginLoc(), 10476 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 10477 return; 10478 } 10479 10480 // CHECK: empty format string? 
10481 if (StrLen == 0 && numDataArgs > 0) { 10482 CheckFormatHandler::EmitFormatDiagnostic( 10483 S, inFunctionCall, Args[format_idx], 10484 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(), 10485 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10486 return; 10487 } 10488 10489 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString || 10490 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog || 10491 Type == Sema::FST_OSTrace) { 10492 CheckPrintfHandler H( 10493 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs, 10494 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, 10495 HasVAListArg, Args, format_idx, inFunctionCall, CallType, 10496 CheckedVarArgs, UncoveredArg); 10497 10498 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen, 10499 S.getLangOpts(), 10500 S.Context.getTargetInfo(), 10501 Type == Sema::FST_FreeBSDKPrintf)) 10502 H.DoneProcessing(); 10503 } else if (Type == Sema::FST_Scanf) { 10504 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg, 10505 numDataArgs, Str, HasVAListArg, Args, format_idx, 10506 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg); 10507 10508 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen, 10509 S.getLangOpts(), 10510 S.Context.getTargetInfo())) 10511 H.DoneProcessing(); 10512 } // TODO: handle other formats 10513 } 10514 10515 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) { 10516 // Str - The format string. NOTE: this is NOT null-terminated! 10517 StringRef StrRef = FExpr->getString(); 10518 const char *Str = StrRef.data(); 10519 // Account for cases where the string literal is truncated in a declaration. 
10520 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType()); 10521 assert(T && "String literal not of constant array type!"); 10522 size_t TypeSize = T->getSize().getZExtValue(); 10523 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10524 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen, 10525 getLangOpts(), 10526 Context.getTargetInfo()); 10527 } 10528 10529 //===--- CHECK: Warn on use of wrong absolute value function. -------------===// 10530 10531 // Returns the related absolute value function that is larger, of 0 if one 10532 // does not exist. 10533 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 10534 switch (AbsFunction) { 10535 default: 10536 return 0; 10537 10538 case Builtin::BI__builtin_abs: 10539 return Builtin::BI__builtin_labs; 10540 case Builtin::BI__builtin_labs: 10541 return Builtin::BI__builtin_llabs; 10542 case Builtin::BI__builtin_llabs: 10543 return 0; 10544 10545 case Builtin::BI__builtin_fabsf: 10546 return Builtin::BI__builtin_fabs; 10547 case Builtin::BI__builtin_fabs: 10548 return Builtin::BI__builtin_fabsl; 10549 case Builtin::BI__builtin_fabsl: 10550 return 0; 10551 10552 case Builtin::BI__builtin_cabsf: 10553 return Builtin::BI__builtin_cabs; 10554 case Builtin::BI__builtin_cabs: 10555 return Builtin::BI__builtin_cabsl; 10556 case Builtin::BI__builtin_cabsl: 10557 return 0; 10558 10559 case Builtin::BIabs: 10560 return Builtin::BIlabs; 10561 case Builtin::BIlabs: 10562 return Builtin::BIllabs; 10563 case Builtin::BIllabs: 10564 return 0; 10565 10566 case Builtin::BIfabsf: 10567 return Builtin::BIfabs; 10568 case Builtin::BIfabs: 10569 return Builtin::BIfabsl; 10570 case Builtin::BIfabsl: 10571 return 0; 10572 10573 case Builtin::BIcabsf: 10574 return Builtin::BIcabs; 10575 case Builtin::BIcabs: 10576 return Builtin::BIcabsl; 10577 case Builtin::BIcabsl: 10578 return 0; 10579 } 10580 } 10581 10582 // Returns the argument type of the 
absolute value function. 10583 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 10584 unsigned AbsType) { 10585 if (AbsType == 0) 10586 return QualType(); 10587 10588 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 10589 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 10590 if (Error != ASTContext::GE_None) 10591 return QualType(); 10592 10593 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 10594 if (!FT) 10595 return QualType(); 10596 10597 if (FT->getNumParams() != 1) 10598 return QualType(); 10599 10600 return FT->getParamType(0); 10601 } 10602 10603 // Returns the best absolute value function, or zero, based on type and 10604 // current absolute value function. 10605 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 10606 unsigned AbsFunctionKind) { 10607 unsigned BestKind = 0; 10608 uint64_t ArgSize = Context.getTypeSize(ArgType); 10609 for (unsigned Kind = AbsFunctionKind; Kind != 0; 10610 Kind = getLargerAbsoluteValueFunction(Kind)) { 10611 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 10612 if (Context.getTypeSize(ParamType) >= ArgSize) { 10613 if (BestKind == 0) 10614 BestKind = Kind; 10615 else if (Context.hasSameType(ParamType, ArgType)) { 10616 BestKind = Kind; 10617 break; 10618 } 10619 } 10620 } 10621 return BestKind; 10622 } 10623 10624 enum AbsoluteValueKind { 10625 AVK_Integer, 10626 AVK_Floating, 10627 AVK_Complex 10628 }; 10629 10630 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 10631 if (T->isIntegralOrEnumerationType()) 10632 return AVK_Integer; 10633 if (T->isRealFloatingType()) 10634 return AVK_Floating; 10635 if (T->isAnyComplexType()) 10636 return AVK_Complex; 10637 10638 llvm_unreachable("Type not integer, floating, or complex"); 10639 } 10640 10641 // Changes the absolute value function to a different type. Preserves whether 10642 // the function is a builtin. 
10643 static unsigned changeAbsFunction(unsigned AbsKind, 10644 AbsoluteValueKind ValueKind) { 10645 switch (ValueKind) { 10646 case AVK_Integer: 10647 switch (AbsKind) { 10648 default: 10649 return 0; 10650 case Builtin::BI__builtin_fabsf: 10651 case Builtin::BI__builtin_fabs: 10652 case Builtin::BI__builtin_fabsl: 10653 case Builtin::BI__builtin_cabsf: 10654 case Builtin::BI__builtin_cabs: 10655 case Builtin::BI__builtin_cabsl: 10656 return Builtin::BI__builtin_abs; 10657 case Builtin::BIfabsf: 10658 case Builtin::BIfabs: 10659 case Builtin::BIfabsl: 10660 case Builtin::BIcabsf: 10661 case Builtin::BIcabs: 10662 case Builtin::BIcabsl: 10663 return Builtin::BIabs; 10664 } 10665 case AVK_Floating: 10666 switch (AbsKind) { 10667 default: 10668 return 0; 10669 case Builtin::BI__builtin_abs: 10670 case Builtin::BI__builtin_labs: 10671 case Builtin::BI__builtin_llabs: 10672 case Builtin::BI__builtin_cabsf: 10673 case Builtin::BI__builtin_cabs: 10674 case Builtin::BI__builtin_cabsl: 10675 return Builtin::BI__builtin_fabsf; 10676 case Builtin::BIabs: 10677 case Builtin::BIlabs: 10678 case Builtin::BIllabs: 10679 case Builtin::BIcabsf: 10680 case Builtin::BIcabs: 10681 case Builtin::BIcabsl: 10682 return Builtin::BIfabsf; 10683 } 10684 case AVK_Complex: 10685 switch (AbsKind) { 10686 default: 10687 return 0; 10688 case Builtin::BI__builtin_abs: 10689 case Builtin::BI__builtin_labs: 10690 case Builtin::BI__builtin_llabs: 10691 case Builtin::BI__builtin_fabsf: 10692 case Builtin::BI__builtin_fabs: 10693 case Builtin::BI__builtin_fabsl: 10694 return Builtin::BI__builtin_cabsf; 10695 case Builtin::BIabs: 10696 case Builtin::BIlabs: 10697 case Builtin::BIllabs: 10698 case Builtin::BIfabsf: 10699 case Builtin::BIfabs: 10700 case Builtin::BIfabsl: 10701 return Builtin::BIcabsf; 10702 } 10703 } 10704 llvm_unreachable("Unable to convert function"); 10705 } 10706 10707 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10708 const IdentifierInfo *FnInfo = 
FDecl->getIdentifier(); 10709 if (!FnInfo) 10710 return 0; 10711 10712 switch (FDecl->getBuiltinID()) { 10713 default: 10714 return 0; 10715 case Builtin::BI__builtin_abs: 10716 case Builtin::BI__builtin_fabs: 10717 case Builtin::BI__builtin_fabsf: 10718 case Builtin::BI__builtin_fabsl: 10719 case Builtin::BI__builtin_labs: 10720 case Builtin::BI__builtin_llabs: 10721 case Builtin::BI__builtin_cabs: 10722 case Builtin::BI__builtin_cabsf: 10723 case Builtin::BI__builtin_cabsl: 10724 case Builtin::BIabs: 10725 case Builtin::BIlabs: 10726 case Builtin::BIllabs: 10727 case Builtin::BIfabs: 10728 case Builtin::BIfabsf: 10729 case Builtin::BIfabsl: 10730 case Builtin::BIcabs: 10731 case Builtin::BIcabsf: 10732 case Builtin::BIcabsl: 10733 return FDecl->getBuiltinID(); 10734 } 10735 llvm_unreachable("Unknown Builtin type"); 10736 } 10737 10738 // If the replacement is valid, emit a note with replacement function. 10739 // Additionally, suggest including the proper header if not already included. 
10740 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 10741 unsigned AbsKind, QualType ArgType) { 10742 bool EmitHeaderHint = true; 10743 const char *HeaderName = nullptr; 10744 const char *FunctionName = nullptr; 10745 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 10746 FunctionName = "std::abs"; 10747 if (ArgType->isIntegralOrEnumerationType()) { 10748 HeaderName = "cstdlib"; 10749 } else if (ArgType->isRealFloatingType()) { 10750 HeaderName = "cmath"; 10751 } else { 10752 llvm_unreachable("Invalid Type"); 10753 } 10754 10755 // Lookup all std::abs 10756 if (NamespaceDecl *Std = S.getStdNamespace()) { 10757 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 10758 R.suppressDiagnostics(); 10759 S.LookupQualifiedName(R, Std); 10760 10761 for (const auto *I : R) { 10762 const FunctionDecl *FDecl = nullptr; 10763 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 10764 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 10765 } else { 10766 FDecl = dyn_cast<FunctionDecl>(I); 10767 } 10768 if (!FDecl) 10769 continue; 10770 10771 // Found std::abs(), check that they are the right ones. 10772 if (FDecl->getNumParams() != 1) 10773 continue; 10774 10775 // Check that the parameter type can handle the argument. 10776 QualType ParamType = FDecl->getParamDecl(0)->getType(); 10777 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 10778 S.Context.getTypeSize(ArgType) <= 10779 S.Context.getTypeSize(ParamType)) { 10780 // Found a function, don't need the header hint. 
10781 EmitHeaderHint = false; 10782 break; 10783 } 10784 } 10785 } 10786 } else { 10787 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 10788 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 10789 10790 if (HeaderName) { 10791 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 10792 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 10793 R.suppressDiagnostics(); 10794 S.LookupName(R, S.getCurScope()); 10795 10796 if (R.isSingleResult()) { 10797 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 10798 if (FD && FD->getBuiltinID() == AbsKind) { 10799 EmitHeaderHint = false; 10800 } else { 10801 return; 10802 } 10803 } else if (!R.empty()) { 10804 return; 10805 } 10806 } 10807 } 10808 10809 S.Diag(Loc, diag::note_replace_abs_function) 10810 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 10811 10812 if (!HeaderName) 10813 return; 10814 10815 if (!EmitHeaderHint) 10816 return; 10817 10818 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 10819 << FunctionName; 10820 } 10821 10822 template <std::size_t StrLen> 10823 static bool IsStdFunction(const FunctionDecl *FDecl, 10824 const char (&Str)[StrLen]) { 10825 if (!FDecl) 10826 return false; 10827 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 10828 return false; 10829 if (!FDecl->isInStdNamespace()) 10830 return false; 10831 10832 return true; 10833 } 10834 10835 // Warn when using the wrong abs() function. 10836 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 10837 const FunctionDecl *FDecl) { 10838 if (Call->getNumArgs() != 1) 10839 return; 10840 10841 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 10842 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 10843 if (AbsKind == 0 && !IsStdAbs) 10844 return; 10845 10846 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10847 QualType ParamType = Call->getArg(0)->getType(); 10848 10849 // Unsigned types cannot be negative. 
Suggest removing the absolute value 10850 // function call. 10851 if (ArgType->isUnsignedIntegerType()) { 10852 const char *FunctionName = 10853 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 10854 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 10855 Diag(Call->getExprLoc(), diag::note_remove_abs) 10856 << FunctionName 10857 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 10858 return; 10859 } 10860 10861 // Taking the absolute value of a pointer is very suspicious, they probably 10862 // wanted to index into an array, dereference a pointer, call a function, etc. 10863 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 10864 unsigned DiagType = 0; 10865 if (ArgType->isFunctionType()) 10866 DiagType = 1; 10867 else if (ArgType->isArrayType()) 10868 DiagType = 2; 10869 10870 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 10871 return; 10872 } 10873 10874 // std::abs has overloads which prevent most of the absolute value problems 10875 // from occurring. 10876 if (IsStdAbs) 10877 return; 10878 10879 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 10880 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 10881 10882 // The argument and parameter are the same kind. Check if they are the right 10883 // size. 10884 if (ArgValueKind == ParamValueKind) { 10885 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 10886 return; 10887 10888 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 10889 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 10890 << FDecl << ArgType << ParamType; 10891 10892 if (NewAbsKind == 0) 10893 return; 10894 10895 emitReplacement(*this, Call->getExprLoc(), 10896 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10897 return; 10898 } 10899 10900 // ArgValueKind != ParamValueKind 10901 // The wrong type of absolute value function was used. 
Attempt to find the 10902 // proper one. 10903 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 10904 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 10905 if (NewAbsKind == 0) 10906 return; 10907 10908 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 10909 << FDecl << ParamValueKind << ArgValueKind; 10910 10911 emitReplacement(*this, Call->getExprLoc(), 10912 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10913 } 10914 10915 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 10916 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 10917 const FunctionDecl *FDecl) { 10918 if (!Call || !FDecl) return; 10919 10920 // Ignore template specializations and macros. 10921 if (inTemplateInstantiation()) return; 10922 if (Call->getExprLoc().isMacroID()) return; 10923 10924 // Only care about the one template argument, two function parameter std::max 10925 if (Call->getNumArgs() != 2) return; 10926 if (!IsStdFunction(FDecl, "max")) return; 10927 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 10928 if (!ArgList) return; 10929 if (ArgList->size() != 1) return; 10930 10931 // Check that template type argument is unsigned integer. 10932 const auto& TA = ArgList->get(0); 10933 if (TA.getKind() != TemplateArgument::Type) return; 10934 QualType ArgType = TA.getAsType(); 10935 if (!ArgType->isUnsignedIntegerType()) return; 10936 10937 // See if either argument is a literal zero. 
10938 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 10939 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 10940 if (!MTE) return false; 10941 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 10942 if (!Num) return false; 10943 if (Num->getValue() != 0) return false; 10944 return true; 10945 }; 10946 10947 const Expr *FirstArg = Call->getArg(0); 10948 const Expr *SecondArg = Call->getArg(1); 10949 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 10950 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 10951 10952 // Only warn when exactly one argument is zero. 10953 if (IsFirstArgZero == IsSecondArgZero) return; 10954 10955 SourceRange FirstRange = FirstArg->getSourceRange(); 10956 SourceRange SecondRange = SecondArg->getSourceRange(); 10957 10958 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 10959 10960 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 10961 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 10962 10963 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 10964 SourceRange RemovalRange; 10965 if (IsFirstArgZero) { 10966 RemovalRange = SourceRange(FirstRange.getBegin(), 10967 SecondRange.getBegin().getLocWithOffset(-1)); 10968 } else { 10969 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 10970 SecondRange.getEnd()); 10971 } 10972 10973 Diag(Call->getExprLoc(), diag::note_remove_max_call) 10974 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 10975 << FixItHint::CreateRemoval(RemovalRange); 10976 } 10977 10978 //===--- CHECK: Standard memory functions ---------------------------------===// 10979 10980 /// Takes the expression passed to the size_t parameter of functions 10981 /// such as memcmp, strncat, etc and warns if it's a comparison. 10982 /// 10983 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  // The size argument is a comparison: warn, then offer two alternative
  // fix-its — (1) move the ')' so the comparison applies to the whole call,
  // (2) cast to size_t to silence if the comparison really was intended.
  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FnName
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  // Returning true tells the caller the size argument was bogus; further
  // size-related checking is skipped.
  return true;
}

/// Determine whether the given type is or contains a dynamic class type
/// (e.g., whether it has a vtable).
///
/// Returns the dynamic class found (or null). \p IsContained is set to true
/// only when the dynamic class was found inside a field, not when the type
/// itself is dynamic.
static const CXXRecordDecl *getContainedDynamicClass(QualType T,
                                                     bool &IsContained) {
  // Look through array types while ignoring qualifiers.
  const Type *Ty = T->getBaseElementTypeUnsafe();
  IsContained = false;

  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  RD = RD ? RD->getDefinition() : nullptr;
  if (!RD || RD->isInvalidDecl())
    return nullptr;

  if (RD->isDynamicClass())
    return RD;

  // Check all the fields.  If any bases were dynamic, the class is dynamic.
  // It's impossible for a class to transitively contain itself by value, so
  // infinite recursion is impossible.
  for (auto *FD : RD->fields()) {
    bool SubContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(FD->getType(), SubContained)) {
      IsContained = true;
      return ContainedRD;
    }
  }

  return nullptr;
}

/// Returns \p E as a sizeof UnaryExprOrTypeTraitExpr, or null if it is any
/// other expression (including alignof/vec_step traits).
static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
  if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
    if (Unary->getKind() == UETT_SizeOf)
      return Unary;
  return nullptr;
}

/// If E is a sizeof expression, returns its argument expression,
/// otherwise returns NULL. (Returns null for the sizeof(type) form.)
static const Expr *getSizeOfExprArg(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    if (!SizeOf->isArgumentType())
      return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
  return nullptr;
}

/// If E is a sizeof expression, returns its argument type.
/// Otherwise returns the null QualType.
static QualType getSizeOfArgType(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    return SizeOf->getTypeOfArgument();
  return QualType();
}

namespace {

/// Visitor that walks a record type and emits a note_nontrivial_field note at
/// each field that makes it non-trivial to default-initialize (e.g. ARC
/// __strong/__weak pointers). Driven via the static diag() entry point from
/// CheckMemaccessArguments when memset/bzero touches such a struct.
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Arrays are unwrapped to their element type before normal dispatch.
  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;
    }

    Super::visitWithKind(PDIK, FT, SL);
  }

  // The '<< 1' selects the "default-initialize" wording of the note.
  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  // Recurse into every field of a nested struct.
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void visitTrivial(QualType FT, SourceLocation SL) {}

  /// Entry point: walk all fields of record type \p RT, noting each
  /// non-trivial one against expression \p E.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

/// Companion visitor for the copy case: notes the fields that make a record
/// non-trivial to copy (used when memcpy/memmove touches such a struct).
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Arrays are unwrapped to their element type before normal dispatch.
  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  // The '<< 0' selects the "copy" wording of the note.
  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  /// Entry point: walk all fields of record type \p RT, noting each
  /// non-trivially-copyable one against expression \p E.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

}

/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
/// True if the expression is a sizeof, or a +/* combination in which some
/// operand (recursively) is a sizeof — e.g. 'sizeof(x) * n' or
/// 'sizeof(a) + sizeof(b)'.
static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
  SizeofExpr = SizeofExpr->IgnoreParenImpCasts();

  if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
    if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
      return false;

    return doesExprLikelyComputeSize(BO->getLHS()) ||
           doesExprLikelyComputeSize(BO->getRHS());
  }

  return getAsSizeOfExpr(SizeofExpr) != nullptr;
}

/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
///
/// \code
///   #define MACRO 0
///   foo(MACRO);
///   foo(0);
/// \endcode
///
/// This should return true for the first call to foo, but not for the second
/// (regardless of whether foo is a macro or function).
static bool isArgumentExpandedFromMacro(SourceManager &SM,
                                        SourceLocation CallLoc,
                                        SourceLocation ArgLoc) {
  // Compare the FileIDs of the (macro-caller) locations: a mismatch means the
  // argument's spelling came from a different expansion than the call itself.
  if (!CallLoc.isMacroID())
    return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);

  return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
         SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
}

/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
/// last two arguments transposed.
/// Warn about a memset/bzero whose size argument looks transposed with the
/// fill value: a literal-zero size ("memset(buf, x, 0)" / "bzero(p, 0)"),
/// or a sizeof-shaped second argument with a non-size third
/// ("memset(buf, sizeof(buf), 0xff)").
static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
  if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
    return;

  // memset's size is argument 2; bzero's is argument 1.
  const Expr *SizeArg =
      Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();

  auto isLiteralZero = [](const Expr *E) {
    return (isa<IntegerLiteral>(E) &&
            cast<IntegerLiteral>(E)->getValue() == 0) ||
           (isa<CharacterLiteral>(E) &&
            cast<CharacterLiteral>(E)->getValue() == 0);
  };

  // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
  // Skip zeros that arrive via a macro argument — the macro may legitimately
  // expand to 0 in some configurations.
  SourceLocation CallLoc = Call->getRParenLoc();
  SourceManager &SM = S.getSourceManager();
  if (isLiteralZero(SizeArg) &&
      !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {

    SourceLocation DiagLoc = SizeArg->getExprLoc();

    // Some platforms #define bzero to __builtin_memset. See if this is the
    // case, and if so, emit a better diagnostic.
    if (BId == Builtin::BIbzero ||
        (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
                                    CallLoc, SM, S.getLangOpts()) == "bzero")) {
      S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
      S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
    } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
      // memset(buf, v, 0) with a non-zero fill value: likely swapped args.
      // (memset(buf, 0, 0) is left alone — both readings are no-ops.)
      S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
      S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
    }
    return;
  }

  // If the second argument to a memset is a sizeof expression and the third
  // isn't, this is also likely an error.  This should catch
  // 'memset(buf, sizeof(buf), 0xff)'.
  if (BId == Builtin::BImemset &&
      doesExprLikelyComputeSize(Call->getArg(1)) &&
      !doesExprLikelyComputeSize(Call->getArg(2))) {
    SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
    S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
    S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
    return;
  }
}

/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
/// function calls.
///
/// \param Call The call expression to diagnose.
/// \param BId The builtin ID of the callee (never 0).
/// \param FnName The callee's name, used in diagnostics.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset.  Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  // LastArg: one past the last pointer argument to inspect (memcpy-style
  // functions check both pointer operands; memset-style only the first).
  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  // LenArg: index of the size argument.
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID.
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          //       over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If its an address-of operator, just remove it.
          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                  << ReadableName
                                  << PointeeTy
                                  << DestTy
                                  << DSR
                                  << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                  << ActionIdx
                                  << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                  << FnName << SizeOfArgTy << ArgIdx
                                  << PointeeTy << Dest->getSourceRange()
                                  << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      // Arrays decay to pointers in the call; treat the whole array type as
      // the "pointee" for the record/ARC/dynamic-class checks below.
      PointeeTy = DestTy;
    }

    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      unsigned OperationType = 0;
      const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || IsCmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if(BId == Builtin::BImemmove)
          OperationType = 2;
        else if (IsCmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                          PDiag(diag::warn_dyn_class_memaccess)
                              << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
                              << IsContained << ContainedRD << OperationType
                              << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
               BId != Builtin::BImemset)
      DiagRuntimeBehavior(
          Dest->getExprLoc(), Dest,
          PDiag(diag::warn_arc_object_memaccess)
              << ArgIdx << FnName << PointeeTy
              << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      // C structs that are non-trivial (in the C sense: ARC-managed fields)
      // get a warning plus per-field notes from the Search* visitors.
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    // Whatever was diagnosed above can be silenced by casting to void*;
    // emit that note once, then stop checking further arguments.
    DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::note_bad_memaccess_silence)
            << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}

// A little helper routine: ignore addition and subtraction of integer literals.
// This intentionally does not ignore all integer constant expressions because
// we don't want to remove sizeof().
11442 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 11443 Ex = Ex->IgnoreParenCasts(); 11444 11445 while (true) { 11446 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 11447 if (!BO || !BO->isAdditiveOp()) 11448 break; 11449 11450 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 11451 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 11452 11453 if (isa<IntegerLiteral>(RHS)) 11454 Ex = LHS; 11455 else if (isa<IntegerLiteral>(LHS)) 11456 Ex = RHS; 11457 else 11458 break; 11459 } 11460 11461 return Ex; 11462 } 11463 11464 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 11465 ASTContext &Context) { 11466 // Only handle constant-sized or VLAs, but not flexible members. 11467 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11468 // Only issue the FIXIT for arrays of size > 1. 11469 if (CAT->getSize().getSExtValue() <= 1) 11470 return false; 11471 } else if (!Ty->isVariableArrayType()) { 11472 return false; 11473 } 11474 return true; 11475 } 11476 11477 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11478 // be the size of the source, instead of the destination. 
11479 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11480 IdentifierInfo *FnName) { 11481 11482 // Don't crash if the user has the wrong number of arguments 11483 unsigned NumArgs = Call->getNumArgs(); 11484 if ((NumArgs != 3) && (NumArgs != 4)) 11485 return; 11486 11487 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11488 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11489 const Expr *CompareWithSrc = nullptr; 11490 11491 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11492 Call->getBeginLoc(), Call->getRParenLoc())) 11493 return; 11494 11495 // Look for 'strlcpy(dst, x, sizeof(x))' 11496 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11497 CompareWithSrc = Ex; 11498 else { 11499 // Look for 'strlcpy(dst, x, strlen(x))' 11500 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11501 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11502 SizeCall->getNumArgs() == 1) 11503 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11504 } 11505 } 11506 11507 if (!CompareWithSrc) 11508 return; 11509 11510 // Determine if the argument to sizeof/strlen is equal to the source 11511 // argument. 
In principle there's all kinds of things you could do 11512 // here, for instance creating an == expression and evaluating it with 11513 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11514 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11515 if (!SrcArgDRE) 11516 return; 11517 11518 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11519 if (!CompareWithSrcDRE || 11520 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11521 return; 11522 11523 const Expr *OriginalSizeArg = Call->getArg(2); 11524 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11525 << OriginalSizeArg->getSourceRange() << FnName; 11526 11527 // Output a FIXIT hint if the destination is an array (rather than a 11528 // pointer to an array). This could be enhanced to handle some 11529 // pointers if we know the actual size, like if DstArg is 'array+2' 11530 // we could say 'sizeof(array)-2'. 11531 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 11532 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 11533 return; 11534 11535 SmallString<128> sizeString; 11536 llvm::raw_svector_ostream OS(sizeString); 11537 OS << "sizeof("; 11538 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11539 OS << ")"; 11540 11541 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 11542 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 11543 OS.str()); 11544 } 11545 11546 /// Check if two expressions refer to the same declaration. 
static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
  // Only DeclRefExpr pairs naming the identical Decl count; null inputs are
  // accepted (dyn_cast_or_null) and simply compare false.
  if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
    if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
      return D1->getDecl() == D2->getDecl();
  return false;
}

/// If \p E is a call whose callee behaves like strlen, returns its argument
/// (stripped of parens/casts); otherwise returns null.
static const Expr *getStrlenExprArg(const Expr *E) {
  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
    const FunctionDecl *FD = CE->getDirectCallee();
    if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
      return nullptr;
    return CE->getArg(0)->IgnoreParenCasts();
  }
  return nullptr;
}

// Warn on anti-patterns as the 'size' argument to strncat.
// The correct size argument should look like following:
//   strncat(dst, src, sizeof(dst) - strlen(dest) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  // PatternType 1 = size derived from the destination (overflow by the
  // missing "- strlen(dst) - 1"); 2 = size derived from the source.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
                                                                    Context);
  if (!isKnownSizeArray) {
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    else
      Diag(SL, diag::warn_strncat_src_size) << SR;
    return;
  }

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  else
    Diag(SL, diag::warn_strncat_src_size) << SR;

  // Known-size array destination: suggest the canonical
  // "sizeof(dst) - strlen(dst) - 1" replacement as a fix-it.
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - ";
  OS << "strlen(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - 1";

  Diag(SL, diag::note_strncat_wrong_size)
      << FixItHint::CreateReplacement(SR, OS.str());
}

namespace {
// Shared reporter for the free()-of-lvalue cases: warns only when the
// declaration is a field, function, or variable.
void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
                                const UnaryOperator *UnaryExpr, const Decl *D) {
  if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) {
    S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
        << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
    return;
  }
}

// Handles 'free(&x)' and 'free(&obj.member)'. References are skipped: '&ref'
// may legitimately point at heap memory.
void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
                                 const UnaryOperator *UnaryExpr) {
  if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) {
    const Decl *D = Lvalue->getDecl();
    // NOTE(review): the isa<> guard makes this dyn_cast non-null here;
    // cast<DeclaratorDecl> would state that invariant more directly — confirm
    // before changing.
    if (isa<DeclaratorDecl>(D))
      if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType())
        return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
  }

  if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
    return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
                                      Lvalue->getMemberDecl());
}

// Handles 'free(+[]{ ... })' — unary plus forces a captureless lambda to
// decay to a function pointer, which is not heap memory.
void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
                            const UnaryOperator *UnaryExpr) {
  const auto *Lambda = dyn_cast<LambdaExpr>(
      UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens());
  if (!Lambda)
    return;

  S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 2 /*object: lambda expression*/;
}

// Handles 'free(arr)' where arr is a named array variable (decays to a
// pointer into automatic/static storage).
void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
                                  const DeclRefExpr *Lvalue) {
  const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl());
  if (Var == nullptr)
    return;

  S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 0 /*object: */ << Var;
}

// Handles casts in the freed argument: function-pointer bitcasts, integer
// literals cast to pointers, and function-to-pointer decay are all
// definitely-not-heap values worth warning about.
void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
                            const CastExpr *Cast) {
  SmallString<128> SizeString;
  llvm::raw_svector_ostream OS(SizeString);

  clang::CastKind Kind = Cast->getCastKind();
  if (Kind == clang::CK_BitCast &&
      !Cast->getSubExpr()->getType()->isFunctionPointerType())
    return;
  if (Kind == clang::CK_IntegralToPointer &&
      !isa<IntegerLiteral>(
          Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens()))
    return;

  switch (Cast->getCastKind()) {
  case clang::CK_BitCast:
  case clang::CK_IntegralToPointer:
  case clang::CK_FunctionToPointerDecay:
    // Quote the offending expression verbatim in the diagnostic.
    OS << '\'';
    Cast->printPretty(OS, nullptr, S.getPrintingPolicy());
    OS << '\'';
    break;
  default:
    return;
  }

  S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 0 /*object: */ << OS.str();
}
} // namespace

/// Alerts the user that they are attempting to free a non-malloc'd object.
void Sema::CheckFreeArguments(const CallExpr *E) {
  const std::string CalleeName =
      cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();

  { // Prefer something that doesn't involve a cast to make things simpler.
    const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
    if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
      switch (UnaryExpr->getOpcode()) {
      case UnaryOperator::Opcode::UO_AddrOf:
        return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
      case UnaryOperator::Opcode::UO_Plus:
        return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
      default:
        break;
      }

    if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
      if (Lvalue->getType()->isArrayType())
        return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);

    // '&&label' (GNU address-of-label) is never heap memory.
    if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
      Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
      return;
    }

    if (isa<BlockExpr>(Arg)) {
      Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 1 /*object: block*/;
      return;
    }
  }
  // Maybe the cast was important, check after the other cases.
  if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
    return CheckFreeArgumentsCast(*this, CalleeName, Cast);
}

/// Checks a return value expression against the declared return contract:
/// returns-nonnull attributes / non-null return types, nothrow operator new
/// returning null, and (on PPC64) disallowed MMA return types.
void
Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                         SourceLocation ReturnLoc,
                         bool isObjCMethod,
                         const AttrVec *Attrs,
                         const FunctionDecl *FD) {
  // Check if the return value is null but should not be.
  if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
       (!isObjCMethod && isNonNullType(Context, lhsType))) &&
      CheckNonNullExpr(*this, RetValExp))
    Diag(ReturnLoc, diag::warn_null_ret)
        << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();

  // C++11 [basic.stc.dynamic.allocation]p4:
  //   If an allocation function declared with a non-throwing
  //   exception-specification fails to allocate storage, it shall return
  //   a null pointer. Any other allocation function that fails to allocate
  //   storage shall indicate failure only by throwing an exception [...]
  if (FD) {
    OverloadedOperatorKind Op = FD->getOverloadedOperator();
    if (Op == OO_New || Op == OO_Array_New) {
      const FunctionProtoType *Proto
        = FD->getType()->castAs<FunctionProtoType>();
      if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
          CheckNonNullExpr(*this, RetValExp))
        Diag(ReturnLoc, diag::warn_operator_new_returns_null)
            << FD << getLangOpts().CPlusPlus11;
    }
  }

  // PPC MMA non-pointer types are not allowed as return type. Checking the type
  // here prevent the user from using a PPC MMA type as trailing return type.
  if (Context.getTargetInfo().getTriple().isPPC64())
    CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}

/// Check for comparisons of floating-point values using == and !=. Issue a
/// warning if the comparison is not likely to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
                                BinaryOperatorKind Opcode) {
  // Match and capture subexpressions such as "(float) X == 0.1".
  FloatingLiteral *FPLiteral;
  CastExpr *FPCast;
  auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
    FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens());
    FPCast = dyn_cast<CastExpr>(R->IgnoreParens());
    return FPLiteral && FPCast;
  };

  if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
    auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
    auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
    if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
        TargetTy->isFloatingPoint()) {
      // Round-trip the literal into the narrower source type; if that is
      // lossy, the comparison has a constant result.
      bool Lossy;
      llvm::APFloat TargetC = FPLiteral->getValue();
      TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)),
                      llvm::APFloat::rmNearestTiesToEven, &Lossy);
      if (Lossy) {
        // If the literal cannot be represented in the source type, then a
        // check for == is always false and check for != is always true.
        Diag(Loc, diag::warn_float_compare_literal)
            << (Opcode == BO_EQ) << QualType(SourceTy, 0)
            << LHS->getSourceRange() << RHS->getSourceRange();
        return;
      }
    }
  }

  // Match a more general floating-point equality comparison (-Wfloat-equal).
  Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        return;

  // Special case: check for comparisons against literals that can be exactly
  // represented by APFloat. In such cases, do not emit a warning. This
  // is a heuristic: often comparison against such literals are used to
  // detect if a value in a variable has not changed. This clearly can
  // lead to false negatives.
  if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
    if (FLL->isExact())
      return;
  } else
    if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
      if (FLR->isExact())
        return;

  // Check for comparisons with builtin types.
  if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
    if (CL->getBuiltinCallee())
      return;

  if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
    if (CR->getBuiltinCallee())
      return;

  // Emit the diagnostic.
  Diag(Loc, diag::warn_floatingpoint_eq)
      << LHS->getSourceRange() << RHS->getSourceRange();
}

//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//

namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int. Note that this includes exactly one
  /// sign bit if !NonNegative.
  unsigned Width;

  /// True if the int is known not to have negative values. If so, all leading
  /// bits before Width are known zero, otherwise they are known to be the
  /// same as the MSB within Width.
  bool NonNegative;

  IntRange(unsigned Width, bool NonNegative)
      : Width(Width), NonNegative(NonNegative) {}

  /// Number of bits excluding the sign bit.
  unsigned valueBits() const {
    return NonNegative ? Width : Width - 1;
  }

  /// Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  /// Returns the range of an opaque value of the given integral type.
  static IntRange forValueOfType(ASTContext &C, QualType T) {
    return forValueOfCanonicalType(C,
                          T->getCanonicalTypeInternal().getTypePtr());
  }

  /// Returns the range of an opaque value of a canonical integral type.
  static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    // Strip down to the underlying element/value type of wrappers.
    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();

    if (!C.getLangOpts().CPlusPlus) {
      // For enum types in C code, use the underlying datatype.
      if (const EnumType *ET = dyn_cast<EnumType>(T))
        T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
    } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
      // For enum types in C++, use the known bit width of the enumerators.
      EnumDecl *Enum = ET->getDecl();
      // In C++11, enums can have a fixed underlying type. Use this type to
      // compute the range.
      if (Enum->isFixed()) {
        return IntRange(C.getIntWidth(QualType(T, 0)),
                        !ET->isSignedIntegerOrEnumerationType());
      }

      unsigned NumPositive = Enum->getNumPositiveBits();
      unsigned NumNegative = Enum->getNumNegativeBits();

      if (NumNegative == 0)
        return IntRange(NumPositive, true/*NonNegative*/);
      else
        // One extra bit is needed for the sign when negative enumerators
        // exist.
        return IntRange(std::max(NumPositive + 1, NumNegative),
                        false/*NonNegative*/);
    }

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the "target" range of a canonical integral type, i.e.
  /// the range of values expressible in the type.
  ///
  /// This matches forValueOfCanonicalType except that enums have the
  /// full range of their type, not the range of their enumerators.
  static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the supremum of two ranges: i.e. their conservative merge.
  static IntRange join(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    // If either side can be negative, the merged range needs a sign bit.
    return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned,
                    L.NonNegative && R.NonNegative);
  }

  /// Return the range of a bitwise-AND of the two ranges.
  static IntRange bit_and(IntRange L, IntRange R) {
    unsigned Bits = std::max(L.Width, R.Width);
    bool NonNegative = false;
    // A non-negative operand clamps both the width and the sign of the
    // result, since AND can only clear bits of such an operand.
    if (L.NonNegative) {
      Bits = std::min(Bits, L.Width);
      NonNegative = true;
    }
    if (R.NonNegative) {
      Bits = std::min(Bits, R.Width);
      NonNegative = true;
    }
    return IntRange(Bits, NonNegative);
  }

  /// Return the range of a sum of the two ranges.
  static IntRange sum(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    // +1 for the carry, plus a sign bit if either side can be negative.
    return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a difference of the two ranges.
  static IntRange difference(IntRange L, IntRange R) {
    // We need a 1-bit-wider range if:
    //   1) LHS can be negative: least value can be reduced.
    //   2) RHS can be negative: greatest value can be increased.
    bool CanWiden = !L.NonNegative || !R.NonNegative;
    // The result is only known non-negative if the RHS is exactly zero
    // (Width == 0) and the LHS is non-negative.
    bool Unsigned = L.NonNegative && R.Width == 0;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden +
                        !Unsigned,
                    Unsigned);
  }

  /// Return the range of a product of the two ranges.
  static IntRange product(IntRange L, IntRange R) {
    // If both LHS and RHS can be negative, we can form
    //   -2^L * -2^R = 2^(L + R)
    // which requires L + R + 1 value bits to represent.
    bool CanWiden = !L.NonNegative && !R.NonNegative;
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a remainder operation between the two ranges.
  static IntRange rem(IntRange L, IntRange R) {
    // The result of a remainder can't be larger than the result of
    // either side. The sign of the result is the sign of the LHS.
    bool Unsigned = L.NonNegative;
    return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
                    Unsigned);
  }
};

} // namespace

/// Estimate the range of \p value, truncating it to at most \p MaxWidth bits.
/// Note this may modify \p value in place (truncation).
static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
                              unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getMinSignedBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value = value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness.
  return IntRange(value.getActiveBits(), true);
}

/// Estimate the range of an evaluated constant, handling vector and complex
/// results by joining the ranges of their elements.
static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                              unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  if (result.isVector()) {
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case.  It would be nice if APValue
  // preserved this.
  assert(result.isLValue() || result.isAddrLabelDiff());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
}

/// Return the type of \p E, looking through an _Atomic wrapper to its value
/// type.
static QualType GetExprType(const Expr *E) {
  QualType Ty = E->getType();
  if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
    Ty = AtomicRHS->getValueType();
  return Ty;
}

/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth The width to which the value will be truncated.
/// \param Approximate If \c true, return a likely range for the result: in
///        particular, assume that arithmetic on narrower types doesn't leave
///        those types. If \c false, return a range including all possible
///        result values.
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
                             bool InConstantContext, bool Approximate) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C, InConstantContext))
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
    // No-op and lvalue-to-rvalue casts don't change the value's range.
    if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);

    IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));

    bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
                         CE->getCastKind() == CK_BooleanToSignedIntegral;

    // Assume that non-integer casts can span the full range of the type.
    if (!isIntegerCast)
      return OutputTypeRange;

    IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
                                     std::min(MaxWidth, OutputTypeRange.Width),
                                     InConstantContext, Approximate);

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    bool CondResult;
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C,
                          CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
                          MaxWidth, InConstantContext, Approximate);

    // Otherwise, conservatively merge.
    // GetExprRange requires an integer expression, but a throw expression
    // results in a void type.
    Expr *E = CO->getTrueExpr();
    IntRange L = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext,
                                    Approximate);
    E = CO->getFalseExpr();
    IntRange R = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext,
                                    Approximate);
    return IntRange::join(L, R);
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    // Most binary operators recurse into both operands and merge the ranges
    // with this combiner; the switch below selects the right one and handles
    // the operators that don't fit that pattern by returning early.
    IntRange (*Combine)(IntRange, IntRange) = IntRange::join;

    switch (BO->getOpcode()) {
    case BO_Cmp:
      llvm_unreachable("builtin <=> should have class type");

    // Boolean-valued operations are single-bit and positive.
    case BO_LAnd:
    case BO_LOr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
      return IntRange::forBoolType();

    // The type of the assignments is the type of the LHS, so the RHS
    // is not necessarily the same type.
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // TODO: bitfields?
      return IntRange::forValueOfType(C, GetExprType(E));

    // Simple assignments just pass through the RHS, which will have
    // been coerced to the LHS type.
    case BO_Assign:
      // TODO: bitfields?
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    // Operations with opaque sources are black-listed.
    case BO_PtrMemD:
    case BO_PtrMemI:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Bitwise-and uses the *infimum* of the two source ranges.
    case BO_And:
    case BO_AndAssign:
      Combine = IntRange::bit_and;
      break;

    // Left shift gets black-listed based on a judgement call.
    case BO_Shl:
      // ...except that we want to treat '1 << (blah)' as logically
      // positive.  It's an important idiom.
      if (IntegerLiteral *I
            = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
        if (I->getValue() == 1) {
          IntRange R = IntRange::forValueOfType(C, GetExprType(E));
          return IntRange(R.Width, /*NonNegative*/ true);
        }
      }
      LLVM_FALLTHROUGH;

    case BO_ShlAssign:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Right shift by a constant can narrow its left argument.
    case BO_Shr:
    case BO_ShrAssign: {
      IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext,
                                Approximate);

      // If the shift amount is a positive constant, drop the width by
      // that much.
      if (Optional<llvm::APSInt> shift =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        if (shift->isNonNegative()) {
          unsigned zext = shift->getZExtValue();
          if (zext >= L.Width)
            // Shifted everything out; only a possible sign bit remains.
            L.Width = (L.NonNegative ? 0 : 1);
          else
            L.Width -= zext;
        }
      }

      return L;
    }

    // Comma acts as its right operand.
    case BO_Comma:
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    case BO_Add:
      if (!Approximate)
        Combine = IntRange::sum;
      break;

    case BO_Sub:
      if (BO->getLHS()->getType()->isPointerType())
        return IntRange::forValueOfType(C, GetExprType(E));
      if (!Approximate)
        Combine = IntRange::difference;
      break;

    case BO_Mul:
      if (!Approximate)
        Combine = IntRange::product;
      break;

    // The width of a division result is mostly determined by the size
    // of the LHS.
    case BO_Div: {
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext,
                                Approximate);

      // If the divisor is constant, use that.
      if (Optional<llvm::APSInt> divisor =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
        if (log2 >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width = std::min(L.Width - log2, MaxWidth);
        return L;
      }

      // Otherwise, just use the LHS's width.
      // FIXME: This is wrong if the LHS could be its minimal value and the RHS
      // could be -1.
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext,
                                Approximate);
      return IntRange(L.Width, L.NonNegative && R.NonNegative);
    }

    case BO_Rem:
      Combine = IntRange::rem;
      break;

    // The default behavior is okay for these.
    case BO_Xor:
    case BO_Or:
      break;
    }

    // Combine the two ranges, but limit the result to the type in which we
    // performed the computation.
    QualType T = GetExprType(E);
    unsigned opWidth = C.getIntWidth(T);
    IntRange L =
        GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate);
    IntRange R =
        GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate);
    // NOTE: this 'C' deliberately shadows the ASTContext parameter; every use
    // of the context in this scope precedes this declaration.
    IntRange C = Combine(L, R);
    C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
    C.Width = std::min(C.Width, MaxWidth);
    return C;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    // Boolean-valued operations are white-listed.
    case UO_LNot:
      return IntRange::forBoolType();

    // Operations with opaque sources are black-listed.
    case UO_Deref:
    case UO_AddrOf: // should be impossible
      return IntRange::forValueOfType(C, GetExprType(E));

    default:
      return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);
    }
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext,
                        Approximate);

  // A bit-field's range is its declared width, regardless of the field type.
  if (const auto *BitField = E->getSourceBitField())
    return IntRange(BitField->getBitWidthValue(C),
                    BitField->getType()->isUnsignedIntegerOrEnumerationType());

  return IntRange::forValueOfType(C, GetExprType(E));
}

/// Convenience overload: estimate the range of \p E using the width of its
/// own type as the truncation bound.
static IntRange GetExprRange(ASTContext &C, const Expr *E,
                             bool InConstantContext, bool Approximate) {
  return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext,
                      Approximate);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
static bool IsSameFloatAfterCast(const llvm::APFloat &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  llvm::APFloat truncated = value;

  bool ignored;
  truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
  truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);

  // Bitwise equality is the strict test: any rounding in the round trip
  // means the cast is lossy.
  return truncated.bitwiseIsEqual(value);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
///
/// The value might be a vector of floats (or a complex number).
static bool IsSameFloatAfterCast(const APValue &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  if (value.isFloat())
    return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);

  if (value.isVector()) {
    // Every element must survive the round trip for the vector to qualify.
    for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
      if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
        return false;
    return true;
  }

  assert(value.isComplexFloat());
  return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
          IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}

static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
                                       bool IsListInit = false);

/// Returns true if \p E is an enum constant reference or was expanded from a
/// macro other than the boolean-literal macros (YES/NO/true/false). Used to
/// suppress tautological-comparison diagnostics for such constants.
static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
  // Suppress cases where we are comparing against an enum constant.
  if (const DeclRefExpr *DR =
      dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
    if (isa<EnumConstantDecl>(DR->getDecl()))
      return true;

  // Suppress cases where the value is expanded from a macro, unless that macro
  // is how a language represents a boolean literal. This is the case in both C
  // and Objective-C.
  SourceLocation BeginLoc = E->getBeginLoc();
  if (BeginLoc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroName(
        BeginLoc, S.getSourceManager(), S.getLangOpts());
    return MacroName != "YES" && MacroName != "NO" &&
           MacroName != "true" && MacroName != "false";
  }

  return false;
}

/// Returns true if \p E is an integer expression whose declared type or whose
/// type before implicit conversions is unsigned.
static bool isKnownToHaveUnsignedValue(Expr *E) {
  return E->getType()->isIntegerType() &&
         (!E->getType()->isSignedIntegerType() ||
          !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
}

namespace {
/// The promoted range of values of a type.
/// In general this has the
/// following structure:
///
///   |-----------| . . . |-----------|
///   ^           ^       ^           ^
///   Min       HoleMin  HoleMax      Max
///
/// ... where there is only a hole if a signed type is promoted to unsigned
/// (in which case Min and Max are the smallest and largest representable
/// values).
struct PromotedRange {
  // Min, or HoleMax if there is a hole.
  llvm::APSInt PromotedMin;
  // Max, or HoleMin if there is a hole.
  llvm::APSInt PromotedMax;

  PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
    if (R.Width == 0)
      // Zero-width range: the only value is 0.
      PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
    else if (R.Width >= BitWidth && !Unsigned) {
      // Promotion made the type *narrower*. This happens when promoting
      // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
      // Treat all values of 'signed int' as being in range for now.
      PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
    } else {
      PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMin.setIsUnsigned(Unsigned);

      PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMax.setIsUnsigned(Unsigned);
    }
  }

  // Determine whether this range is contiguous (has no hole).
  bool isContiguous() const { return PromotedMin <= PromotedMax; }

  // Where a constant value is within the range.
  enum ComparisonResult {
    LT = 0x1,
    LE = 0x2,
    GT = 0x4,
    GE = 0x8,
    EQ = 0x10,
    NE = 0x20,
    InRangeFlag = 0x40,

    Less = LE | LT | NE,
    Min = LE | InRangeFlag,
    InRange = InRangeFlag,
    Max = GE | InRangeFlag,
    Greater = GE | GT | NE,

    OnlyValue = LE | GE | EQ | InRangeFlag,
    InHole = NE
  };

  ComparisonResult compare(const llvm::APSInt &Value) const {
    assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
           Value.isUnsigned() == PromotedMin.isUnsigned());
    if (!isContiguous()) {
      // Discontiguous range: PromotedMin is HoleMax and PromotedMax is
      // HoleMin, so in-range means above the hole or below it.
      assert(Value.isUnsigned() && "discontiguous range for signed compare");
      if (Value.isMinValue()) return Min;
      if (Value.isMaxValue()) return Max;
      if (Value >= PromotedMin) return InRange;
      if (Value <= PromotedMax) return InRange;
      return InHole;
    }

    switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
    case -1: return Less;
    case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
    case 1:
      switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
      case -1: return InRange;
      case 0: return Max;
      case 1: return Greater;
      }
    }

    llvm_unreachable("impossible compare result");
  }

  // If comparing a value with result flags \p R against a constant under
  // operator \p Op always yields the same outcome, return that outcome's
  // spelling; otherwise return None.
  static llvm::Optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    if (Op == BO_Cmp) {
      ComparisonResult LTFlag = LT, GTFlag = GT;
      if (ConstantOnRHS) std::swap(LTFlag, GTFlag);

      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
      return llvm::None;
    }

    ComparisonResult TrueFlag, FalseFlag;
    if (Op == BO_EQ) {
      TrueFlag = EQ;
      FalseFlag = NE;
    } else {
      // For the relational operators, normalize to "value < constant" /
      // "value >= constant" form via the XOR with ConstantOnRHS, then swap
      // for >= / <=.
      if (Op == BO_NE) {
      TrueFlag = NE;
      FalseFlag = EQ;
    } else {
      if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
        TrueFlag = LT;
        FalseFlag = GE;
      } else {
        TrueFlag = GT;
        FalseFlag = LE;
      }
      if (Op == BO_GE || Op == BO_LE)
        std::swap(TrueFlag, FalseFlag);
    }
    if (R & TrueFlag)
      return StringRef("true");
    if (R & FalseFlag)
      return StringRef("false");
    return llvm::None;
  }
};
}

/// Returns true if \p E has enumeration type once implicit integral
/// promotions are stripped off.
static bool HasEnumType(Expr *E) {
  // Strip off implicit integral promotions.
  while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() != CK_IntegralCast &&
        ICE->getCastKind() != CK_NoOp)
      break;
    E = ICE->getSubExpr();
  }

  return E->getType()->isEnumeralType();
}

static int classifyConstantValue(Expr *Constant) {
  // The values of this enumeration are used in the diagnostics
  // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
  enum ConstantValueKind {
    Miscellaneous = 0,
    LiteralTrue,
    LiteralFalse
  };
  if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
    return BL->getValue() ? ConstantValueKind::LiteralTrue
                          : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}

/// Diagnose comparisons of an integer constant \p Constant against an
/// expression \p Other whose value range makes the result a foregone
/// conclusion. Returns true if a diagnostic was emitted.
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //   - If the constant is outside the range of representable values of
  //     the enumeration. In such a case, we should warn about the cast
  //     to enumeration type, not about the comparison.
  //   - If the constant is the maximum / minimum in-range value. For an
  //     enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  IntRange OtherValueRange = GetExprRange(
      S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);

  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where it's a typedef for a signed
  // char (Namely, macOS). FIXME: IntRange::forValueOfType should do this.
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherTypeRange = OtherValueRange = IntRange::forBoolType();

  // Check if all values in the range of possible values of this expression
  // lead to the same comparison outcome.
  PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
                                        Value.isUnsigned());
  auto Cmp = OtherPromotedValueRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Also consider the range determined by the type alone. This allows us to
  // classify the warning under the proper diagnostic group.
  bool TautologicalTypeCompare = false;
  {
    PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
                                         Value.isUnsigned());
    auto TypeCmp = OtherPromotedTypeRange.compare(Value);
    if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
                                                       RhsConstant)) {
      TautologicalTypeCompare = true;
      Cmp = TypeCmp;
      Result = TypeResult;
    }
  }

  // Don't warn if the non-constant operand actually always evaluates to the
  // same value.
  if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  //   some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
    return false;

  // A comparison of an unsigned bit-field against 0 is really a type problem,
  // even though at the type level the bit-field might promote to 'signed int'.
  if (Other->refersToBitField() && InRange && Value == 0 &&
      Other->getType()->isUnsignedIntegerOrEnumerationType())
    TautologicalTypeCompare = true;

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
  if (ED) {
    OS << '\'' << *ED << "' (" << Value << ")";
  } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
               Constant->IgnoreParenImpCasts())) {
    OS << (BL->getValue() ? "YES" : "NO");
  } else {
    OS << Value;
  }

  if (!TautologicalTypeCompare) {
    S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
        << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
        << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
    return true;
  }

  if (IsObjCSignedCharBool) {
    S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                          S.PDiag(diag::warn_tautological_compare_objc_bool)
                              << OS.str() << *Result);
    return true;
  }

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {

    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant) << OtherT
            << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
  } else {
    bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
    unsigned Diag =
        (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
            ? (HasEnumType(OriginalOther)
                   ? diag::warn_unsigned_enum_always_true_comparison
                   : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
                              : diag::warn_unsigned_always_true_comparison)
            : diag::warn_tautological_constant_compare;

    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
  }

  return true;
}

/// Analyze the operands of the given comparison.  Implements the
/// fallback case from AnalyzeComparison.
static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}

/// Implements -Wsign-compare.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
  // The type the comparison is being performed in.
  QualType T = E->getLHS()->getType();

  // Only analyze comparison operators where both sides have been converted to
  // the same type.
12739 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 12740 return AnalyzeImpConvsInComparison(S, E); 12741 12742 // Don't analyze value-dependent comparisons directly. 12743 if (E->isValueDependent()) 12744 return AnalyzeImpConvsInComparison(S, E); 12745 12746 Expr *LHS = E->getLHS(); 12747 Expr *RHS = E->getRHS(); 12748 12749 if (T->isIntegralType(S.Context)) { 12750 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 12751 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 12752 12753 // We don't care about expressions whose result is a constant. 12754 if (RHSValue && LHSValue) 12755 return AnalyzeImpConvsInComparison(S, E); 12756 12757 // We only care about expressions where just one side is literal 12758 if ((bool)RHSValue ^ (bool)LHSValue) { 12759 // Is the constant on the RHS or LHS? 12760 const bool RhsConstant = (bool)RHSValue; 12761 Expr *Const = RhsConstant ? RHS : LHS; 12762 Expr *Other = RhsConstant ? LHS : RHS; 12763 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12764 12765 // Check whether an integer constant comparison results in a value 12766 // of 'true' or 'false'. 12767 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12768 return AnalyzeImpConvsInComparison(S, E); 12769 } 12770 } 12771 12772 if (!T->hasUnsignedIntegerRepresentation()) { 12773 // We don't do anything special if this isn't an unsigned integral 12774 // comparison: we're only interested in integral comparisons, and 12775 // signed comparisons only happen in cases we don't care to warn about. 12776 return AnalyzeImpConvsInComparison(S, E); 12777 } 12778 12779 LHS = LHS->IgnoreParenImpCasts(); 12780 RHS = RHS->IgnoreParenImpCasts(); 12781 12782 if (!S.getLangOpts().CPlusPlus) { 12783 // Avoid warning about comparison of integers with different signs when 12784 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12785 // the type of `E`. 
12786 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12787 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12788 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12789 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12790 } 12791 12792 // Check to see if one of the (unmodified) operands is of different 12793 // signedness. 12794 Expr *signedOperand, *unsignedOperand; 12795 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12796 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12797 "unsigned comparison between two signed integer expressions?"); 12798 signedOperand = LHS; 12799 unsignedOperand = RHS; 12800 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12801 signedOperand = RHS; 12802 unsignedOperand = LHS; 12803 } else { 12804 return AnalyzeImpConvsInComparison(S, E); 12805 } 12806 12807 // Otherwise, calculate the effective range of the signed operand. 12808 IntRange signedRange = GetExprRange( 12809 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12810 12811 // Go ahead and analyze implicit conversions in the operands. Note 12812 // that we skip the implicit conversions on both sides. 12813 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 12814 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 12815 12816 // If the signed range is non-negative, -Wsign-compare won't fire. 12817 if (signedRange.NonNegative) 12818 return; 12819 12820 // For (in)equality comparisons, if the unsigned operand is a 12821 // constant which cannot collide with a overflowed signed operand, 12822 // then reinterpreting the signed operand as unsigned will not 12823 // change the result of the comparison. 
12824 if (E->isEqualityOp()) { 12825 unsigned comparisonWidth = S.Context.getIntWidth(T); 12826 IntRange unsignedRange = 12827 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 12828 /*Approximate*/ true); 12829 12830 // We should never be unable to prove that the unsigned operand is 12831 // non-negative. 12832 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 12833 12834 if (unsignedRange.Width < comparisonWidth) 12835 return; 12836 } 12837 12838 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12839 S.PDiag(diag::warn_mixed_sign_comparison) 12840 << LHS->getType() << RHS->getType() 12841 << LHS->getSourceRange() << RHS->getSourceRange()); 12842 } 12843 12844 /// Analyzes an attempt to assign the given value to a bitfield. 12845 /// 12846 /// Returns true if there was something fishy about the attempt. 12847 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 12848 SourceLocation InitLoc) { 12849 assert(Bitfield->isBitField()); 12850 if (Bitfield->isInvalidDecl()) 12851 return false; 12852 12853 // White-list bool bitfields. 12854 QualType BitfieldType = Bitfield->getType(); 12855 if (BitfieldType->isBooleanType()) 12856 return false; 12857 12858 if (BitfieldType->isEnumeralType()) { 12859 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 12860 // If the underlying enum type was not explicitly specified as an unsigned 12861 // type and the enum contain only positive values, MSVC++ will cause an 12862 // inconsistency by storing this as a signed type. 
    if (S.getLangOpts().CPlusPlus11 &&
        !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
        BitfieldEnumDecl->getNumPositiveBits() > 0 &&
        BitfieldEnumDecl->getNumNegativeBits() == 0) {
      S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
          << BitfieldEnumDecl;
    }
  }

  // NOTE(review): redundant — bool bitfields were already accepted by the
  // BitfieldType->isBooleanType() check above.
  if (Bitfield->getType()->isBooleanType())
    return false;

  // Ignore value- or type-dependent expressions.
  if (Bitfield->getBitWidth()->isValueDependent() ||
      Bitfield->getBitWidth()->isTypeDependent() ||
      Init->isValueDependent() ||
      Init->isTypeDependent())
    return false;

  Expr *OriginalInit = Init->IgnoreParenImpCasts();
  unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);

  Expr::EvalResult Result;
  if (!OriginalInit->EvaluateAsInt(Result, S.Context,
                                   Expr::SE_AllowSideEffects)) {
    // The RHS is not constant. If the RHS has an enum type, make sure the
    // bitfield is wide enough to hold all the values of the enum without
    // truncation.
    if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
      EnumDecl *ED = EnumTy->getDecl();
      bool SignedBitfield = BitfieldType->isSignedIntegerType();

      // Enum types are implicitly signed on Windows, so check if there are any
      // negative enumerators to see if the enum was intended to be signed or
      // not.
      bool SignedEnum = ED->getNumNegativeBits() > 0;

      // Check for surprising sign changes when assigning enum values to a
      // bitfield of different signedness. If the bitfield is signed and we
      // have exactly the right number of bits to store this unsigned enum,
      // suggest changing the enum to an unsigned type. This typically happens
      // on Windows where unfixed enums always use an underlying type of 'int'.
      unsigned DiagID = 0;
      if (SignedEnum && !SignedBitfield) {
        DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
      } else if (SignedBitfield && !SignedEnum &&
                 ED->getNumPositiveBits() == FieldWidth) {
        DiagID = diag::warn_signed_bitfield_enum_conversion;
      }

      if (DiagID) {
        S.Diag(InitLoc, DiagID) << Bitfield << ED;
        TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
        SourceRange TypeRange =
            TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
        S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
            << SignedEnum << TypeRange;
      }

      // Compute the required bitwidth. If the enum has negative values, we need
      // one more bit than the normal number of positive bits to represent the
      // sign bit.
      unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
                                                  ED->getNumNegativeBits())
                                       : ED->getNumPositiveBits();

      // Check the bitwidth.
      if (BitsNeeded > FieldWidth) {
        Expr *WidthExpr = Bitfield->getBitWidth();
        S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
            << Bitfield << ED;
        S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
            << BitsNeeded << ED << WidthExpr->getSourceRange();
      }
    }

    return false;
  }

  llvm::APSInt Value = Result.Val.getInt();

  unsigned OriginalWidth = Value.getBitWidth();

  // For values produced by unary '-' or '~', use the minimal signed width of
  // the result instead of the full evaluated bit width, so e.g. '-1' is not
  // flagged as too wide for a small field.
  if (!Value.isSigned() || Value.isNegative())
    if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
      if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
        OriginalWidth = Value.getMinSignedBits();

  if (OriginalWidth <= FieldWidth)
    return false;

  // Compute the value which the bitfield will contain.
  llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
  TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());

  // Check whether the stored value is equal to the original value.
  TruncatedValue = TruncatedValue.extend(OriginalWidth);
  if (llvm::APSInt::isSameValue(Value, TruncatedValue))
    return false;

  // Special-case bitfields of width 1: booleans are naturally 0/1, and
  // therefore don't strictly fit into a signed bitfield of width 1.
  if (FieldWidth == 1 && Value == 1)
    return false;

  std::string PrettyValue = toString(Value, 10);
  std::string PrettyTrunc = toString(TruncatedValue, 10);

  S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
      << PrettyValue << PrettyTrunc << OriginalInit->getType()
      << Init->getSourceRange();

  return true;
}

/// Analyze the given simple or compound assignment for warning-worthy
/// operations.
static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
  // Just recurse on the LHS.
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());

  // We want to recurse on the RHS as normal unless we're assigning to
  // a bitfield.
  if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
    if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
                                  E->getOperatorLoc())) {
      // Recurse, ignoring any implicit conversions on the RHS.
      return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
                                        E->getOperatorLoc());
    }
  }

  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Diagnose implicitly sequentially-consistent atomic assignment.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
}

/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
13003 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 13004 SourceLocation CContext, unsigned diag, 13005 bool pruneControlFlow = false) { 13006 if (pruneControlFlow) { 13007 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13008 S.PDiag(diag) 13009 << SourceType << T << E->getSourceRange() 13010 << SourceRange(CContext)); 13011 return; 13012 } 13013 S.Diag(E->getExprLoc(), diag) 13014 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 13015 } 13016 13017 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 13018 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 13019 SourceLocation CContext, 13020 unsigned diag, bool pruneControlFlow = false) { 13021 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 13022 } 13023 13024 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 13025 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 13026 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 13027 } 13028 13029 static void adornObjCBoolConversionDiagWithTernaryFixit( 13030 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 13031 Expr *Ignored = SourceExpr->IgnoreImplicit(); 13032 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 13033 Ignored = OVE->getSourceExpr(); 13034 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 13035 isa<BinaryOperator>(Ignored) || 13036 isa<CXXOperatorCallExpr>(Ignored); 13037 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 13038 if (NeedsParens) 13039 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 13040 << FixItHint::CreateInsertion(EndLoc, ")"); 13041 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 13042 } 13043 13044 /// Diagnose an implicit cast from a floating point value to an integer value. 
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  // Inside a template instantiation, route diagnostics through
  // DiagRuntimeBehavior so unreachable instantiations are not flagged.
  const bool PruneWarnings = S.inTemplateInstantiation();

  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();

  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);

  llvm::APFloat Value(0.0);
  bool IsConstant =
    E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
  if (!IsConstant) {
    // Non-constant source: generic float-to-integer (or ObjC BOOL) warning.
    if (isObjCSignedCharBool(S, T)) {
      return adornObjCBoolConversionDiagWithTernaryFixit(
          S, E,
          S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
              << E->getType());
    }

    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  }

  bool isExact = false;

  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  // 59/196 ~= log10(2): convert the bit precision to decimal digits.
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
    return adornObjCBoolConversionDiagWithTernaryFixit(
        S, E,
        S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
            << PrettySourceValue);
  }

  if (Result == llvm::APFloat::opOK && isExact) {
    // An exactly-representable literal (e.g. 1.0 -> 1) is not worth a warning.
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
                           PruneWarnings);
  }

  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        S, E, T, CContext,
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
        PruneWarnings);

  unsigned DiagID = 0;
  if (IsLiteral) {
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) {  // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    }
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
  } else {
    // Only the saturating cases (max/min of the target type) get the
    // stronger "changes value" diagnostic; everything else is generic.
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    } else {  // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    }
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  }

  SmallString<16> PrettyTargetValue;
  if (IsBool)
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(DiagID)
                              << E->getType() << T.getUnqualifiedType()
                              << PrettySourceValue << PrettyTargetValue
                              << E->getSourceRange() << SourceRange(CContext));
  } else {
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
  }
}

/// Analyze the given compound assignment for the possible losing of
/// floating-point precision.
static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
  assert(isa<CompoundAssignOperator>(E) &&
         "Must be compound assignment operation");
  // Recurse on the LHS and RHS in here
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Compound assignment to an atomic LHS is implicitly seq_cst.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);

  // Now check the outermost expression
  const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
  const auto *RBT = cast<CompoundAssignOperator>(E)
                        ->getComputationResultType()
                        ->getAs<BuiltinType>();

  // The below checks assume source is floating point.
  if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;

  // If source is floating point but target is an integer.
  if (ResultBT->isInteger())
    return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
                           E->getExprLoc(), diag::warn_impcast_float_integer);

  if (!ResultBT->isFloatingPoint())
    return;

  // If both source and target are floating points, warn about losing precision.
  int Order = S.getASTContext().getFloatingTypeSemanticOrder(
      QualType(ResultBT, 0), QualType(RBT, 0));
  if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
    // warn about dropping FP rank.
    DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
                    diag::warn_impcast_float_result_precision);
}

/// Render \p Value as the decimal string it would print as after being
/// truncated to \p Range.
static std::string PrettyPrintInRange(const llvm::APSInt &Value,
                                      IntRange Range) {
  if (!Range.Width) return "0";

  llvm::APSInt ValueInRange = Value;
  ValueInRange.setIsSigned(!Range.NonNegative);
  ValueInRange = ValueInRange.trunc(Range.Width);
  return toString(ValueInRange, 10);
}

/// Returns true if \p Ex is an implicit conversion to bool from a floating
/// type (ToBool == true) or from bool to a floating type (ToBool == false).
static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
  if (!isa<ImplicitCastExpr>(Ex))
    return false;

  Expr *InnerE = Ex->IgnoreParenImpCasts();
  const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
  const Type *Source =
      S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
  if (Target->isDependentType())
    return false;

  const BuiltinType *FloatCandidateBT =
      dyn_cast<BuiltinType>(ToBool ? Source : Target);
  const Type *BoolCandidateType = ToBool ? Target : Source;

  return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
          FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
}

/// Warn on a float-to-bool argument conversion that sits next to a
/// bool-to-float conversion in the same call — a pattern that suggests the
/// two arguments were swapped.
static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                             SourceLocation CC) {
  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    // Look at the immediate neighbors for the opposite conversion.
    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
                      CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}

/// Warn when NULL or nullptr is implicitly converted to a non-pointer
/// (integer-like) type, offering a fix-it to the zero literal.
static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Don't warn on functions which have return type nullptr_t.
  if (isa<CallExpr>(E))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
  const Expr::NullPointerConstantKind NullKind =
      E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
  if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
    return;

  // Return if target type is a safe conversion.
  if (T->isAnyPointerType() || T->isBlockPointerType() ||
      T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
    return;

  SourceLocation Loc = E->getSourceRange().getBegin();

  // Venture through the macro stacks to get to the source of macro arguments.
  // The new location is a better location than the complete location that was
  // passed in.
  Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
  CC = S.SourceMgr.getTopMacroCallerLoc(CC);

  // __null is usually wrapped in a macro. Go up a macro if that is the case.
  if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
        Loc, S.SourceMgr, S.getLangOpts());
    if (MacroName == "NULL")
      Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
  }

  // Only warn if the null and context location are in the same macro expansion.
  if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
    return;

  S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
      << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC)
      << FixItHint::CreateReplacement(Loc,
                                      S.getFixItZeroLiteralForType(T, Loc));
}

static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
                                  ObjCArrayLiteral *ArrayLiteral);

static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral);

/// Check a single element within a collection literal against the
/// target element type.
static void checkObjCCollectionLiteralElement(Sema &S,
                                              QualType TargetElementType,
                                              Expr *Element,
                                              unsigned ElementKind) {
  // Skip a bitcast to 'id' or qualified 'id'.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
    if (ICE->getCastKind() == CK_BitCast &&
        ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
      Element = ICE->getSubExpr();
  }

  QualType ElementType = Element->getType();
  ExprResult ElementResult(Element);
  if (ElementType->getAs<ObjCObjectPointerType>() &&
      S.CheckSingleAssignmentConstraints(TargetElementType,
                                         ElementResult,
                                         false, false)
        != Sema::Compatible) {
    S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
        << ElementType << ElementKind << TargetElementType
        << Element->getSourceRange();
  }

  // Recurse into nested collection literals.
  if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
    checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
  else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
    checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
}

/// Check an Objective-C array literal being converted to the given
/// target type.
13335 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13336 ObjCArrayLiteral *ArrayLiteral) { 13337 if (!S.NSArrayDecl) 13338 return; 13339 13340 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13341 if (!TargetObjCPtr) 13342 return; 13343 13344 if (TargetObjCPtr->isUnspecialized() || 13345 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13346 != S.NSArrayDecl->getCanonicalDecl()) 13347 return; 13348 13349 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13350 if (TypeArgs.size() != 1) 13351 return; 13352 13353 QualType TargetElementType = TypeArgs[0]; 13354 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 13355 checkObjCCollectionLiteralElement(S, TargetElementType, 13356 ArrayLiteral->getElement(I), 13357 0); 13358 } 13359 } 13360 13361 /// Check an Objective-C dictionary literal being converted to the given 13362 /// target type. 13363 static void 13364 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 13365 ObjCDictionaryLiteral *DictionaryLiteral) { 13366 if (!S.NSDictionaryDecl) 13367 return; 13368 13369 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13370 if (!TargetObjCPtr) 13371 return; 13372 13373 if (TargetObjCPtr->isUnspecialized() || 13374 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13375 != S.NSDictionaryDecl->getCanonicalDecl()) 13376 return; 13377 13378 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13379 if (TypeArgs.size() != 2) 13380 return; 13381 13382 QualType TargetKeyType = TypeArgs[0]; 13383 QualType TargetObjectType = TypeArgs[1]; 13384 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 13385 auto Element = DictionaryLiteral->getKeyValueElement(I); 13386 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 13387 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 13388 } 13389 } 13390 13391 // Helper function to filter out cases for constant width constant conversion. 
13392 // Don't warn on char array initialization or for non-decimal values. 13393 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 13394 SourceLocation CC) { 13395 // If initializing from a constant, and the constant starts with '0', 13396 // then it is a binary, octal, or hexadecimal. Allow these constants 13397 // to fill all the bits, even if there is a sign change. 13398 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 13399 const char FirstLiteralCharacter = 13400 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 13401 if (FirstLiteralCharacter == '0') 13402 return false; 13403 } 13404 13405 // If the CC location points to a '{', and the type is char, then assume 13406 // assume it is an array initialization. 13407 if (CC.isValid() && T->isCharType()) { 13408 const char FirstContextCharacter = 13409 S.getSourceManager().getCharacterData(CC)[0]; 13410 if (FirstContextCharacter == '{') 13411 return false; 13412 } 13413 13414 return true; 13415 } 13416 13417 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 13418 const auto *IL = dyn_cast<IntegerLiteral>(E); 13419 if (!IL) { 13420 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 13421 if (UO->getOpcode() == UO_Minus) 13422 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 13423 } 13424 } 13425 13426 return IL; 13427 } 13428 13429 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 13430 E = E->IgnoreParenImpCasts(); 13431 SourceLocation ExprLoc = E->getExprLoc(); 13432 13433 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 13434 BinaryOperator::Opcode Opc = BO->getOpcode(); 13435 Expr::EvalResult Result; 13436 // Do not diagnose unsigned shifts. 
13437 if (Opc == BO_Shl) { 13438 const auto *LHS = getIntegerLiteral(BO->getLHS()); 13439 const auto *RHS = getIntegerLiteral(BO->getRHS()); 13440 if (LHS && LHS->getValue() == 0) 13441 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 13442 else if (!E->isValueDependent() && LHS && RHS && 13443 RHS->getValue().isNonNegative() && 13444 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 13445 S.Diag(ExprLoc, diag::warn_left_shift_always) 13446 << (Result.Val.getInt() != 0); 13447 else if (E->getType()->isSignedIntegerType()) 13448 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 13449 } 13450 } 13451 13452 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 13453 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 13454 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 13455 if (!LHS || !RHS) 13456 return; 13457 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 13458 (RHS->getValue() == 0 || RHS->getValue() == 1)) 13459 // Do not diagnose common idioms. 13460 return; 13461 if (LHS->getValue() != 0 && RHS->getValue() != 0) 13462 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 13463 } 13464 } 13465 13466 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 13467 SourceLocation CC, 13468 bool *ICContext = nullptr, 13469 bool IsListInit = false) { 13470 if (E->isTypeDependent() || E->isValueDependent()) return; 13471 13472 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 13473 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 13474 if (Source == Target) return; 13475 if (Target->isDependentType()) return; 13476 13477 // If the conversion context location is invalid don't complain. We also 13478 // don't want to emit a warning if the issue occurs from the expansion of 13479 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 13480 // delay this check as long as possible. 
Once we detect we are in that 13481 // scenario, we just return. 13482 if (CC.isInvalid()) 13483 return; 13484 13485 if (Source->isAtomicType()) 13486 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 13487 13488 // Diagnose implicit casts to bool. 13489 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 13490 if (isa<StringLiteral>(E)) 13491 // Warn on string literal to bool. Checks for string literals in logical 13492 // and expressions, for instance, assert(0 && "error here"), are 13493 // prevented by a check in AnalyzeImplicitConversions(). 13494 return DiagnoseImpCast(S, E, T, CC, 13495 diag::warn_impcast_string_literal_to_bool); 13496 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 13497 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 13498 // This covers the literal expressions that evaluate to Objective-C 13499 // objects. 13500 return DiagnoseImpCast(S, E, T, CC, 13501 diag::warn_impcast_objective_c_literal_to_bool); 13502 } 13503 if (Source->isPointerType() || Source->canDecayToPointerType()) { 13504 // Warn on pointer to bool conversion that is always true. 13505 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 13506 SourceRange(CC)); 13507 } 13508 } 13509 13510 // If the we're converting a constant to an ObjC BOOL on a platform where BOOL 13511 // is a typedef for signed char (macOS), then that constant value has to be 1 13512 // or 0. 
13513 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13514 Expr::EvalResult Result; 13515 if (E->EvaluateAsInt(Result, S.getASTContext(), 13516 Expr::SE_AllowSideEffects)) { 13517 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13518 adornObjCBoolConversionDiagWithTernaryFixit( 13519 S, E, 13520 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13521 << toString(Result.Val.getInt(), 10)); 13522 } 13523 return; 13524 } 13525 } 13526 13527 // Check implicit casts from Objective-C collection literals to specialized 13528 // collection types, e.g., NSArray<NSString *> *. 13529 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13530 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13531 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13532 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13533 13534 // Strip vector types. 13535 if (isa<VectorType>(Source)) { 13536 if (Target->isVLSTBuiltinType() && 13537 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13538 QualType(Source, 0)) || 13539 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13540 QualType(Source, 0)))) 13541 return; 13542 13543 if (!isa<VectorType>(Target)) { 13544 if (S.SourceMgr.isInSystemMacro(CC)) 13545 return; 13546 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13547 } 13548 13549 // If the vector cast is cast between two vectors of the same size, it is 13550 // a bitcast, not a conversion. 13551 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13552 return; 13553 13554 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13555 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13556 } 13557 if (auto VecTy = dyn_cast<VectorType>(Target)) 13558 Target = VecTy->getElementType().getTypePtr(); 13559 13560 // Strip complex types. 
13561 if (isa<ComplexType>(Source)) { 13562 if (!isa<ComplexType>(Target)) { 13563 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13564 return; 13565 13566 return DiagnoseImpCast(S, E, T, CC, 13567 S.getLangOpts().CPlusPlus 13568 ? diag::err_impcast_complex_scalar 13569 : diag::warn_impcast_complex_scalar); 13570 } 13571 13572 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13573 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13574 } 13575 13576 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13577 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13578 13579 // If the source is floating point... 13580 if (SourceBT && SourceBT->isFloatingPoint()) { 13581 // ...and the target is floating point... 13582 if (TargetBT && TargetBT->isFloatingPoint()) { 13583 // ...then warn if we're dropping FP rank. 13584 13585 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13586 QualType(SourceBT, 0), QualType(TargetBT, 0)); 13587 if (Order > 0) { 13588 // Don't warn about float constants that are precisely 13589 // representable in the target type. 13590 Expr::EvalResult result; 13591 if (E->EvaluateAsRValue(result, S.Context)) { 13592 // Value might be a float, a float vector, or a float complex. 13593 if (IsSameFloatAfterCast(result.Val, 13594 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 13595 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 13596 return; 13597 } 13598 13599 if (S.SourceMgr.isInSystemMacro(CC)) 13600 return; 13601 13602 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 13603 } 13604 // ... or possibly if we're increasing rank, too 13605 else if (Order < 0) { 13606 if (S.SourceMgr.isInSystemMacro(CC)) 13607 return; 13608 13609 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 13610 } 13611 return; 13612 } 13613 13614 // If the target is integral, always warn. 
13615 if (TargetBT && TargetBT->isInteger()) { 13616 if (S.SourceMgr.isInSystemMacro(CC)) 13617 return; 13618 13619 DiagnoseFloatingImpCast(S, E, T, CC); 13620 } 13621 13622 // Detect the case where a call result is converted from floating-point to 13623 // to bool, and the final argument to the call is converted from bool, to 13624 // discover this typo: 13625 // 13626 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 13627 // 13628 // FIXME: This is an incredibly special case; is there some more general 13629 // way to detect this class of misplaced-parentheses bug? 13630 if (Target->isBooleanType() && isa<CallExpr>(E)) { 13631 // Check last argument of function call to see if it is an 13632 // implicit cast from a type matching the type the result 13633 // is being cast to. 13634 CallExpr *CEx = cast<CallExpr>(E); 13635 if (unsigned NumArgs = CEx->getNumArgs()) { 13636 Expr *LastA = CEx->getArg(NumArgs - 1); 13637 Expr *InnerE = LastA->IgnoreParenImpCasts(); 13638 if (isa<ImplicitCastExpr>(LastA) && 13639 InnerE->getType()->isBooleanType()) { 13640 // Warn on this floating-point to bool conversion 13641 DiagnoseImpCast(S, E, T, CC, 13642 diag::warn_impcast_floating_point_to_bool); 13643 } 13644 } 13645 } 13646 return; 13647 } 13648 13649 // Valid casts involving fixed point types should be accounted for here. 
13650 if (Source->isFixedPointType()) { 13651 if (Target->isUnsaturatedFixedPointType()) { 13652 Expr::EvalResult Result; 13653 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 13654 S.isConstantEvaluated())) { 13655 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 13656 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 13657 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 13658 if (Value > MaxVal || Value < MinVal) { 13659 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13660 S.PDiag(diag::warn_impcast_fixed_point_range) 13661 << Value.toString() << T 13662 << E->getSourceRange() 13663 << clang::SourceRange(CC)); 13664 return; 13665 } 13666 } 13667 } else if (Target->isIntegerType()) { 13668 Expr::EvalResult Result; 13669 if (!S.isConstantEvaluated() && 13670 E->EvaluateAsFixedPoint(Result, S.Context, 13671 Expr::SE_AllowSideEffects)) { 13672 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 13673 13674 bool Overflowed; 13675 llvm::APSInt IntResult = FXResult.convertToInt( 13676 S.Context.getIntWidth(T), 13677 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 13678 13679 if (Overflowed) { 13680 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13681 S.PDiag(diag::warn_impcast_fixed_point_range) 13682 << FXResult.toString() << T 13683 << E->getSourceRange() 13684 << clang::SourceRange(CC)); 13685 return; 13686 } 13687 } 13688 } 13689 } else if (Target->isUnsaturatedFixedPointType()) { 13690 if (Source->isIntegerType()) { 13691 Expr::EvalResult Result; 13692 if (!S.isConstantEvaluated() && 13693 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13694 llvm::APSInt Value = Result.Val.getInt(); 13695 13696 bool Overflowed; 13697 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13698 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13699 13700 if (Overflowed) { 13701 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13702 S.PDiag(diag::warn_impcast_fixed_point_range) 13703 << 
toString(Value, /*Radix=*/10) << T 13704 << E->getSourceRange() 13705 << clang::SourceRange(CC)); 13706 return; 13707 } 13708 } 13709 } 13710 } 13711 13712 // If we are casting an integer type to a floating point type without 13713 // initialization-list syntax, we might lose accuracy if the floating 13714 // point type has a narrower significand than the integer type. 13715 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13716 TargetBT->isFloatingType() && !IsListInit) { 13717 // Determine the number of precision bits in the source integer type. 13718 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13719 /*Approximate*/ true); 13720 unsigned int SourcePrecision = SourceRange.Width; 13721 13722 // Determine the number of precision bits in the 13723 // target floating point type. 13724 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13725 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13726 13727 if (SourcePrecision > 0 && TargetPrecision > 0 && 13728 SourcePrecision > TargetPrecision) { 13729 13730 if (Optional<llvm::APSInt> SourceInt = 13731 E->getIntegerConstantExpr(S.Context)) { 13732 // If the source integer is a constant, convert it to the target 13733 // floating point type. Issue a warning if the value changes 13734 // during the whole conversion. 
13735 llvm::APFloat TargetFloatValue( 13736 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13737 llvm::APFloat::opStatus ConversionStatus = 13738 TargetFloatValue.convertFromAPInt( 13739 *SourceInt, SourceBT->isSignedInteger(), 13740 llvm::APFloat::rmNearestTiesToEven); 13741 13742 if (ConversionStatus != llvm::APFloat::opOK) { 13743 SmallString<32> PrettySourceValue; 13744 SourceInt->toString(PrettySourceValue, 10); 13745 SmallString<32> PrettyTargetValue; 13746 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13747 13748 S.DiagRuntimeBehavior( 13749 E->getExprLoc(), E, 13750 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13751 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13752 << E->getSourceRange() << clang::SourceRange(CC)); 13753 } 13754 } else { 13755 // Otherwise, the implicit conversion may lose precision. 13756 DiagnoseImpCast(S, E, T, CC, 13757 diag::warn_impcast_integer_float_precision); 13758 } 13759 } 13760 } 13761 13762 DiagnoseNullConversion(S, E, T, CC); 13763 13764 S.DiscardMisalignedMemberAddress(Target, E); 13765 13766 if (Target->isBooleanType()) 13767 DiagnoseIntInBoolContext(S, E); 13768 13769 if (!Source->isIntegerType() || !Target->isIntegerType()) 13770 return; 13771 13772 // TODO: remove this early return once the false positives for constant->bool 13773 // in templates, macros, etc, are reduced or removed. 
13774 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13775 return; 13776 13777 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13778 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13779 return adornObjCBoolConversionDiagWithTernaryFixit( 13780 S, E, 13781 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13782 << E->getType()); 13783 } 13784 13785 IntRange SourceTypeRange = 13786 IntRange::forTargetOfCanonicalType(S.Context, Source); 13787 IntRange LikelySourceRange = 13788 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13789 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13790 13791 if (LikelySourceRange.Width > TargetRange.Width) { 13792 // If the source is a constant, use a default-on diagnostic. 13793 // TODO: this should happen for bitfield stores, too. 13794 Expr::EvalResult Result; 13795 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13796 S.isConstantEvaluated())) { 13797 llvm::APSInt Value(32); 13798 Value = Result.Val.getInt(); 13799 13800 if (S.SourceMgr.isInSystemMacro(CC)) 13801 return; 13802 13803 std::string PrettySourceValue = toString(Value, 10); 13804 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13805 13806 S.DiagRuntimeBehavior( 13807 E->getExprLoc(), E, 13808 S.PDiag(diag::warn_impcast_integer_precision_constant) 13809 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13810 << E->getSourceRange() << SourceRange(CC)); 13811 return; 13812 } 13813 13814 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
13815 if (S.SourceMgr.isInSystemMacro(CC)) 13816 return; 13817 13818 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 13819 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 13820 /* pruneControlFlow */ true); 13821 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 13822 } 13823 13824 if (TargetRange.Width > SourceTypeRange.Width) { 13825 if (auto *UO = dyn_cast<UnaryOperator>(E)) 13826 if (UO->getOpcode() == UO_Minus) 13827 if (Source->isUnsignedIntegerType()) { 13828 if (Target->isUnsignedIntegerType()) 13829 return DiagnoseImpCast(S, E, T, CC, 13830 diag::warn_impcast_high_order_zero_bits); 13831 if (Target->isSignedIntegerType()) 13832 return DiagnoseImpCast(S, E, T, CC, 13833 diag::warn_impcast_nonnegative_result); 13834 } 13835 } 13836 13837 if (TargetRange.Width == LikelySourceRange.Width && 13838 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 13839 Source->isSignedIntegerType()) { 13840 // Warn when doing a signed to signed conversion, warn if the positive 13841 // source value is exactly the width of the target type, which will 13842 // cause a negative value to be stored. 13843 13844 Expr::EvalResult Result; 13845 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 13846 !S.SourceMgr.isInSystemMacro(CC)) { 13847 llvm::APSInt Value = Result.Val.getInt(); 13848 if (isSameWidthConstantConversion(S, E, T, CC)) { 13849 std::string PrettySourceValue = toString(Value, 10); 13850 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13851 13852 S.DiagRuntimeBehavior( 13853 E->getExprLoc(), E, 13854 S.PDiag(diag::warn_impcast_integer_precision_constant) 13855 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13856 << E->getSourceRange() << SourceRange(CC)); 13857 return; 13858 } 13859 } 13860 13861 // Fall through for non-constants to give a sign conversion warning. 
13862 } 13863 13864 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) && 13865 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 13866 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 13867 LikelySourceRange.Width == TargetRange.Width))) { 13868 if (S.SourceMgr.isInSystemMacro(CC)) 13869 return; 13870 13871 unsigned DiagID = diag::warn_impcast_integer_sign; 13872 13873 // Traditionally, gcc has warned about this under -Wsign-compare. 13874 // We also want to warn about it in -Wconversion. 13875 // So if -Wconversion is off, use a completely identical diagnostic 13876 // in the sign-compare group. 13877 // The conditional-checking code will 13878 if (ICContext) { 13879 DiagID = diag::warn_impcast_integer_sign_conditional; 13880 *ICContext = true; 13881 } 13882 13883 return DiagnoseImpCast(S, E, T, CC, DiagID); 13884 } 13885 13886 // Diagnose conversions between different enumeration types. 13887 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 13888 // type, to give us better diagnostics. 
13889 QualType SourceType = E->getType(); 13890 if (!S.getLangOpts().CPlusPlus) { 13891 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13892 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 13893 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 13894 SourceType = S.Context.getTypeDeclType(Enum); 13895 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 13896 } 13897 } 13898 13899 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 13900 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 13901 if (SourceEnum->getDecl()->hasNameForLinkage() && 13902 TargetEnum->getDecl()->hasNameForLinkage() && 13903 SourceEnum != TargetEnum) { 13904 if (S.SourceMgr.isInSystemMacro(CC)) 13905 return; 13906 13907 return DiagnoseImpCast(S, E, SourceType, T, CC, 13908 diag::warn_impcast_different_enum_types); 13909 } 13910 } 13911 13912 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13913 SourceLocation CC, QualType T); 13914 13915 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13916 SourceLocation CC, bool &ICContext) { 13917 E = E->IgnoreParenImpCasts(); 13918 13919 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13920 return CheckConditionalOperator(S, CO, CC, T); 13921 13922 AnalyzeImplicitConversions(S, E, CC); 13923 if (E->getType() != T) 13924 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13925 } 13926 13927 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13928 SourceLocation CC, QualType T) { 13929 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13930 13931 Expr *TrueExpr = E->getTrueExpr(); 13932 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13933 TrueExpr = BCO->getCommon(); 13934 13935 bool Suspicious = false; 13936 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13937 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13938 13939 if (T->isBooleanType()) 13940 
DiagnoseIntInBoolContext(S, E); 13941 13942 // If -Wconversion would have warned about either of the candidates 13943 // for a signedness conversion to the context type... 13944 if (!Suspicious) return; 13945 13946 // ...but it's currently ignored... 13947 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13948 return; 13949 13950 // ...then check whether it would have warned about either of the 13951 // candidates for a signedness conversion to the condition type. 13952 if (E->getType() == T) return; 13953 13954 Suspicious = false; 13955 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13956 E->getType(), CC, &Suspicious); 13957 if (!Suspicious) 13958 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13959 E->getType(), CC, &Suspicious); 13960 } 13961 13962 /// Check conversion of given expression to boolean. 13963 /// Input argument E is a logical expression. 13964 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 13965 if (S.getLangOpts().Bool) 13966 return; 13967 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 13968 return; 13969 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 13970 } 13971 13972 namespace { 13973 struct AnalyzeImplicitConversionsWorkItem { 13974 Expr *E; 13975 SourceLocation CC; 13976 bool IsListInit; 13977 }; 13978 } 13979 13980 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 13981 /// that should be visited are added to WorkList. 13982 static void AnalyzeImplicitConversions( 13983 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 13984 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 13985 Expr *OrigE = Item.E; 13986 SourceLocation CC = Item.CC; 13987 13988 QualType T = OrigE->getType(); 13989 Expr *E = OrigE->IgnoreParenImpCasts(); 13990 13991 // Propagate whether we are in a C++ list initialization expression. 
13992 // If so, we do not issue warnings for implicit int-float conversion 13993 // precision loss, because C++11 narrowing already handles it. 13994 bool IsListInit = Item.IsListInit || 13995 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 13996 13997 if (E->isTypeDependent() || E->isValueDependent()) 13998 return; 13999 14000 Expr *SourceExpr = E; 14001 // Examine, but don't traverse into the source expression of an 14002 // OpaqueValueExpr, since it may have multiple parents and we don't want to 14003 // emit duplicate diagnostics. Its fine to examine the form or attempt to 14004 // evaluate it in the context of checking the specific conversion to T though. 14005 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 14006 if (auto *Src = OVE->getSourceExpr()) 14007 SourceExpr = Src; 14008 14009 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 14010 if (UO->getOpcode() == UO_Not && 14011 UO->getSubExpr()->isKnownToHaveBooleanValue()) 14012 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 14013 << OrigE->getSourceRange() << T->isBooleanType() 14014 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 14015 14016 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr)) 14017 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) && 14018 BO->getLHS()->isKnownToHaveBooleanValue() && 14019 BO->getRHS()->isKnownToHaveBooleanValue() && 14020 BO->getLHS()->HasSideEffects(S.Context) && 14021 BO->getRHS()->HasSideEffects(S.Context)) { 14022 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical) 14023 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange() 14024 << FixItHint::CreateReplacement( 14025 BO->getOperatorLoc(), 14026 (BO->getOpcode() == BO_And ? "&&" : "||")); 14027 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int); 14028 } 14029 14030 // For conditional operators, we analyze the arguments as if they 14031 // were being fed directly into the output. 
14032 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 14033 CheckConditionalOperator(S, CO, CC, T); 14034 return; 14035 } 14036 14037 // Check implicit argument conversions for function calls. 14038 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 14039 CheckImplicitArgumentConversions(S, Call, CC); 14040 14041 // Go ahead and check any implicit conversions we might have skipped. 14042 // The non-canonical typecheck is just an optimization; 14043 // CheckImplicitConversion will filter out dead implicit conversions. 14044 if (SourceExpr->getType() != T) 14045 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 14046 14047 // Now continue drilling into this expression. 14048 14049 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 14050 // The bound subexpressions in a PseudoObjectExpr are not reachable 14051 // as transitive children. 14052 // FIXME: Use a more uniform representation for this. 14053 for (auto *SE : POE->semantics()) 14054 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 14055 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 14056 } 14057 14058 // Skip past explicit casts. 14059 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 14060 E = CE->getSubExpr()->IgnoreParenImpCasts(); 14061 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 14062 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 14063 WorkList.push_back({E, CC, IsListInit}); 14064 return; 14065 } 14066 14067 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14068 // Do a somewhat different check with comparison operators. 14069 if (BO->isComparisonOp()) 14070 return AnalyzeComparison(S, BO); 14071 14072 // And with simple assignments. 14073 if (BO->getOpcode() == BO_Assign) 14074 return AnalyzeAssignment(S, BO); 14075 // And with compound assignments. 14076 if (BO->isAssignmentOp()) 14077 return AnalyzeCompoundAssignment(S, BO); 14078 } 14079 14080 // These break the otherwise-useful invariant below. 
Fortunately, 14081 // we don't really need to recurse into them, because any internal 14082 // expressions should have been analyzed already when they were 14083 // built into statements. 14084 if (isa<StmtExpr>(E)) return; 14085 14086 // Don't descend into unevaluated contexts. 14087 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 14088 14089 // Now just recurse over the expression's children. 14090 CC = E->getExprLoc(); 14091 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 14092 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 14093 for (Stmt *SubStmt : E->children()) { 14094 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 14095 if (!ChildExpr) 14096 continue; 14097 14098 if (IsLogicalAndOperator && 14099 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 14100 // Ignore checking string literals that are in logical and operators. 14101 // This is a common pattern for asserts. 14102 continue; 14103 WorkList.push_back({ChildExpr, CC, IsListInit}); 14104 } 14105 14106 if (BO && BO->isLogicalOp()) { 14107 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 14108 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14109 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14110 14111 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 14112 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14113 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14114 } 14115 14116 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 14117 if (U->getOpcode() == UO_LNot) { 14118 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 14119 } else if (U->getOpcode() != UO_AddrOf) { 14120 if (U->getSubExpr()->getType()->isAtomicType()) 14121 S.Diag(U->getSubExpr()->getBeginLoc(), 14122 diag::warn_atomic_implicit_seq_cst); 14123 } 14124 } 14125 } 14126 14127 /// AnalyzeImplicitConversions - Find and report any interesting 14128 /// implicit conversions in the given expression. 
/// There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
                                       bool IsListInit/*= false*/) {
  // Iterative driver for the data-recursive worklist overload above: seed the
  // worklist with the root expression, then drain it.  Each processed item may
  // push its interesting subexpressions back onto the worklist, so the whole
  // expression tree is visited without unbounded native recursion on deeply
  // nested expressions.
  llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
  WorkList.push_back({OrigE, CC, IsListInit});
  while (!WorkList.empty())
    AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
  // Taking into account implicit conversions,
  // allow any integer.
  // Returns true on error (non-integer local-size argument), false otherwise.
  if (!E->getType()->isIntegerType()) {
    S.Diag(E->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_invalid_local_size_type);
    return true;
  }
  // Potentially emit standard warnings for implicit conversions if enabled
  // using -Wconversion.
  CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
  return false;
}

// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a reference.
14155 static bool CheckForReference(Sema &SemaRef, const Expr *E, 14156 const PartialDiagnostic &PD) { 14157 E = E->IgnoreParenImpCasts(); 14158 14159 const FunctionDecl *FD = nullptr; 14160 14161 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 14162 if (!DRE->getDecl()->getType()->isReferenceType()) 14163 return false; 14164 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 14165 if (!M->getMemberDecl()->getType()->isReferenceType()) 14166 return false; 14167 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 14168 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 14169 return false; 14170 FD = Call->getDirectCallee(); 14171 } else { 14172 return false; 14173 } 14174 14175 SemaRef.Diag(E->getExprLoc(), PD); 14176 14177 // If possible, point to location of function. 14178 if (FD) { 14179 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 14180 } 14181 14182 return true; 14183 } 14184 14185 // Returns true if the SourceLocation is expanded from any macro body. 14186 // Returns false if the SourceLocation is invalid, is from not in a macro 14187 // expansion, or is from expanded from a top-level macro argument. 14188 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 14189 if (Loc.isInvalid()) 14190 return false; 14191 14192 while (Loc.isMacroID()) { 14193 if (SM.isMacroBodyExpansion(Loc)) 14194 return true; 14195 Loc = SM.getImmediateMacroCallerLoc(Loc); 14196 } 14197 14198 return false; 14199 } 14200 14201 /// Diagnose pointers that are always non-null. 
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
/// compared to a null pointer
/// \param IsEqual True when the comparison is equal to a null pointer
/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
                                        Expr::NullPointerConstantKind NullKind,
                                        bool IsEqual, SourceRange Range) {
  if (!E)
    return;

  // Don't warn inside macros.
  if (E->getExprLoc().isMacroID()) {
    const SourceManager &SM = getSourceManager();
    if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
        IsInAnyMacroBody(SM, Range.getBegin()))
      return;
  }
  E = E->IgnoreImpCasts();

  // NPCK_NotNull means E is being converted to bool; any other kind means E
  // is being compared against a null pointer constant.
  const bool IsCompare = NullKind != Expr::NPCK_NotNull;

  // 'this' is never null in well-formed code, so either form warrants a
  // dedicated diagnostic.
  if (isa<CXXThisExpr>(E)) {
    unsigned DiagID = IsCompare ? diag::warn_this_null_compare
                                : diag::warn_this_bool_conversion;
    Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
    return;
  }

  bool IsAddressOf = false;

  // Strip a top-level '&'; from here on E is the operand whose address is
  // taken.  Any other unary operator means we have nothing to say.
  if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() != UO_AddrOf)
      return;
    IsAddressOf = true;
    E = UO->getSubExpr();
  }

  // Taking the address of a reference always yields a non-null pointer.
  if (IsAddressOf) {
    unsigned DiagID = IsCompare
                          ? diag::warn_address_of_reference_null_compare
                          : diag::warn_address_of_reference_bool_conversion;
    PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
                                         << IsEqual;
    if (CheckForReference(*this, E, PD)) {
      return;
    }
  }

  // Shared diagnostic emitter for the nonnull-attribute cases below; the
  // attribute kind distinguishes a nonnull parameter from a returns_nonnull
  // call.  Note this captures E by reference, so it sees the stripped form.
  auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
    bool IsParam = isa<NonNullAttr>(NonnullAttr);
    std::string Str;
    llvm::raw_string_ostream S(Str);
    E->printPretty(S, nullptr, getPrintingPolicy());
    unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
                                : diag::warn_cast_nonnull_to_bool;
    Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
                                  << E->getSourceRange() << Range << IsEqual;
    Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
  };

  // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
  if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
    if (auto *Callee = Call->getDirectCallee()) {
      if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }
    }
  }

  // Expect to find a single Decl.  Skip anything more complicated.
  ValueDecl *D = nullptr;
  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
    D = R->getDecl();
  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    D = M->getMemberDecl();
  }

  // Weak Decls can be null.
  if (!D || D->isWeak())
    return;

  // Check for parameter decl with nonnull attribute
  if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
    // Only warn if the parameter hasn't been reassigned in this function;
    // ModifiedNonNullParams tracks such writes.
    if (getCurFunction() &&
        !getCurFunction()->ModifiedNonNullParams.count(PV)) {
      // The attribute may sit directly on the parameter...
      if (const Attr *A = PV->getAttr<NonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }

      // ...or on the enclosing function, naming parameters by index.
      if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
        // Skip function template not specialized yet.
        if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
          return;
        auto ParamIter = llvm::find(FD->parameters(), PV);
        assert(ParamIter != FD->param_end());
        unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);

        for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
          // An argument-less nonnull attribute covers every pointer param.
          if (!NonNull->args_size()) {
            ComplainAboutNonnullParamOrCall(NonNull);
            return;
          }

          for (const ParamIdx &ArgNo : NonNull->args()) {
            if (ArgNo.getASTIndex() == ParamNo) {
              ComplainAboutNonnullParamOrCall(NonNull);
              return;
            }
          }
        }
      }
    }
  }

  QualType T = D->getType();
  const bool IsArray = T->isArrayType();
  const bool IsFunction = T->isFunctionType();

  // Address of function is used to silence the function warning.
  if (IsAddressOf && IsFunction) {
    return;
  }

  // Found nothing.
  if (!IsAddressOf && !IsFunction && !IsArray)
    return;

  // Pretty print the expression for the diagnostic.
  std::string Str;
  llvm::raw_string_ostream S(Str);
  E->printPretty(S, nullptr, getPrintingPolicy());

  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
                              : diag::warn_impcast_pointer_to_bool;
  // Selector value for the %select in the diagnostic text.
  enum {
    AddressOf,
    FunctionPointer,
    ArrayPointer
  } DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getBeginLoc(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here.  If there is null constant, the only suggest
    // for a pointer return type.  If the null is 0, then suggest if the return
    // type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else {
        return;
      }
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}

/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression).  Implements -Wconversion
/// and -Wsign-compare.
///
/// \param CC the "context" location of the implicit conversion, i.e.
///   the most location of the syntactic entity requiring the implicit
///   conversion
void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
  // Don't diagnose in unevaluated contexts.
  if (isUnevaluatedContext())
    return;

  // Don't diagnose for value- or type-dependent expressions.
  if (E->isTypeDependent() || E->isValueDependent())
    return;

  // Check for array bounds violations in cases where the check isn't triggered
  // elsewhere for other Expr types (like BinaryOperators), e.g. when an
  // ArraySubscriptExpr is on the RHS of a variable initialization.
  CheckArrayAccess(E);

  // This is not the right CC for (e.g.) a variable initialization.
  AnalyzeImplicitConversions(*this, E, CC);
}

/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
  // Delegate to the file-static helper of the same name.
  ::CheckBoolLikeConversion(*this, E, CC);
}

/// Diagnose when expression is an integer constant expression and its evaluation
/// results in integer overflow
void Sema::CheckForIntOverflow (Expr *E) {
  // Use a work list to deal with nested struct initializers.
  SmallVector<Expr *, 2> Exprs(1, E);

  do {
    Expr *OriginalE = Exprs.pop_back_val();
    // Shadowed intentionally: E is the cast-stripped form of OriginalE.
    Expr *E = OriginalE->IgnoreParenCasts();

    // Binary operators are evaluated directly for overflow; no need to
    // descend further, EvaluateForOverflow handles subexpressions.
    if (isa<BinaryOperator>(E)) {
      E->EvaluateForOverflow(Context);
      continue;
    }

    // Otherwise queue up the children that can carry constant expressions:
    // init-list elements, call arguments, and ObjC message arguments.
    if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
      Exprs.append(InitList->inits().begin(), InitList->inits().end());
    else if (isa<ObjCBoxedExpr>(OriginalE))
      E->EvaluateForOverflow(Context);
    else if (auto Call = dyn_cast<CallExpr>(E))
      Exprs.append(Call->arg_begin(), Call->arg_end());
    else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
      Exprs.append(Message->arg_begin(), Message->arg_end());
  } while (!Exprs.empty());
}

namespace {

/// Visitor for expressions which looks for unsequenced operations on the
/// same object.
class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
  using Base = ConstEvaluatedExprVisitor<SequenceChecker>;

  /// A tree of sequenced regions within an expression. Two regions are
  /// unsequenced if one is an ancestor or a descendent of the other. When we
  /// finish processing an expression with sequencing, such as a comma
  /// expression, we fold its tree nodes into its parent, since they are
  /// unsequenced with respect to nodes we will visit later.
  class SequenceTree {
    struct Value {
      explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
      /// Index of this region's parent region (the root is its own parent).
      unsigned Parent : 31;
      /// True once this region has been folded into its parent.
      unsigned Merged : 1;
    };
    SmallVector<Value, 8> Values;

  public:
    /// A region within an expression which may be sequenced with respect
    /// to some other region.
    class Seq {
      friend class SequenceTree;

      /// Index into SequenceTree::Values of the corresponding node.
      unsigned Index;

      explicit Seq(unsigned N) : Index(N) {}

    public:
      /// By default a Seq refers to the root region.
      Seq() : Index(0) {}
    };

    SequenceTree() { Values.push_back(Value(0)); }
    Seq root() const { return Seq(0); }

    /// Create a new sequence of operations, which is an unsequenced
    /// subset of \p Parent. This sequence of operations is sequenced with
    /// respect to other children of \p Parent.
    Seq allocate(Seq Parent) {
      Values.push_back(Value(Parent.Index));
      return Seq(Values.size() - 1);
    }

    /// Merge a sequence of operations into its parent.
    void merge(Seq S) {
      Values[S.Index].Merged = true;
    }

    /// Determine whether two operations are unsequenced. This operation
    /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
    /// should have been merged into its parent as appropriate.
    bool isUnsequenced(Seq Cur, Seq Old) {
      // Walk from Cur's representative towards the root; the regions are
      // unsequenced iff Old's representative is an ancestor of (or equal to)
      // Cur's. Parent indices only decrease towards the root, so we can stop
      // once we drop below Target.
      unsigned C = representative(Cur.Index);
      unsigned Target = representative(Old.Index);
      while (C >= Target) {
        if (C == Target)
          return true;
        C = Values[C].Parent;
      }
      return false;
    }

  private:
    /// Pick a representative for a sequence: the nearest un-merged ancestor.
    unsigned representative(unsigned K) {
      if (Values[K].Merged)
        // Perform path compression as we go.
        return Values[K].Parent = representative(Values[K].Parent);
      return K;
    }
  };

  /// An object for which we can track unsequenced uses.
  using Object = const NamedDecl *;

  /// Different flavors of object usage which we track. We only track the
  /// least-sequenced usage of each kind.
  enum UsageKind {
    /// A read of an object. Multiple unsequenced reads are OK.
    UK_Use,

    /// A modification of an object which is sequenced before the value
    /// computation of the expression, such as ++n in C++.
    UK_ModAsValue,

    /// A modification of an object which is not sequenced before the value
    /// computation of the expression, such as n++.
    UK_ModAsSideEffect,

    UK_Count = UK_ModAsSideEffect + 1
  };

  /// Bundle together a sequencing region and the expression corresponding
  /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
  struct Usage {
    /// The expression which performed the usage, or null if none seen yet.
    const Expr *UsageExpr;
    /// The sequencing region within which the usage occurred.
    SequenceTree::Seq Seq;

    Usage() : UsageExpr(nullptr) {}
  };

  struct UsageInfo {
    /// One recorded usage per usage kind.
    Usage Uses[UK_Count];

    /// Have we issued a diagnostic for this object already?
    bool Diagnosed;

    UsageInfo() : Diagnosed(false) {}
  };
  using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;

  /// Used for diagnostics, language options, and stack-space checks.
  Sema &SemaRef;

  /// Sequenced regions within the expression.
  SequenceTree Tree;

  /// Declaration modifications and references which we have seen.
  UsageInfoMap UsageMap;

  /// The region we are currently within.
  SequenceTree::Seq Region;

  /// Filled in with declarations which were modified as a side-effect
  /// (that is, post-increment operations).
  SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;

  /// Expressions to check later. We defer checking these to reduce
  /// stack usage.
  SmallVectorImpl<const Expr *> &WorkList;

  /// RAII object wrapping the visitation of a sequenced subexpression of an
  /// expression. At the end of this process, the side-effects of the evaluation
  /// become sequenced with respect to the value computation of the result, so
  /// we downgrade any UK_ModAsSideEffect within the evaluation to
  /// UK_ModAsValue.
  struct SequencedSubexpression {
    SequencedSubexpression(SequenceChecker &Self)
        : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
      // Redirect side-effect recording into this object for the duration of
      // the subexpression's visitation.
      Self.ModAsSideEffect = &ModAsSideEffect;
    }

    ~SequencedSubexpression() {
      for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
        // Add a new usage with usage kind UK_ModAsValue, and then restore
        // the previous usage with UK_ModAsSideEffect (thus clearing it if
        // the previous one was empty).
        UsageInfo &UI = Self.UsageMap[M.first];
        auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
        Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
        SideEffectUsage = M.second;
      }
      Self.ModAsSideEffect = OldModAsSideEffect;
    }

    SequenceChecker &Self;
    /// Side-effect modifications recorded while this subexpression was
    /// being visited.
    SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
    /// The checker's previous recording destination, restored on destruction.
    SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
  };

  /// RAII object wrapping the visitation of a subexpression which we might
  /// choose to evaluate as a constant. If any subexpression is evaluated and
  /// found to be non-constant, this allows us to suppress the evaluation of
  /// the outer expression.
  class EvaluationTracker {
  public:
    EvaluationTracker(SequenceChecker &Self)
        : Self(Self), Prev(Self.EvalTracker) {
      Self.EvalTracker = this;
    }

    ~EvaluationTracker() {
      Self.EvalTracker = Prev;
      // Propagate a failed evaluation outwards to the enclosing tracker.
      if (Prev)
        Prev->EvalOK &= EvalOK;
    }

    /// Try to evaluate \p E as a boolean condition into \p Result. Returns
    /// false once any evaluation in this scope has failed (EvalOK latches).
    bool evaluate(const Expr *E, bool &Result) {
      if (!EvalOK || E->isValueDependent())
        return false;
      EvalOK = E->EvaluateAsBooleanCondition(
          Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
      return EvalOK;
    }

  private:
    SequenceChecker &Self;
    /// The enclosing tracker, if any.
    EvaluationTracker *Prev;
    bool EvalOK = true;
  } *EvalTracker = nullptr;

  /// Find the object which is produced by the specified expression,
  /// if any.
  Object getObject(const Expr *E, bool Mod) const {
    E = E->IgnoreParenCasts();
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      // Pre-increment/decrement produces (and, for Mod, modifies) its operand.
      if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
        return getObject(UO->getSubExpr(), Mod);
    } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      // A comma expression produces its RHS; an assignment modifies its LHS.
      if (BO->getOpcode() == BO_Comma)
        return getObject(BO->getRHS(), Mod);
      if (Mod && BO->isAssignmentOp())
        return getObject(BO->getLHS(), Mod);
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
      // FIXME: Check for more interesting cases, like "x.n = ++x.n".
      if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
        return ME->getMemberDecl();
    } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      // FIXME: If this is a reference, map through to its value.
      return DRE->getDecl();
    return nullptr;
  }

  /// Note that an object \p O was modified or used by an expression
  /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
  /// the object \p O as obtained via the \p UsageMap.
  void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
    // Get the old usage for the given object and usage kind.
    Usage &U = UI.Uses[UK];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
      // If we have a modification as side effect and are in a sequenced
      // subexpression, save the old Usage so that we can restore it later
      // in SequencedSubexpression::~SequencedSubexpression.
      if (UK == UK_ModAsSideEffect && ModAsSideEffect)
        ModAsSideEffect->push_back(std::make_pair(O, U));
      // Then record the new usage with the current sequencing region.
      U.UsageExpr = UsageExpr;
      U.Seq = Region;
    }
  }

  /// Check whether a modification or use of an object \p O in an expression
  /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
  /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
  /// \p IsModMod is true when we are checking for a mod-mod unsequenced
  /// usage and false when we are checking for a mod-use unsequenced usage.
  void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
                  UsageKind OtherKind, bool IsModMod) {
    // Only issue one diagnostic per object.
    if (UI.Diagnosed)
      return;

    const Usage &U = UI.Uses[OtherKind];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
      return;

    // Report the modification first; if the prior usage was a use, swap so
    // that the diagnostic's "mod" is the recorded modification.
    const Expr *Mod = U.UsageExpr;
    const Expr *ModOrUse = UsageExpr;
    if (OtherKind == UK_Use)
      std::swap(Mod, ModOrUse);

    SemaRef.DiagRuntimeBehavior(
        Mod->getExprLoc(), {Mod, ModOrUse},
        SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
                               : diag::warn_unsequenced_mod_use)
            << O << SourceRange(ModOrUse->getExprLoc()));
    UI.Diagnosed = true;
  }

  // A note on note{Pre, Post}{Use, Mod}:
  //
  // (It helps to follow the algorithm with an expression such as
  //  "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
  //  operations before C++17 and both are well-defined in C++17).
  //
  // When visiting a node which uses/modify an object we first call notePreUse
  // or notePreMod before visiting its sub-expression(s). At this point the
  // children of the current node have not yet been visited and so the eventual
  // uses/modifications resulting from the children of the current node have not
  // been recorded yet.
  //
  // We then visit the children of the current node. After that notePostUse or
  // notePostMod is called. These will 1) detect an unsequenced modification
  // as side effect (as in "k++ + k") and 2) add a new usage with the
  // appropriate usage kind.
  //
  // We also have to be careful that some operation sequences modification as
  // side effect as well (for example: || or ,). To account for this we wrap
  // the visitation of such a sub-expression (for example: the LHS of || or ,)
  // with SequencedSubexpression. SequencedSubexpression is an RAII object
  // which record usages which are modifications as side effect, and then
  // downgrade them (or more accurately restore the previous usage which was a
  // modification as side effect) when exiting the scope of the sequenced
  // subexpression.

  void notePreUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    // Uses conflict with other modifications.
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
  }

  void notePostUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/false);
    addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
  }

  void notePreMod(Object O, const Expr *ModExpr) {
    UsageInfo &UI = UsageMap[O];
    // Modifications conflict with other modifications and with uses.
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
  }

  void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/true);
    addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
  }

public:
  SequenceChecker(Sema &S, const Expr *E,
                  SmallVectorImpl<const Expr *> &WorkList)
      : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
    Visit(E);
    // Silence a -Wunused-private-field since WorkList is now unused.
    // TODO: Evaluate if it can be used, and if not remove it.
    (void)this->WorkList;
  }

  void VisitStmt(const Stmt *S) {
    // Skip all statements which aren't expressions for now.
  }

  void VisitExpr(const Expr *E) {
    // By default, just recurse to evaluated subexpressions.
    Base::VisitStmt(E);
  }

  void VisitCastExpr(const CastExpr *E) {
    Object O = Object();
    // An lvalue-to-rvalue conversion is a read of the underlying object.
    if (E->getCastKind() == CK_LValueToRValue)
      O = getObject(E->getSubExpr(), false);

    if (O)
      notePreUse(O, E);
    VisitExpr(E);
    if (O)
      notePostUse(O, E);
  }

  /// Visit two expressions where \p SequencedBefore is fully sequenced
  /// before \p SequencedAfter, giving each its own region and folding both
  /// back into the current region afterwards.
  void VisitSequencedExpressions(const Expr *SequencedBefore,
                                 const Expr *SequencedAfter) {
    SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
    SequenceTree::Seq AfterRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    {
      SequencedSubexpression SeqBefore(*this);
      Region = BeforeRegion;
      Visit(SequencedBefore);
    }

    Region = AfterRegion;
    Visit(SequencedAfter);

    Region = OldRegion;

    Tree.merge(BeforeRegion);
    Tree.merge(AfterRegion);
  }

  void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
    // C++17 [expr.sub]p1:
    //   The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
    //   expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
    else {
      Visit(ASE->getLHS());
      Visit(ASE->getRHS());
    }
  }

  void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMem(const BinaryOperator *BO) {
    // C++17 [expr.mptr.oper]p4:
    //  Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
    //  the expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShlShr(const BinaryOperator *BO) {
    // C++17 [expr.shift]p4:
    //  The expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinComma(const BinaryOperator *BO) {
    // C++11 [expr.comma]p1:
    //   Every value computation and side effect associated with the left
    //   expression is sequenced before every value computation and side
    //   effect associated with the right expression.
    VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
  }

  void VisitBinAssign(const BinaryOperator *BO) {
    SequenceTree::Seq RHSRegion;
    SequenceTree::Seq LHSRegion;
    if (SemaRef.getLangOpts().CPlusPlus17) {
      RHSRegion = Tree.allocate(Region);
      LHSRegion = Tree.allocate(Region);
    } else {
      // Before C++17 the two operands share the current region (they are
      // unsequenced with respect to each other).
      RHSRegion = Region;
      LHSRegion = Region;
    }
    SequenceTree::Seq OldRegion = Region;

    // C++11 [expr.ass]p1:
    //  [...] the assignment is sequenced after the value computation
    //  of the right and left operands, [...]
    //
    // so check it before inspecting the operands and update the
    // map afterwards.
    Object O = getObject(BO->getLHS(), /*Mod=*/true);
    if (O)
      notePreMod(O, BO);

    if (SemaRef.getLangOpts().CPlusPlus17) {
      // C++17 [expr.ass]p1:
      //  [...] The right operand is sequenced before the left operand. [...]
      {
        SequencedSubexpression SeqBefore(*this);
        Region = RHSRegion;
        Visit(BO->getRHS());
      }

      Region = LHSRegion;
      Visit(BO->getLHS());

      // A compound assignment also reads the LHS object.
      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

    } else {
      // C++11 does not specify any sequencing between the LHS and RHS.
      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    // C++11 [expr.ass]p1:
    //  the assignment is sequenced [...] before the value computation of the
    //  assignment expression.
    // C11 6.5.16/3 has no such rule.
    Region = OldRegion;
    if (O)
      notePostMod(O, BO,
                  SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                  : UK_ModAsSideEffect);
    if (SemaRef.getLangOpts().CPlusPlus17) {
      Tree.merge(RHSRegion);
      Tree.merge(LHSRegion);
    }
  }

  void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
    VisitBinAssign(CAO);
  }

  void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // C++11 [expr.pre.incr]p1:
    //   the expression ++x is equivalent to x+=1
    notePostMod(O, UO,
                SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                : UK_ModAsSideEffect);
  }

  void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // The modification is not sequenced before the value computation.
    notePostMod(O, UO, UK_ModAsSideEffect);
  }

  void VisitBinLOr(const BinaryOperator *BO) {
    // C++11 [expr.log.or]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.or]p1:
    //  [...] the second operand is not evaluated if the first operand
    //  evaluates to true.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitBinLAnd(const BinaryOperator *BO) {
    // C++11 [expr.log.and]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.and]p1:
    //  [...] the second operand is not evaluated if the first operand is false.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
    // C++11 [expr.cond]p1:
    //  [...] Every value computation and side effect associated with the first
    //  expression is sequenced before every value computation and side effect
    //  associated with the second or third expression.
    SequenceTree::Seq ConditionRegion = Tree.allocate(Region);

    // No sequencing is specified between the true and false expression.
    // However since exactly one of both is going to be evaluated we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y+= 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expression with
    // SequencedSubexpression because we don't want to downgrade modifications
    // as side effect in the true and false expressions after the visitation
    // is done. (for example in the expression "(x ? y++ : y++) + y" we should
    // not warn between the two "y++", but we should warn between the "y++"
    // and the "y").
    SequenceTree::Seq TrueRegion = Tree.allocate(Region);
    SequenceTree::Seq FalseRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = ConditionRegion;
      Visit(CO->getCond());
    }

    // C++11 [expr.cond]p1:
    //  [...] The first expression is contextually converted to bool (Clause 4).
    //  It is evaluated and if it is true, the result of the conditional
    //  expression is the value of the second expression, otherwise that of the
    //  third expression. Only one of the second and third expressions is
    //  evaluated. [...]
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
    bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
    bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitTrueExpr) {
      Region = TrueRegion;
      Visit(CO->getTrueExpr());
    }
    if (ShouldVisitFalseExpr) {
      Region = FalseRegion;
      Visit(CO->getFalseExpr());
    }

    Region = OldRegion;
    Tree.merge(ConditionRegion);
    Tree.merge(TrueRegion);
    Tree.merge(FalseRegion);
  }

  void VisitCallExpr(const CallExpr *CE) {
    // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.

    if (CE->isUnevaluatedBuiltinCall(Context))
      return;

    // C++11 [intro.execution]p15:
    //   When calling a function [...], every value computation and side effect
    //   associated with any argument expression, or with the postfix expression
    //   designating the called function, is sequenced before execution of every
    //   expression or statement in the body of the function [and thus before
    //   the value computation of its result].
    SequencedSubexpression Sequenced(*this);
    SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
      // C++17 [expr.call]p5
      //   The postfix-expression is sequenced before each expression in the
      //   expression-list and any default argument. [...]
      SequenceTree::Seq CalleeRegion;
      SequenceTree::Seq OtherRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        CalleeRegion = Tree.allocate(Region);
        OtherRegion = Tree.allocate(Region);
      } else {
        CalleeRegion = Region;
        OtherRegion = Region;
      }
      SequenceTree::Seq OldRegion = Region;

      // Visit the callee expression first. In C++17 its side effects are
      // additionally sequenced before the arguments, hence the
      // SequencedSubexpression wrapper.
      Region = CalleeRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        SequencedSubexpression Sequenced(*this);
        Visit(CE->getCallee());
      } else {
        Visit(CE->getCallee());
      }

      // Then visit the argument expressions.
      Region = OtherRegion;
      for (const Expr *Argument : CE->arguments())
        Visit(Argument);

      Region = OldRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        Tree.merge(CalleeRegion);
        Tree.merge(OtherRegion);
      }
    });
  }

  void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
    // C++17 [over.match.oper]p2:
    //   [...] the operator notation is first transformed to the equivalent
    //   function-call notation as summarized in Table 12 (where @ denotes one
    //   of the operators covered in the specified subclause). However, the
    //   operands are sequenced in the order prescribed for the built-in
    //   operator (Clause 8).
    //
    // From the above only overloaded binary operators and overloaded call
    // operators have sequencing rules in C++17 that we need to handle
    // separately.
    if (!SemaRef.getLangOpts().CPlusPlus17 ||
        (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
      return VisitCallExpr(CXXOCE);

    enum {
      /// No restrictions beyond the ordinary call rules.
      NoSequencing,
      LHSBeforeRHS,
      RHSBeforeLHS,
      /// For the call operator: postfix-expression before all arguments.
      LHSBeforeRest
    } SequencingKind;
    switch (CXXOCE->getOperator()) {
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      SequencingKind = RHSBeforeLHS;
      break;

    case OO_LessLess:
    case OO_GreaterGreater:
    case OO_AmpAmp:
    case OO_PipePipe:
    case OO_Comma:
    case OO_ArrowStar:
    case OO_Subscript:
      SequencingKind = LHSBeforeRHS;
      break;

    case OO_Call:
      SequencingKind = LHSBeforeRest;
      break;

    default:
      SequencingKind = NoSequencing;
      break;
    }

    if (SequencingKind == NoSequencing)
      return VisitCallExpr(CXXOCE);

    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
      assert(SemaRef.getLangOpts().CPlusPlus17 &&
             "Should only get there with C++17 and above!");
      assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
             "Should only get there with an overloaded binary operator"
             " or an overloaded call operator!");

      if (SequencingKind == LHSBeforeRest) {
        assert(CXXOCE->getOperator() == OO_Call &&
               "We should only have an overloaded call operator here!");

        // This is very similar to VisitCallExpr, except that we only have the
        // C++17 case. The postfix-expression is the first argument of the
        // CXXOperatorCallExpr. The expressions in the expression-list, if any,
        // are in the following arguments.
        //
        // Note that we intentionally do not visit the callee expression since
        // it is just a decayed reference to a function.
        SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
        SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
        SequenceTree::Seq OldRegion = Region;

        assert(CXXOCE->getNumArgs() >= 1 &&
               "An overloaded call operator must have at least one argument"
               " for the postfix-expression!");
        const Expr *PostfixExpr = CXXOCE->getArgs()[0];
        llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
                                          CXXOCE->getNumArgs() - 1);

        // Visit the postfix-expression first.
        {
          Region = PostfixExprRegion;
          SequencedSubexpression Sequenced(*this);
          Visit(PostfixExpr);
        }

        // Then visit the argument expressions.
        Region = ArgsRegion;
        for (const Expr *Arg : Args)
          Visit(Arg);

        Region = OldRegion;
        Tree.merge(PostfixExprRegion);
        Tree.merge(ArgsRegion);
      } else {
        assert(CXXOCE->getNumArgs() == 2 &&
               "Should only have two arguments here!");
        assert((SequencingKind == LHSBeforeRHS ||
                SequencingKind == RHSBeforeLHS) &&
               "Unexpected sequencing kind!");

        // We do not visit the callee expression since it is just a decayed
        // reference to a function.
        const Expr *E1 = CXXOCE->getArg(0);
        const Expr *E2 = CXXOCE->getArg(1);
        if (SequencingKind == RHSBeforeLHS)
          std::swap(E1, E2);

        return VisitSequencedExpressions(E1, E2);
      }
    });
  }

  void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    if (!CCE->isListInitialization())
      return VisitExpr(CCE);

    // In C++11, list initializations are sequenced.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
                                              E = CCE->arg_end();
         I != E; ++I) {
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(*I);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }

  void VisitInitListExpr(const InitListExpr *ILE) {
    if (!SemaRef.getLangOpts().CPlusPlus11)
      return VisitExpr(ILE);

    // In C++11, list initializations are sequenced.
/// Run the battery of whole-expression checks on a fully-built expression:
/// implicit conversions, unsequenced modifications, integer overflow, and
/// any pending misaligned-member diagnostics.
///
/// \param E the completed expression to check.
/// \param CheckLoc location passed through to the implicit-conversion checks.
/// \param IsConstexpr when true, the expression is in a constant-expression
///        context; the integer-overflow check is skipped in that case.
void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
                              bool IsConstexpr) {
  // Temporarily mark the checks as constant-evaluated when the expression is
  // (or wraps) a constant expression; restored on scope exit.
  llvm::SaveAndRestore<bool> ConstantContext(
      isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E));
  CheckImplicitConversions(E, CheckLoc);
  // Unsequenced-operation analysis is meaningless on instantiation-dependent
  // expressions; it runs again after instantiation.
  if (!E->isInstantiationDependent())
    CheckUnsequencedOperations(E);
  if (!IsConstexpr && !E->isValueDependent())
    CheckForIntOverflow(E);
  DiagnoseMisalignedMembers();
}
/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function. This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
///
/// \param Parameters the parameters of the function being defined.
/// \param CheckParameterNames whether to diagnose parameters that lack an
///        identifier (required for function definitions in C).
/// \returns true if any parameter was marked invalid.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                    bool CheckParameterNames) {
  bool HasInvalidParm = false;
  for (ParmVarDecl *Param : Parameters) {
    // C99 6.7.5.3p4: the parameters in a parameter type list in a
    // function declarator that is part of a function definition of
    // that function shall not have incomplete type.
    //
    // This is also C++ [dcl.fct]p6.
    if (!Param->isInvalidDecl() &&
        RequireCompleteType(Param->getLocation(), Param->getType(),
                            diag::err_typecheck_decl_incomplete_type)) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
    }

    // C99 6.9.1p5: If the declarator includes a parameter type list, the
    // declaration of each parameter shall include an identifier.
    // Not diagnosed in C++ (where unnamed parameters are valid) or for
    // implicit parameters.
    if (CheckParameterNames && Param->getIdentifier() == nullptr &&
        !Param->isImplicit() && !getLangOpts().CPlusPlus) {
      // Diagnose this as an extension in C17 and earlier.
      if (!getLangOpts().C2x)
        Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
    }

    // C99 6.7.5.3p12:
    //   If the function declarator is not part of a definition of that
    //   function, parameters may have incomplete type and may use the [*]
    //   notation in their sequences of declarator specifiers to specify
    //   variable length array types.
    // Use the original (as-written) type so the [*] is still visible.
    QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point the '[*]' if source-location
    // information is added for it.
    diagnoseArrayStarInParamType(*this, PType, Param->getLocation());

    // If the parameter is a c++ class type and it has to be destructed in the
    // callee function, declare the destructor so that it can be called by the
    // callee function. Do not perform any direct access check on the dtor here.
    if (!Param->isInvalidDecl()) {
      if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
        if (!ClassDecl->isInvalidDecl() &&
            !ClassDecl->hasIrrelevantDestructor() &&
            !ClassDecl->isDependentContext() &&
            ClassDecl->isParamDestroyedInCallee()) {
          CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
          MarkFunctionReferenced(Param->getLocation(), Destructor);
          DiagnoseUseOfDecl(Destructor, Param->getLocation());
        }
      }
    }

    // Parameters with the pass_object_size attribute only need to be marked
    // constant at function definitions. Because we lack information about
    // whether we're on a declaration or definition when we're instantiating the
    // attribute, we need to check for constness here.
    if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
      if (!Param->getType().isConstQualified())
        Diag(Param->getLocation(), diag::err_attribute_pointers_only)
            << Attr->getSpelling() << 1;

    // Check for parameter names shadowing fields from the class.
    if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
      // The owning context for the parameter should be the function, but we
      // want to see if this function's declaration context is a record.
      DeclContext *DC = Param->getDeclContext();
      if (DC && DC->isFunctionOrMethod()) {
        if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
          CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
                                     RD, /*DeclIsField*/ false);
      }
    }
  }

  return HasInvalidParm;
}
15457 CharUnits NonVirtualAlignment = 15458 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 15459 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 15460 Offset = CharUnits::Zero(); 15461 } else { 15462 const ASTRecordLayout &RL = 15463 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 15464 Offset += RL.getBaseClassOffset(BaseDecl); 15465 } 15466 DerivedType = Base->getType(); 15467 } 15468 15469 return std::make_pair(BaseAlignment, Offset); 15470 } 15471 15472 /// Compute the alignment and offset of a binary additive operator. 15473 static Optional<std::pair<CharUnits, CharUnits>> 15474 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 15475 bool IsSub, ASTContext &Ctx) { 15476 QualType PointeeType = PtrE->getType()->getPointeeType(); 15477 15478 if (!PointeeType->isConstantSizeType()) 15479 return llvm::None; 15480 15481 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 15482 15483 if (!P) 15484 return llvm::None; 15485 15486 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 15487 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 15488 CharUnits Offset = EltSize * IdxRes->getExtValue(); 15489 if (IsSub) 15490 Offset = -Offset; 15491 return std::make_pair(P->first, P->second + Offset); 15492 } 15493 15494 // If the integer expression isn't a constant expression, compute the lower 15495 // bound of the alignment using the alignment and offset of the pointer 15496 // expression and the element size. 15497 return std::make_pair( 15498 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 15499 CharUnits::Zero()); 15500 } 15501 15502 /// This helper function takes an lvalue expression and returns the alignment of 15503 /// a VarDecl and a constant offset from the VarDecl. 
15504 Optional<std::pair<CharUnits, CharUnits>> 15505 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 15506 E = E->IgnoreParens(); 15507 switch (E->getStmtClass()) { 15508 default: 15509 break; 15510 case Stmt::CStyleCastExprClass: 15511 case Stmt::CXXStaticCastExprClass: 15512 case Stmt::ImplicitCastExprClass: { 15513 auto *CE = cast<CastExpr>(E); 15514 const Expr *From = CE->getSubExpr(); 15515 switch (CE->getCastKind()) { 15516 default: 15517 break; 15518 case CK_NoOp: 15519 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15520 case CK_UncheckedDerivedToBase: 15521 case CK_DerivedToBase: { 15522 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15523 if (!P) 15524 break; 15525 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15526 P->second, Ctx); 15527 } 15528 } 15529 break; 15530 } 15531 case Stmt::ArraySubscriptExprClass: { 15532 auto *ASE = cast<ArraySubscriptExpr>(E); 15533 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15534 false, Ctx); 15535 } 15536 case Stmt::DeclRefExprClass: { 15537 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15538 // FIXME: If VD is captured by copy or is an escaping __block variable, 15539 // use the alignment of VD's type. 
15540 if (!VD->getType()->isReferenceType()) 15541 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15542 if (VD->hasInit()) 15543 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15544 } 15545 break; 15546 } 15547 case Stmt::MemberExprClass: { 15548 auto *ME = cast<MemberExpr>(E); 15549 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15550 if (!FD || FD->getType()->isReferenceType() || 15551 FD->getParent()->isInvalidDecl()) 15552 break; 15553 Optional<std::pair<CharUnits, CharUnits>> P; 15554 if (ME->isArrow()) 15555 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15556 else 15557 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15558 if (!P) 15559 break; 15560 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15561 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 15562 return std::make_pair(P->first, 15563 P->second + CharUnits::fromQuantity(Offset)); 15564 } 15565 case Stmt::UnaryOperatorClass: { 15566 auto *UO = cast<UnaryOperator>(E); 15567 switch (UO->getOpcode()) { 15568 default: 15569 break; 15570 case UO_Deref: 15571 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 15572 } 15573 break; 15574 } 15575 case Stmt::BinaryOperatorClass: { 15576 auto *BO = cast<BinaryOperator>(E); 15577 auto Opcode = BO->getOpcode(); 15578 switch (Opcode) { 15579 default: 15580 break; 15581 case BO_Comma: 15582 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 15583 } 15584 break; 15585 } 15586 } 15587 return llvm::None; 15588 } 15589 15590 /// This helper function takes a pointer expression and returns the alignment of 15591 /// a VarDecl and a constant offset from the VarDecl. 
/// This helper function takes a pointer expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
///
/// Returns None when the pointer cannot be traced back to a variable,
/// 'this', or an lvalue with a determinable alignment.
Optional<std::pair<CharUnits, CharUnits>>
static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      // A no-op cast yields the same pointer value; look through it.
      return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
    case CK_ArrayToPointerDecay:
      // The decayed pointer points at the array lvalue itself.
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // Adjust along the cast's base-class path.
      auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(
          CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::CXXThisExprClass: {
    // 'this' is guaranteed at least the class's non-virtual alignment.
    auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
    CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
    return std::make_pair(Alignment, CharUnits::Zero());
  }
  case Stmt::UnaryOperatorClass: {
    // &lv: the pointer's target is the lvalue operand.
    auto *UO = cast<UnaryOperator>(E);
    if (UO->getOpcode() == UO_AddrOf)
      return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Add:
    case BO_Sub: {
      const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
      // For addition the integer operand may appear on either side;
      // normalize so the pointer is LHS. (For subtraction the pointer must
      // already be the left operand.)
      if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
        std::swap(LHS, RHS);
      return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
                                                  Ctx);
    }
    case BO_Comma:
      // (a, b): the resulting pointer is the right operand.
      return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return llvm::None;
}
15696 // Also allow casts involving incomplete types, which implicitly 15697 // includes 'void'. 15698 if (SrcPointee->isIncompleteType()) return; 15699 15700 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 15701 15702 if (SrcAlign >= DestAlign) return; 15703 15704 Diag(TRange.getBegin(), diag::warn_cast_align) 15705 << Op->getType() << T 15706 << static_cast<unsigned>(SrcAlign.getQuantity()) 15707 << static_cast<unsigned>(DestAlign.getQuantity()) 15708 << TRange << Op->getSourceRange(); 15709 } 15710 15711 /// Check whether this array fits the idiom of a size-one tail padded 15712 /// array member of a struct. 15713 /// 15714 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 15715 /// commonly used to emulate flexible arrays in C89 code. 15716 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 15717 const NamedDecl *ND) { 15718 if (Size != 1 || !ND) return false; 15719 15720 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 15721 if (!FD) return false; 15722 15723 // Don't consider sizes resulting from macro expansions or template argument 15724 // substitution to form C89 tail-padded arrays. 15725 15726 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 15727 while (TInfo) { 15728 TypeLoc TL = TInfo->getTypeLoc(); 15729 // Look through typedefs. 
15730 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 15731 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 15732 TInfo = TDL->getTypeSourceInfo(); 15733 continue; 15734 } 15735 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 15736 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 15737 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 15738 return false; 15739 } 15740 break; 15741 } 15742 15743 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 15744 if (!RD) return false; 15745 if (RD->isUnion()) return false; 15746 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 15747 if (!CRD->isStandardLayout()) return false; 15748 } 15749 15750 // See if this is the last field decl in the record. 15751 const Decl *D = FD; 15752 while ((D = D->getNextDeclInContext())) 15753 if (isa<FieldDecl>(D)) 15754 return false; 15755 return true; 15756 } 15757 15758 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 15759 const ArraySubscriptExpr *ASE, 15760 bool AllowOnePastEnd, bool IndexNegated) { 15761 // Already diagnosed by the constant evaluator. 15762 if (isConstantEvaluated()) 15763 return; 15764 15765 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 15766 if (IndexExpr->isValueDependent()) 15767 return; 15768 15769 const Type *EffectiveType = 15770 BaseExpr->getType()->getPointeeOrArrayElementType(); 15771 BaseExpr = BaseExpr->IgnoreParenCasts(); 15772 const ConstantArrayType *ArrayTy = 15773 Context.getAsConstantArrayType(BaseExpr->getType()); 15774 15775 const Type *BaseType = 15776 ArrayTy == nullptr ? 
nullptr : ArrayTy->getElementType().getTypePtr(); 15777 bool IsUnboundedArray = (BaseType == nullptr); 15778 if (EffectiveType->isDependentType() || 15779 (!IsUnboundedArray && BaseType->isDependentType())) 15780 return; 15781 15782 Expr::EvalResult Result; 15783 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 15784 return; 15785 15786 llvm::APSInt index = Result.Val.getInt(); 15787 if (IndexNegated) { 15788 index.setIsUnsigned(false); 15789 index = -index; 15790 } 15791 15792 const NamedDecl *ND = nullptr; 15793 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15794 ND = DRE->getDecl(); 15795 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 15796 ND = ME->getMemberDecl(); 15797 15798 if (IsUnboundedArray) { 15799 if (EffectiveType->isFunctionType()) 15800 return; 15801 if (index.isUnsigned() || !index.isNegative()) { 15802 const auto &ASTC = getASTContext(); 15803 unsigned AddrBits = 15804 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 15805 EffectiveType->getCanonicalTypeInternal())); 15806 if (index.getBitWidth() < AddrBits) 15807 index = index.zext(AddrBits); 15808 Optional<CharUnits> ElemCharUnits = 15809 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 15810 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 15811 // pointer) bounds-checking isn't meaningful. 15812 if (!ElemCharUnits) 15813 return; 15814 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 15815 // If index has more active bits than address space, we already know 15816 // we have a bounds violation to warn about. Otherwise, compute 15817 // address of (index + 1)th element, and warn about bounds violation 15818 // only if that address exceeds address space. 
15819 if (index.getActiveBits() <= AddrBits) { 15820 bool Overflow; 15821 llvm::APInt Product(index); 15822 Product += 1; 15823 Product = Product.umul_ov(ElemBytes, Overflow); 15824 if (!Overflow && Product.getActiveBits() <= AddrBits) 15825 return; 15826 } 15827 15828 // Need to compute max possible elements in address space, since that 15829 // is included in diag message. 15830 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 15831 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 15832 MaxElems += 1; 15833 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 15834 MaxElems = MaxElems.udiv(ElemBytes); 15835 15836 unsigned DiagID = 15837 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 15838 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 15839 15840 // Diag message shows element size in bits and in "bytes" (platform- 15841 // dependent CharUnits) 15842 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15843 PDiag(DiagID) 15844 << toString(index, 10, true) << AddrBits 15845 << (unsigned)ASTC.toBits(*ElemCharUnits) 15846 << toString(ElemBytes, 10, false) 15847 << toString(MaxElems, 10, false) 15848 << (unsigned)MaxElems.getLimitedValue(~0U) 15849 << IndexExpr->getSourceRange()); 15850 15851 if (!ND) { 15852 // Try harder to find a NamedDecl to point at in the note. 
15853 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15854 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15855 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15856 ND = DRE->getDecl(); 15857 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15858 ND = ME->getMemberDecl(); 15859 } 15860 15861 if (ND) 15862 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15863 PDiag(diag::note_array_declared_here) << ND); 15864 } 15865 return; 15866 } 15867 15868 if (index.isUnsigned() || !index.isNegative()) { 15869 // It is possible that the type of the base expression after 15870 // IgnoreParenCasts is incomplete, even though the type of the base 15871 // expression before IgnoreParenCasts is complete (see PR39746 for an 15872 // example). In this case we have no information about whether the array 15873 // access exceeds the array bounds. However we can still diagnose an array 15874 // access which precedes the array bounds. 15875 if (BaseType->isIncompleteType()) 15876 return; 15877 15878 llvm::APInt size = ArrayTy->getSize(); 15879 if (!size.isStrictlyPositive()) 15880 return; 15881 15882 if (BaseType != EffectiveType) { 15883 // Make sure we're comparing apples to apples when comparing index to size 15884 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 15885 uint64_t array_typesize = Context.getTypeSize(BaseType); 15886 // Handle ptrarith_typesize being zero, such as when casting to void* 15887 if (!ptrarith_typesize) ptrarith_typesize = 1; 15888 if (ptrarith_typesize != array_typesize) { 15889 // There's a cast to a different size type involved 15890 uint64_t ratio = array_typesize / ptrarith_typesize; 15891 // TODO: Be smarter about handling cases where array_typesize is not a 15892 // multiple of ptrarith_typesize 15893 if (ptrarith_typesize * ratio == array_typesize) 15894 size *= llvm::APInt(size.getBitWidth(), ratio); 15895 } 15896 } 15897 15898 if (size.getBitWidth() > index.getBitWidth()) 15899 index = 
index.zext(size.getBitWidth()); 15900 else if (size.getBitWidth() < index.getBitWidth()) 15901 size = size.zext(index.getBitWidth()); 15902 15903 // For array subscripting the index must be less than size, but for pointer 15904 // arithmetic also allow the index (offset) to be equal to size since 15905 // computing the next address after the end of the array is legal and 15906 // commonly done e.g. in C++ iterators and range-based for loops. 15907 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 15908 return; 15909 15910 // Also don't warn for arrays of size 1 which are members of some 15911 // structure. These are often used to approximate flexible arrays in C89 15912 // code. 15913 if (IsTailPaddedMemberArray(*this, size, ND)) 15914 return; 15915 15916 // Suppress the warning if the subscript expression (as identified by the 15917 // ']' location) and the index expression are both from macro expansions 15918 // within a system header. 15919 if (ASE) { 15920 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 15921 ASE->getRBracketLoc()); 15922 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 15923 SourceLocation IndexLoc = 15924 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 15925 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 15926 return; 15927 } 15928 } 15929 15930 unsigned DiagID = ASE ? 
/// Walk an expression looking for array subscripts (and OpenMP array
/// sections) to bounds-check, dispatching each one to the four-argument
/// CheckArrayAccess overload.
void Sema::CheckArrayAccess(const Expr *expr) {
  // Net nesting of '&' over '*' seen so far; a positive value means the
  // innermost subscript only has its address taken, so the one-past-the-end
  // element is permitted.
  int AllowOnePastEnd = 0;
  while (expr) {
    expr = expr->IgnoreParenImpCasts();
    switch (expr->getStmtClass()) {
    case Stmt::ArraySubscriptExprClass: {
      const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
      CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
                       AllowOnePastEnd > 0);
      // Continue into the base to handle nested subscripts (a[i][j]).
      expr = ASE->getBase();
      break;
    }
    case Stmt::MemberExprClass: {
      expr = cast<MemberExpr>(expr)->getBase();
      break;
    }
    case Stmt::OMPArraySectionExprClass: {
      // Only the lower bound of an array section is a concrete index we can
      // check.
      const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
      if (ASE->getLowerBound())
        CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
                         /*ASE=*/nullptr, AllowOnePastEnd > 0);
      return;
    }
    case Stmt::UnaryOperatorClass: {
      // Only unwrap the * and & unary operators
      const UnaryOperator *UO = cast<UnaryOperator>(expr);
      expr = UO->getSubExpr();
      switch (UO->getOpcode()) {
      case UO_AddrOf:
        AllowOnePastEnd++;
        break;
      case UO_Deref:
        AllowOnePastEnd--;
        break;
      default:
        return;
      }
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      // Check both arms independently; each restarts the walk.
      const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
      if (const Expr *lhs = cond->getLHS())
        CheckArrayAccess(lhs);
      if (const Expr *rhs = cond->getRHS())
        CheckArrayAccess(rhs);
      return;
    }
    case Stmt::CXXOperatorCallExprClass: {
      const auto *OCE = cast<CXXOperatorCallExpr>(expr);
      for (const auto *Arg : OCE->arguments())
        CheckArrayAccess(Arg);
      return;
    }
    default:
      return;
    }
  }
}
/// Walk an Objective-C message receiver (or similar expression) looking for
/// the strongly-retained owner that could participate in a retain cycle.
///
/// \param owner [out] filled in with the owning variable and source
///        locations when true is returned.
/// \returns true if a strong owner was found.
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      // Only look through value-preserving casts; anything else breaks the
      // ownership chain.
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      // Only a __strong ivar can keep the cycle alive.
      ObjCIvarDecl *ivar = ref->getDecl();
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      // Ownership goes through the ivar, so it is indirect.
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                          ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      // The property must retain its value, either directly or through a
      // __strong backing ivar.
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
          return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        // [super prop...]: the owner is the current method's 'self'.
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      // Keep walking into the receiver of the property access.
      e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
                              ->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}
Visit(OVE->getSourceExpr()); 16172 } 16173 16174 void VisitBinaryOperator(BinaryOperator *BinOp) { 16175 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 16176 return; 16177 Expr *LHS = BinOp->getLHS(); 16178 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 16179 if (DRE->getDecl() != Variable) 16180 return; 16181 if (Expr *RHS = BinOp->getRHS()) { 16182 RHS = RHS->IgnoreParenCasts(); 16183 Optional<llvm::APSInt> Value; 16184 VarWillBeReased = 16185 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 16186 *Value == 0); 16187 } 16188 } 16189 } 16190 }; 16191 16192 } // namespace 16193 16194 /// Check whether the given argument is a block which captures a 16195 /// variable. 16196 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 16197 assert(owner.Variable && owner.Loc.isValid()); 16198 16199 e = e->IgnoreParenCasts(); 16200 16201 // Look through [^{...} copy] and Block_copy(^{...}). 16202 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 16203 Selector Cmd = ME->getSelector(); 16204 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 16205 e = ME->getInstanceReceiver(); 16206 if (!e) 16207 return nullptr; 16208 e = e->IgnoreParenCasts(); 16209 } 16210 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 16211 if (CE->getNumArgs() == 1) { 16212 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 16213 if (Fn) { 16214 const IdentifierInfo *FnI = Fn->getIdentifier(); 16215 if (FnI && FnI->isStr("_Block_copy")) { 16216 e = CE->getArg(0)->IgnoreParenCasts(); 16217 } 16218 } 16219 } 16220 } 16221 16222 BlockExpr *block = dyn_cast<BlockExpr>(e); 16223 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 16224 return nullptr; 16225 16226 FindCaptureVisitor visitor(S.Context, owner.Variable); 16227 visitor.Visit(block->getBlockDecl()->getBody()); 16228 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 16229 } 16230 16231 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 16232 RetainCycleOwner &owner) { 16233 assert(capturer); 16234 assert(owner.Variable && owner.Loc.isValid()); 16235 16236 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 16237 << owner.Variable << capturer->getSourceRange(); 16238 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 16239 << owner.Indirect << owner.Range; 16240 } 16241 16242 /// Check for a keyword selector that starts with the word 'add' or 16243 /// 'set'. 16244 static bool isSetterLikeSelector(Selector sel) { 16245 if (sel.isUnarySelector()) return false; 16246 16247 StringRef str = sel.getNameForSlot(0); 16248 while (!str.empty() && str.front() == '_') str = str.substr(1); 16249 if (str.startswith("set")) 16250 str = str.substr(3); 16251 else if (str.startswith("add")) { 16252 // Specially allow 'addOperationWithBlock:'. 16253 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 16254 return false; 16255 str = str.substr(3); 16256 } 16257 else 16258 return false; 16259 16260 if (str.empty()) return true; 16261 return !isLowercase(str.front()); 16262 } 16263 16264 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 16265 ObjCMessageExpr *Message) { 16266 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 16267 Message->getReceiverInterface(), 16268 NSAPI::ClassId_NSMutableArray); 16269 if (!IsMutableArray) { 16270 return None; 16271 } 16272 16273 Selector Sel = Message->getSelector(); 16274 16275 Optional<NSAPI::NSArrayMethodKind> MKOpt = 16276 S.NSAPIObj->getNSArrayMethodKind(Sel); 16277 if (!MKOpt) { 16278 return None; 16279 } 16280 16281 NSAPI::NSArrayMethodKind MK = *MKOpt; 16282 16283 switch (MK) { 16284 case NSAPI::NSMutableArr_addObject: 16285 case NSAPI::NSMutableArr_insertObjectAtIndex: 16286 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 16287 return 0; 16288 case NSAPI::NSMutableArr_replaceObjectAtIndex: 16289 return 1; 16290 16291 
default: 16292 return None; 16293 } 16294 16295 return None; 16296 } 16297 16298 static 16299 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 16300 ObjCMessageExpr *Message) { 16301 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 16302 Message->getReceiverInterface(), 16303 NSAPI::ClassId_NSMutableDictionary); 16304 if (!IsMutableDictionary) { 16305 return None; 16306 } 16307 16308 Selector Sel = Message->getSelector(); 16309 16310 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 16311 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 16312 if (!MKOpt) { 16313 return None; 16314 } 16315 16316 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 16317 16318 switch (MK) { 16319 case NSAPI::NSMutableDict_setObjectForKey: 16320 case NSAPI::NSMutableDict_setValueForKey: 16321 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 16322 return 0; 16323 16324 default: 16325 return None; 16326 } 16327 16328 return None; 16329 } 16330 16331 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 16332 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 16333 Message->getReceiverInterface(), 16334 NSAPI::ClassId_NSMutableSet); 16335 16336 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 16337 Message->getReceiverInterface(), 16338 NSAPI::ClassId_NSMutableOrderedSet); 16339 if (!IsMutableSet && !IsMutableOrderedSet) { 16340 return None; 16341 } 16342 16343 Selector Sel = Message->getSelector(); 16344 16345 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 16346 if (!MKOpt) { 16347 return None; 16348 } 16349 16350 NSAPI::NSSetMethodKind MK = *MKOpt; 16351 16352 switch (MK) { 16353 case NSAPI::NSMutableSet_addObject: 16354 case NSAPI::NSOrderedSet_setObjectAtIndex: 16355 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 16356 case NSAPI::NSOrderedSet_insertObjectAtIndex: 16357 return 0; 16358 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 16359 return 1; 16360 } 16361 16362 return None; 16363 } 
/// Warn when an Objective-C collection message would insert the collection
/// into itself (e.g. [arr addObject:arr]), which produces a retain cycle /
/// infinite recursion in containers that retain their elements.
void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  // Only instance messages can mutate a container instance.
  if (!Message->isInstanceMessage()) {
    return;
  }

  Optional<int> ArgOpt;

  // Ask each known mutable Foundation container family which argument (if
  // any) is the inserted object; bail out if this isn't an inserting message.
  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  // Look through pseudo-object opaque values to the underlying expression.
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    // [super addObject:self] still inserts the same object.
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    // Receiver and argument naming the same variable...
    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          // 'self' has no interesting declaration site; only note the
          // declaration for ordinary variables.
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    // ...or the same instance variable.
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}

/// Check a message send to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
      return;
  } else {
    // A 'super' receiver is strongly owned by 'self'.
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    owner.Variable = getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(*this, capturer, owner);
    }
  }
}

/// Check a property assign to see if it's likely to cause a retain cycle.
16461 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 16462 RetainCycleOwner owner; 16463 if (!findRetainCycleOwner(*this, receiver, owner)) 16464 return; 16465 16466 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 16467 diagnoseRetainCycle(*this, capturer, owner); 16468 } 16469 16470 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 16471 RetainCycleOwner Owner; 16472 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 16473 return; 16474 16475 // Because we don't have an expression for the variable, we have to set the 16476 // location explicitly here. 16477 Owner.Loc = Var->getLocation(); 16478 Owner.Range = Var->getSourceRange(); 16479 16480 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16481 diagnoseRetainCycle(*this, Capturer, Owner); 16482 } 16483 16484 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16485 Expr *RHS, bool isProperty) { 16486 // Check if RHS is an Objective-C object literal, which also can get 16487 // immediately zapped in a weak reference. Note that we explicitly 16488 // allow ObjCStringLiterals, since those are designed to never really die. 16489 RHS = RHS->IgnoreParenImpCasts(); 16490 16491 // This enum needs to match with the 'select' in 16492 // warn_objc_arc_literal_assign (off-by-1). 16493 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16494 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16495 return false; 16496 16497 S.Diag(Loc, diag::warn_arc_literal_assign) 16498 << (unsigned) Kind 16499 << (isProperty ? 0 : 1) 16500 << RHS->getSourceRange(); 16501 16502 return true; 16503 } 16504 16505 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16506 Qualifiers::ObjCLifetime LT, 16507 Expr *RHS, bool isProperty) { 16508 // Strip off any implicit cast added to get to the one ARC-specific. 
16509 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16510 if (cast->getCastKind() == CK_ARCConsumeObject) { 16511 S.Diag(Loc, diag::warn_arc_retained_assign) 16512 << (LT == Qualifiers::OCL_ExplicitNone) 16513 << (isProperty ? 0 : 1) 16514 << RHS->getSourceRange(); 16515 return true; 16516 } 16517 RHS = cast->getSubExpr(); 16518 } 16519 16520 if (LT == Qualifiers::OCL_Weak && 16521 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16522 return true; 16523 16524 return false; 16525 } 16526 16527 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16528 QualType LHS, Expr *RHS) { 16529 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16530 16531 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16532 return false; 16533 16534 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16535 return true; 16536 16537 return false; 16538 } 16539 16540 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16541 Expr *LHS, Expr *RHS) { 16542 QualType LHSType; 16543 // PropertyRef on LHS type need be directly obtained from 16544 // its declaration as it has a PseudoType. 16545 ObjCPropertyRefExpr *PRE 16546 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16547 if (PRE && !PRE->isImplicitProperty()) { 16548 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16549 if (PD) 16550 LHSType = PD->getType(); 16551 } 16552 16553 if (LHSType.isNull()) 16554 LHSType = LHS->getType(); 16555 16556 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16557 16558 if (LT == Qualifiers::OCL_Weak) { 16559 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16560 getCurFunction()->markSafeWeakUse(LHS); 16561 } 16562 16563 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16564 return; 16565 16566 // FIXME. Check for other life times. 
16567 if (LT != Qualifiers::OCL_None) 16568 return; 16569 16570 if (PRE) { 16571 if (PRE->isImplicitProperty()) 16572 return; 16573 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16574 if (!PD) 16575 return; 16576 16577 unsigned Attributes = PD->getPropertyAttributes(); 16578 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16579 // when 'assign' attribute was not explicitly specified 16580 // by user, ignore it and rely on property type itself 16581 // for lifetime info. 16582 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16583 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16584 LHSType->isObjCRetainableType()) 16585 return; 16586 16587 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16588 if (cast->getCastKind() == CK_ARCConsumeObject) { 16589 Diag(Loc, diag::warn_arc_retained_property_assign) 16590 << RHS->getSourceRange(); 16591 return; 16592 } 16593 RHS = cast->getSubExpr(); 16594 } 16595 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16596 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16597 return; 16598 } 16599 } 16600 } 16601 16602 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16603 16604 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16605 SourceLocation StmtLoc, 16606 const NullStmt *Body) { 16607 // Do not warn if the body is a macro that expands to nothing, e.g: 16608 // 16609 // #define CALL(x) 16610 // if (condition) 16611 // CALL(0); 16612 if (Body->hasLeadingEmptyMacro()) 16613 return false; 16614 16615 // Get line numbers of statement and body. 
16616 bool StmtLineInvalid; 16617 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 16618 &StmtLineInvalid); 16619 if (StmtLineInvalid) 16620 return false; 16621 16622 bool BodyLineInvalid; 16623 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 16624 &BodyLineInvalid); 16625 if (BodyLineInvalid) 16626 return false; 16627 16628 // Warn if null statement and body are on the same line. 16629 if (StmtLine != BodyLine) 16630 return false; 16631 16632 return true; 16633 } 16634 16635 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 16636 const Stmt *Body, 16637 unsigned DiagID) { 16638 // Since this is a syntactic check, don't emit diagnostic for template 16639 // instantiations, this just adds noise. 16640 if (CurrentInstantiationScope) 16641 return; 16642 16643 // The body should be a null statement. 16644 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16645 if (!NBody) 16646 return; 16647 16648 // Do the usual checks. 16649 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16650 return; 16651 16652 Diag(NBody->getSemiLoc(), DiagID); 16653 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16654 } 16655 16656 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 16657 const Stmt *PossibleBody) { 16658 assert(!CurrentInstantiationScope); // Ensured by caller 16659 16660 SourceLocation StmtLoc; 16661 const Stmt *Body; 16662 unsigned DiagID; 16663 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 16664 StmtLoc = FS->getRParenLoc(); 16665 Body = FS->getBody(); 16666 DiagID = diag::warn_empty_for_body; 16667 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 16668 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 16669 Body = WS->getBody(); 16670 DiagID = diag::warn_empty_while_body; 16671 } else 16672 return; // Neither `for' nor `while'. 16673 16674 // The body should be a null statement. 
16675 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16676 if (!NBody) 16677 return; 16678 16679 // Skip expensive checks if diagnostic is disabled. 16680 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 16681 return; 16682 16683 // Do the usual checks. 16684 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16685 return; 16686 16687 // `for(...);' and `while(...);' are popular idioms, so in order to keep 16688 // noise level low, emit diagnostics only if for/while is followed by a 16689 // CompoundStmt, e.g.: 16690 // for (int i = 0; i < n; i++); 16691 // { 16692 // a(i); 16693 // } 16694 // or if for/while is followed by a statement with more indentation 16695 // than for/while itself: 16696 // for (int i = 0; i < n; i++); 16697 // a(i); 16698 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 16699 if (!ProbableTypo) { 16700 bool BodyColInvalid; 16701 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 16702 PossibleBody->getBeginLoc(), &BodyColInvalid); 16703 if (BodyColInvalid) 16704 return; 16705 16706 bool StmtColInvalid; 16707 unsigned StmtCol = 16708 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 16709 if (StmtColInvalid) 16710 return; 16711 16712 if (BodyCol > StmtCol) 16713 ProbableTypo = true; 16714 } 16715 16716 if (ProbableTypo) { 16717 Diag(NBody->getSemiLoc(), DiagID); 16718 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16719 } 16720 } 16721 16722 //===--- CHECK: Warn on self move with std::move. -------------------------===// 16723 16724 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 16725 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 16726 SourceLocation OpLoc) { 16727 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 16728 return; 16729 16730 if (inTemplateInstantiation()) 16731 return; 16732 16733 // Strip parens and casts away. 
16734 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 16735 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 16736 16737 // Check for a call expression 16738 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 16739 if (!CE || CE->getNumArgs() != 1) 16740 return; 16741 16742 // Check for a call to std::move 16743 if (!CE->isCallToStdMove()) 16744 return; 16745 16746 // Get argument from std::move 16747 RHSExpr = CE->getArg(0); 16748 16749 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 16750 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 16751 16752 // Two DeclRefExpr's, check that the decls are the same. 16753 if (LHSDeclRef && RHSDeclRef) { 16754 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16755 return; 16756 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16757 RHSDeclRef->getDecl()->getCanonicalDecl()) 16758 return; 16759 16760 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16761 << LHSExpr->getSourceRange() 16762 << RHSExpr->getSourceRange(); 16763 return; 16764 } 16765 16766 // Member variables require a different approach to check for self moves. 16767 // MemberExpr's are the same if every nested MemberExpr refers to the same 16768 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 16769 // the base Expr's are CXXThisExpr's. 
16770 const Expr *LHSBase = LHSExpr; 16771 const Expr *RHSBase = RHSExpr; 16772 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 16773 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 16774 if (!LHSME || !RHSME) 16775 return; 16776 16777 while (LHSME && RHSME) { 16778 if (LHSME->getMemberDecl()->getCanonicalDecl() != 16779 RHSME->getMemberDecl()->getCanonicalDecl()) 16780 return; 16781 16782 LHSBase = LHSME->getBase(); 16783 RHSBase = RHSME->getBase(); 16784 LHSME = dyn_cast<MemberExpr>(LHSBase); 16785 RHSME = dyn_cast<MemberExpr>(RHSBase); 16786 } 16787 16788 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 16789 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 16790 if (LHSDeclRef && RHSDeclRef) { 16791 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16792 return; 16793 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16794 RHSDeclRef->getDecl()->getCanonicalDecl()) 16795 return; 16796 16797 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16798 << LHSExpr->getSourceRange() 16799 << RHSExpr->getSourceRange(); 16800 return; 16801 } 16802 16803 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 16804 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16805 << LHSExpr->getSourceRange() 16806 << RHSExpr->getSourceRange(); 16807 } 16808 16809 //===--- Layout compatibility ----------------------------------------------// 16810 16811 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 16812 16813 /// Check if two enumeration types are layout-compatible. 16814 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 16815 // C++11 [dcl.enum] p8: 16816 // Two enumeration types are layout-compatible if they have the same 16817 // underlying type. 16818 return ED1->isComplete() && ED2->isComplete() && 16819 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 16820 } 16821 16822 /// Check if two fields are layout-compatible. 
/// Check if two fields are layout-compatible: same type (recursively) and,
/// for bit-fields, the same bit width.
static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
                               FieldDecl *Field2) {
  if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
    return false;

  // Both must be bit-fields, or neither.
  if (Field1->isBitField() != Field2->isBitField())
    return false;

  if (Field1->isBitField()) {
    // Make sure that the bit-fields are the same length.
    unsigned Bits1 = Field1->getBitWidthValue(C);
    unsigned Bits2 = Field2->getBitWidthValue(C);

    if (Bits1 != Bits2)
      return false;
  }

  return true;
}

/// Check if two standard-layout structs are layout-compatible.
/// (C++11 [class.mem] p17)
static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
                                     RecordDecl *RD2) {
  // If both records are C++ classes, check that base classes match.
  if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
    // If one of records is a CXXRecordDecl we are in C++ mode,
    // thus the other one is a CXXRecordDecl, too.
    const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
    // Check number of base classes.
    if (D1CXX->getNumBases() != D2CXX->getNumBases())
      return false;

    // Check the base classes pairwise, in declaration order.
    for (CXXRecordDecl::base_class_const_iterator
             Base1 = D1CXX->bases_begin(),
             BaseEnd1 = D1CXX->bases_end(),
             Base2 = D2CXX->bases_begin();
         Base1 != BaseEnd1;
         ++Base1, ++Base2) {
      if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
        return false;
    }
  } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
    // If only RD2 is a C++ class, it should have zero base classes.
    if (D2CXX->getNumBases() > 0)
      return false;
  }

  // Check the fields: pairwise compatible, and the same number of fields
  // on both sides.
  RecordDecl::field_iterator Field2 = RD2->field_begin(),
                             Field2End = RD2->field_end(),
                             Field1 = RD1->field_begin(),
                             Field1End = RD1->field_end();
  for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
    if (!isLayoutCompatible(C, *Field1, *Field2))
      return false;
  }
  if (Field1 != Field1End || Field2 != Field2End)
    return false;

  return true;
}

/// Check if two standard-layout unions are layout-compatible.
/// (C++11 [class.mem] p18)
static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
                                    RecordDecl *RD2) {
  // Unions are layout-compatible if their members can be matched up
  // one-to-one (in any order) as layout-compatible pairs.
  llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
  for (auto *Field2 : RD2->fields())
    UnmatchedFields.insert(Field2);

  for (auto *Field1 : RD1->fields()) {
    llvm::SmallPtrSet<FieldDecl *, 8>::iterator
        I = UnmatchedFields.begin(),
        E = UnmatchedFields.end();

    // Find some not-yet-matched RD2 field compatible with Field1.
    for ( ; I != E; ++I) {
      if (isLayoutCompatible(C, Field1, *I)) {
        bool Result = UnmatchedFields.erase(*I);
        (void) Result;
        assert(Result);
        break;
      }
    }
    if (I == E)
      return false;
  }

  // Every RD2 field must have been matched as well.
  return UnmatchedFields.empty();
}

/// Dispatch record layout-compatibility to the union or struct rule.
static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
                               RecordDecl *RD2) {
  if (RD1->isUnion() != RD2->isUnion())
    return false;

  if (RD1->isUnion())
    return isLayoutCompatibleUnion(C, RD1, RD2);
  else
    return isLayoutCompatibleStruct(C, RD1, RD2);
}

/// Check if two types are layout-compatible in C++11 sense.
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
  if (T1.isNull() || T2.isNull())
    return false;

  // C++11 [basic.types] p11:
  //   If two types T1 and T2 are the same type, then T1 and T2 are
  //   layout-compatible types.
  if (C.hasSameType(T1, T2))
    return true;

  // Compare canonical, unqualified forms from here on.
  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    // Only standard-layout records participate in layout compatibility.
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}

//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//

/// Given a type tag expression find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
/// Walk \p TypeExpr looking for the type tag: either a DeclRefExpr naming a
/// declaration (returned via \p *VD) or an integer "magic value" (returned
/// via \p *MagicValue). Returns true if either form was found.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue,
                            bool isConstantEvaluated) {
  // Iteratively strip wrappers until we reach a terminal form.
  while(true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      // Look through '&' and '*' (e.g. &tag, *tag_ptr).
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      // Terminal: a named type tag declaration.
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      // Terminal: a literal magic value (must fit in 64 bits).
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      } else
        return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      // If the condition is a compile-time constant, follow the chosen arm.
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
                                                     isConstantEvaluated)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      // A comma expression yields its right-hand side.
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}

/// Retrieve the C type corresponding to type tag TypeExpr.
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
/// kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    // The tag expression referred to a declaration: the attribute on that
    // declaration (if any) is authoritative, so the magic-value map is not
    // consulted in this path.
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  // Otherwise fall back to the magic values registered via
  // Sema::RegisterTypeTagForDatatype, keyed by (kind, value).
  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

/// Record that, for type-tag kind \p ArgumentKind, the integer constant
/// \p MagicValue corresponds to C type \p Type. The map is allocated lazily
/// on first registration.
void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

/// Returns true if one builtin type is plain `char' and the other is the
/// `signed char'/`unsigned char' it is equivalent to under the current char
/// signedness (Char_S pairs with SChar, Char_U with UChar).
static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}

/// Check a call against one argument_with_type_tag attribute: resolve the
/// type-tag argument to a C type and diagnose when the tagged argument's type
/// does not match (exactly, or layout-compatibly, per the registration).
void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluated())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(Context,
                                             Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    // Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else
    // Layout-compatible mode: compare pointees for pointer attributes,
    // otherwise the types themselves.
    if (IsPointerAttr)
      mismatch = !isLayoutCompatible(Context,
                                     ArgumentType->getPointeeType(),
                                     RequiredType->getPointeeType());
    else
      mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

/// Remember a member access whose address was taken so it can be diagnosed
/// later (see DiagnoseMisalignedMembers) unless it is discarded first.
void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

/// Emit warn_taking_address_of_packed_member for every pending entry, then
/// clear the list. For anonymous records, report the typedef name if present.
void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

/// Drop a pending misaligned-member warning for `&member` when the context it
/// is used in (integer, or a pointer whose pointee's alignment requirement is
/// no stricter than the member's actual alignment) makes it harmless.
void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

/// Walk a (possibly nested) member expression; if some record/field in the
/// chain is packed and the synthesized offset or effective alignment falls
/// below what the accessed type expects, invoke \p Action with the culprit
/// field and the reduced alignment.
void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset, but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit of this.
    // Intuitively, watching the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type) but some packed attribute in that chain has reduced the alignment.
    // It may happen that another packed structure increases it again. But if
    // we are here such increase has not been enough. So pointing the first
    // FieldDecl that either is packed or else its RecordDecl is,
    // seems reasonable.
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

/// Entry point for -Waddress-of-packed-member style checking on \p rhs:
/// queue any misaligned packed-member access for later diagnosis.
void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}

// Check if \p Ty is a valid type for the elementwise math builtins. If it is
// not a valid type, emit an error message and return true. Otherwise return
// false.
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                        QualType Ty) {
  if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
    S.Diag(Loc, diag::err_builtin_invalid_arg_type)
        << 1 << /* vector, integer or float ty*/ 0 << Ty;
    return true;
  }
  return false;
}

/// Type-check a one-argument elementwise math builtin call: apply the usual
/// unary conversions, validate the element type, and give the call the
/// argument's type. Returns true on error.
bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  QualType TyA = A.get()->getType();

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setType(TyA);
  return false;
}

/// Type-check a two-argument elementwise math builtin call: both arguments
/// must convert to the same (vector/matrix-element-valid) type, which becomes
/// the call's type. Returns true on error.
bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult A = TheCall->getArg(0);
  ExprResult B = TheCall->getArg(1);
  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res =
      UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
  if (A.isInvalid() || B.isInvalid())
    return true;

  QualType TyA = A.get()->getType();
  QualType TyB = B.get()->getType();

  if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
    return Diag(A.get()->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << TyA << TyB;

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setArg(0, A.get());
  TheCall->setArg(1, B.get());
  TheCall->setType(Res);
  return false;
}

/// Type-check a one-argument reduction builtin call: only arity and unary
/// conversions are checked here (note: unlike the elementwise variant, the
/// call's result type is not set by this function). Returns true on error.
bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  return false;
}

/// Type-check __builtin_matrix_transpose: the single argument must be a
/// constant matrix; the result is the matrix type with rows/columns swapped.
ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                            ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 1))
    return ExprError();

  ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
  if (MatrixArg.isInvalid())
    return MatrixArg;
  Expr *Matrix = MatrixArg.get();

  auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
  if (!MType) {
    Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /* matrix ty*/ 1 << Matrix->getType();
    return ExprError();
  }

  // Create returned matrix type by swapping rows and columns of the argument
  // matrix type.
  QualType ResultType = Context.getConstantMatrixType(
      MType->getElementType(), MType->getNumColumns(), MType->getNumRows());

  // Change the return type to the type of the returned matrix.
  TheCall->setType(ResultType);

  // Update call argument to use the possibly converted matrix argument.
  TheCall->setArg(0, Matrix);
  return CallResult;
}

// Get and verify the matrix dimensions: the expression must be an integer
// constant within ConstantMatrixType's valid dimension range; otherwise a
// diagnostic is emitted and None is returned.
static llvm::Optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
  SourceLocation ErrorPos;
  Optional<llvm::APSInt> Value =
      Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
  if (!Value) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
        << Name;
    return {};
  }
  uint64_t Dim = Value->getZExtValue();
  if (!ConstantMatrixType::isDimensionValid(Dim)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
        << Name << ConstantMatrixType::getMaxElementsPerDimension();
    return {};
  }
  return Dim;
}

/// Type-check __builtin_matrix_column_major_load(ptr, rows, cols, stride):
/// validates the pointer's element type, the constant row/column dimensions,
/// and that the stride is not smaller than the row count; the call's type
/// becomes the loaded ConstantMatrixType.
ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  bool ArgError = false;

  // Check pointer argument.
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << PtrArgIdx + 1 << /* pointer to element ty*/ 2
          << PtrExpr->getType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversion to row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy, until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  llvm::Optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  llvm::Optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  // A column-major stride smaller than the row count would make columns
  // overlap; only diagnosable when the stride is a constant.
  if (MaybeRows) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}

/// Type-check __builtin_matrix_column_major_store(matrix, ptr, stride):
/// the pointer must reference a non-const element type matching the matrix's
/// element type, and a constant stride must be at least the row count.
ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    // Storing through a pointer-to-const is an error.
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
  if (MatrixTy) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}

/// \brief Enforce the bounds of a TCB
/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB as marked by the enforce_tcb
/// and enforce_tcb_leaf attributes.
void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
                               const NamedDecl *Callee) {
  const NamedDecl *Caller = getCurFunctionOrMethodDecl();

  // Only calls made from within a function that is itself in a TCB are
  // checked.
  if (!Caller || !Caller->hasAttr<EnforceTCBAttr>())
    return;

  // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
  // all TCBs the callee is a part of.
  llvm::StringSet<> CalleeTCBs;
  for_each(Callee->specific_attrs<EnforceTCBAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
  for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });

  // Go through the TCBs the caller is a part of and emit warnings if Caller
  // is in a TCB that the Callee is not.
  for_each(
      Caller->specific_attrs<EnforceTCBAttr>(),
      [&](const auto *A) {
        StringRef CallerTCB = A->getTCBName();
        if (CalleeTCBs.count(CallerTCB) == 0) {
          this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation)
              << Callee << CallerTCB;
        }
      });
}