//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/None.h" 71 #include "llvm/ADT/Optional.h" 72 #include "llvm/ADT/STLExtras.h" 73 #include "llvm/ADT/SmallBitVector.h" 74 #include "llvm/ADT/SmallPtrSet.h" 75 #include "llvm/ADT/SmallString.h" 76 #include "llvm/ADT/SmallVector.h" 77 #include "llvm/ADT/StringRef.h" 78 #include "llvm/ADT/StringSet.h" 79 #include "llvm/ADT/StringSwitch.h" 80 #include "llvm/ADT/Triple.h" 81 #include "llvm/Support/AtomicOrdering.h" 82 #include "llvm/Support/Casting.h" 83 #include "llvm/Support/Compiler.h" 84 #include "llvm/Support/ConvertUTF.h" 85 #include "llvm/Support/ErrorHandling.h" 86 #include "llvm/Support/Format.h" 87 #include "llvm/Support/Locale.h" 88 #include "llvm/Support/MathExtras.h" 89 #include "llvm/Support/SaveAndRestore.h" 90 #include "llvm/Support/raw_ostream.h" 91 #include <algorithm> 92 #include <bitset> 93 #include <cassert> 94 #include <cctype> 95 #include <cstddef> 96 #include <cstdint> 97 #include <functional> 98 #include <limits> 99 #include <string> 100 #include <tuple> 101 #include <utility> 102 103 using namespace clang; 104 using namespace sema; 105 106 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 107 unsigned ByteNo) const { 108 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 109 Context.getTargetInfo()); 110 } 111 112 /// Checks that a call expression's argument count is at least the desired 113 /// number. This is useful when doing custom type-checking on a variadic 114 /// function. Returns true on error. 115 static bool checkArgCountAtLeast(Sema &S, CallExpr *Call, 116 unsigned MinArgCount) { 117 unsigned ArgCount = Call->getNumArgs(); 118 if (ArgCount >= MinArgCount) 119 return false; 120 121 return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args) 122 << 0 /*function call*/ << MinArgCount << ArgCount 123 << Call->getSourceRange(); 124 } 125 126 /// Checks that a call expression's argument count is the desired number. 127 /// This is useful when doing custom type-checking. Returns true on error. 128 static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) { 129 unsigned ArgCount = Call->getNumArgs(); 130 if (ArgCount == DesiredArgCount) 131 return false; 132 133 if (checkArgCountAtLeast(S, Call, DesiredArgCount)) 134 return true; 135 assert(ArgCount > DesiredArgCount && "should have diagnosed this"); 136 137 // Highlight all the excess arguments. 138 SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(), 139 Call->getArg(ArgCount - 1)->getEndLoc()); 140 141 return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args) 142 << 0 /*function call*/ << DesiredArgCount << ArgCount 143 << Call->getArg(1)->getSourceRange(); 144 } 145 146 /// Check that the first argument to __builtin_annotation is an integer 147 /// and the second argument is a non-wide string literal. 148 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 149 if (checkArgCount(S, TheCall, 2)) 150 return true; 151 152 // First argument should be an integer. 
153 Expr *ValArg = TheCall->getArg(0); 154 QualType Ty = ValArg->getType(); 155 if (!Ty->isIntegerType()) { 156 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 157 << ValArg->getSourceRange(); 158 return true; 159 } 160 161 // Second argument should be a constant string. 162 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 163 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 164 if (!Literal || !Literal->isAscii()) { 165 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 166 << StrArg->getSourceRange(); 167 return true; 168 } 169 170 TheCall->setType(Ty); 171 return false; 172 } 173 174 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 175 // We need at least one argument. 176 if (TheCall->getNumArgs() < 1) { 177 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 178 << 0 << 1 << TheCall->getNumArgs() 179 << TheCall->getCallee()->getSourceRange(); 180 return true; 181 } 182 183 // All arguments should be wide string literals. 184 for (Expr *Arg : TheCall->arguments()) { 185 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); 186 if (!Literal || !Literal->isWide()) { 187 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str) 188 << Arg->getSourceRange(); 189 return true; 190 } 191 } 192 193 return false; 194 } 195 196 /// Check that the argument to __builtin_addressof is a glvalue, and set the 197 /// result type to the corresponding pointer type. 198 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { 199 if (checkArgCount(S, TheCall, 1)) 200 return true; 201 202 ExprResult Arg(TheCall->getArg(0)); 203 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc()); 204 if (ResultType.isNull()) 205 return true; 206 207 TheCall->setArg(0, Arg.get()); 208 TheCall->setType(ResultType); 209 return false; 210 } 211 212 /// Check that the argument to __builtin_function_start is a function. 213 static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) { 214 if (checkArgCount(S, TheCall, 1)) 215 return true; 216 217 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 218 if (Arg.isInvalid()) 219 return true; 220 221 TheCall->setArg(0, Arg.get()); 222 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>( 223 Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext())); 224 225 if (!FD) { 226 S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type) 227 << TheCall->getSourceRange(); 228 return true; 229 } 230 231 return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, 232 TheCall->getBeginLoc()); 233 } 234 235 /// Check the number of arguments and set the result type to 236 /// the argument type. 237 static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) { 238 if (checkArgCount(S, TheCall, 1)) 239 return true; 240 241 TheCall->setType(TheCall->getArg(0)->getType()); 242 return false; 243 } 244 245 /// Check that the value argument for __builtin_is_aligned(value, alignment) and 246 /// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer 247 /// type (but not a function pointer) and that the alignment is a power-of-two. 
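/// For example (illustrative): __builtin_is_aligned(p, 16) yields a bool,
/// __builtin_align_up(p, 16) yields a value of p's (potentially decayed) type,
/// and an alignment operand that is not a power of two (e.g. 3) is rejected.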
248 static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) { 249 if (checkArgCount(S, TheCall, 2)) 250 return true; 251 252 clang::Expr *Source = TheCall->getArg(0); 253 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned; 254 255 auto IsValidIntegerType = [](QualType Ty) { 256 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType(); 257 }; 258 QualType SrcTy = Source->getType(); 259 // We should also be able to use it with arrays (but not functions!). 260 if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) { 261 SrcTy = S.Context.getDecayedType(SrcTy); 262 } 263 if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) || 264 SrcTy->isFunctionPointerType()) { 265 // FIXME: this is not quite the right error message since we don't allow 266 // floating point types, or member pointers. 267 S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand) 268 << SrcTy; 269 return true; 270 } 271 272 clang::Expr *AlignOp = TheCall->getArg(1); 273 if (!IsValidIntegerType(AlignOp->getType())) { 274 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int) 275 << AlignOp->getType(); 276 return true; 277 } 278 Expr::EvalResult AlignResult; 279 unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1; 280 // We can't check validity of alignment if it is value dependent. 281 if (!AlignOp->isValueDependent() && 282 AlignOp->EvaluateAsInt(AlignResult, S.Context, 283 Expr::SE_AllowSideEffects)) { 284 llvm::APSInt AlignValue = AlignResult.Val.getInt(); 285 llvm::APSInt MaxValue( 286 llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits)); 287 if (AlignValue < 1) { 288 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1; 289 return true; 290 } 291 if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) { 292 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big) 293 << toString(MaxValue, 10); 294 return true; 295 } 296 if (!AlignValue.isPowerOf2()) { 297 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two); 298 return true; 299 } 300 if (AlignValue == 1) { 301 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless) 302 << IsBooleanAlignBuiltin; 303 } 304 } 305 306 ExprResult SrcArg = S.PerformCopyInitialization( 307 InitializedEntity::InitializeParameter(S.Context, SrcTy, false), 308 SourceLocation(), Source); 309 if (SrcArg.isInvalid()) 310 return true; 311 TheCall->setArg(0, SrcArg.get()); 312 ExprResult AlignArg = 313 S.PerformCopyInitialization(InitializedEntity::InitializeParameter( 314 S.Context, AlignOp->getType(), false), 315 SourceLocation(), AlignOp); 316 if (AlignArg.isInvalid()) 317 return true; 318 TheCall->setArg(1, AlignArg.get()); 319 // For align_up/align_down, the return type is the same as the (potentially 320 // decayed) argument type including qualifiers. For is_aligned(), the result 321 // is always bool. 322 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy); 323 return false; 324 } 325 326 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall, 327 unsigned BuiltinID) { 328 if (checkArgCount(S, TheCall, 3)) 329 return true; 330 331 // First two arguments should be integers. 
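  // For example (illustrative), __builtin_add_overflow(a, b, &res) requires
  // 'a' and 'b' to be integers and 'res' to be a modifiable integer object.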
332 for (unsigned I = 0; I < 2; ++I) { 333 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I)); 334 if (Arg.isInvalid()) return true; 335 TheCall->setArg(I, Arg.get()); 336 337 QualType Ty = Arg.get()->getType(); 338 if (!Ty->isIntegerType()) { 339 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int) 340 << Ty << Arg.get()->getSourceRange(); 341 return true; 342 } 343 } 344 345 // Third argument should be a pointer to a non-const integer. 346 // IRGen correctly handles volatile, restrict, and address spaces, and 347 // the other qualifiers aren't possible. 348 { 349 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2)); 350 if (Arg.isInvalid()) return true; 351 TheCall->setArg(2, Arg.get()); 352 353 QualType Ty = Arg.get()->getType(); 354 const auto *PtrTy = Ty->getAs<PointerType>(); 355 if (!PtrTy || 356 !PtrTy->getPointeeType()->isIntegerType() || 357 PtrTy->getPointeeType().isConstQualified()) { 358 S.Diag(Arg.get()->getBeginLoc(), 359 diag::err_overflow_builtin_must_be_ptr_int) 360 << Ty << Arg.get()->getSourceRange(); 361 return true; 362 } 363 } 364 365 // Disallow signed bit-precise integer args larger than 128 bits to mul 366 // function until we improve backend support. 367 if (BuiltinID == Builtin::BI__builtin_mul_overflow) { 368 for (unsigned I = 0; I < 3; ++I) { 369 const auto Arg = TheCall->getArg(I); 370 // Third argument will be a pointer. 371 auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType(); 372 if (Ty->isBitIntType() && Ty->isSignedIntegerType() && 373 S.getASTContext().getIntWidth(Ty) > 128) 374 return S.Diag(Arg->getBeginLoc(), 375 diag::err_overflow_builtin_bit_int_max_size) 376 << 128; 377 } 378 } 379 380 return false; 381 } 382 383 namespace { 384 struct BuiltinDumpStructGenerator { 385 Sema &S; 386 CallExpr *TheCall; 387 SourceLocation Loc = TheCall->getBeginLoc(); 388 SmallVector<Expr *, 32> Actions; 389 DiagnosticErrorTrap ErrorTracker; 390 PrintingPolicy Policy; 391 392 BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall) 393 : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()), 394 Policy(S.Context.getPrintingPolicy()) { 395 Policy.AnonymousTagLocations = false; 396 } 397 398 Expr *makeOpaqueValueExpr(Expr *Inner) { 399 auto *OVE = new (S.Context) 400 OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(), 401 Inner->getObjectKind(), Inner); 402 Actions.push_back(OVE); 403 return OVE; 404 } 405 406 Expr *getStringLiteral(llvm::StringRef Str) { 407 Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str); 408 // Wrap the literal in parentheses to attach a source location. 409 return new (S.Context) ParenExpr(Loc, Loc, Lit); 410 } 411 412 bool callPrintFunction(llvm::StringRef Format, 413 llvm::ArrayRef<Expr *> Exprs = {}) { 414 SmallVector<Expr *, 8> Args; 415 assert(TheCall->getNumArgs() >= 2); 416 Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size()); 417 Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end()); 418 Args.push_back(getStringLiteral(Format)); 419 Args.insert(Args.end(), Exprs.begin(), Exprs.end()); 420 421 // Register a note to explain why we're performing the call. 
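    // The synthesized call has the shape (illustrative)
    //   Callback(ExtraUserArgs..., FormatString, FieldArgs...)
    // where Callback is the second argument of __builtin_dump_struct.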
422 Sema::CodeSynthesisContext Ctx; 423 Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall; 424 Ctx.PointOfInstantiation = Loc; 425 Ctx.CallArgs = Args.data(); 426 Ctx.NumCallArgs = Args.size(); 427 S.pushCodeSynthesisContext(Ctx); 428 429 ExprResult RealCall = 430 S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1), 431 TheCall->getBeginLoc(), Args, TheCall->getRParenLoc()); 432 433 S.popCodeSynthesisContext(); 434 if (!RealCall.isInvalid()) 435 Actions.push_back(RealCall.get()); 436 // Bail out if we've hit any errors, even if we managed to build the 437 // call. We don't want to produce more than one error. 438 return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred(); 439 } 440 441 Expr *getIndentString(unsigned Depth) { 442 if (!Depth) 443 return nullptr; 444 445 llvm::SmallString<32> Indent; 446 Indent.resize(Depth * Policy.Indentation, ' '); 447 return getStringLiteral(Indent); 448 } 449 450 Expr *getTypeString(QualType T) { 451 return getStringLiteral(T.getAsString(Policy)); 452 } 453 454 bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) { 455 llvm::raw_svector_ostream OS(Str); 456 457 // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather 458 // than trying to print a single character. 459 if (auto *BT = T->getAs<BuiltinType>()) { 460 switch (BT->getKind()) { 461 case BuiltinType::Bool: 462 OS << "%d"; 463 return true; 464 case BuiltinType::Char_U: 465 case BuiltinType::UChar: 466 OS << "%hhu"; 467 return true; 468 case BuiltinType::Char_S: 469 case BuiltinType::SChar: 470 OS << "%hhd"; 471 return true; 472 default: 473 break; 474 } 475 } 476 477 analyze_printf::PrintfSpecifier Specifier; 478 if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) { 479 // We were able to guess how to format this. 480 if (Specifier.getConversionSpecifier().getKind() == 481 analyze_printf::PrintfConversionSpecifier::sArg) { 482 // Wrap double-quotes around a '%s' specifier and limit its maximum 483 // length. Ideally we'd also somehow escape special characters in the 484 // contents but printf doesn't support that. 485 // FIXME: '%s' formatting is not safe in general. 486 OS << '"'; 487 Specifier.setPrecision(analyze_printf::OptionalAmount(32u)); 488 Specifier.toString(OS); 489 OS << '"'; 490 // FIXME: It would be nice to include a '...' if the string doesn't fit 491 // in the length limit. 492 } else { 493 Specifier.toString(OS); 494 } 495 return true; 496 } 497 498 if (T->isPointerType()) { 499 // Format all pointers with '%p'. 500 OS << "%p"; 501 return true; 502 } 503 504 return false; 505 } 506 507 bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) { 508 Expr *IndentLit = getIndentString(Depth); 509 Expr *TypeLit = getTypeString(S.Context.getRecordType(RD)); 510 if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit}) 511 : callPrintFunction("%s", {TypeLit})) 512 return true; 513 514 return dumpRecordValue(RD, E, IndentLit, Depth); 515 } 516 517 // Dump a record value. E should be a pointer or lvalue referring to an RD. 518 bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent, 519 unsigned Depth) { 520 // FIXME: Decide what to do if RD is a union. At least we should probably 521 // turn off printing `const char*` members with `%s`, because that is very 522 // likely to crash if that's not the active member. Whatever we decide, we 523 // should document it. 
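    // For a call on something like 'struct Point { int x, y; }', the generated
    // output looks roughly like this (illustrative):
    //   struct Point {
    //     int x = 1
    //     int y = 2
    //   }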
524 525 // Build an OpaqueValueExpr so we can refer to E more than once without 526 // triggering re-evaluation. 527 Expr *RecordArg = makeOpaqueValueExpr(E); 528 bool RecordArgIsPtr = RecordArg->getType()->isPointerType(); 529 530 if (callPrintFunction(" {\n")) 531 return true; 532 533 // Dump each base class, regardless of whether they're aggregates. 534 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 535 for (const auto &Base : CXXRD->bases()) { 536 QualType BaseType = 537 RecordArgIsPtr ? S.Context.getPointerType(Base.getType()) 538 : S.Context.getLValueReferenceType(Base.getType()); 539 ExprResult BasePtr = S.BuildCStyleCastExpr( 540 Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc, 541 RecordArg); 542 if (BasePtr.isInvalid() || 543 dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(), 544 Depth + 1)) 545 return true; 546 } 547 } 548 549 Expr *FieldIndentArg = getIndentString(Depth + 1); 550 551 // Dump each field. 552 for (auto *D : RD->decls()) { 553 auto *IFD = dyn_cast<IndirectFieldDecl>(D); 554 auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D); 555 if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion()) 556 continue; 557 558 llvm::SmallString<20> Format = llvm::StringRef("%s%s %s "); 559 llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg, 560 getTypeString(FD->getType()), 561 getStringLiteral(FD->getName())}; 562 563 if (FD->isBitField()) { 564 Format += ": %zu "; 565 QualType SizeT = S.Context.getSizeType(); 566 llvm::APInt BitWidth(S.Context.getIntWidth(SizeT), 567 FD->getBitWidthValue(S.Context)); 568 Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc)); 569 } 570 571 Format += "="; 572 573 ExprResult Field = 574 IFD ? S.BuildAnonymousStructUnionMemberReference( 575 CXXScopeSpec(), Loc, IFD, 576 DeclAccessPair::make(IFD, AS_public), RecordArg, Loc) 577 : S.BuildFieldReferenceExpr( 578 RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD, 579 DeclAccessPair::make(FD, AS_public), 580 DeclarationNameInfo(FD->getDeclName(), Loc)); 581 if (Field.isInvalid()) 582 return true; 583 584 auto *InnerRD = FD->getType()->getAsRecordDecl(); 585 auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD); 586 if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) { 587 // Recursively print the values of members of aggregate record type. 588 if (callPrintFunction(Format, Args) || 589 dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1)) 590 return true; 591 } else { 592 Format += " "; 593 if (appendFormatSpecifier(FD->getType(), Format)) { 594 // We know how to print this field. 595 Args.push_back(Field.get()); 596 } else { 597 // We don't know how to print this field. Print out its address 598 // with a format specifier that a smart tool will be able to 599 // recognize and treat specially. 600 Format += "*%p"; 601 ExprResult FieldAddr = 602 S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get()); 603 if (FieldAddr.isInvalid()) 604 return true; 605 Args.push_back(FieldAddr.get()); 606 } 607 Format += "\n"; 608 if (callPrintFunction(Format, Args)) 609 return true; 610 } 611 } 612 613 return RecordIndent ? 
callPrintFunction("%s}\n", RecordIndent) 614 : callPrintFunction("}\n"); 615 } 616 617 Expr *buildWrapper() { 618 auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions, 619 PseudoObjectExpr::NoResult); 620 TheCall->setType(Wrapper->getType()); 621 TheCall->setValueKind(Wrapper->getValueKind()); 622 return Wrapper; 623 } 624 }; 625 } // namespace 626 627 static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) { 628 if (checkArgCountAtLeast(S, TheCall, 2)) 629 return ExprError(); 630 631 ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0)); 632 if (PtrArgResult.isInvalid()) 633 return ExprError(); 634 TheCall->setArg(0, PtrArgResult.get()); 635 636 // First argument should be a pointer to a struct. 637 QualType PtrArgType = PtrArgResult.get()->getType(); 638 if (!PtrArgType->isPointerType() || 639 !PtrArgType->getPointeeType()->isRecordType()) { 640 S.Diag(PtrArgResult.get()->getBeginLoc(), 641 diag::err_expected_struct_pointer_argument) 642 << 1 << TheCall->getDirectCallee() << PtrArgType; 643 return ExprError(); 644 } 645 const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl(); 646 647 // Second argument is a callable, but we can't fully validate it until we try 648 // calling it. 649 QualType FnArgType = TheCall->getArg(1)->getType(); 650 if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() && 651 !FnArgType->isBlockPointerType() && 652 !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) { 653 auto *BT = FnArgType->getAs<BuiltinType>(); 654 switch (BT ? BT->getKind() : BuiltinType::Void) { 655 case BuiltinType::Dependent: 656 case BuiltinType::Overload: 657 case BuiltinType::BoundMember: 658 case BuiltinType::PseudoObject: 659 case BuiltinType::UnknownAny: 660 case BuiltinType::BuiltinFn: 661 // This might be a callable. 662 break; 663 664 default: 665 S.Diag(TheCall->getArg(1)->getBeginLoc(), 666 diag::err_expected_callable_argument) 667 << 2 << TheCall->getDirectCallee() << FnArgType; 668 return ExprError(); 669 } 670 } 671 672 BuiltinDumpStructGenerator Generator(S, TheCall); 673 674 // Wrap parentheses around the given pointer. This is not necessary for 675 // correct code generation, but it means that when we pretty-print the call 676 // arguments in our diagnostics we will produce '(&s)->n' instead of the 677 // incorrect '&s->n'. 
678 Expr *PtrArg = PtrArgResult.get(); 679 PtrArg = new (S.Context) 680 ParenExpr(PtrArg->getBeginLoc(), 681 S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg); 682 if (Generator.dumpUnnamedRecord(RD, PtrArg, 0)) 683 return ExprError(); 684 685 return Generator.buildWrapper(); 686 } 687 688 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) { 689 if (checkArgCount(S, BuiltinCall, 2)) 690 return true; 691 692 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc(); 693 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts(); 694 Expr *Call = BuiltinCall->getArg(0); 695 Expr *Chain = BuiltinCall->getArg(1); 696 697 if (Call->getStmtClass() != Stmt::CallExprClass) { 698 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call) 699 << Call->getSourceRange(); 700 return true; 701 } 702 703 auto CE = cast<CallExpr>(Call); 704 if (CE->getCallee()->getType()->isBlockPointerType()) { 705 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call) 706 << Call->getSourceRange(); 707 return true; 708 } 709 710 const Decl *TargetDecl = CE->getCalleeDecl(); 711 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) 712 if (FD->getBuiltinID()) { 713 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call) 714 << Call->getSourceRange(); 715 return true; 716 } 717 718 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) { 719 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call) 720 << Call->getSourceRange(); 721 return true; 722 } 723 724 ExprResult ChainResult = S.UsualUnaryConversions(Chain); 725 if (ChainResult.isInvalid()) 726 return true; 727 if (!ChainResult.get()->getType()->isPointerType()) { 728 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer) 729 << Chain->getSourceRange(); 730 return true; 731 } 732 733 QualType ReturnTy = CE->getCallReturnType(S.Context); 734 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() }; 735 QualType BuiltinTy = S.Context.getFunctionType( 736 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo()); 737 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy); 738 739 Builtin = 740 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get(); 741 742 BuiltinCall->setType(CE->getType()); 743 BuiltinCall->setValueKind(CE->getValueKind()); 744 BuiltinCall->setObjectKind(CE->getObjectKind()); 745 BuiltinCall->setCallee(Builtin); 746 BuiltinCall->setArg(1, ChainResult.get()); 747 748 return false; 749 } 750 751 namespace { 752 753 class ScanfDiagnosticFormatHandler 754 : public analyze_format_string::FormatStringHandler { 755 // Accepts the argument index (relative to the first destination index) of the 756 // argument whose size we want. 757 using ComputeSizeFunction = 758 llvm::function_ref<Optional<llvm::APSInt>(unsigned)>; 759 760 // Accepts the argument index (relative to the first destination index), the 761 // destination size, and the source size). 
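  // For example (illustrative): scanning "%10s" into 'char buf[8]' results in
  // Diagnose(0, /*DestSize=*/8, /*SourceSize=*/11).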
762 using DiagnoseFunction = 763 llvm::function_ref<void(unsigned, unsigned, unsigned)>; 764 765 ComputeSizeFunction ComputeSizeArgument; 766 DiagnoseFunction Diagnose; 767 768 public: 769 ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument, 770 DiagnoseFunction Diagnose) 771 : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {} 772 773 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 774 const char *StartSpecifier, 775 unsigned specifierLen) override { 776 if (!FS.consumesDataArgument()) 777 return true; 778 779 unsigned NulByte = 0; 780 switch ((FS.getConversionSpecifier().getKind())) { 781 default: 782 return true; 783 case analyze_format_string::ConversionSpecifier::sArg: 784 case analyze_format_string::ConversionSpecifier::ScanListArg: 785 NulByte = 1; 786 break; 787 case analyze_format_string::ConversionSpecifier::cArg: 788 break; 789 } 790 791 analyze_format_string::OptionalAmount FW = FS.getFieldWidth(); 792 if (FW.getHowSpecified() != 793 analyze_format_string::OptionalAmount::HowSpecified::Constant) 794 return true; 795 796 unsigned SourceSize = FW.getConstantAmount() + NulByte; 797 798 Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex()); 799 if (!DestSizeAPS) 800 return true; 801 802 unsigned DestSize = DestSizeAPS->getZExtValue(); 803 804 if (DestSize < SourceSize) 805 Diagnose(FS.getArgIndex(), DestSize, SourceSize); 806 807 return true; 808 } 809 }; 810 811 class EstimateSizeFormatHandler 812 : public analyze_format_string::FormatStringHandler { 813 size_t Size; 814 815 public: 816 EstimateSizeFormatHandler(StringRef Format) 817 : Size(std::min(Format.find(0), Format.size()) + 818 1 /* null byte always written by sprintf */) {} 819 820 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 821 const char *, unsigned SpecifierLen, 822 const TargetInfo &) override { 823 824 const size_t FieldWidth = computeFieldWidth(FS); 825 const size_t Precision = computePrecision(FS); 826 827 // The actual format. 828 switch (FS.getConversionSpecifier().getKind()) { 829 // Just a char. 830 case analyze_format_string::ConversionSpecifier::cArg: 831 case analyze_format_string::ConversionSpecifier::CArg: 832 Size += std::max(FieldWidth, (size_t)1); 833 break; 834 // Just an integer. 835 case analyze_format_string::ConversionSpecifier::dArg: 836 case analyze_format_string::ConversionSpecifier::DArg: 837 case analyze_format_string::ConversionSpecifier::iArg: 838 case analyze_format_string::ConversionSpecifier::oArg: 839 case analyze_format_string::ConversionSpecifier::OArg: 840 case analyze_format_string::ConversionSpecifier::uArg: 841 case analyze_format_string::ConversionSpecifier::UArg: 842 case analyze_format_string::ConversionSpecifier::xArg: 843 case analyze_format_string::ConversionSpecifier::XArg: 844 Size += std::max(FieldWidth, Precision); 845 break; 846 847 // %g style conversion switches between %f or %e style dynamically. 848 // %f always takes less space, so default to it. 849 case analyze_format_string::ConversionSpecifier::gArg: 850 case analyze_format_string::ConversionSpecifier::GArg: 851 852 // Floating point number in the form '[+]ddd.ddd'. 853 case analyze_format_string::ConversionSpecifier::fArg: 854 case analyze_format_string::ConversionSpecifier::FArg: 855 Size += std::max(FieldWidth, 1 /* integer part */ + 856 (Precision ? 1 + Precision 857 : 0) /* period + decimal */); 858 break; 859 860 // Floating point number in the form '[-]d.ddde[+-]dd'. 
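    // e.g. "%e" with the default precision of 6 reserves
    // 1 + (1 + 6) + 1 + 2 = 11 bytes here (illustrative).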
861 case analyze_format_string::ConversionSpecifier::eArg: 862 case analyze_format_string::ConversionSpecifier::EArg: 863 Size += 864 std::max(FieldWidth, 865 1 /* integer part */ + 866 (Precision ? 1 + Precision : 0) /* period + decimal */ + 867 1 /* e or E letter */ + 2 /* exponent */); 868 break; 869 870 // Floating point number in the form '[-]0xh.hhhhp±dd'. 871 case analyze_format_string::ConversionSpecifier::aArg: 872 case analyze_format_string::ConversionSpecifier::AArg: 873 Size += 874 std::max(FieldWidth, 875 2 /* 0x */ + 1 /* integer part */ + 876 (Precision ? 1 + Precision : 0) /* period + decimal */ + 877 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */); 878 break; 879 880 // Just a string. 881 case analyze_format_string::ConversionSpecifier::sArg: 882 case analyze_format_string::ConversionSpecifier::SArg: 883 Size += FieldWidth; 884 break; 885 886 // Just a pointer in the form '0xddd'. 887 case analyze_format_string::ConversionSpecifier::pArg: 888 Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision); 889 break; 890 891 // A plain percent. 892 case analyze_format_string::ConversionSpecifier::PercentArg: 893 Size += 1; 894 break; 895 896 default: 897 break; 898 } 899 900 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix(); 901 902 if (FS.hasAlternativeForm()) { 903 switch (FS.getConversionSpecifier().getKind()) { 904 default: 905 break; 906 // Force a leading '0'. 907 case analyze_format_string::ConversionSpecifier::oArg: 908 Size += 1; 909 break; 910 // Force a leading '0x'. 911 case analyze_format_string::ConversionSpecifier::xArg: 912 case analyze_format_string::ConversionSpecifier::XArg: 913 Size += 2; 914 break; 915 // Force a period '.' before decimal, even if precision is 0. 916 case analyze_format_string::ConversionSpecifier::aArg: 917 case analyze_format_string::ConversionSpecifier::AArg: 918 case analyze_format_string::ConversionSpecifier::eArg: 919 case analyze_format_string::ConversionSpecifier::EArg: 920 case analyze_format_string::ConversionSpecifier::fArg: 921 case analyze_format_string::ConversionSpecifier::FArg: 922 case analyze_format_string::ConversionSpecifier::gArg: 923 case analyze_format_string::ConversionSpecifier::GArg: 924 Size += (Precision ? 0 : 1); 925 break; 926 } 927 } 928 assert(SpecifierLen <= Size && "no underflow"); 929 Size -= SpecifierLen; 930 return true; 931 } 932 933 size_t getSizeLowerBound() const { return Size; } 934 935 private: 936 static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) { 937 const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth(); 938 size_t FieldWidth = 0; 939 if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant) 940 FieldWidth = FW.getConstantAmount(); 941 return FieldWidth; 942 } 943 944 static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) { 945 const analyze_format_string::OptionalAmount &FW = FS.getPrecision(); 946 size_t Precision = 0; 947 948 // See man 3 printf for default precision value based on the specifier. 
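    // For instance, printf("%f", x) behaves like printf("%.6f", x), and
    // printf("%d", i) like printf("%.1d", i) (illustrative).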
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ?
DABAttr->argIndices_begin()[Index] 1026 : Index - DABIndices + FD->getNumParams(); 1027 if (NewIndex >= TheCall->getNumArgs()) 1028 return llvm::None; 1029 return NewIndex; 1030 }; 1031 1032 auto ComputeExplicitObjectSizeArgument = 1033 [&](unsigned Index) -> Optional<llvm::APSInt> { 1034 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1035 if (!IndexOptional) 1036 return llvm::None; 1037 unsigned NewIndex = *IndexOptional; 1038 Expr::EvalResult Result; 1039 Expr *SizeArg = TheCall->getArg(NewIndex); 1040 if (!SizeArg->EvaluateAsInt(Result, getASTContext())) 1041 return llvm::None; 1042 llvm::APSInt Integer = Result.Val.getInt(); 1043 Integer.setIsUnsigned(true); 1044 return Integer; 1045 }; 1046 1047 auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 1048 // If the parameter has a pass_object_size attribute, then we should use its 1049 // (potentially) more strict checking mode. Otherwise, conservatively assume 1050 // type 0. 1051 int BOSType = 0; 1052 // This check can fail for variadic functions. 1053 if (Index < FD->getNumParams()) { 1054 if (const auto *POS = 1055 FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>()) 1056 BOSType = POS->getType(); 1057 } 1058 1059 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1060 if (!IndexOptional) 1061 return llvm::None; 1062 unsigned NewIndex = *IndexOptional; 1063 1064 const Expr *ObjArg = TheCall->getArg(NewIndex); 1065 uint64_t Result; 1066 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType)) 1067 return llvm::None; 1068 1069 // Get the object size in the target's size_t width. 1070 return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth); 1071 }; 1072 1073 auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 1074 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1075 if (!IndexOptional) 1076 return llvm::None; 1077 unsigned NewIndex = *IndexOptional; 1078 1079 const Expr *ObjArg = TheCall->getArg(NewIndex); 1080 uint64_t Result; 1081 if (!ObjArg->tryEvaluateStrLen(Result, getASTContext())) 1082 return llvm::None; 1083 // Add 1 for null byte. 1084 return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth); 1085 }; 1086 1087 Optional<llvm::APSInt> SourceSize; 1088 Optional<llvm::APSInt> DestinationSize; 1089 unsigned DiagID = 0; 1090 bool IsChkVariant = false; 1091 1092 auto GetFunctionName = [&]() { 1093 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID); 1094 // Skim off the details of whichever builtin was called to produce a better 1095 // diagnostic, as it's unlikely that the user wrote the __builtin 1096 // explicitly. 
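    // e.g. "__builtin___memcpy_chk" is reported as "memcpy", and
    // "__builtin_strcpy" as "strcpy" (illustrative).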
1097 if (IsChkVariant) { 1098 FunctionName = FunctionName.drop_front(std::strlen("__builtin___")); 1099 FunctionName = FunctionName.drop_back(std::strlen("_chk")); 1100 } else if (FunctionName.startswith("__builtin_")) { 1101 FunctionName = FunctionName.drop_front(std::strlen("__builtin_")); 1102 } 1103 return FunctionName; 1104 }; 1105 1106 switch (BuiltinID) { 1107 default: 1108 return; 1109 case Builtin::BI__builtin_strcpy: 1110 case Builtin::BIstrcpy: { 1111 DiagID = diag::warn_fortify_strlen_overflow; 1112 SourceSize = ComputeStrLenArgument(1); 1113 DestinationSize = ComputeSizeArgument(0); 1114 break; 1115 } 1116 1117 case Builtin::BI__builtin___strcpy_chk: { 1118 DiagID = diag::warn_fortify_strlen_overflow; 1119 SourceSize = ComputeStrLenArgument(1); 1120 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1121 IsChkVariant = true; 1122 break; 1123 } 1124 1125 case Builtin::BIscanf: 1126 case Builtin::BIfscanf: 1127 case Builtin::BIsscanf: { 1128 unsigned FormatIndex = 1; 1129 unsigned DataIndex = 2; 1130 if (BuiltinID == Builtin::BIscanf) { 1131 FormatIndex = 0; 1132 DataIndex = 1; 1133 } 1134 1135 const auto *FormatExpr = 1136 TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1137 1138 const auto *Format = dyn_cast<StringLiteral>(FormatExpr); 1139 if (!Format) 1140 return; 1141 1142 if (!Format->isAscii() && !Format->isUTF8()) 1143 return; 1144 1145 auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize, 1146 unsigned SourceSize) { 1147 DiagID = diag::warn_fortify_scanf_overflow; 1148 unsigned Index = ArgIndex + DataIndex; 1149 StringRef FunctionName = GetFunctionName(); 1150 DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall, 1151 PDiag(DiagID) << FunctionName << (Index + 1) 1152 << DestSize << SourceSize); 1153 }; 1154 1155 StringRef FormatStrRef = Format->getString(); 1156 auto ShiftedComputeSizeArgument = [&](unsigned Index) { 1157 return ComputeSizeArgument(Index + DataIndex); 1158 }; 1159 ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose); 1160 const char *FormatBytes = FormatStrRef.data(); 1161 const ConstantArrayType *T = 1162 Context.getAsConstantArrayType(Format->getType()); 1163 assert(T && "String literal not of constant array type!"); 1164 size_t TypeSize = T->getSize().getZExtValue(); 1165 1166 // In case there's a null byte somewhere. 1167 size_t StrLen = 1168 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1169 1170 analyze_format_string::ParseScanfString(H, FormatBytes, 1171 FormatBytes + StrLen, getLangOpts(), 1172 Context.getTargetInfo()); 1173 1174 // Unlike the other cases, in this one we have already issued the diagnostic 1175 // here, so no need to continue (because unlike the other cases, here the 1176 // diagnostic refers to the argument number). 1177 return; 1178 } 1179 1180 case Builtin::BIsprintf: 1181 case Builtin::BI__builtin___sprintf_chk: { 1182 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 
1 : 3; 1183 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1184 1185 if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) { 1186 1187 if (!Format->isAscii() && !Format->isUTF8()) 1188 return; 1189 1190 StringRef FormatStrRef = Format->getString(); 1191 EstimateSizeFormatHandler H(FormatStrRef); 1192 const char *FormatBytes = FormatStrRef.data(); 1193 const ConstantArrayType *T = 1194 Context.getAsConstantArrayType(Format->getType()); 1195 assert(T && "String literal not of constant array type!"); 1196 size_t TypeSize = T->getSize().getZExtValue(); 1197 1198 // In case there's a null byte somewhere. 1199 size_t StrLen = 1200 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1201 if (!analyze_format_string::ParsePrintfString( 1202 H, FormatBytes, FormatBytes + StrLen, getLangOpts(), 1203 Context.getTargetInfo(), false)) { 1204 DiagID = diag::warn_fortify_source_format_overflow; 1205 SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound()) 1206 .extOrTrunc(SizeTypeWidth); 1207 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) { 1208 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1209 IsChkVariant = true; 1210 } else { 1211 DestinationSize = ComputeSizeArgument(0); 1212 } 1213 break; 1214 } 1215 } 1216 return; 1217 } 1218 case Builtin::BI__builtin___memcpy_chk: 1219 case Builtin::BI__builtin___memmove_chk: 1220 case Builtin::BI__builtin___memset_chk: 1221 case Builtin::BI__builtin___strlcat_chk: 1222 case Builtin::BI__builtin___strlcpy_chk: 1223 case Builtin::BI__builtin___strncat_chk: 1224 case Builtin::BI__builtin___strncpy_chk: 1225 case Builtin::BI__builtin___stpncpy_chk: 1226 case Builtin::BI__builtin___memccpy_chk: 1227 case Builtin::BI__builtin___mempcpy_chk: { 1228 DiagID = diag::warn_builtin_chk_overflow; 1229 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2); 1230 DestinationSize = 1231 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1232 IsChkVariant = true; 1233 break; 1234 } 1235 1236 case Builtin::BI__builtin___snprintf_chk: 1237 case Builtin::BI__builtin___vsnprintf_chk: { 1238 DiagID = diag::warn_builtin_chk_overflow; 1239 SourceSize = ComputeExplicitObjectSizeArgument(1); 1240 DestinationSize = ComputeExplicitObjectSizeArgument(3); 1241 IsChkVariant = true; 1242 break; 1243 } 1244 1245 case Builtin::BIstrncat: 1246 case Builtin::BI__builtin_strncat: 1247 case Builtin::BIstrncpy: 1248 case Builtin::BI__builtin_strncpy: 1249 case Builtin::BIstpncpy: 1250 case Builtin::BI__builtin_stpncpy: { 1251 // Whether these functions overflow depends on the runtime strlen of the 1252 // string, not just the buffer size, so emitting the "always overflow" 1253 // diagnostic isn't quite right. We should still diagnose passing a buffer 1254 // size larger than the destination buffer though; this is a runtime abort 1255 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise. 
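    // e.g. strncpy(dst, src, sizeof(src)) where sizeof(src) is larger than the
    // destination buffer is diagnosed below (illustrative).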
1256 DiagID = diag::warn_fortify_source_size_mismatch; 1257 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1258 DestinationSize = ComputeSizeArgument(0); 1259 break; 1260 } 1261 1262 case Builtin::BImemcpy: 1263 case Builtin::BI__builtin_memcpy: 1264 case Builtin::BImemmove: 1265 case Builtin::BI__builtin_memmove: 1266 case Builtin::BImemset: 1267 case Builtin::BI__builtin_memset: 1268 case Builtin::BImempcpy: 1269 case Builtin::BI__builtin_mempcpy: { 1270 DiagID = diag::warn_fortify_source_overflow; 1271 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1272 DestinationSize = ComputeSizeArgument(0); 1273 break; 1274 } 1275 case Builtin::BIsnprintf: 1276 case Builtin::BI__builtin_snprintf: 1277 case Builtin::BIvsnprintf: 1278 case Builtin::BI__builtin_vsnprintf: { 1279 DiagID = diag::warn_fortify_source_size_mismatch; 1280 SourceSize = ComputeExplicitObjectSizeArgument(1); 1281 DestinationSize = ComputeSizeArgument(0); 1282 break; 1283 } 1284 } 1285 1286 if (!SourceSize || !DestinationSize || 1287 llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0) 1288 return; 1289 1290 StringRef FunctionName = GetFunctionName(); 1291 1292 SmallString<16> DestinationStr; 1293 SmallString<16> SourceStr; 1294 DestinationSize->toString(DestinationStr, /*Radix=*/10); 1295 SourceSize->toString(SourceStr, /*Radix=*/10); 1296 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 1297 PDiag(DiagID) 1298 << FunctionName << DestinationStr << SourceStr); 1299 } 1300 1301 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, 1302 Scope::ScopeFlags NeededScopeFlags, 1303 unsigned DiagID) { 1304 // Scopes aren't available during instantiation. Fortunately, builtin 1305 // functions cannot be template args so they cannot be formed through template 1306 // instantiation. Therefore checking once during the parse is sufficient. 1307 if (SemaRef.inTemplateInstantiation()) 1308 return false; 1309 1310 Scope *S = SemaRef.getCurScope(); 1311 while (S && !S->isSEHExceptScope()) 1312 S = S->getParent(); 1313 if (!S || !(S->getFlags() & NeededScopeFlags)) { 1314 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 1315 SemaRef.Diag(TheCall->getExprLoc(), DiagID) 1316 << DRE->getDecl()->getIdentifier(); 1317 return true; 1318 } 1319 1320 return false; 1321 } 1322 1323 static inline bool isBlockPointer(Expr *Arg) { 1324 return Arg->getType()->isBlockPointerType(); 1325 } 1326 1327 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local 1328 /// void*, which is a requirement of device side enqueue. 1329 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) { 1330 const BlockPointerType *BPT = 1331 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1332 ArrayRef<QualType> Params = 1333 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes(); 1334 unsigned ArgCounter = 0; 1335 bool IllegalParams = false; 1336 // Iterate through the block parameters until either one is found that is not 1337 // a local void*, or the block is valid. 1338 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end(); 1339 I != E; ++I, ++ArgCounter) { 1340 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() || 1341 (*I)->getPointeeType().getQualifiers().getAddressSpace() != 1342 LangAS::opencl_local) { 1343 // Get the location of the error. 
      // If a block literal has been passed (BlockExpr), we can point straight
      // to the offending argument; otherwise we just point to the variable
      // reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // An OpenCL device can support the extension but not the feature, as the
  // extension requires subgroup independent forward progress, while subgroup
  // independent forward progress is optional in the OpenCL C 3.0
  // __opencl_c_subgroups feature.
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
      !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
                                        S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee()
        << "cl_khr_subgroups or __opencl_c_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size and
/// get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
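/// For example (illustrative), a block with two 'local void *' parameters must
/// be enqueued with two trailing size arguments:
///   enqueue_kernel(queue, flags, ndrange,
///                  ^(local void *a, local void *b){ ... }, size_a, size_b);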
1432 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall, 1433 Expr *BlockArg, 1434 unsigned NumNonVarArgs) { 1435 const BlockPointerType *BPT = 1436 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1437 unsigned NumBlockParams = 1438 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams(); 1439 unsigned TotalNumArgs = TheCall->getNumArgs(); 1440 1441 // For each argument passed to the block, a corresponding uint needs to 1442 // be passed to describe the size of the local memory. 1443 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) { 1444 S.Diag(TheCall->getBeginLoc(), 1445 diag::err_opencl_enqueue_kernel_local_size_args); 1446 return true; 1447 } 1448 1449 // Check that the sizes of the local memory are specified by integers. 1450 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs, 1451 TotalNumArgs - 1); 1452 } 1453 1454 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different 1455 /// overload formats specified in Table 6.13.17.1. 1456 /// int enqueue_kernel(queue_t queue, 1457 /// kernel_enqueue_flags_t flags, 1458 /// const ndrange_t ndrange, 1459 /// void (^block)(void)) 1460 /// int enqueue_kernel(queue_t queue, 1461 /// kernel_enqueue_flags_t flags, 1462 /// const ndrange_t ndrange, 1463 /// uint num_events_in_wait_list, 1464 /// clk_event_t *event_wait_list, 1465 /// clk_event_t *event_ret, 1466 /// void (^block)(void)) 1467 /// int enqueue_kernel(queue_t queue, 1468 /// kernel_enqueue_flags_t flags, 1469 /// const ndrange_t ndrange, 1470 /// void (^block)(local void*, ...), 1471 /// uint size0, ...) 1472 /// int enqueue_kernel(queue_t queue, 1473 /// kernel_enqueue_flags_t flags, 1474 /// const ndrange_t ndrange, 1475 /// uint num_events_in_wait_list, 1476 /// clk_event_t *event_wait_list, 1477 /// clk_event_t *event_ret, 1478 /// void (^block)(local void*, ...), 1479 /// uint size0, ...) 1480 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) { 1481 unsigned NumArgs = TheCall->getNumArgs(); 1482 1483 if (NumArgs < 4) { 1484 S.Diag(TheCall->getBeginLoc(), 1485 diag::err_typecheck_call_too_few_args_at_least) 1486 << 0 << 4 << NumArgs; 1487 return true; 1488 } 1489 1490 Expr *Arg0 = TheCall->getArg(0); 1491 Expr *Arg1 = TheCall->getArg(1); 1492 Expr *Arg2 = TheCall->getArg(2); 1493 Expr *Arg3 = TheCall->getArg(3); 1494 1495 // First argument always needs to be a queue_t type. 1496 if (!Arg0->getType()->isQueueT()) { 1497 S.Diag(TheCall->getArg(0)->getBeginLoc(), 1498 diag::err_opencl_builtin_expected_type) 1499 << TheCall->getDirectCallee() << S.Context.OCLQueueTy; 1500 return true; 1501 } 1502 1503 // Second argument always needs to be a kernel_enqueue_flags_t enum value. 1504 if (!Arg1->getType()->isIntegerType()) { 1505 S.Diag(TheCall->getArg(1)->getBeginLoc(), 1506 diag::err_opencl_builtin_expected_type) 1507 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)"; 1508 return true; 1509 } 1510 1511 // Third argument is always an ndrange_t type. 1512 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 1513 S.Diag(TheCall->getArg(2)->getBeginLoc(), 1514 diag::err_opencl_builtin_expected_type) 1515 << TheCall->getDirectCallee() << "'ndrange_t'"; 1516 return true; 1517 } 1518 1519 // With four arguments, there is only one form that the function could be 1520 // called in: no events and no variable arguments. 1521 if (NumArgs == 4) { 1522 // check that the last argument is the right block type. 
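    // e.g. (illustrative)
    //   enqueue_kernel(q, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndr, ^(void){ ... });
    // where the block takes no arguments.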
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // We have a block type; check the prototype.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // We can have a block plus varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // The last two overloads have either exactly 7 args, or 7 args and varargs.
  if (NumArgs >= 7) {
    // Check the common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be an integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // Check the remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases was matched; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns the OpenCL access qualifier attribute of a declaration, if any.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Checks that the first argument is a pipe and that its access qualifier is
/// compatible with the builtin being called. Returns true on error.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validate that the access qualifier is compatible with the call.
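  // For example (illustrative), calling read_pipe() on a pipe declared
  // write_only is rejected by the switch below.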
1617 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be 1618 // read_only and write_only, and assumed to be read_only if no qualifier is 1619 // specified. 1620 switch (Call->getDirectCallee()->getBuiltinID()) { 1621 case Builtin::BIread_pipe: 1622 case Builtin::BIreserve_read_pipe: 1623 case Builtin::BIcommit_read_pipe: 1624 case Builtin::BIwork_group_reserve_read_pipe: 1625 case Builtin::BIsub_group_reserve_read_pipe: 1626 case Builtin::BIwork_group_commit_read_pipe: 1627 case Builtin::BIsub_group_commit_read_pipe: 1628 if (!(!AccessQual || AccessQual->isReadOnly())) { 1629 S.Diag(Arg0->getBeginLoc(), 1630 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1631 << "read_only" << Arg0->getSourceRange(); 1632 return true; 1633 } 1634 break; 1635 case Builtin::BIwrite_pipe: 1636 case Builtin::BIreserve_write_pipe: 1637 case Builtin::BIcommit_write_pipe: 1638 case Builtin::BIwork_group_reserve_write_pipe: 1639 case Builtin::BIsub_group_reserve_write_pipe: 1640 case Builtin::BIwork_group_commit_write_pipe: 1641 case Builtin::BIsub_group_commit_write_pipe: 1642 if (!(AccessQual && AccessQual->isWriteOnly())) { 1643 S.Diag(Arg0->getBeginLoc(), 1644 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1645 << "write_only" << Arg0->getSourceRange(); 1646 return true; 1647 } 1648 break; 1649 default: 1650 break; 1651 } 1652 return false; 1653 } 1654 1655 /// Returns true if pipe element type is different from the pointer. 1656 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { 1657 const Expr *Arg0 = Call->getArg(0); 1658 const Expr *ArgIdx = Call->getArg(Idx); 1659 const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); 1660 const QualType EltTy = PipeTy->getElementType(); 1661 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); 1662 // The Idx argument should be a pointer and the type of the pointer and 1663 // the type of pipe element should also be the same. 1664 if (!ArgTy || 1665 !S.Context.hasSameType( 1666 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { 1667 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1668 << Call->getDirectCallee() << S.Context.getPointerType(EltTy) 1669 << ArgIdx->getType() << ArgIdx->getSourceRange(); 1670 return true; 1671 } 1672 return false; 1673 } 1674 1675 // Performs semantic analysis for the read/write_pipe call. 1676 // \param S Reference to the semantic analyzer. 1677 // \param Call A pointer to the builtin call. 1678 // \return True if a semantic error has been found, false otherwise. 1679 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { 1680 // OpenCL v2.0 s6.13.16.2 - The built-in read/write 1681 // functions have two forms. 1682 switch (Call->getNumArgs()) { 1683 case 2: 1684 if (checkOpenCLPipeArg(S, Call)) 1685 return true; 1686 // The call with 2 arguments should be 1687 // read/write_pipe(pipe T, T*). 1688 // Check packet type T. 1689 if (checkOpenCLPipePacketType(S, Call, 1)) 1690 return true; 1691 break; 1692 1693 case 4: { 1694 if (checkOpenCLPipeArg(S, Call)) 1695 return true; 1696 // The call with 4 arguments should be 1697 // read/write_pipe(pipe T, reserve_id_t, uint, T*). 1698 // Check reserve_id_t. 1699 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1700 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1701 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1702 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1703 return true; 1704 } 1705 1706 // Check the index. 
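    // (In the 4-argument form read/write_pipe(pipe T, reserve_id_t, uint, T*),
    //  this is the packet index within the reservation and is expected to be
    //  a uint; the index check below enforces that.)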
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs semantic analysis on the work_group_/sub_group_ variants of
// reserve_read_pipe and reserve_write_pipe, as well as the plain forms.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // The return type of the reserve_read/write_pipe built-ins is reserve_id_t,
  // which is not defined in the builtin def file, so they are declared there
  // as returning int and the return type has to be overridden here.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs semantic analysis on the work_group_/sub_group_ variants of
// commit_read_pipe and commit_write_pipe, as well as the plain forms.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs semantic analysis on calls to the built-in pipe query functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
1802 // \param BuiltinID ID of the builtin function. 1803 // \param Call A pointer to the builtin call. 1804 // \return True if a semantic error has been found, false otherwise. 1805 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 1806 CallExpr *Call) { 1807 if (checkArgCount(S, Call, 1)) 1808 return true; 1809 1810 auto RT = Call->getArg(0)->getType(); 1811 if (!RT->isPointerType() || RT->getPointeeType() 1812 .getAddressSpace() == LangAS::opencl_constant) { 1813 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 1814 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 1815 return true; 1816 } 1817 1818 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 1819 S.Diag(Call->getArg(0)->getBeginLoc(), 1820 diag::warn_opencl_generic_address_space_arg) 1821 << Call->getDirectCallee()->getNameInfo().getAsString() 1822 << Call->getArg(0)->getSourceRange(); 1823 } 1824 1825 RT = RT->getPointeeType(); 1826 auto Qual = RT.getQualifiers(); 1827 switch (BuiltinID) { 1828 case Builtin::BIto_global: 1829 Qual.setAddressSpace(LangAS::opencl_global); 1830 break; 1831 case Builtin::BIto_local: 1832 Qual.setAddressSpace(LangAS::opencl_local); 1833 break; 1834 case Builtin::BIto_private: 1835 Qual.setAddressSpace(LangAS::opencl_private); 1836 break; 1837 default: 1838 llvm_unreachable("Invalid builtin function"); 1839 } 1840 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 1841 RT.getUnqualifiedType(), Qual))); 1842 1843 return false; 1844 } 1845 1846 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { 1847 if (checkArgCount(S, TheCall, 1)) 1848 return ExprError(); 1849 1850 // Compute __builtin_launder's parameter type from the argument. 1851 // The parameter type is: 1852 // * The type of the argument if it's not an array or function type, 1853 // Otherwise, 1854 // * The decayed argument type. 1855 QualType ParamTy = [&]() { 1856 QualType ArgTy = TheCall->getArg(0)->getType(); 1857 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe()) 1858 return S.Context.getPointerType(Ty->getElementType()); 1859 if (ArgTy->isFunctionType()) { 1860 return S.Context.getPointerType(ArgTy); 1861 } 1862 return ArgTy; 1863 }(); 1864 1865 TheCall->setType(ParamTy); 1866 1867 auto DiagSelect = [&]() -> llvm::Optional<unsigned> { 1868 if (!ParamTy->isPointerType()) 1869 return 0; 1870 if (ParamTy->isFunctionPointerType()) 1871 return 1; 1872 if (ParamTy->isVoidPointerType()) 1873 return 2; 1874 return llvm::Optional<unsigned>{}; 1875 }(); 1876 if (DiagSelect) { 1877 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) 1878 << DiagSelect.getValue() << TheCall->getSourceRange(); 1879 return ExprError(); 1880 } 1881 1882 // We either have an incomplete class type, or we have a class template 1883 // whose instantiation has not been forced. 
Example: 1884 // 1885 // template <class T> struct Foo { T value; }; 1886 // Foo<int> *p = nullptr; 1887 // auto *d = __builtin_launder(p); 1888 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 1889 diag::err_incomplete_type)) 1890 return ExprError(); 1891 1892 assert(ParamTy->getPointeeType()->isObjectType() && 1893 "Unhandled non-object pointer case"); 1894 1895 InitializedEntity Entity = 1896 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 1897 ExprResult Arg = 1898 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 1899 if (Arg.isInvalid()) 1900 return ExprError(); 1901 TheCall->setArg(0, Arg.get()); 1902 1903 return TheCall; 1904 } 1905 1906 // Emit an error and return true if the current object format type is in the 1907 // list of unsupported types. 1908 static bool CheckBuiltinTargetNotInUnsupported( 1909 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1910 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 1911 llvm::Triple::ObjectFormatType CurObjFormat = 1912 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 1913 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 1914 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1915 << TheCall->getSourceRange(); 1916 return true; 1917 } 1918 return false; 1919 } 1920 1921 // Emit an error and return true if the current architecture is not in the list 1922 // of supported architectures. 1923 static bool 1924 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1925 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1926 llvm::Triple::ArchType CurArch = 1927 S.getASTContext().getTargetInfo().getTriple().getArch(); 1928 if (llvm::is_contained(SupportedArchs, CurArch)) 1929 return false; 1930 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1931 << TheCall->getSourceRange(); 1932 return true; 1933 } 1934 1935 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1936 SourceLocation CallSiteLoc); 1937 1938 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1939 CallExpr *TheCall) { 1940 switch (TI.getTriple().getArch()) { 1941 default: 1942 // Some builtins don't require additional checking, so just consider these 1943 // acceptable. 
1944 return false; 1945 case llvm::Triple::arm: 1946 case llvm::Triple::armeb: 1947 case llvm::Triple::thumb: 1948 case llvm::Triple::thumbeb: 1949 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1950 case llvm::Triple::aarch64: 1951 case llvm::Triple::aarch64_32: 1952 case llvm::Triple::aarch64_be: 1953 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 1954 case llvm::Triple::bpfeb: 1955 case llvm::Triple::bpfel: 1956 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 1957 case llvm::Triple::hexagon: 1958 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 1959 case llvm::Triple::mips: 1960 case llvm::Triple::mipsel: 1961 case llvm::Triple::mips64: 1962 case llvm::Triple::mips64el: 1963 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 1964 case llvm::Triple::systemz: 1965 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 1966 case llvm::Triple::x86: 1967 case llvm::Triple::x86_64: 1968 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 1969 case llvm::Triple::ppc: 1970 case llvm::Triple::ppcle: 1971 case llvm::Triple::ppc64: 1972 case llvm::Triple::ppc64le: 1973 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 1974 case llvm::Triple::amdgcn: 1975 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 1976 case llvm::Triple::riscv32: 1977 case llvm::Triple::riscv64: 1978 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 1979 } 1980 } 1981 1982 ExprResult 1983 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, 1984 CallExpr *TheCall) { 1985 ExprResult TheCallResult(TheCall); 1986 1987 // Find out if any arguments are required to be integer constant expressions. 1988 unsigned ICEArguments = 0; 1989 ASTContext::GetBuiltinTypeError Error; 1990 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); 1991 if (Error != ASTContext::GE_None) 1992 ICEArguments = 0; // Don't diagnose previously diagnosed errors. 1993 1994 // If any arguments are required to be ICE's, check and diagnose. 1995 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 1996 // Skip arguments not required to be ICE's. 1997 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 1998 1999 llvm::APSInt Result; 2000 // If we don't have enough arguments, continue so we can issue better 2001 // diagnostic in checkArgCount(...) 2002 if (ArgNo < TheCall->getNumArgs() && 2003 SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 2004 return true; 2005 ICEArguments &= ~(1 << ArgNo); 2006 } 2007 2008 switch (BuiltinID) { 2009 case Builtin::BI__builtin___CFStringMakeConstantString: 2010 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 2011 // on z/OS) and for XCOFF (i.e., on AIX). 
Emit unsupported 2012 if (CheckBuiltinTargetNotInUnsupported( 2013 *this, BuiltinID, TheCall, 2014 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 2015 return ExprError(); 2016 assert(TheCall->getNumArgs() == 1 && 2017 "Wrong # arguments to builtin CFStringMakeConstantString"); 2018 if (CheckObjCString(TheCall->getArg(0))) 2019 return ExprError(); 2020 break; 2021 case Builtin::BI__builtin_ms_va_start: 2022 case Builtin::BI__builtin_stdarg_start: 2023 case Builtin::BI__builtin_va_start: 2024 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2025 return ExprError(); 2026 break; 2027 case Builtin::BI__va_start: { 2028 switch (Context.getTargetInfo().getTriple().getArch()) { 2029 case llvm::Triple::aarch64: 2030 case llvm::Triple::arm: 2031 case llvm::Triple::thumb: 2032 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 2033 return ExprError(); 2034 break; 2035 default: 2036 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2037 return ExprError(); 2038 break; 2039 } 2040 break; 2041 } 2042 2043 // The acquire, release, and no fence variants are ARM and AArch64 only. 2044 case Builtin::BI_interlockedbittestandset_acq: 2045 case Builtin::BI_interlockedbittestandset_rel: 2046 case Builtin::BI_interlockedbittestandset_nf: 2047 case Builtin::BI_interlockedbittestandreset_acq: 2048 case Builtin::BI_interlockedbittestandreset_rel: 2049 case Builtin::BI_interlockedbittestandreset_nf: 2050 if (CheckBuiltinTargetInSupported( 2051 *this, BuiltinID, TheCall, 2052 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 2053 return ExprError(); 2054 break; 2055 2056 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 2057 case Builtin::BI_bittest64: 2058 case Builtin::BI_bittestandcomplement64: 2059 case Builtin::BI_bittestandreset64: 2060 case Builtin::BI_bittestandset64: 2061 case Builtin::BI_interlockedbittestandreset64: 2062 case Builtin::BI_interlockedbittestandset64: 2063 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2064 {llvm::Triple::x86_64, llvm::Triple::arm, 2065 llvm::Triple::thumb, 2066 llvm::Triple::aarch64})) 2067 return ExprError(); 2068 break; 2069 2070 case Builtin::BI__builtin_isgreater: 2071 case Builtin::BI__builtin_isgreaterequal: 2072 case Builtin::BI__builtin_isless: 2073 case Builtin::BI__builtin_islessequal: 2074 case Builtin::BI__builtin_islessgreater: 2075 case Builtin::BI__builtin_isunordered: 2076 if (SemaBuiltinUnorderedCompare(TheCall)) 2077 return ExprError(); 2078 break; 2079 case Builtin::BI__builtin_fpclassify: 2080 if (SemaBuiltinFPClassification(TheCall, 6)) 2081 return ExprError(); 2082 break; 2083 case Builtin::BI__builtin_isfinite: 2084 case Builtin::BI__builtin_isinf: 2085 case Builtin::BI__builtin_isinf_sign: 2086 case Builtin::BI__builtin_isnan: 2087 case Builtin::BI__builtin_isnormal: 2088 case Builtin::BI__builtin_signbit: 2089 case Builtin::BI__builtin_signbitf: 2090 case Builtin::BI__builtin_signbitl: 2091 if (SemaBuiltinFPClassification(TheCall, 1)) 2092 return ExprError(); 2093 break; 2094 case Builtin::BI__builtin_shufflevector: 2095 return SemaBuiltinShuffleVector(TheCall); 2096 // TheCall will be freed by the smart pointer here, but that's fine, since 2097 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 
2098 case Builtin::BI__builtin_prefetch: 2099 if (SemaBuiltinPrefetch(TheCall)) 2100 return ExprError(); 2101 break; 2102 case Builtin::BI__builtin_alloca_with_align: 2103 case Builtin::BI__builtin_alloca_with_align_uninitialized: 2104 if (SemaBuiltinAllocaWithAlign(TheCall)) 2105 return ExprError(); 2106 LLVM_FALLTHROUGH; 2107 case Builtin::BI__builtin_alloca: 2108 case Builtin::BI__builtin_alloca_uninitialized: 2109 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 2110 << TheCall->getDirectCallee(); 2111 break; 2112 case Builtin::BI__arithmetic_fence: 2113 if (SemaBuiltinArithmeticFence(TheCall)) 2114 return ExprError(); 2115 break; 2116 case Builtin::BI__assume: 2117 case Builtin::BI__builtin_assume: 2118 if (SemaBuiltinAssume(TheCall)) 2119 return ExprError(); 2120 break; 2121 case Builtin::BI__builtin_assume_aligned: 2122 if (SemaBuiltinAssumeAligned(TheCall)) 2123 return ExprError(); 2124 break; 2125 case Builtin::BI__builtin_dynamic_object_size: 2126 case Builtin::BI__builtin_object_size: 2127 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 2128 return ExprError(); 2129 break; 2130 case Builtin::BI__builtin_longjmp: 2131 if (SemaBuiltinLongjmp(TheCall)) 2132 return ExprError(); 2133 break; 2134 case Builtin::BI__builtin_setjmp: 2135 if (SemaBuiltinSetjmp(TheCall)) 2136 return ExprError(); 2137 break; 2138 case Builtin::BI__builtin_classify_type: 2139 if (checkArgCount(*this, TheCall, 1)) return true; 2140 TheCall->setType(Context.IntTy); 2141 break; 2142 case Builtin::BI__builtin_complex: 2143 if (SemaBuiltinComplex(TheCall)) 2144 return ExprError(); 2145 break; 2146 case Builtin::BI__builtin_constant_p: { 2147 if (checkArgCount(*this, TheCall, 1)) return true; 2148 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 2149 if (Arg.isInvalid()) return true; 2150 TheCall->setArg(0, Arg.get()); 2151 TheCall->setType(Context.IntTy); 2152 break; 2153 } 2154 case Builtin::BI__builtin_launder: 2155 return SemaBuiltinLaunder(*this, TheCall); 2156 case Builtin::BI__sync_fetch_and_add: 2157 case Builtin::BI__sync_fetch_and_add_1: 2158 case Builtin::BI__sync_fetch_and_add_2: 2159 case Builtin::BI__sync_fetch_and_add_4: 2160 case Builtin::BI__sync_fetch_and_add_8: 2161 case Builtin::BI__sync_fetch_and_add_16: 2162 case Builtin::BI__sync_fetch_and_sub: 2163 case Builtin::BI__sync_fetch_and_sub_1: 2164 case Builtin::BI__sync_fetch_and_sub_2: 2165 case Builtin::BI__sync_fetch_and_sub_4: 2166 case Builtin::BI__sync_fetch_and_sub_8: 2167 case Builtin::BI__sync_fetch_and_sub_16: 2168 case Builtin::BI__sync_fetch_and_or: 2169 case Builtin::BI__sync_fetch_and_or_1: 2170 case Builtin::BI__sync_fetch_and_or_2: 2171 case Builtin::BI__sync_fetch_and_or_4: 2172 case Builtin::BI__sync_fetch_and_or_8: 2173 case Builtin::BI__sync_fetch_and_or_16: 2174 case Builtin::BI__sync_fetch_and_and: 2175 case Builtin::BI__sync_fetch_and_and_1: 2176 case Builtin::BI__sync_fetch_and_and_2: 2177 case Builtin::BI__sync_fetch_and_and_4: 2178 case Builtin::BI__sync_fetch_and_and_8: 2179 case Builtin::BI__sync_fetch_and_and_16: 2180 case Builtin::BI__sync_fetch_and_xor: 2181 case Builtin::BI__sync_fetch_and_xor_1: 2182 case Builtin::BI__sync_fetch_and_xor_2: 2183 case Builtin::BI__sync_fetch_and_xor_4: 2184 case Builtin::BI__sync_fetch_and_xor_8: 2185 case Builtin::BI__sync_fetch_and_xor_16: 2186 case Builtin::BI__sync_fetch_and_nand: 2187 case Builtin::BI__sync_fetch_and_nand_1: 2188 case Builtin::BI__sync_fetch_and_nand_2: 2189 case Builtin::BI__sync_fetch_and_nand_4: 2190 case 
Builtin::BI__sync_fetch_and_nand_8: 2191 case Builtin::BI__sync_fetch_and_nand_16: 2192 case Builtin::BI__sync_add_and_fetch: 2193 case Builtin::BI__sync_add_and_fetch_1: 2194 case Builtin::BI__sync_add_and_fetch_2: 2195 case Builtin::BI__sync_add_and_fetch_4: 2196 case Builtin::BI__sync_add_and_fetch_8: 2197 case Builtin::BI__sync_add_and_fetch_16: 2198 case Builtin::BI__sync_sub_and_fetch: 2199 case Builtin::BI__sync_sub_and_fetch_1: 2200 case Builtin::BI__sync_sub_and_fetch_2: 2201 case Builtin::BI__sync_sub_and_fetch_4: 2202 case Builtin::BI__sync_sub_and_fetch_8: 2203 case Builtin::BI__sync_sub_and_fetch_16: 2204 case Builtin::BI__sync_and_and_fetch: 2205 case Builtin::BI__sync_and_and_fetch_1: 2206 case Builtin::BI__sync_and_and_fetch_2: 2207 case Builtin::BI__sync_and_and_fetch_4: 2208 case Builtin::BI__sync_and_and_fetch_8: 2209 case Builtin::BI__sync_and_and_fetch_16: 2210 case Builtin::BI__sync_or_and_fetch: 2211 case Builtin::BI__sync_or_and_fetch_1: 2212 case Builtin::BI__sync_or_and_fetch_2: 2213 case Builtin::BI__sync_or_and_fetch_4: 2214 case Builtin::BI__sync_or_and_fetch_8: 2215 case Builtin::BI__sync_or_and_fetch_16: 2216 case Builtin::BI__sync_xor_and_fetch: 2217 case Builtin::BI__sync_xor_and_fetch_1: 2218 case Builtin::BI__sync_xor_and_fetch_2: 2219 case Builtin::BI__sync_xor_and_fetch_4: 2220 case Builtin::BI__sync_xor_and_fetch_8: 2221 case Builtin::BI__sync_xor_and_fetch_16: 2222 case Builtin::BI__sync_nand_and_fetch: 2223 case Builtin::BI__sync_nand_and_fetch_1: 2224 case Builtin::BI__sync_nand_and_fetch_2: 2225 case Builtin::BI__sync_nand_and_fetch_4: 2226 case Builtin::BI__sync_nand_and_fetch_8: 2227 case Builtin::BI__sync_nand_and_fetch_16: 2228 case Builtin::BI__sync_val_compare_and_swap: 2229 case Builtin::BI__sync_val_compare_and_swap_1: 2230 case Builtin::BI__sync_val_compare_and_swap_2: 2231 case Builtin::BI__sync_val_compare_and_swap_4: 2232 case Builtin::BI__sync_val_compare_and_swap_8: 2233 case Builtin::BI__sync_val_compare_and_swap_16: 2234 case Builtin::BI__sync_bool_compare_and_swap: 2235 case Builtin::BI__sync_bool_compare_and_swap_1: 2236 case Builtin::BI__sync_bool_compare_and_swap_2: 2237 case Builtin::BI__sync_bool_compare_and_swap_4: 2238 case Builtin::BI__sync_bool_compare_and_swap_8: 2239 case Builtin::BI__sync_bool_compare_and_swap_16: 2240 case Builtin::BI__sync_lock_test_and_set: 2241 case Builtin::BI__sync_lock_test_and_set_1: 2242 case Builtin::BI__sync_lock_test_and_set_2: 2243 case Builtin::BI__sync_lock_test_and_set_4: 2244 case Builtin::BI__sync_lock_test_and_set_8: 2245 case Builtin::BI__sync_lock_test_and_set_16: 2246 case Builtin::BI__sync_lock_release: 2247 case Builtin::BI__sync_lock_release_1: 2248 case Builtin::BI__sync_lock_release_2: 2249 case Builtin::BI__sync_lock_release_4: 2250 case Builtin::BI__sync_lock_release_8: 2251 case Builtin::BI__sync_lock_release_16: 2252 case Builtin::BI__sync_swap: 2253 case Builtin::BI__sync_swap_1: 2254 case Builtin::BI__sync_swap_2: 2255 case Builtin::BI__sync_swap_4: 2256 case Builtin::BI__sync_swap_8: 2257 case Builtin::BI__sync_swap_16: 2258 return SemaBuiltinAtomicOverloaded(TheCallResult); 2259 case Builtin::BI__sync_synchronize: 2260 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 2261 << TheCall->getCallee()->getSourceRange(); 2262 break; 2263 case Builtin::BI__builtin_nontemporal_load: 2264 case Builtin::BI__builtin_nontemporal_store: 2265 return SemaBuiltinNontemporalOverloaded(TheCallResult); 2266 case Builtin::BI__builtin_memcpy_inline: { 2267 clang::Expr 
*SizeOp = TheCall->getArg(2); 2268 // We warn about copying to or from `nullptr` pointers when `size` is 2269 // greater than 0. When `size` is value dependent we cannot evaluate its 2270 // value so we bail out. 2271 if (SizeOp->isValueDependent()) 2272 break; 2273 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 2274 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2275 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 2276 } 2277 break; 2278 } 2279 case Builtin::BI__builtin_memset_inline: { 2280 clang::Expr *SizeOp = TheCall->getArg(2); 2281 // We warn about filling to `nullptr` pointers when `size` is greater than 2282 // 0. When `size` is value dependent we cannot evaluate its value so we bail 2283 // out. 2284 if (SizeOp->isValueDependent()) 2285 break; 2286 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) 2287 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2288 break; 2289 } 2290 #define BUILTIN(ID, TYPE, ATTRS) 2291 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 2292 case Builtin::BI##ID: \ 2293 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 2294 #include "clang/Basic/Builtins.def" 2295 case Builtin::BI__annotation: 2296 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 2297 return ExprError(); 2298 break; 2299 case Builtin::BI__builtin_annotation: 2300 if (SemaBuiltinAnnotation(*this, TheCall)) 2301 return ExprError(); 2302 break; 2303 case Builtin::BI__builtin_addressof: 2304 if (SemaBuiltinAddressof(*this, TheCall)) 2305 return ExprError(); 2306 break; 2307 case Builtin::BI__builtin_function_start: 2308 if (SemaBuiltinFunctionStart(*this, TheCall)) 2309 return ExprError(); 2310 break; 2311 case Builtin::BI__builtin_is_aligned: 2312 case Builtin::BI__builtin_align_up: 2313 case Builtin::BI__builtin_align_down: 2314 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 2315 return ExprError(); 2316 break; 2317 case Builtin::BI__builtin_add_overflow: 2318 case Builtin::BI__builtin_sub_overflow: 2319 case Builtin::BI__builtin_mul_overflow: 2320 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 2321 return ExprError(); 2322 break; 2323 case Builtin::BI__builtin_operator_new: 2324 case Builtin::BI__builtin_operator_delete: { 2325 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 2326 ExprResult Res = 2327 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 2328 if (Res.isInvalid()) 2329 CorrectDelayedTyposInExpr(TheCallResult.get()); 2330 return Res; 2331 } 2332 case Builtin::BI__builtin_dump_struct: 2333 return SemaBuiltinDumpStruct(*this, TheCall); 2334 case Builtin::BI__builtin_expect_with_probability: { 2335 // We first want to ensure we are called with 3 arguments 2336 if (checkArgCount(*this, TheCall, 3)) 2337 return ExprError(); 2338 // then check probability is constant float in range [0.0, 1.0] 2339 const Expr *ProbArg = TheCall->getArg(2); 2340 SmallVector<PartialDiagnosticAt, 8> Notes; 2341 Expr::EvalResult Eval; 2342 Eval.Diag = &Notes; 2343 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2344 !Eval.Val.isFloat()) { 2345 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2346 << ProbArg->getSourceRange(); 2347 for (const PartialDiagnosticAt &PDiag : Notes) 2348 Diag(PDiag.first, PDiag.second); 2349 return ExprError(); 2350 } 2351 llvm::APFloat Probability = Eval.Val.getFloat(); 2352 bool LoseInfo = false; 2353 Probability.convert(llvm::APFloat::IEEEdouble(), 2354 llvm::RoundingMode::Dynamic, &LoseInfo); 2355 if 
(!(Probability >= llvm::APFloat(0.0) && 2356 Probability <= llvm::APFloat(1.0))) { 2357 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2358 << ProbArg->getSourceRange(); 2359 return ExprError(); 2360 } 2361 break; 2362 } 2363 case Builtin::BI__builtin_preserve_access_index: 2364 if (SemaBuiltinPreserveAI(*this, TheCall)) 2365 return ExprError(); 2366 break; 2367 case Builtin::BI__builtin_call_with_static_chain: 2368 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2369 return ExprError(); 2370 break; 2371 case Builtin::BI__exception_code: 2372 case Builtin::BI_exception_code: 2373 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2374 diag::err_seh___except_block)) 2375 return ExprError(); 2376 break; 2377 case Builtin::BI__exception_info: 2378 case Builtin::BI_exception_info: 2379 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2380 diag::err_seh___except_filter)) 2381 return ExprError(); 2382 break; 2383 case Builtin::BI__GetExceptionInfo: 2384 if (checkArgCount(*this, TheCall, 1)) 2385 return ExprError(); 2386 2387 if (CheckCXXThrowOperand( 2388 TheCall->getBeginLoc(), 2389 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2390 TheCall)) 2391 return ExprError(); 2392 2393 TheCall->setType(Context.VoidPtrTy); 2394 break; 2395 case Builtin::BIaddressof: 2396 case Builtin::BI__addressof: 2397 case Builtin::BIforward: 2398 case Builtin::BImove: 2399 case Builtin::BImove_if_noexcept: 2400 case Builtin::BIas_const: { 2401 // These are all expected to be of the form 2402 // T &/&&/* f(U &/&&) 2403 // where T and U only differ in qualification. 2404 if (checkArgCount(*this, TheCall, 1)) 2405 return ExprError(); 2406 QualType Param = FDecl->getParamDecl(0)->getType(); 2407 QualType Result = FDecl->getReturnType(); 2408 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof || 2409 BuiltinID == Builtin::BI__addressof; 2410 if (!(Param->isReferenceType() && 2411 (ReturnsPointer ? Result->isPointerType() 2412 : Result->isReferenceType()) && 2413 Context.hasSameUnqualifiedType(Param->getPointeeType(), 2414 Result->getPointeeType()))) { 2415 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported) 2416 << FDecl; 2417 return ExprError(); 2418 } 2419 break; 2420 } 2421 // OpenCL v2.0, s6.13.16 - Pipe functions 2422 case Builtin::BIread_pipe: 2423 case Builtin::BIwrite_pipe: 2424 // Since those two functions are declared with var args, we need a semantic 2425 // check for the argument. 
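    // Illustrative forms accepted here (argument names are placeholders):
    //   read_pipe(p, &val);               // (pipe T, T*)
    //   read_pipe(p, rid, index, &val);   // (pipe T, reserve_id_t, uint, T*)
    // SemaBuiltinRWPipe checks the packet type T against the pointer argument.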
2426 if (SemaBuiltinRWPipe(*this, TheCall)) 2427 return ExprError(); 2428 break; 2429 case Builtin::BIreserve_read_pipe: 2430 case Builtin::BIreserve_write_pipe: 2431 case Builtin::BIwork_group_reserve_read_pipe: 2432 case Builtin::BIwork_group_reserve_write_pipe: 2433 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2434 return ExprError(); 2435 break; 2436 case Builtin::BIsub_group_reserve_read_pipe: 2437 case Builtin::BIsub_group_reserve_write_pipe: 2438 if (checkOpenCLSubgroupExt(*this, TheCall) || 2439 SemaBuiltinReserveRWPipe(*this, TheCall)) 2440 return ExprError(); 2441 break; 2442 case Builtin::BIcommit_read_pipe: 2443 case Builtin::BIcommit_write_pipe: 2444 case Builtin::BIwork_group_commit_read_pipe: 2445 case Builtin::BIwork_group_commit_write_pipe: 2446 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2447 return ExprError(); 2448 break; 2449 case Builtin::BIsub_group_commit_read_pipe: 2450 case Builtin::BIsub_group_commit_write_pipe: 2451 if (checkOpenCLSubgroupExt(*this, TheCall) || 2452 SemaBuiltinCommitRWPipe(*this, TheCall)) 2453 return ExprError(); 2454 break; 2455 case Builtin::BIget_pipe_num_packets: 2456 case Builtin::BIget_pipe_max_packets: 2457 if (SemaBuiltinPipePackets(*this, TheCall)) 2458 return ExprError(); 2459 break; 2460 case Builtin::BIto_global: 2461 case Builtin::BIto_local: 2462 case Builtin::BIto_private: 2463 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2464 return ExprError(); 2465 break; 2466 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 2467 case Builtin::BIenqueue_kernel: 2468 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2469 return ExprError(); 2470 break; 2471 case Builtin::BIget_kernel_work_group_size: 2472 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2473 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2474 return ExprError(); 2475 break; 2476 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2477 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2478 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2479 return ExprError(); 2480 break; 2481 case Builtin::BI__builtin_os_log_format: 2482 Cleanup.setExprNeedsCleanups(true); 2483 LLVM_FALLTHROUGH; 2484 case Builtin::BI__builtin_os_log_format_buffer_size: 2485 if (SemaBuiltinOSLogFormat(TheCall)) 2486 return ExprError(); 2487 break; 2488 case Builtin::BI__builtin_frame_address: 2489 case Builtin::BI__builtin_return_address: { 2490 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2491 return ExprError(); 2492 2493 // -Wframe-address warning if non-zero passed to builtin 2494 // return/frame address. 2495 Expr::EvalResult Result; 2496 if (!TheCall->getArg(0)->isValueDependent() && 2497 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2498 Result.Val.getInt() != 0) 2499 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2500 << ((BuiltinID == Builtin::BI__builtin_return_address) 2501 ? "__builtin_return_address" 2502 : "__builtin_frame_address") 2503 << TheCall->getSourceRange(); 2504 break; 2505 } 2506 2507 // __builtin_elementwise_abs restricts the element type to signed integers or 2508 // floating point types only. 
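  // For instance (with a hypothetical ext_vector_type(4) int alias 'int4'):
  //   int4 v;      __builtin_elementwise_abs(v);   // OK
  //   unsigned u;  __builtin_elementwise_abs(u);   // rejected below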
2509 case Builtin::BI__builtin_elementwise_abs: { 2510 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2511 return ExprError(); 2512 2513 QualType ArgTy = TheCall->getArg(0)->getType(); 2514 QualType EltTy = ArgTy; 2515 2516 if (auto *VecTy = EltTy->getAs<VectorType>()) 2517 EltTy = VecTy->getElementType(); 2518 if (EltTy->isUnsignedIntegerType()) { 2519 Diag(TheCall->getArg(0)->getBeginLoc(), 2520 diag::err_builtin_invalid_arg_type) 2521 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2522 return ExprError(); 2523 } 2524 break; 2525 } 2526 2527 // These builtins restrict the element type to floating point 2528 // types only. 2529 case Builtin::BI__builtin_elementwise_ceil: 2530 case Builtin::BI__builtin_elementwise_floor: 2531 case Builtin::BI__builtin_elementwise_roundeven: 2532 case Builtin::BI__builtin_elementwise_trunc: { 2533 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2534 return ExprError(); 2535 2536 QualType ArgTy = TheCall->getArg(0)->getType(); 2537 QualType EltTy = ArgTy; 2538 2539 if (auto *VecTy = EltTy->getAs<VectorType>()) 2540 EltTy = VecTy->getElementType(); 2541 if (!EltTy->isFloatingType()) { 2542 Diag(TheCall->getArg(0)->getBeginLoc(), 2543 diag::err_builtin_invalid_arg_type) 2544 << 1 << /* float ty*/ 5 << ArgTy; 2545 2546 return ExprError(); 2547 } 2548 break; 2549 } 2550 2551 // These builtins restrict the element type to integer 2552 // types only. 2553 case Builtin::BI__builtin_elementwise_add_sat: 2554 case Builtin::BI__builtin_elementwise_sub_sat: { 2555 if (SemaBuiltinElementwiseMath(TheCall)) 2556 return ExprError(); 2557 2558 const Expr *Arg = TheCall->getArg(0); 2559 QualType ArgTy = Arg->getType(); 2560 QualType EltTy = ArgTy; 2561 2562 if (auto *VecTy = EltTy->getAs<VectorType>()) 2563 EltTy = VecTy->getElementType(); 2564 2565 if (!EltTy->isIntegerType()) { 2566 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2567 << 1 << /* integer ty */ 6 << ArgTy; 2568 return ExprError(); 2569 } 2570 break; 2571 } 2572 2573 case Builtin::BI__builtin_elementwise_min: 2574 case Builtin::BI__builtin_elementwise_max: 2575 if (SemaBuiltinElementwiseMath(TheCall)) 2576 return ExprError(); 2577 break; 2578 case Builtin::BI__builtin_reduce_max: 2579 case Builtin::BI__builtin_reduce_min: { 2580 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2581 return ExprError(); 2582 2583 const Expr *Arg = TheCall->getArg(0); 2584 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2585 if (!TyA) { 2586 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2587 << 1 << /* vector ty*/ 4 << Arg->getType(); 2588 return ExprError(); 2589 } 2590 2591 TheCall->setType(TyA->getElementType()); 2592 break; 2593 } 2594 2595 // These builtins support vectors of integers only. 2596 // TODO: ADD/MUL should support floating-point types. 
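  // e.g. __builtin_reduce_add((int4){1, 2, 3, 4}) folds the vector down to the
  // scalar 10, while a float4 operand is rejected by the element-type check
  // below ('int4'/'float4' stand for ext_vector_type aliases and are
  // illustrative only).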
2597 case Builtin::BI__builtin_reduce_add: 2598 case Builtin::BI__builtin_reduce_mul: 2599 case Builtin::BI__builtin_reduce_xor: 2600 case Builtin::BI__builtin_reduce_or: 2601 case Builtin::BI__builtin_reduce_and: { 2602 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2603 return ExprError(); 2604 2605 const Expr *Arg = TheCall->getArg(0); 2606 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2607 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2608 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2609 << 1 << /* vector of integers */ 6 << Arg->getType(); 2610 return ExprError(); 2611 } 2612 TheCall->setType(TyA->getElementType()); 2613 break; 2614 } 2615 2616 case Builtin::BI__builtin_matrix_transpose: 2617 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2618 2619 case Builtin::BI__builtin_matrix_column_major_load: 2620 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2621 2622 case Builtin::BI__builtin_matrix_column_major_store: 2623 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2624 2625 case Builtin::BI__builtin_get_device_side_mangled_name: { 2626 auto Check = [](CallExpr *TheCall) { 2627 if (TheCall->getNumArgs() != 1) 2628 return false; 2629 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2630 if (!DRE) 2631 return false; 2632 auto *D = DRE->getDecl(); 2633 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2634 return false; 2635 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2636 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2637 }; 2638 if (!Check(TheCall)) { 2639 Diag(TheCall->getBeginLoc(), 2640 diag::err_hip_invalid_args_builtin_mangled_name); 2641 return ExprError(); 2642 } 2643 } 2644 } 2645 2646 // Since the target specific builtins for each arch overlap, only check those 2647 // of the arch we are compiling for. 2648 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2649 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2650 assert(Context.getAuxTargetInfo() && 2651 "Aux Target Builtin, but not an aux target?"); 2652 2653 if (CheckTSBuiltinFunctionCall( 2654 *Context.getAuxTargetInfo(), 2655 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2656 return ExprError(); 2657 } else { 2658 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2659 TheCall)) 2660 return ExprError(); 2661 } 2662 } 2663 2664 return TheCallResult; 2665 } 2666 2667 // Get the valid immediate range for the specified NEON type code. 2668 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2669 NeonTypeFlags Type(t); 2670 int IsQuad = ForceQuad ? true : Type.isQuad(); 2671 switch (Type.getEltType()) { 2672 case NeonTypeFlags::Int8: 2673 case NeonTypeFlags::Poly8: 2674 return shift ? 7 : (8 << IsQuad) - 1; 2675 case NeonTypeFlags::Int16: 2676 case NeonTypeFlags::Poly16: 2677 return shift ? 15 : (4 << IsQuad) - 1; 2678 case NeonTypeFlags::Int32: 2679 return shift ? 31 : (2 << IsQuad) - 1; 2680 case NeonTypeFlags::Int64: 2681 case NeonTypeFlags::Poly64: 2682 return shift ? 63 : (1 << IsQuad) - 1; 2683 case NeonTypeFlags::Poly128: 2684 return shift ? 
127 : (1 << IsQuad) - 1; 2685 case NeonTypeFlags::Float16: 2686 assert(!shift && "cannot shift float types!"); 2687 return (4 << IsQuad) - 1; 2688 case NeonTypeFlags::Float32: 2689 assert(!shift && "cannot shift float types!"); 2690 return (2 << IsQuad) - 1; 2691 case NeonTypeFlags::Float64: 2692 assert(!shift && "cannot shift float types!"); 2693 return (1 << IsQuad) - 1; 2694 case NeonTypeFlags::BFloat16: 2695 assert(!shift && "cannot shift float types!"); 2696 return (4 << IsQuad) - 1; 2697 } 2698 llvm_unreachable("Invalid NeonTypeFlag!"); 2699 } 2700 2701 /// getNeonEltType - Return the QualType corresponding to the elements of 2702 /// the vector type specified by the NeonTypeFlags. This is used to check 2703 /// the pointer arguments for Neon load/store intrinsics. 2704 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2705 bool IsPolyUnsigned, bool IsInt64Long) { 2706 switch (Flags.getEltType()) { 2707 case NeonTypeFlags::Int8: 2708 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2709 case NeonTypeFlags::Int16: 2710 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2711 case NeonTypeFlags::Int32: 2712 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2713 case NeonTypeFlags::Int64: 2714 if (IsInt64Long) 2715 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2716 else 2717 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2718 : Context.LongLongTy; 2719 case NeonTypeFlags::Poly8: 2720 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2721 case NeonTypeFlags::Poly16: 2722 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2723 case NeonTypeFlags::Poly64: 2724 if (IsInt64Long) 2725 return Context.UnsignedLongTy; 2726 else 2727 return Context.UnsignedLongLongTy; 2728 case NeonTypeFlags::Poly128: 2729 break; 2730 case NeonTypeFlags::Float16: 2731 return Context.HalfTy; 2732 case NeonTypeFlags::Float32: 2733 return Context.FloatTy; 2734 case NeonTypeFlags::Float64: 2735 return Context.DoubleTy; 2736 case NeonTypeFlags::BFloat16: 2737 return Context.BFloat16Ty; 2738 } 2739 llvm_unreachable("Invalid NeonTypeFlag!"); 2740 } 2741 2742 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2743 // Range check SVE intrinsics that take immediate values. 2744 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2745 2746 switch (BuiltinID) { 2747 default: 2748 return false; 2749 #define GET_SVE_IMMEDIATE_CHECK 2750 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2751 #undef GET_SVE_IMMEDIATE_CHECK 2752 } 2753 2754 // Perform all the immediate checks for this builtin call. 2755 bool HasError = false; 2756 for (auto &I : ImmChecks) { 2757 int ArgNum, CheckTy, ElementSizeInBits; 2758 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2759 2760 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2761 2762 // Function that checks whether the operand (ArgNum) is an immediate 2763 // that is one of the predefined values. 2764 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2765 int ErrDiag) -> bool { 2766 // We can't check the value of a dependent argument. 2767 Expr *Arg = TheCall->getArg(ArgNum); 2768 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2769 return false; 2770 2771 // Check constant-ness first. 
2772 llvm::APSInt Imm; 2773 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2774 return true; 2775 2776 if (!CheckImm(Imm.getSExtValue())) 2777 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2778 return false; 2779 }; 2780 2781 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2782 case SVETypeFlags::ImmCheck0_31: 2783 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2784 HasError = true; 2785 break; 2786 case SVETypeFlags::ImmCheck0_13: 2787 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2788 HasError = true; 2789 break; 2790 case SVETypeFlags::ImmCheck1_16: 2791 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2792 HasError = true; 2793 break; 2794 case SVETypeFlags::ImmCheck0_7: 2795 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2796 HasError = true; 2797 break; 2798 case SVETypeFlags::ImmCheckExtract: 2799 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2800 (2048 / ElementSizeInBits) - 1)) 2801 HasError = true; 2802 break; 2803 case SVETypeFlags::ImmCheckShiftRight: 2804 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2805 HasError = true; 2806 break; 2807 case SVETypeFlags::ImmCheckShiftRightNarrow: 2808 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2809 ElementSizeInBits / 2)) 2810 HasError = true; 2811 break; 2812 case SVETypeFlags::ImmCheckShiftLeft: 2813 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2814 ElementSizeInBits - 1)) 2815 HasError = true; 2816 break; 2817 case SVETypeFlags::ImmCheckLaneIndex: 2818 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2819 (128 / (1 * ElementSizeInBits)) - 1)) 2820 HasError = true; 2821 break; 2822 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2823 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2824 (128 / (2 * ElementSizeInBits)) - 1)) 2825 HasError = true; 2826 break; 2827 case SVETypeFlags::ImmCheckLaneIndexDot: 2828 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2829 (128 / (4 * ElementSizeInBits)) - 1)) 2830 HasError = true; 2831 break; 2832 case SVETypeFlags::ImmCheckComplexRot90_270: 2833 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2834 diag::err_rotation_argument_to_cadd)) 2835 HasError = true; 2836 break; 2837 case SVETypeFlags::ImmCheckComplexRotAll90: 2838 if (CheckImmediateInSet( 2839 [](int64_t V) { 2840 return V == 0 || V == 90 || V == 180 || V == 270; 2841 }, 2842 diag::err_rotation_argument_to_cmla)) 2843 HasError = true; 2844 break; 2845 case SVETypeFlags::ImmCheck0_1: 2846 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2847 HasError = true; 2848 break; 2849 case SVETypeFlags::ImmCheck0_2: 2850 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2851 HasError = true; 2852 break; 2853 case SVETypeFlags::ImmCheck0_3: 2854 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2855 HasError = true; 2856 break; 2857 } 2858 } 2859 2860 return HasError; 2861 } 2862 2863 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2864 unsigned BuiltinID, CallExpr *TheCall) { 2865 llvm::APSInt Result; 2866 uint64_t mask = 0; 2867 unsigned TV = 0; 2868 int PtrArgNum = -1; 2869 bool HasConstPtr = false; 2870 switch (BuiltinID) { 2871 #define GET_NEON_OVERLOAD_CHECK 2872 #include "clang/Basic/arm_neon.inc" 2873 #include "clang/Basic/arm_fp16.inc" 2874 #undef GET_NEON_OVERLOAD_CHECK 2875 } 2876 2877 // For NEON intrinsics which are overloaded on vector element type, validate 2878 // the immediate which specifies which variant to emit. 
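  // The discriminating NeonTypeFlags value is always the trailing argument of
  // the overloaded builtin (for instance, it is what distinguishes the
  // signed-int and float variants of a given intrinsic), hence NumArgs - 1
  // below.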
2879 unsigned ImmArg = TheCall->getNumArgs()-1; 2880 if (mask) { 2881 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2882 return true; 2883 2884 TV = Result.getLimitedValue(64); 2885 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2886 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2887 << TheCall->getArg(ImmArg)->getSourceRange(); 2888 } 2889 2890 if (PtrArgNum >= 0) { 2891 // Check that pointer arguments have the specified type. 2892 Expr *Arg = TheCall->getArg(PtrArgNum); 2893 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2894 Arg = ICE->getSubExpr(); 2895 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2896 QualType RHSTy = RHS.get()->getType(); 2897 2898 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2899 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2900 Arch == llvm::Triple::aarch64_32 || 2901 Arch == llvm::Triple::aarch64_be; 2902 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2903 QualType EltTy = 2904 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2905 if (HasConstPtr) 2906 EltTy = EltTy.withConst(); 2907 QualType LHSTy = Context.getPointerType(EltTy); 2908 AssignConvertType ConvTy; 2909 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2910 if (RHS.isInvalid()) 2911 return true; 2912 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2913 RHS.get(), AA_Assigning)) 2914 return true; 2915 } 2916 2917 // For NEON intrinsics which take an immediate value as part of the 2918 // instruction, range check them here. 2919 unsigned i = 0, l = 0, u = 0; 2920 switch (BuiltinID) { 2921 default: 2922 return false; 2923 #define GET_NEON_IMMEDIATE_CHECK 2924 #include "clang/Basic/arm_neon.inc" 2925 #include "clang/Basic/arm_fp16.inc" 2926 #undef GET_NEON_IMMEDIATE_CHECK 2927 } 2928 2929 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2930 } 2931 2932 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2933 switch (BuiltinID) { 2934 default: 2935 return false; 2936 #include "clang/Basic/arm_mve_builtin_sema.inc" 2937 } 2938 } 2939 2940 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2941 CallExpr *TheCall) { 2942 bool Err = false; 2943 switch (BuiltinID) { 2944 default: 2945 return false; 2946 #include "clang/Basic/arm_cde_builtin_sema.inc" 2947 } 2948 2949 if (Err) 2950 return true; 2951 2952 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2953 } 2954 2955 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2956 const Expr *CoprocArg, bool WantCDE) { 2957 if (isConstantEvaluated()) 2958 return false; 2959 2960 // We can't check the value of a dependent argument. 
2961 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2962 return false; 2963 2964 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2965 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2966 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2967 2968 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2969 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2970 2971 if (IsCDECoproc != WantCDE) 2972 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2973 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2974 2975 return false; 2976 } 2977 2978 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2979 unsigned MaxWidth) { 2980 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2981 BuiltinID == ARM::BI__builtin_arm_ldaex || 2982 BuiltinID == ARM::BI__builtin_arm_strex || 2983 BuiltinID == ARM::BI__builtin_arm_stlex || 2984 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2985 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2986 BuiltinID == AArch64::BI__builtin_arm_strex || 2987 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2988 "unexpected ARM builtin"); 2989 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2990 BuiltinID == ARM::BI__builtin_arm_ldaex || 2991 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2992 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2993 2994 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2995 2996 // Ensure that we have the proper number of arguments. 2997 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2998 return true; 2999 3000 // Inspect the pointer argument of the atomic builtin. This should always be 3001 // a pointer type, whose element is an integral scalar or pointer type. 3002 // Because it is a pointer type, we don't have to worry about any implicit 3003 // casts here. 3004 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 3005 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 3006 if (PointerArgRes.isInvalid()) 3007 return true; 3008 PointerArg = PointerArgRes.get(); 3009 3010 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 3011 if (!pointerType) { 3012 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 3013 << PointerArg->getType() << PointerArg->getSourceRange(); 3014 return true; 3015 } 3016 3017 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 3018 // task is to insert the appropriate casts into the AST. First work out just 3019 // what the appropriate type is. 3020 QualType ValType = pointerType->getPointeeType(); 3021 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 3022 if (IsLdrex) 3023 AddrType.addConst(); 3024 3025 // Issue a warning if the cast is dodgy. 3026 CastKind CastNeeded = CK_NoOp; 3027 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 3028 CastNeeded = CK_BitCast; 3029 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 3030 << PointerArg->getType() << Context.getPointerType(AddrType) 3031 << AA_Passing << PointerArg->getSourceRange(); 3032 } 3033 3034 // Finally, do the cast and replace the argument with the corrected version. 3035 AddrType = Context.getPointerType(AddrType); 3036 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 3037 if (PointerArgRes.isInvalid()) 3038 return true; 3039 PointerArg = PointerArgRes.get(); 3040 3041 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 3042 3043 // In general, we allow ints, floats and pointers to be loaded and stored. 3044 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 3045 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 3046 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 3047 << PointerArg->getType() << PointerArg->getSourceRange(); 3048 return true; 3049 } 3050 3051 // But ARM doesn't have instructions to deal with 128-bit versions. 3052 if (Context.getTypeSize(ValType) > MaxWidth) { 3053 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 3054 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 3055 << PointerArg->getType() << PointerArg->getSourceRange(); 3056 return true; 3057 } 3058 3059 switch (ValType.getObjCLifetime()) { 3060 case Qualifiers::OCL_None: 3061 case Qualifiers::OCL_ExplicitNone: 3062 // okay 3063 break; 3064 3065 case Qualifiers::OCL_Weak: 3066 case Qualifiers::OCL_Strong: 3067 case Qualifiers::OCL_Autoreleasing: 3068 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 3069 << ValType << PointerArg->getSourceRange(); 3070 return true; 3071 } 3072 3073 if (IsLdrex) { 3074 TheCall->setType(ValType); 3075 return false; 3076 } 3077 3078 // Initialize the argument to be stored. 3079 ExprResult ValArg = TheCall->getArg(0); 3080 InitializedEntity Entity = InitializedEntity::InitializeParameter( 3081 Context, ValType, /*consume*/ false); 3082 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 3083 if (ValArg.isInvalid()) 3084 return true; 3085 TheCall->setArg(0, ValArg.get()); 3086 3087 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 3088 // but the custom checker bypasses all default analysis. 3089 TheCall->setType(Context.IntTy); 3090 return false; 3091 } 3092 3093 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3094 CallExpr *TheCall) { 3095 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 3096 BuiltinID == ARM::BI__builtin_arm_ldaex || 3097 BuiltinID == ARM::BI__builtin_arm_strex || 3098 BuiltinID == ARM::BI__builtin_arm_stlex) { 3099 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 3100 } 3101 3102 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 3103 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3104 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 3105 } 3106 3107 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 3108 BuiltinID == ARM::BI__builtin_arm_wsr64) 3109 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 3110 3111 if (BuiltinID == ARM::BI__builtin_arm_rsr || 3112 BuiltinID == ARM::BI__builtin_arm_rsrp || 3113 BuiltinID == ARM::BI__builtin_arm_wsr || 3114 BuiltinID == ARM::BI__builtin_arm_wsrp) 3115 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3116 3117 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3118 return true; 3119 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 3120 return true; 3121 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3122 return true; 3123 3124 // For intrinsics which take an immediate value as part of the instruction, 3125 // range check them here. 3126 // FIXME: VFP Intrinsics should error if VFP not present. 
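  // As a concrete sketch: __builtin_arm_ssat(x, 32) is accepted, while
  // __builtin_arm_ssat(x, 0) or a non-constant saturation position is
  // rejected by the [1, 32] range check below.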
3127 switch (BuiltinID) { 3128 default: return false; 3129 case ARM::BI__builtin_arm_ssat: 3130 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 3131 case ARM::BI__builtin_arm_usat: 3132 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 3133 case ARM::BI__builtin_arm_ssat16: 3134 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3135 case ARM::BI__builtin_arm_usat16: 3136 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3137 case ARM::BI__builtin_arm_vcvtr_f: 3138 case ARM::BI__builtin_arm_vcvtr_d: 3139 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3140 case ARM::BI__builtin_arm_dmb: 3141 case ARM::BI__builtin_arm_dsb: 3142 case ARM::BI__builtin_arm_isb: 3143 case ARM::BI__builtin_arm_dbg: 3144 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 3145 case ARM::BI__builtin_arm_cdp: 3146 case ARM::BI__builtin_arm_cdp2: 3147 case ARM::BI__builtin_arm_mcr: 3148 case ARM::BI__builtin_arm_mcr2: 3149 case ARM::BI__builtin_arm_mrc: 3150 case ARM::BI__builtin_arm_mrc2: 3151 case ARM::BI__builtin_arm_mcrr: 3152 case ARM::BI__builtin_arm_mcrr2: 3153 case ARM::BI__builtin_arm_mrrc: 3154 case ARM::BI__builtin_arm_mrrc2: 3155 case ARM::BI__builtin_arm_ldc: 3156 case ARM::BI__builtin_arm_ldcl: 3157 case ARM::BI__builtin_arm_ldc2: 3158 case ARM::BI__builtin_arm_ldc2l: 3159 case ARM::BI__builtin_arm_stc: 3160 case ARM::BI__builtin_arm_stcl: 3161 case ARM::BI__builtin_arm_stc2: 3162 case ARM::BI__builtin_arm_stc2l: 3163 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 3164 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 3165 /*WantCDE*/ false); 3166 } 3167 } 3168 3169 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 3170 unsigned BuiltinID, 3171 CallExpr *TheCall) { 3172 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 3173 BuiltinID == AArch64::BI__builtin_arm_ldaex || 3174 BuiltinID == AArch64::BI__builtin_arm_strex || 3175 BuiltinID == AArch64::BI__builtin_arm_stlex) { 3176 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 3177 } 3178 3179 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 3180 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3181 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 3182 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 3183 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 3184 } 3185 3186 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 3187 BuiltinID == AArch64::BI__builtin_arm_wsr64) 3188 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3189 3190 // Memory Tagging Extensions (MTE) Intrinsics 3191 if (BuiltinID == AArch64::BI__builtin_arm_irg || 3192 BuiltinID == AArch64::BI__builtin_arm_addg || 3193 BuiltinID == AArch64::BI__builtin_arm_gmi || 3194 BuiltinID == AArch64::BI__builtin_arm_ldg || 3195 BuiltinID == AArch64::BI__builtin_arm_stg || 3196 BuiltinID == AArch64::BI__builtin_arm_subp) { 3197 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 3198 } 3199 3200 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 3201 BuiltinID == AArch64::BI__builtin_arm_rsrp || 3202 BuiltinID == AArch64::BI__builtin_arm_wsr || 3203 BuiltinID == AArch64::BI__builtin_arm_wsrp) 3204 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3205 3206 // Only check the valid encoding range. Any constant in this range would be 3207 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 3208 // an exception for incorrect registers. This matches MSVC behavior. 
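// Illustrative (hypothetical constant): _ReadStatusReg(0x5A10) is accepted by the
// range check below even if no such register exists; invalid encodings are left
// for the hardware to fault on, as noted above.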
3209 if (BuiltinID == AArch64::BI_ReadStatusReg || 3210 BuiltinID == AArch64::BI_WriteStatusReg) 3211 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 3212 3213 if (BuiltinID == AArch64::BI__getReg) 3214 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3215 3216 if (BuiltinID == AArch64::BI__break) 3217 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff); 3218 3219 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3220 return true; 3221 3222 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 3223 return true; 3224 3225 // For intrinsics which take an immediate value as part of the instruction, 3226 // range check them here. 3227 unsigned i = 0, l = 0, u = 0; 3228 switch (BuiltinID) { 3229 default: return false; 3230 case AArch64::BI__builtin_arm_dmb: 3231 case AArch64::BI__builtin_arm_dsb: 3232 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 3233 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 3234 } 3235 3236 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 3237 } 3238 3239 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 3240 if (Arg->getType()->getAsPlaceholderType()) 3241 return false; 3242 3243 // The first argument needs to be a record field access. 3244 // If it is an array element access, we delay decision 3245 // to BPF backend to check whether the access is a 3246 // field access or not. 3247 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 3248 isa<MemberExpr>(Arg->IgnoreParens()) || 3249 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 3250 } 3251 3252 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 3253 QualType ArgType = Arg->getType(); 3254 if (ArgType->getAsPlaceholderType()) 3255 return false; 3256 3257 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 3258 // format: 3259 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 3260 // 2. <type> var; 3261 // __builtin_preserve_type_info(var, flag); 3262 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 3263 !isa<UnaryOperator>(Arg->IgnoreParens())) 3264 return false; 3265 3266 // Typedef type. 3267 if (ArgType->getAs<TypedefType>()) 3268 return true; 3269 3270 // Record type or Enum type. 3271 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3272 if (const auto *RT = Ty->getAs<RecordType>()) { 3273 if (!RT->getDecl()->getDeclName().isEmpty()) 3274 return true; 3275 } else if (const auto *ET = Ty->getAs<EnumType>()) { 3276 if (!ET->getDecl()->getDeclName().isEmpty()) 3277 return true; 3278 } 3279 3280 return false; 3281 } 3282 3283 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 3284 QualType ArgType = Arg->getType(); 3285 if (ArgType->getAsPlaceholderType()) 3286 return false; 3287 3288 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 3289 // format: 3290 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 3291 // flag); 3292 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 3293 if (!UO) 3294 return false; 3295 3296 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 3297 if (!CE) 3298 return false; 3299 if (CE->getCastKind() != CK_IntegralToPointer && 3300 CE->getCastKind() != CK_NullToPointer) 3301 return false; 3302 3303 // The integer must be from an EnumConstantDecl. 3304 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 3305 if (!DR) 3306 return false; 3307 3308 const EnumConstantDecl *Enumerator = 3309 dyn_cast<EnumConstantDecl>(DR->getDecl()); 3310 if (!Enumerator) 3311 return false; 3312 3313 // The type must be EnumType. 
3314 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3315 const auto *ET = Ty->getAs<EnumType>(); 3316 if (!ET) 3317 return false; 3318 3319 // The enum value must be supported. 3320 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 3321 } 3322 3323 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 3324 CallExpr *TheCall) { 3325 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 3326 BuiltinID == BPF::BI__builtin_btf_type_id || 3327 BuiltinID == BPF::BI__builtin_preserve_type_info || 3328 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3329 "unexpected BPF builtin"); 3330 3331 if (checkArgCount(*this, TheCall, 2)) 3332 return true; 3333 3334 // The second argument needs to be a constant int 3335 Expr *Arg = TheCall->getArg(1); 3336 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3337 diag::kind kind; 3338 if (!Value) { 3339 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3340 kind = diag::err_preserve_field_info_not_const; 3341 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3342 kind = diag::err_btf_type_id_not_const; 3343 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3344 kind = diag::err_preserve_type_info_not_const; 3345 else 3346 kind = diag::err_preserve_enum_value_not_const; 3347 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3348 return true; 3349 } 3350 3351 // The first argument 3352 Arg = TheCall->getArg(0); 3353 bool InvalidArg = false; 3354 bool ReturnUnsignedInt = true; 3355 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3356 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3357 InvalidArg = true; 3358 kind = diag::err_preserve_field_info_not_field; 3359 } 3360 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3361 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3362 InvalidArg = true; 3363 kind = diag::err_preserve_type_info_invalid; 3364 } 3365 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3366 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3367 InvalidArg = true; 3368 kind = diag::err_preserve_enum_value_invalid; 3369 } 3370 ReturnUnsignedInt = false; 3371 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 3372 ReturnUnsignedInt = false; 3373 } 3374 3375 if (InvalidArg) { 3376 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3377 return true; 3378 } 3379 3380 if (ReturnUnsignedInt) 3381 TheCall->setType(Context.UnsignedIntTy); 3382 else 3383 TheCall->setType(Context.UnsignedLongTy); 3384 return false; 3385 } 3386 3387 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3388 struct ArgInfo { 3389 uint8_t OpNum; 3390 bool IsSigned; 3391 uint8_t BitWidth; 3392 uint8_t Align; 3393 }; 3394 struct BuiltinInfo { 3395 unsigned BuiltinID; 3396 ArgInfo Infos[2]; 3397 }; 3398 3399 static BuiltinInfo Infos[] = { 3400 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3401 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3402 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3403 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3404 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3405 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3406 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3407 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3408 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3409 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3410 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3411 3412 { 
Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3413 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3414 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3415 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3416 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3417 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3418 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3419 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3420 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3421 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3422 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3423 3424 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3425 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3426 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3427 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3428 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3429 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3430 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3431 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3432 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3433 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3434 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3435 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3436 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3437 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3438 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3439 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3440 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3441 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3442 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3443 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3444 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3445 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3446 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3447 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3448 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3449 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3450 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3451 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3452 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3453 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3454 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3455 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3456 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3457 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3458 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3459 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3460 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3461 { 
Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3462 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3463 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3464 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3465 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3466 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3467 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3468 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3469 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3470 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3471 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3472 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3473 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3474 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 3475 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3476 {{ 1, false, 6, 0 }} }, 3477 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3478 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3479 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3480 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3481 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3482 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3483 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3484 {{ 1, false, 5, 0 }} }, 3485 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3486 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3487 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3488 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3489 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3490 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3491 { 2, false, 5, 0 }} }, 3492 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 3493 { 2, false, 6, 0 }} }, 3494 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3495 { 3, false, 5, 0 }} }, 3496 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3497 { 3, false, 6, 0 }} }, 3498 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3499 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3500 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3501 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3502 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3503 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3504 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3505 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 3506 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3507 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3508 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3509 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3510 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3511 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3512 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 
5, 0 }} }, 3513 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3514 {{ 2, false, 4, 0 }, 3515 { 3, false, 5, 0 }} }, 3516 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3517 {{ 2, false, 4, 0 }, 3518 { 3, false, 5, 0 }} }, 3519 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3520 {{ 2, false, 4, 0 }, 3521 { 3, false, 5, 0 }} }, 3522 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3523 {{ 2, false, 4, 0 }, 3524 { 3, false, 5, 0 }} }, 3525 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3526 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3527 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3528 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3529 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3530 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3531 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3532 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3533 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3534 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3535 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3536 { 2, false, 5, 0 }} }, 3537 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3538 { 2, false, 6, 0 }} }, 3539 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 3540 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3541 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3542 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3543 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3544 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3545 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3546 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3547 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3548 {{ 1, false, 4, 0 }} }, 3549 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3550 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3551 {{ 1, false, 4, 0 }} }, 3552 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3553 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3554 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3555 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3556 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3557 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3558 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3559 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3560 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3561 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3562 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3563 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3564 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3565 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3566 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3567 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 3568 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3569 { 
Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3570 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3571 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3572 {{ 3, false, 1, 0 }} }, 3573 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3574 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3575 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3576 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3577 {{ 3, false, 1, 0 }} }, 3578 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3579 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3580 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3581 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3582 {{ 3, false, 1, 0 }} }, 3583 }; 3584 3585 // Use a dynamically initialized static to sort the table exactly once on 3586 // first run. 3587 static const bool SortOnce = 3588 (llvm::sort(Infos, 3589 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3590 return LHS.BuiltinID < RHS.BuiltinID; 3591 }), 3592 true); 3593 (void)SortOnce; 3594 3595 const BuiltinInfo *F = llvm::partition_point( 3596 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3597 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3598 return false; 3599 3600 bool Error = false; 3601 3602 for (const ArgInfo &A : F->Infos) { 3603 // Ignore empty ArgInfo elements. 3604 if (A.BitWidth == 0) 3605 continue; 3606 3607 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3608 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1; 3609 if (!A.Align) { 3610 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3611 } else { 3612 unsigned M = 1 << A.Align; 3613 Min *= M; 3614 Max *= M; 3615 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3616 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3617 } 3618 } 3619 return Error; 3620 } 3621 3622 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3623 CallExpr *TheCall) { 3624 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3625 } 3626 3627 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3628 unsigned BuiltinID, CallExpr *TheCall) { 3629 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3630 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3631 } 3632 3633 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3634 CallExpr *TheCall) { 3635 3636 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3637 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3638 if (!TI.hasFeature("dsp")) 3639 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3640 } 3641 3642 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3643 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3644 if (!TI.hasFeature("dspr2")) 3645 return Diag(TheCall->getBeginLoc(), 3646 diag::err_mips_builtin_requires_dspr2); 3647 } 3648 3649 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3650 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3651 if (!TI.hasFeature("msa")) 3652 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3653 } 3654 3655 return false; 3656 } 3657 3658 // CheckMipsBuiltinArgument - Checks the constant value passed to the 3659 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3660 // ordering for DSP is unspecified. 
MSA is ordered by the data format used 3661 // by the underlying instruction i.e., df/m, df/n and then by size. 3662 // 3663 // FIXME: The size tests here should instead be tablegen'd along with the 3664 // definitions from include/clang/Basic/BuiltinsMips.def. 3665 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3666 // be too. 3667 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3668 unsigned i = 0, l = 0, u = 0, m = 0; 3669 switch (BuiltinID) { 3670 default: return false; 3671 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 3672 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3673 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3674 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3675 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3676 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3677 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3678 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 3679 // df/m field. 3680 // These intrinsics take an unsigned 3 bit immediate. 3681 case Mips::BI__builtin_msa_bclri_b: 3682 case Mips::BI__builtin_msa_bnegi_b: 3683 case Mips::BI__builtin_msa_bseti_b: 3684 case Mips::BI__builtin_msa_sat_s_b: 3685 case Mips::BI__builtin_msa_sat_u_b: 3686 case Mips::BI__builtin_msa_slli_b: 3687 case Mips::BI__builtin_msa_srai_b: 3688 case Mips::BI__builtin_msa_srari_b: 3689 case Mips::BI__builtin_msa_srli_b: 3690 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3691 case Mips::BI__builtin_msa_binsli_b: 3692 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3693 // These intrinsics take an unsigned 4 bit immediate. 3694 case Mips::BI__builtin_msa_bclri_h: 3695 case Mips::BI__builtin_msa_bnegi_h: 3696 case Mips::BI__builtin_msa_bseti_h: 3697 case Mips::BI__builtin_msa_sat_s_h: 3698 case Mips::BI__builtin_msa_sat_u_h: 3699 case Mips::BI__builtin_msa_slli_h: 3700 case Mips::BI__builtin_msa_srai_h: 3701 case Mips::BI__builtin_msa_srari_h: 3702 case Mips::BI__builtin_msa_srli_h: 3703 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3704 case Mips::BI__builtin_msa_binsli_h: 3705 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3706 // These intrinsics take an unsigned 5 bit immediate. 3707 // The first block of intrinsics actually have an unsigned 5 bit field, 3708 // not a df/n field. 
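// Illustrative (not from the original source): the register-number operand of
// __builtin_msa_ctcmsa must be a constant in [0, 31]; a value such as 32 is
// rejected by the range check below.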
3709 case Mips::BI__builtin_msa_cfcmsa: 3710 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3711 case Mips::BI__builtin_msa_clei_u_b: 3712 case Mips::BI__builtin_msa_clei_u_h: 3713 case Mips::BI__builtin_msa_clei_u_w: 3714 case Mips::BI__builtin_msa_clei_u_d: 3715 case Mips::BI__builtin_msa_clti_u_b: 3716 case Mips::BI__builtin_msa_clti_u_h: 3717 case Mips::BI__builtin_msa_clti_u_w: 3718 case Mips::BI__builtin_msa_clti_u_d: 3719 case Mips::BI__builtin_msa_maxi_u_b: 3720 case Mips::BI__builtin_msa_maxi_u_h: 3721 case Mips::BI__builtin_msa_maxi_u_w: 3722 case Mips::BI__builtin_msa_maxi_u_d: 3723 case Mips::BI__builtin_msa_mini_u_b: 3724 case Mips::BI__builtin_msa_mini_u_h: 3725 case Mips::BI__builtin_msa_mini_u_w: 3726 case Mips::BI__builtin_msa_mini_u_d: 3727 case Mips::BI__builtin_msa_addvi_b: 3728 case Mips::BI__builtin_msa_addvi_h: 3729 case Mips::BI__builtin_msa_addvi_w: 3730 case Mips::BI__builtin_msa_addvi_d: 3731 case Mips::BI__builtin_msa_bclri_w: 3732 case Mips::BI__builtin_msa_bnegi_w: 3733 case Mips::BI__builtin_msa_bseti_w: 3734 case Mips::BI__builtin_msa_sat_s_w: 3735 case Mips::BI__builtin_msa_sat_u_w: 3736 case Mips::BI__builtin_msa_slli_w: 3737 case Mips::BI__builtin_msa_srai_w: 3738 case Mips::BI__builtin_msa_srari_w: 3739 case Mips::BI__builtin_msa_srli_w: 3740 case Mips::BI__builtin_msa_srlri_w: 3741 case Mips::BI__builtin_msa_subvi_b: 3742 case Mips::BI__builtin_msa_subvi_h: 3743 case Mips::BI__builtin_msa_subvi_w: 3744 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3745 case Mips::BI__builtin_msa_binsli_w: 3746 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3747 // These intrinsics take an unsigned 6 bit immediate. 3748 case Mips::BI__builtin_msa_bclri_d: 3749 case Mips::BI__builtin_msa_bnegi_d: 3750 case Mips::BI__builtin_msa_bseti_d: 3751 case Mips::BI__builtin_msa_sat_s_d: 3752 case Mips::BI__builtin_msa_sat_u_d: 3753 case Mips::BI__builtin_msa_slli_d: 3754 case Mips::BI__builtin_msa_srai_d: 3755 case Mips::BI__builtin_msa_srari_d: 3756 case Mips::BI__builtin_msa_srli_d: 3757 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3758 case Mips::BI__builtin_msa_binsli_d: 3759 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3760 // These intrinsics take a signed 5 bit immediate. 3761 case Mips::BI__builtin_msa_ceqi_b: 3762 case Mips::BI__builtin_msa_ceqi_h: 3763 case Mips::BI__builtin_msa_ceqi_w: 3764 case Mips::BI__builtin_msa_ceqi_d: 3765 case Mips::BI__builtin_msa_clti_s_b: 3766 case Mips::BI__builtin_msa_clti_s_h: 3767 case Mips::BI__builtin_msa_clti_s_w: 3768 case Mips::BI__builtin_msa_clti_s_d: 3769 case Mips::BI__builtin_msa_clei_s_b: 3770 case Mips::BI__builtin_msa_clei_s_h: 3771 case Mips::BI__builtin_msa_clei_s_w: 3772 case Mips::BI__builtin_msa_clei_s_d: 3773 case Mips::BI__builtin_msa_maxi_s_b: 3774 case Mips::BI__builtin_msa_maxi_s_h: 3775 case Mips::BI__builtin_msa_maxi_s_w: 3776 case Mips::BI__builtin_msa_maxi_s_d: 3777 case Mips::BI__builtin_msa_mini_s_b: 3778 case Mips::BI__builtin_msa_mini_s_h: 3779 case Mips::BI__builtin_msa_mini_s_w: 3780 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3781 // These intrinsics take an unsigned 8 bit immediate. 
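// Illustrative (not from the original source): __builtin_msa_andi_b(v, 255) is in
// range for the check below, while an immediate of 256 exceeds the unsigned 8-bit
// field and is diagnosed.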
3782 case Mips::BI__builtin_msa_andi_b: 3783 case Mips::BI__builtin_msa_nori_b: 3784 case Mips::BI__builtin_msa_ori_b: 3785 case Mips::BI__builtin_msa_shf_b: 3786 case Mips::BI__builtin_msa_shf_h: 3787 case Mips::BI__builtin_msa_shf_w: 3788 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3789 case Mips::BI__builtin_msa_bseli_b: 3790 case Mips::BI__builtin_msa_bmnzi_b: 3791 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3792 // df/n format 3793 // These intrinsics take an unsigned 4 bit immediate. 3794 case Mips::BI__builtin_msa_copy_s_b: 3795 case Mips::BI__builtin_msa_copy_u_b: 3796 case Mips::BI__builtin_msa_insve_b: 3797 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3798 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3799 // These intrinsics take an unsigned 3 bit immediate. 3800 case Mips::BI__builtin_msa_copy_s_h: 3801 case Mips::BI__builtin_msa_copy_u_h: 3802 case Mips::BI__builtin_msa_insve_h: 3803 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3804 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3805 // These intrinsics take an unsigned 2 bit immediate. 3806 case Mips::BI__builtin_msa_copy_s_w: 3807 case Mips::BI__builtin_msa_copy_u_w: 3808 case Mips::BI__builtin_msa_insve_w: 3809 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3810 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3811 // These intrinsics take an unsigned 1 bit immediate. 3812 case Mips::BI__builtin_msa_copy_s_d: 3813 case Mips::BI__builtin_msa_copy_u_d: 3814 case Mips::BI__builtin_msa_insve_d: 3815 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3816 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3817 // Memory offsets and immediate loads. 3818 // These intrinsics take a signed 10 bit immediate. 3819 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3820 case Mips::BI__builtin_msa_ldi_h: 3821 case Mips::BI__builtin_msa_ldi_w: 3822 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3823 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3824 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3825 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3826 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3827 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3828 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3829 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3830 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3831 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3832 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3833 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3834 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3835 } 3836 3837 if (!m) 3838 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3839 3840 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3841 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3842 } 3843 3844 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3845 /// advancing the pointer over the consumed characters. The decoded type is 3846 /// returned. 
If the decoded type represents a constant integer with a 3847 /// constraint on its value then Mask is set to that value. The type descriptors 3848 /// used in Str are specific to PPC MMA builtins and are documented in the file 3849 /// defining the PPC builtins. 3850 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3851 unsigned &Mask) { 3852 bool RequireICE = false; 3853 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3854 switch (*Str++) { 3855 case 'V': 3856 return Context.getVectorType(Context.UnsignedCharTy, 16, 3857 VectorType::VectorKind::AltiVecVector); 3858 case 'i': { 3859 char *End; 3860 unsigned size = strtoul(Str, &End, 10); 3861 assert(End != Str && "Missing constant parameter constraint"); 3862 Str = End; 3863 Mask = size; 3864 return Context.IntTy; 3865 } 3866 case 'W': { 3867 char *End; 3868 unsigned size = strtoul(Str, &End, 10); 3869 assert(End != Str && "Missing PowerPC MMA type size"); 3870 Str = End; 3871 QualType Type; 3872 switch (size) { 3873 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3874 case size: Type = Context.Id##Ty; break; 3875 #include "clang/Basic/PPCTypes.def" 3876 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3877 } 3878 bool CheckVectorArgs = false; 3879 while (!CheckVectorArgs) { 3880 switch (*Str++) { 3881 case '*': 3882 Type = Context.getPointerType(Type); 3883 break; 3884 case 'C': 3885 Type = Type.withConst(); 3886 break; 3887 default: 3888 CheckVectorArgs = true; 3889 --Str; 3890 break; 3891 } 3892 } 3893 return Type; 3894 } 3895 default: 3896 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3897 } 3898 } 3899 3900 static bool isPPC_64Builtin(unsigned BuiltinID) { 3901 // These builtins only work on PPC 64bit targets. 3902 switch (BuiltinID) { 3903 case PPC::BI__builtin_divde: 3904 case PPC::BI__builtin_divdeu: 3905 case PPC::BI__builtin_bpermd: 3906 case PPC::BI__builtin_pdepd: 3907 case PPC::BI__builtin_pextd: 3908 case PPC::BI__builtin_ppc_ldarx: 3909 case PPC::BI__builtin_ppc_stdcx: 3910 case PPC::BI__builtin_ppc_tdw: 3911 case PPC::BI__builtin_ppc_trapd: 3912 case PPC::BI__builtin_ppc_cmpeqb: 3913 case PPC::BI__builtin_ppc_setb: 3914 case PPC::BI__builtin_ppc_mulhd: 3915 case PPC::BI__builtin_ppc_mulhdu: 3916 case PPC::BI__builtin_ppc_maddhd: 3917 case PPC::BI__builtin_ppc_maddhdu: 3918 case PPC::BI__builtin_ppc_maddld: 3919 case PPC::BI__builtin_ppc_load8r: 3920 case PPC::BI__builtin_ppc_store8r: 3921 case PPC::BI__builtin_ppc_insert_exp: 3922 case PPC::BI__builtin_ppc_extract_sig: 3923 case PPC::BI__builtin_ppc_addex: 3924 case PPC::BI__builtin_darn: 3925 case PPC::BI__builtin_darn_raw: 3926 case PPC::BI__builtin_ppc_compare_and_swaplp: 3927 case PPC::BI__builtin_ppc_fetch_and_addlp: 3928 case PPC::BI__builtin_ppc_fetch_and_andlp: 3929 case PPC::BI__builtin_ppc_fetch_and_orlp: 3930 case PPC::BI__builtin_ppc_fetch_and_swaplp: 3931 return true; 3932 } 3933 return false; 3934 } 3935 3936 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3937 StringRef FeatureToCheck, unsigned DiagID, 3938 StringRef DiagArg = "") { 3939 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3940 return false; 3941 3942 if (DiagArg.empty()) 3943 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3944 else 3945 S.Diag(TheCall->getBeginLoc(), DiagID) 3946 << DiagArg << TheCall->getSourceRange(); 3947 3948 return true; 3949 } 3950 3951 /// Returns true if the argument consists of one contiguous run of 1s with any 3952 /// number of 0s on either side. 
The 1s are allowed to wrap from LSB to MSB, so 3953 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3954 /// since all 1s are not contiguous. 3955 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 3956 llvm::APSInt Result; 3957 // We can't check the value of a dependent argument. 3958 Expr *Arg = TheCall->getArg(ArgNum); 3959 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3960 return false; 3961 3962 // Check constant-ness first. 3963 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3964 return true; 3965 3966 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 3967 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 3968 return false; 3969 3970 return Diag(TheCall->getBeginLoc(), 3971 diag::err_argument_not_contiguous_bit_field) 3972 << ArgNum << Arg->getSourceRange(); 3973 } 3974 3975 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3976 CallExpr *TheCall) { 3977 unsigned i = 0, l = 0, u = 0; 3978 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3979 llvm::APSInt Result; 3980 3981 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 3982 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3983 << TheCall->getSourceRange(); 3984 3985 switch (BuiltinID) { 3986 default: return false; 3987 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3988 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3989 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3990 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3991 case PPC::BI__builtin_altivec_dss: 3992 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3993 case PPC::BI__builtin_tbegin: 3994 case PPC::BI__builtin_tend: 3995 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 3996 SemaFeatureCheck(*this, TheCall, "htm", 3997 diag::err_ppc_builtin_requires_htm); 3998 case PPC::BI__builtin_tsr: 3999 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4000 SemaFeatureCheck(*this, TheCall, "htm", 4001 diag::err_ppc_builtin_requires_htm); 4002 case PPC::BI__builtin_tabortwc: 4003 case PPC::BI__builtin_tabortdc: 4004 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4005 SemaFeatureCheck(*this, TheCall, "htm", 4006 diag::err_ppc_builtin_requires_htm); 4007 case PPC::BI__builtin_tabortwci: 4008 case PPC::BI__builtin_tabortdci: 4009 return SemaFeatureCheck(*this, TheCall, "htm", 4010 diag::err_ppc_builtin_requires_htm) || 4011 (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4012 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31)); 4013 case PPC::BI__builtin_tabort: 4014 case PPC::BI__builtin_tcheck: 4015 case PPC::BI__builtin_treclaim: 4016 case PPC::BI__builtin_trechkpt: 4017 case PPC::BI__builtin_tendall: 4018 case PPC::BI__builtin_tresume: 4019 case PPC::BI__builtin_tsuspend: 4020 case PPC::BI__builtin_get_texasr: 4021 case PPC::BI__builtin_get_texasru: 4022 case PPC::BI__builtin_get_tfhar: 4023 case PPC::BI__builtin_get_tfiar: 4024 case PPC::BI__builtin_set_texasr: 4025 case PPC::BI__builtin_set_texasru: 4026 case PPC::BI__builtin_set_tfhar: 4027 case PPC::BI__builtin_set_tfiar: 4028 case PPC::BI__builtin_ttest: 4029 return SemaFeatureCheck(*this, TheCall, "htm", 4030 diag::err_ppc_builtin_requires_htm); 4031 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 4032 // __builtin_(un)pack_longdouble are available only if long double uses IBM 4033 // extended double representation. 
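// Illustrative: if long double is not in the IBM double-double format (for example
// under an IEEE 128-bit long double ABI), the cases below emit
// err_ppc_builtin_requires_abi naming "ibmlongdouble".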
4034 case PPC::BI__builtin_unpack_longdouble: 4035 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 4036 return true; 4037 LLVM_FALLTHROUGH; 4038 case PPC::BI__builtin_pack_longdouble: 4039 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 4040 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 4041 << "ibmlongdouble"; 4042 return false; 4043 case PPC::BI__builtin_altivec_dst: 4044 case PPC::BI__builtin_altivec_dstt: 4045 case PPC::BI__builtin_altivec_dstst: 4046 case PPC::BI__builtin_altivec_dststt: 4047 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4048 case PPC::BI__builtin_vsx_xxpermdi: 4049 case PPC::BI__builtin_vsx_xxsldwi: 4050 return SemaBuiltinVSX(TheCall); 4051 case PPC::BI__builtin_divwe: 4052 case PPC::BI__builtin_divweu: 4053 case PPC::BI__builtin_divde: 4054 case PPC::BI__builtin_divdeu: 4055 return SemaFeatureCheck(*this, TheCall, "extdiv", 4056 diag::err_ppc_builtin_only_on_arch, "7"); 4057 case PPC::BI__builtin_bpermd: 4058 return SemaFeatureCheck(*this, TheCall, "bpermd", 4059 diag::err_ppc_builtin_only_on_arch, "7"); 4060 case PPC::BI__builtin_unpack_vector_int128: 4061 return SemaFeatureCheck(*this, TheCall, "vsx", 4062 diag::err_ppc_builtin_only_on_arch, "7") || 4063 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4064 case PPC::BI__builtin_pack_vector_int128: 4065 return SemaFeatureCheck(*this, TheCall, "vsx", 4066 diag::err_ppc_builtin_only_on_arch, "7"); 4067 case PPC::BI__builtin_pdepd: 4068 case PPC::BI__builtin_pextd: 4069 return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions", 4070 diag::err_ppc_builtin_only_on_arch, "10"); 4071 case PPC::BI__builtin_altivec_vgnb: 4072 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 4073 case PPC::BI__builtin_vsx_xxeval: 4074 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 4075 case PPC::BI__builtin_altivec_vsldbi: 4076 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4077 case PPC::BI__builtin_altivec_vsrdbi: 4078 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4079 case PPC::BI__builtin_vsx_xxpermx: 4080 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 4081 case PPC::BI__builtin_ppc_tw: 4082 case PPC::BI__builtin_ppc_tdw: 4083 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 4084 case PPC::BI__builtin_ppc_cmpeqb: 4085 case PPC::BI__builtin_ppc_setb: 4086 case PPC::BI__builtin_ppc_maddhd: 4087 case PPC::BI__builtin_ppc_maddhdu: 4088 case PPC::BI__builtin_ppc_maddld: 4089 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4090 diag::err_ppc_builtin_only_on_arch, "9"); 4091 case PPC::BI__builtin_ppc_cmprb: 4092 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4093 diag::err_ppc_builtin_only_on_arch, "9") || 4094 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4095 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 4096 // be a constant that represents a contiguous bit field. 
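// Illustrative mask values (not from the original source): 0x00FFFF00 and the
// wrapped 0xFF0000FF are contiguous runs accepted by SemaValueIsRunOfOnes, while
// 0x0F0F0000 is rejected with err_argument_not_contiguous_bit_field.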
4097 case PPC::BI__builtin_ppc_rlwnm: 4098 return SemaValueIsRunOfOnes(TheCall, 2); 4099 case PPC::BI__builtin_ppc_rlwimi: 4100 case PPC::BI__builtin_ppc_rldimi: 4101 return SemaBuiltinConstantArg(TheCall, 2, Result) || 4102 SemaValueIsRunOfOnes(TheCall, 3); 4103 case PPC::BI__builtin_ppc_extract_exp: 4104 case PPC::BI__builtin_ppc_extract_sig: 4105 case PPC::BI__builtin_ppc_insert_exp: 4106 return SemaFeatureCheck(*this, TheCall, "power9-vector", 4107 diag::err_ppc_builtin_only_on_arch, "9"); 4108 case PPC::BI__builtin_ppc_addex: { 4109 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4110 diag::err_ppc_builtin_only_on_arch, "9") || 4111 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 4112 return true; 4113 // Output warning for reserved values 1 to 3. 4114 int ArgValue = 4115 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 4116 if (ArgValue != 0) 4117 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 4118 << ArgValue; 4119 return false; 4120 } 4121 case PPC::BI__builtin_ppc_mtfsb0: 4122 case PPC::BI__builtin_ppc_mtfsb1: 4123 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 4124 case PPC::BI__builtin_ppc_mtfsf: 4125 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 4126 case PPC::BI__builtin_ppc_mtfsfi: 4127 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4128 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4129 case PPC::BI__builtin_ppc_alignx: 4130 return SemaBuiltinConstantArgPower2(TheCall, 0); 4131 case PPC::BI__builtin_ppc_rdlam: 4132 return SemaValueIsRunOfOnes(TheCall, 2); 4133 case PPC::BI__builtin_ppc_icbt: 4134 case PPC::BI__builtin_ppc_sthcx: 4135 case PPC::BI__builtin_ppc_stbcx: 4136 case PPC::BI__builtin_ppc_lharx: 4137 case PPC::BI__builtin_ppc_lbarx: 4138 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4139 diag::err_ppc_builtin_only_on_arch, "8"); 4140 case PPC::BI__builtin_vsx_ldrmb: 4141 case PPC::BI__builtin_vsx_strmb: 4142 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4143 diag::err_ppc_builtin_only_on_arch, "8") || 4144 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 4145 case PPC::BI__builtin_altivec_vcntmbb: 4146 case PPC::BI__builtin_altivec_vcntmbh: 4147 case PPC::BI__builtin_altivec_vcntmbw: 4148 case PPC::BI__builtin_altivec_vcntmbd: 4149 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4150 case PPC::BI__builtin_darn: 4151 case PPC::BI__builtin_darn_raw: 4152 case PPC::BI__builtin_darn_32: 4153 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4154 diag::err_ppc_builtin_only_on_arch, "9"); 4155 case PPC::BI__builtin_vsx_xxgenpcvbm: 4156 case PPC::BI__builtin_vsx_xxgenpcvhm: 4157 case PPC::BI__builtin_vsx_xxgenpcvwm: 4158 case PPC::BI__builtin_vsx_xxgenpcvdm: 4159 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 4160 case PPC::BI__builtin_ppc_compare_exp_uo: 4161 case PPC::BI__builtin_ppc_compare_exp_lt: 4162 case PPC::BI__builtin_ppc_compare_exp_gt: 4163 case PPC::BI__builtin_ppc_compare_exp_eq: 4164 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4165 diag::err_ppc_builtin_only_on_arch, "9") || 4166 SemaFeatureCheck(*this, TheCall, "vsx", 4167 diag::err_ppc_builtin_requires_vsx); 4168 case PPC::BI__builtin_ppc_test_data_class: { 4169 // Check if the first argument of the __builtin_ppc_test_data_class call is 4170 // valid. The argument must be either a 'float' or a 'double'. 
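// Illustrative: a first argument of any other type, e.g. 'long double', is
// rejected below with err_ppc_invalid_test_data_class_type.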
4171 QualType ArgType = TheCall->getArg(0)->getType(); 4172 if (ArgType != QualType(Context.FloatTy) && 4173 ArgType != QualType(Context.DoubleTy)) 4174 return Diag(TheCall->getBeginLoc(), 4175 diag::err_ppc_invalid_test_data_class_type); 4176 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4177 diag::err_ppc_builtin_only_on_arch, "9") || 4178 SemaFeatureCheck(*this, TheCall, "vsx", 4179 diag::err_ppc_builtin_requires_vsx) || 4180 SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 4181 } 4182 case PPC::BI__builtin_ppc_maxfe: 4183 case PPC::BI__builtin_ppc_minfe: 4184 case PPC::BI__builtin_ppc_maxfl: 4185 case PPC::BI__builtin_ppc_minfl: 4186 case PPC::BI__builtin_ppc_maxfs: 4187 case PPC::BI__builtin_ppc_minfs: { 4188 if (Context.getTargetInfo().getTriple().isOSAIX() && 4189 (BuiltinID == PPC::BI__builtin_ppc_maxfe || 4190 BuiltinID == PPC::BI__builtin_ppc_minfe)) 4191 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type) 4192 << "builtin" << true << 128 << QualType(Context.LongDoubleTy) 4193 << false << Context.getTargetInfo().getTriple().str(); 4194 // Argument type should be exact. 4195 QualType ArgType = QualType(Context.LongDoubleTy); 4196 if (BuiltinID == PPC::BI__builtin_ppc_maxfl || 4197 BuiltinID == PPC::BI__builtin_ppc_minfl) 4198 ArgType = QualType(Context.DoubleTy); 4199 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs || 4200 BuiltinID == PPC::BI__builtin_ppc_minfs) 4201 ArgType = QualType(Context.FloatTy); 4202 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I) 4203 if (TheCall->getArg(I)->getType() != ArgType) 4204 return Diag(TheCall->getBeginLoc(), 4205 diag::err_typecheck_convert_incompatible) 4206 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0; 4207 return false; 4208 } 4209 case PPC::BI__builtin_ppc_load8r: 4210 case PPC::BI__builtin_ppc_store8r: 4211 return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions", 4212 diag::err_ppc_builtin_only_on_arch, "7"); 4213 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ 4214 case PPC::BI__builtin_##Name: \ 4215 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 4216 #include "clang/Basic/BuiltinsPPC.def" 4217 } 4218 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4219 } 4220 4221 // Check if the given type is a non-pointer PPC MMA type. This function is used 4222 // in Sema to prevent invalid uses of restricted PPC MMA types. 
4223 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) { 4224 if (Type->isPointerType() || Type->isArrayType()) 4225 return false; 4226 4227 QualType CoreType = Type.getCanonicalType().getUnqualifiedType(); 4228 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty 4229 if (false 4230 #include "clang/Basic/PPCTypes.def" 4231 ) { 4232 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); 4233 return true; 4234 } 4235 return false; 4236 } 4237 4238 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 4239 CallExpr *TheCall) { 4240 // position of memory order and scope arguments in the builtin 4241 unsigned OrderIndex, ScopeIndex; 4242 switch (BuiltinID) { 4243 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 4244 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 4245 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 4246 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 4247 OrderIndex = 2; 4248 ScopeIndex = 3; 4249 break; 4250 case AMDGPU::BI__builtin_amdgcn_fence: 4251 OrderIndex = 0; 4252 ScopeIndex = 1; 4253 break; 4254 default: 4255 return false; 4256 } 4257 4258 ExprResult Arg = TheCall->getArg(OrderIndex); 4259 auto ArgExpr = Arg.get(); 4260 Expr::EvalResult ArgResult; 4261 4262 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 4263 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 4264 << ArgExpr->getType(); 4265 auto Ord = ArgResult.Val.getInt().getZExtValue(); 4266 4267 // Check validity of memory ordering as per C11 / C++11's memody model. 4268 // Only fence needs check. Atomic dec/inc allow all memory orders. 4269 if (!llvm::isValidAtomicOrderingCABI(Ord)) 4270 return Diag(ArgExpr->getBeginLoc(), 4271 diag::warn_atomic_op_has_invalid_memory_order) 4272 << ArgExpr->getSourceRange(); 4273 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 4274 case llvm::AtomicOrderingCABI::relaxed: 4275 case llvm::AtomicOrderingCABI::consume: 4276 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 4277 return Diag(ArgExpr->getBeginLoc(), 4278 diag::warn_atomic_op_has_invalid_memory_order) 4279 << ArgExpr->getSourceRange(); 4280 break; 4281 case llvm::AtomicOrderingCABI::acquire: 4282 case llvm::AtomicOrderingCABI::release: 4283 case llvm::AtomicOrderingCABI::acq_rel: 4284 case llvm::AtomicOrderingCABI::seq_cst: 4285 break; 4286 } 4287 4288 Arg = TheCall->getArg(ScopeIndex); 4289 ArgExpr = Arg.get(); 4290 Expr::EvalResult ArgResult1; 4291 // Check that sync scope is a constant literal 4292 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 4293 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 4294 << ArgExpr->getType(); 4295 4296 return false; 4297 } 4298 4299 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 4300 llvm::APSInt Result; 4301 4302 // We can't check the value of a dependent argument. 4303 Expr *Arg = TheCall->getArg(ArgNum); 4304 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4305 return false; 4306 4307 // Check constant-ness first. 4308 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4309 return true; 4310 4311 int64_t Val = Result.getSExtValue(); 4312 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 4313 return false; 4314 4315 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 4316 << Arg->getSourceRange(); 4317 } 4318 4319 static bool isRISCV32Builtin(unsigned BuiltinID) { 4320 // These builtins only work on riscv32 targets. 
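// Illustrative (not from the original source): using __builtin_riscv_zip_32 while
// targeting riscv64 is rejected in CheckRISCVBuiltinFunctionCall with
// err_32_bit_builtin_64_bit_tgt.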
4321 switch (BuiltinID) { 4322 case RISCV::BI__builtin_riscv_zip_32: 4323 case RISCV::BI__builtin_riscv_unzip_32: 4324 case RISCV::BI__builtin_riscv_aes32dsi_32: 4325 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4326 case RISCV::BI__builtin_riscv_aes32esi_32: 4327 case RISCV::BI__builtin_riscv_aes32esmi_32: 4328 case RISCV::BI__builtin_riscv_sha512sig0h_32: 4329 case RISCV::BI__builtin_riscv_sha512sig0l_32: 4330 case RISCV::BI__builtin_riscv_sha512sig1h_32: 4331 case RISCV::BI__builtin_riscv_sha512sig1l_32: 4332 case RISCV::BI__builtin_riscv_sha512sum0r_32: 4333 case RISCV::BI__builtin_riscv_sha512sum1r_32: 4334 return true; 4335 } 4336 4337 return false; 4338 } 4339 4340 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 4341 unsigned BuiltinID, 4342 CallExpr *TheCall) { 4343 // CodeGenFunction can also detect this, but this gives a better error 4344 // message. 4345 bool FeatureMissing = false; 4346 SmallVector<StringRef> ReqFeatures; 4347 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 4348 Features.split(ReqFeatures, ','); 4349 4350 // Check for 32-bit only builtins on a 64-bit target. 4351 const llvm::Triple &TT = TI.getTriple(); 4352 if (TT.getArch() != llvm::Triple::riscv32 && isRISCV32Builtin(BuiltinID)) 4353 return Diag(TheCall->getCallee()->getBeginLoc(), 4354 diag::err_32_bit_builtin_64_bit_tgt); 4355 4356 // Check if each required feature is included 4357 for (StringRef F : ReqFeatures) { 4358 SmallVector<StringRef> ReqOpFeatures; 4359 F.split(ReqOpFeatures, '|'); 4360 bool HasFeature = false; 4361 for (StringRef OF : ReqOpFeatures) { 4362 if (TI.hasFeature(OF)) { 4363 HasFeature = true; 4364 continue; 4365 } 4366 } 4367 4368 if (!HasFeature) { 4369 std::string FeatureStrs; 4370 for (StringRef OF : ReqOpFeatures) { 4371 // If the feature is 64bit, alter the string so it will print better in 4372 // the diagnostic. 4373 if (OF == "64bit") 4374 OF = "RV64"; 4375 4376 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 4377 OF.consume_front("experimental-"); 4378 std::string FeatureStr = OF.str(); 4379 FeatureStr[0] = std::toupper(FeatureStr[0]); 4380 // Combine strings. 4381 FeatureStrs += FeatureStrs == "" ? 
"" : ", "; 4382 FeatureStrs += "'"; 4383 FeatureStrs += FeatureStr; 4384 FeatureStrs += "'"; 4385 } 4386 // Error message 4387 FeatureMissing = true; 4388 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4389 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4390 } 4391 } 4392 4393 if (FeatureMissing) 4394 return true; 4395 4396 switch (BuiltinID) { 4397 case RISCVVector::BI__builtin_rvv_vsetvli: 4398 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4399 CheckRISCVLMUL(TheCall, 2); 4400 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4401 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4402 CheckRISCVLMUL(TheCall, 1); 4403 case RISCVVector::BI__builtin_rvv_vget_v: { 4404 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4405 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4406 TheCall->getType().getCanonicalType().getTypePtr())); 4407 ASTContext::BuiltinVectorTypeInfo VecInfo = 4408 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4409 TheCall->getArg(0)->getType().getCanonicalType().getTypePtr())); 4410 unsigned MaxIndex = 4411 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) / 4412 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors); 4413 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4414 } 4415 case RISCVVector::BI__builtin_rvv_vset_v: { 4416 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4417 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4418 TheCall->getType().getCanonicalType().getTypePtr())); 4419 ASTContext::BuiltinVectorTypeInfo VecInfo = 4420 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4421 TheCall->getArg(2)->getType().getCanonicalType().getTypePtr())); 4422 unsigned MaxIndex = 4423 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) / 4424 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors); 4425 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4426 } 4427 // Check if byteselect is in [0, 3] 4428 case RISCV::BI__builtin_riscv_aes32dsi_32: 4429 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4430 case RISCV::BI__builtin_riscv_aes32esi_32: 4431 case RISCV::BI__builtin_riscv_aes32esmi_32: 4432 case RISCV::BI__builtin_riscv_sm4ks: 4433 case RISCV::BI__builtin_riscv_sm4ed: 4434 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4435 // Check if rnum is in [0, 10] 4436 case RISCV::BI__builtin_riscv_aes64ks1i_64: 4437 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10); 4438 } 4439 4440 return false; 4441 } 4442 4443 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 4444 CallExpr *TheCall) { 4445 if (BuiltinID == SystemZ::BI__builtin_tabort) { 4446 Expr *Arg = TheCall->getArg(0); 4447 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 4448 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 4449 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 4450 << Arg->getSourceRange(); 4451 } 4452 4453 // For intrinsics which take an immediate value as part of the instruction, 4454 // range check them here. 
4455 unsigned i = 0, l = 0, u = 0; 4456 switch (BuiltinID) { 4457 default: return false; 4458 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 4459 case SystemZ::BI__builtin_s390_verimb: 4460 case SystemZ::BI__builtin_s390_verimh: 4461 case SystemZ::BI__builtin_s390_verimf: 4462 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 4463 case SystemZ::BI__builtin_s390_vfaeb: 4464 case SystemZ::BI__builtin_s390_vfaeh: 4465 case SystemZ::BI__builtin_s390_vfaef: 4466 case SystemZ::BI__builtin_s390_vfaebs: 4467 case SystemZ::BI__builtin_s390_vfaehs: 4468 case SystemZ::BI__builtin_s390_vfaefs: 4469 case SystemZ::BI__builtin_s390_vfaezb: 4470 case SystemZ::BI__builtin_s390_vfaezh: 4471 case SystemZ::BI__builtin_s390_vfaezf: 4472 case SystemZ::BI__builtin_s390_vfaezbs: 4473 case SystemZ::BI__builtin_s390_vfaezhs: 4474 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 4475 case SystemZ::BI__builtin_s390_vfisb: 4476 case SystemZ::BI__builtin_s390_vfidb: 4477 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 4478 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4479 case SystemZ::BI__builtin_s390_vftcisb: 4480 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 4481 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 4482 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 4483 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 4484 case SystemZ::BI__builtin_s390_vstrcb: 4485 case SystemZ::BI__builtin_s390_vstrch: 4486 case SystemZ::BI__builtin_s390_vstrcf: 4487 case SystemZ::BI__builtin_s390_vstrczb: 4488 case SystemZ::BI__builtin_s390_vstrczh: 4489 case SystemZ::BI__builtin_s390_vstrczf: 4490 case SystemZ::BI__builtin_s390_vstrcbs: 4491 case SystemZ::BI__builtin_s390_vstrchs: 4492 case SystemZ::BI__builtin_s390_vstrcfs: 4493 case SystemZ::BI__builtin_s390_vstrczbs: 4494 case SystemZ::BI__builtin_s390_vstrczhs: 4495 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 4496 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 4497 case SystemZ::BI__builtin_s390_vfminsb: 4498 case SystemZ::BI__builtin_s390_vfmaxsb: 4499 case SystemZ::BI__builtin_s390_vfmindb: 4500 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 4501 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 4502 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 4503 case SystemZ::BI__builtin_s390_vclfnhs: 4504 case SystemZ::BI__builtin_s390_vclfnls: 4505 case SystemZ::BI__builtin_s390_vcfn: 4506 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 4507 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 4508 } 4509 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4510 } 4511 4512 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 4513 /// This checks that the target supports __builtin_cpu_supports and 4514 /// that the string argument is constant and valid. 4515 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 4516 CallExpr *TheCall) { 4517 Expr *Arg = TheCall->getArg(0); 4518 4519 // Check if the argument is a string literal. 4520 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4521 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4522 << Arg->getSourceRange(); 4523 4524 // Check the contents of the string. 
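  // For example, a guard like
  //   if (__builtin_cpu_supports("avx2")) { ... }
  // is accepted here only when the target recognizes the feature name.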
4525 StringRef Feature = 4526 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4527 if (!TI.validateCpuSupports(Feature)) 4528 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 4529 << Arg->getSourceRange(); 4530 return false; 4531 } 4532 4533 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 4534 /// This checks that the target supports __builtin_cpu_is and 4535 /// that the string argument is constant and valid. 4536 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4537 Expr *Arg = TheCall->getArg(0); 4538 4539 // Check if the argument is a string literal. 4540 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4541 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4542 << Arg->getSourceRange(); 4543 4544 // Check the contents of the string. 4545 StringRef Feature = 4546 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4547 if (!TI.validateCpuIs(Feature)) 4548 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4549 << Arg->getSourceRange(); 4550 return false; 4551 } 4552 4553 // Check if the rounding mode is legal. 4554 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4555 // Indicates if this instruction has rounding control or just SAE. 4556 bool HasRC = false; 4557 4558 unsigned ArgNum = 0; 4559 switch (BuiltinID) { 4560 default: 4561 return false; 4562 case X86::BI__builtin_ia32_vcvttsd2si32: 4563 case X86::BI__builtin_ia32_vcvttsd2si64: 4564 case X86::BI__builtin_ia32_vcvttsd2usi32: 4565 case X86::BI__builtin_ia32_vcvttsd2usi64: 4566 case X86::BI__builtin_ia32_vcvttss2si32: 4567 case X86::BI__builtin_ia32_vcvttss2si64: 4568 case X86::BI__builtin_ia32_vcvttss2usi32: 4569 case X86::BI__builtin_ia32_vcvttss2usi64: 4570 case X86::BI__builtin_ia32_vcvttsh2si32: 4571 case X86::BI__builtin_ia32_vcvttsh2si64: 4572 case X86::BI__builtin_ia32_vcvttsh2usi32: 4573 case X86::BI__builtin_ia32_vcvttsh2usi64: 4574 ArgNum = 1; 4575 break; 4576 case X86::BI__builtin_ia32_maxpd512: 4577 case X86::BI__builtin_ia32_maxps512: 4578 case X86::BI__builtin_ia32_minpd512: 4579 case X86::BI__builtin_ia32_minps512: 4580 case X86::BI__builtin_ia32_maxph512: 4581 case X86::BI__builtin_ia32_minph512: 4582 ArgNum = 2; 4583 break; 4584 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4585 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4586 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4587 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 4588 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 4589 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 4590 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 4591 case X86::BI__builtin_ia32_cvttps2dq512_mask: 4592 case X86::BI__builtin_ia32_cvttps2qq512_mask: 4593 case X86::BI__builtin_ia32_cvttps2udq512_mask: 4594 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 4595 case X86::BI__builtin_ia32_vcvttph2w512_mask: 4596 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 4597 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 4598 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 4599 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 4600 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 4601 case X86::BI__builtin_ia32_exp2pd_mask: 4602 case X86::BI__builtin_ia32_exp2ps_mask: 4603 case X86::BI__builtin_ia32_getexppd512_mask: 4604 case X86::BI__builtin_ia32_getexpps512_mask: 4605 case X86::BI__builtin_ia32_getexpph512_mask: 4606 case X86::BI__builtin_ia32_rcp28pd_mask: 4607 case X86::BI__builtin_ia32_rcp28ps_mask: 4608 case X86::BI__builtin_ia32_rsqrt28pd_mask: 
4609 case X86::BI__builtin_ia32_rsqrt28ps_mask: 4610 case X86::BI__builtin_ia32_vcomisd: 4611 case X86::BI__builtin_ia32_vcomiss: 4612 case X86::BI__builtin_ia32_vcomish: 4613 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 4614 ArgNum = 3; 4615 break; 4616 case X86::BI__builtin_ia32_cmppd512_mask: 4617 case X86::BI__builtin_ia32_cmpps512_mask: 4618 case X86::BI__builtin_ia32_cmpsd_mask: 4619 case X86::BI__builtin_ia32_cmpss_mask: 4620 case X86::BI__builtin_ia32_cmpsh_mask: 4621 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 4622 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 4623 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 4624 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4625 case X86::BI__builtin_ia32_getexpss128_round_mask: 4626 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4627 case X86::BI__builtin_ia32_getmantpd512_mask: 4628 case X86::BI__builtin_ia32_getmantps512_mask: 4629 case X86::BI__builtin_ia32_getmantph512_mask: 4630 case X86::BI__builtin_ia32_maxsd_round_mask: 4631 case X86::BI__builtin_ia32_maxss_round_mask: 4632 case X86::BI__builtin_ia32_maxsh_round_mask: 4633 case X86::BI__builtin_ia32_minsd_round_mask: 4634 case X86::BI__builtin_ia32_minss_round_mask: 4635 case X86::BI__builtin_ia32_minsh_round_mask: 4636 case X86::BI__builtin_ia32_rcp28sd_round_mask: 4637 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4638 case X86::BI__builtin_ia32_reducepd512_mask: 4639 case X86::BI__builtin_ia32_reduceps512_mask: 4640 case X86::BI__builtin_ia32_reduceph512_mask: 4641 case X86::BI__builtin_ia32_rndscalepd_mask: 4642 case X86::BI__builtin_ia32_rndscaleps_mask: 4643 case X86::BI__builtin_ia32_rndscaleph_mask: 4644 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4645 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4646 ArgNum = 4; 4647 break; 4648 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4649 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4650 case X86::BI__builtin_ia32_fixupimmps512_mask: 4651 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4652 case X86::BI__builtin_ia32_fixupimmsd_mask: 4653 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4654 case X86::BI__builtin_ia32_fixupimmss_mask: 4655 case X86::BI__builtin_ia32_fixupimmss_maskz: 4656 case X86::BI__builtin_ia32_getmantsd_round_mask: 4657 case X86::BI__builtin_ia32_getmantss_round_mask: 4658 case X86::BI__builtin_ia32_getmantsh_round_mask: 4659 case X86::BI__builtin_ia32_rangepd512_mask: 4660 case X86::BI__builtin_ia32_rangeps512_mask: 4661 case X86::BI__builtin_ia32_rangesd128_round_mask: 4662 case X86::BI__builtin_ia32_rangess128_round_mask: 4663 case X86::BI__builtin_ia32_reducesd_mask: 4664 case X86::BI__builtin_ia32_reducess_mask: 4665 case X86::BI__builtin_ia32_reducesh_mask: 4666 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4667 case X86::BI__builtin_ia32_rndscaless_round_mask: 4668 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4669 ArgNum = 5; 4670 break; 4671 case X86::BI__builtin_ia32_vcvtsd2si64: 4672 case X86::BI__builtin_ia32_vcvtsd2si32: 4673 case X86::BI__builtin_ia32_vcvtsd2usi32: 4674 case X86::BI__builtin_ia32_vcvtsd2usi64: 4675 case X86::BI__builtin_ia32_vcvtss2si32: 4676 case X86::BI__builtin_ia32_vcvtss2si64: 4677 case X86::BI__builtin_ia32_vcvtss2usi32: 4678 case X86::BI__builtin_ia32_vcvtss2usi64: 4679 case X86::BI__builtin_ia32_vcvtsh2si32: 4680 case X86::BI__builtin_ia32_vcvtsh2si64: 4681 case X86::BI__builtin_ia32_vcvtsh2usi32: 4682 case X86::BI__builtin_ia32_vcvtsh2usi64: 4683 case X86::BI__builtin_ia32_sqrtpd512: 4684 case X86::BI__builtin_ia32_sqrtps512: 4685 case 
X86::BI__builtin_ia32_sqrtph512: 4686 ArgNum = 1; 4687 HasRC = true; 4688 break; 4689 case X86::BI__builtin_ia32_addph512: 4690 case X86::BI__builtin_ia32_divph512: 4691 case X86::BI__builtin_ia32_mulph512: 4692 case X86::BI__builtin_ia32_subph512: 4693 case X86::BI__builtin_ia32_addpd512: 4694 case X86::BI__builtin_ia32_addps512: 4695 case X86::BI__builtin_ia32_divpd512: 4696 case X86::BI__builtin_ia32_divps512: 4697 case X86::BI__builtin_ia32_mulpd512: 4698 case X86::BI__builtin_ia32_mulps512: 4699 case X86::BI__builtin_ia32_subpd512: 4700 case X86::BI__builtin_ia32_subps512: 4701 case X86::BI__builtin_ia32_cvtsi2sd64: 4702 case X86::BI__builtin_ia32_cvtsi2ss32: 4703 case X86::BI__builtin_ia32_cvtsi2ss64: 4704 case X86::BI__builtin_ia32_cvtusi2sd64: 4705 case X86::BI__builtin_ia32_cvtusi2ss32: 4706 case X86::BI__builtin_ia32_cvtusi2ss64: 4707 case X86::BI__builtin_ia32_vcvtusi2sh: 4708 case X86::BI__builtin_ia32_vcvtusi642sh: 4709 case X86::BI__builtin_ia32_vcvtsi2sh: 4710 case X86::BI__builtin_ia32_vcvtsi642sh: 4711 ArgNum = 2; 4712 HasRC = true; 4713 break; 4714 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4715 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4716 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4717 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 4718 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 4719 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4720 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4721 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4722 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4723 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4724 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4725 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4726 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4727 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4728 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4729 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4730 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4731 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4732 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4733 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4734 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4735 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4736 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4737 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4738 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4739 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4740 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4741 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4742 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4743 ArgNum = 3; 4744 HasRC = true; 4745 break; 4746 case X86::BI__builtin_ia32_addsh_round_mask: 4747 case X86::BI__builtin_ia32_addss_round_mask: 4748 case X86::BI__builtin_ia32_addsd_round_mask: 4749 case X86::BI__builtin_ia32_divsh_round_mask: 4750 case X86::BI__builtin_ia32_divss_round_mask: 4751 case X86::BI__builtin_ia32_divsd_round_mask: 4752 case X86::BI__builtin_ia32_mulsh_round_mask: 4753 case X86::BI__builtin_ia32_mulss_round_mask: 4754 case X86::BI__builtin_ia32_mulsd_round_mask: 4755 case X86::BI__builtin_ia32_subsh_round_mask: 4756 case X86::BI__builtin_ia32_subss_round_mask: 4757 case X86::BI__builtin_ia32_subsd_round_mask: 4758 case X86::BI__builtin_ia32_scalefph512_mask: 4759 case X86::BI__builtin_ia32_scalefpd512_mask: 4760 case X86::BI__builtin_ia32_scalefps512_mask: 4761 case X86::BI__builtin_ia32_scalefsd_round_mask: 4762 case X86::BI__builtin_ia32_scalefss_round_mask: 4763 case X86::BI__builtin_ia32_scalefsh_round_mask: 4764 case 
X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4765 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4766 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4767 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4768 case X86::BI__builtin_ia32_sqrtss_round_mask: 4769 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4770 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4771 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4772 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4773 case X86::BI__builtin_ia32_vfmaddss3_mask: 4774 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4775 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4776 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4777 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4778 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4779 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4780 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4781 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4782 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4783 case X86::BI__builtin_ia32_vfmaddps512_mask: 4784 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4785 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4786 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4787 case X86::BI__builtin_ia32_vfmaddph512_mask: 4788 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4789 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4790 case X86::BI__builtin_ia32_vfmsubph512_mask3: 4791 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 4792 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 4793 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 4794 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 4795 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 4796 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 4797 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 4798 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 4799 case X86::BI__builtin_ia32_vfmaddsubph512_mask: 4800 case X86::BI__builtin_ia32_vfmaddsubph512_maskz: 4801 case X86::BI__builtin_ia32_vfmaddsubph512_mask3: 4802 case X86::BI__builtin_ia32_vfmsubaddph512_mask3: 4803 case X86::BI__builtin_ia32_vfmaddcsh_mask: 4804 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: 4805 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: 4806 case X86::BI__builtin_ia32_vfmaddcph512_mask: 4807 case X86::BI__builtin_ia32_vfmaddcph512_maskz: 4808 case X86::BI__builtin_ia32_vfmaddcph512_mask3: 4809 case X86::BI__builtin_ia32_vfcmaddcsh_mask: 4810 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: 4811 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: 4812 case X86::BI__builtin_ia32_vfcmaddcph512_mask: 4813 case X86::BI__builtin_ia32_vfcmaddcph512_maskz: 4814 case X86::BI__builtin_ia32_vfcmaddcph512_mask3: 4815 case X86::BI__builtin_ia32_vfmulcsh_mask: 4816 case X86::BI__builtin_ia32_vfmulcph512_mask: 4817 case X86::BI__builtin_ia32_vfcmulcsh_mask: 4818 case X86::BI__builtin_ia32_vfcmulcph512_mask: 4819 ArgNum = 4; 4820 HasRC = true; 4821 break; 4822 } 4823 4824 llvm::APSInt Result; 4825 4826 // We can't check the value of a dependent argument. 4827 Expr *Arg = TheCall->getArg(ArgNum); 4828 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4829 return false; 4830 4831 // Check constant-ness first. 4832 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4833 return true; 4834 4835 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 4836 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 4837 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 4838 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 
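  // In terms of the <immintrin.h> macros: _MM_FROUND_CUR_DIRECTION is 4 and
  // _MM_FROUND_NO_EXC is 8, while the explicit rounding-control values
  // (_MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
  // _MM_FROUND_TO_ZERO) are 0 through 3. So when the intrinsic has rounding
  // control, the only accepted immediates besides 4 and 8 are 8 through 11,
  // i.e. a rounding mode ORed with _MM_FROUND_NO_EXC.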
4839 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 4840 Result == 8/*ROUND_NO_EXC*/ || 4841 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 4842 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 4843 return false; 4844 4845 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 4846 << Arg->getSourceRange(); 4847 } 4848 4849 // Check if the gather/scatter scale is legal. 4850 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 4851 CallExpr *TheCall) { 4852 unsigned ArgNum = 0; 4853 switch (BuiltinID) { 4854 default: 4855 return false; 4856 case X86::BI__builtin_ia32_gatherpfdpd: 4857 case X86::BI__builtin_ia32_gatherpfdps: 4858 case X86::BI__builtin_ia32_gatherpfqpd: 4859 case X86::BI__builtin_ia32_gatherpfqps: 4860 case X86::BI__builtin_ia32_scatterpfdpd: 4861 case X86::BI__builtin_ia32_scatterpfdps: 4862 case X86::BI__builtin_ia32_scatterpfqpd: 4863 case X86::BI__builtin_ia32_scatterpfqps: 4864 ArgNum = 3; 4865 break; 4866 case X86::BI__builtin_ia32_gatherd_pd: 4867 case X86::BI__builtin_ia32_gatherd_pd256: 4868 case X86::BI__builtin_ia32_gatherq_pd: 4869 case X86::BI__builtin_ia32_gatherq_pd256: 4870 case X86::BI__builtin_ia32_gatherd_ps: 4871 case X86::BI__builtin_ia32_gatherd_ps256: 4872 case X86::BI__builtin_ia32_gatherq_ps: 4873 case X86::BI__builtin_ia32_gatherq_ps256: 4874 case X86::BI__builtin_ia32_gatherd_q: 4875 case X86::BI__builtin_ia32_gatherd_q256: 4876 case X86::BI__builtin_ia32_gatherq_q: 4877 case X86::BI__builtin_ia32_gatherq_q256: 4878 case X86::BI__builtin_ia32_gatherd_d: 4879 case X86::BI__builtin_ia32_gatherd_d256: 4880 case X86::BI__builtin_ia32_gatherq_d: 4881 case X86::BI__builtin_ia32_gatherq_d256: 4882 case X86::BI__builtin_ia32_gather3div2df: 4883 case X86::BI__builtin_ia32_gather3div2di: 4884 case X86::BI__builtin_ia32_gather3div4df: 4885 case X86::BI__builtin_ia32_gather3div4di: 4886 case X86::BI__builtin_ia32_gather3div4sf: 4887 case X86::BI__builtin_ia32_gather3div4si: 4888 case X86::BI__builtin_ia32_gather3div8sf: 4889 case X86::BI__builtin_ia32_gather3div8si: 4890 case X86::BI__builtin_ia32_gather3siv2df: 4891 case X86::BI__builtin_ia32_gather3siv2di: 4892 case X86::BI__builtin_ia32_gather3siv4df: 4893 case X86::BI__builtin_ia32_gather3siv4di: 4894 case X86::BI__builtin_ia32_gather3siv4sf: 4895 case X86::BI__builtin_ia32_gather3siv4si: 4896 case X86::BI__builtin_ia32_gather3siv8sf: 4897 case X86::BI__builtin_ia32_gather3siv8si: 4898 case X86::BI__builtin_ia32_gathersiv8df: 4899 case X86::BI__builtin_ia32_gathersiv16sf: 4900 case X86::BI__builtin_ia32_gatherdiv8df: 4901 case X86::BI__builtin_ia32_gatherdiv16sf: 4902 case X86::BI__builtin_ia32_gathersiv8di: 4903 case X86::BI__builtin_ia32_gathersiv16si: 4904 case X86::BI__builtin_ia32_gatherdiv8di: 4905 case X86::BI__builtin_ia32_gatherdiv16si: 4906 case X86::BI__builtin_ia32_scatterdiv2df: 4907 case X86::BI__builtin_ia32_scatterdiv2di: 4908 case X86::BI__builtin_ia32_scatterdiv4df: 4909 case X86::BI__builtin_ia32_scatterdiv4di: 4910 case X86::BI__builtin_ia32_scatterdiv4sf: 4911 case X86::BI__builtin_ia32_scatterdiv4si: 4912 case X86::BI__builtin_ia32_scatterdiv8sf: 4913 case X86::BI__builtin_ia32_scatterdiv8si: 4914 case X86::BI__builtin_ia32_scattersiv2df: 4915 case X86::BI__builtin_ia32_scattersiv2di: 4916 case X86::BI__builtin_ia32_scattersiv4df: 4917 case X86::BI__builtin_ia32_scattersiv4di: 4918 case X86::BI__builtin_ia32_scattersiv4sf: 4919 case X86::BI__builtin_ia32_scattersiv4si: 4920 case X86::BI__builtin_ia32_scattersiv8sf: 4921 
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

enum { TileRegLow = 0, TileRegHigh = 7 };

bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // There are at most TileRegHigh + 1 tile registers, so use one bit per
  // register in the bitset to track which ones have already been used.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
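  // Currently this is just the 32-bit EFLAGS read/write builtins; on x86-64
  // the corresponding 64-bit variants must be used instead.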
5013 switch (BuiltinID) { 5014 case X86::BI__builtin_ia32_readeflags_u32: 5015 case X86::BI__builtin_ia32_writeeflags_u32: 5016 return true; 5017 } 5018 5019 return false; 5020 } 5021 5022 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 5023 CallExpr *TheCall) { 5024 if (BuiltinID == X86::BI__builtin_cpu_supports) 5025 return SemaBuiltinCpuSupports(*this, TI, TheCall); 5026 5027 if (BuiltinID == X86::BI__builtin_cpu_is) 5028 return SemaBuiltinCpuIs(*this, TI, TheCall); 5029 5030 // Check for 32-bit only builtins on a 64-bit target. 5031 const llvm::Triple &TT = TI.getTriple(); 5032 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 5033 return Diag(TheCall->getCallee()->getBeginLoc(), 5034 diag::err_32_bit_builtin_64_bit_tgt); 5035 5036 // If the intrinsic has rounding or SAE make sure its valid. 5037 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 5038 return true; 5039 5040 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 5041 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 5042 return true; 5043 5044 // If the intrinsic has a tile arguments, make sure they are valid. 5045 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 5046 return true; 5047 5048 // For intrinsics which take an immediate value as part of the instruction, 5049 // range check them here. 5050 int i = 0, l = 0, u = 0; 5051 switch (BuiltinID) { 5052 default: 5053 return false; 5054 case X86::BI__builtin_ia32_vec_ext_v2si: 5055 case X86::BI__builtin_ia32_vec_ext_v2di: 5056 case X86::BI__builtin_ia32_vextractf128_pd256: 5057 case X86::BI__builtin_ia32_vextractf128_ps256: 5058 case X86::BI__builtin_ia32_vextractf128_si256: 5059 case X86::BI__builtin_ia32_extract128i256: 5060 case X86::BI__builtin_ia32_extractf64x4_mask: 5061 case X86::BI__builtin_ia32_extracti64x4_mask: 5062 case X86::BI__builtin_ia32_extractf32x8_mask: 5063 case X86::BI__builtin_ia32_extracti32x8_mask: 5064 case X86::BI__builtin_ia32_extractf64x2_256_mask: 5065 case X86::BI__builtin_ia32_extracti64x2_256_mask: 5066 case X86::BI__builtin_ia32_extractf32x4_256_mask: 5067 case X86::BI__builtin_ia32_extracti32x4_256_mask: 5068 i = 1; l = 0; u = 1; 5069 break; 5070 case X86::BI__builtin_ia32_vec_set_v2di: 5071 case X86::BI__builtin_ia32_vinsertf128_pd256: 5072 case X86::BI__builtin_ia32_vinsertf128_ps256: 5073 case X86::BI__builtin_ia32_vinsertf128_si256: 5074 case X86::BI__builtin_ia32_insert128i256: 5075 case X86::BI__builtin_ia32_insertf32x8: 5076 case X86::BI__builtin_ia32_inserti32x8: 5077 case X86::BI__builtin_ia32_insertf64x4: 5078 case X86::BI__builtin_ia32_inserti64x4: 5079 case X86::BI__builtin_ia32_insertf64x2_256: 5080 case X86::BI__builtin_ia32_inserti64x2_256: 5081 case X86::BI__builtin_ia32_insertf32x4_256: 5082 case X86::BI__builtin_ia32_inserti32x4_256: 5083 i = 2; l = 0; u = 1; 5084 break; 5085 case X86::BI__builtin_ia32_vpermilpd: 5086 case X86::BI__builtin_ia32_vec_ext_v4hi: 5087 case X86::BI__builtin_ia32_vec_ext_v4si: 5088 case X86::BI__builtin_ia32_vec_ext_v4sf: 5089 case X86::BI__builtin_ia32_vec_ext_v4di: 5090 case X86::BI__builtin_ia32_extractf32x4_mask: 5091 case X86::BI__builtin_ia32_extracti32x4_mask: 5092 case X86::BI__builtin_ia32_extractf64x2_512_mask: 5093 case X86::BI__builtin_ia32_extracti64x2_512_mask: 5094 i = 1; l = 0; u = 3; 5095 break; 5096 case X86::BI_mm_prefetch: 5097 case X86::BI__builtin_ia32_vec_ext_v8hi: 5098 case X86::BI__builtin_ia32_vec_ext_v8si: 5099 i = 1; l = 0; u = 7; 5100 break; 5101 case 
X86::BI__builtin_ia32_sha1rnds4: 5102 case X86::BI__builtin_ia32_blendpd: 5103 case X86::BI__builtin_ia32_shufpd: 5104 case X86::BI__builtin_ia32_vec_set_v4hi: 5105 case X86::BI__builtin_ia32_vec_set_v4si: 5106 case X86::BI__builtin_ia32_vec_set_v4di: 5107 case X86::BI__builtin_ia32_shuf_f32x4_256: 5108 case X86::BI__builtin_ia32_shuf_f64x2_256: 5109 case X86::BI__builtin_ia32_shuf_i32x4_256: 5110 case X86::BI__builtin_ia32_shuf_i64x2_256: 5111 case X86::BI__builtin_ia32_insertf64x2_512: 5112 case X86::BI__builtin_ia32_inserti64x2_512: 5113 case X86::BI__builtin_ia32_insertf32x4: 5114 case X86::BI__builtin_ia32_inserti32x4: 5115 i = 2; l = 0; u = 3; 5116 break; 5117 case X86::BI__builtin_ia32_vpermil2pd: 5118 case X86::BI__builtin_ia32_vpermil2pd256: 5119 case X86::BI__builtin_ia32_vpermil2ps: 5120 case X86::BI__builtin_ia32_vpermil2ps256: 5121 i = 3; l = 0; u = 3; 5122 break; 5123 case X86::BI__builtin_ia32_cmpb128_mask: 5124 case X86::BI__builtin_ia32_cmpw128_mask: 5125 case X86::BI__builtin_ia32_cmpd128_mask: 5126 case X86::BI__builtin_ia32_cmpq128_mask: 5127 case X86::BI__builtin_ia32_cmpb256_mask: 5128 case X86::BI__builtin_ia32_cmpw256_mask: 5129 case X86::BI__builtin_ia32_cmpd256_mask: 5130 case X86::BI__builtin_ia32_cmpq256_mask: 5131 case X86::BI__builtin_ia32_cmpb512_mask: 5132 case X86::BI__builtin_ia32_cmpw512_mask: 5133 case X86::BI__builtin_ia32_cmpd512_mask: 5134 case X86::BI__builtin_ia32_cmpq512_mask: 5135 case X86::BI__builtin_ia32_ucmpb128_mask: 5136 case X86::BI__builtin_ia32_ucmpw128_mask: 5137 case X86::BI__builtin_ia32_ucmpd128_mask: 5138 case X86::BI__builtin_ia32_ucmpq128_mask: 5139 case X86::BI__builtin_ia32_ucmpb256_mask: 5140 case X86::BI__builtin_ia32_ucmpw256_mask: 5141 case X86::BI__builtin_ia32_ucmpd256_mask: 5142 case X86::BI__builtin_ia32_ucmpq256_mask: 5143 case X86::BI__builtin_ia32_ucmpb512_mask: 5144 case X86::BI__builtin_ia32_ucmpw512_mask: 5145 case X86::BI__builtin_ia32_ucmpd512_mask: 5146 case X86::BI__builtin_ia32_ucmpq512_mask: 5147 case X86::BI__builtin_ia32_vpcomub: 5148 case X86::BI__builtin_ia32_vpcomuw: 5149 case X86::BI__builtin_ia32_vpcomud: 5150 case X86::BI__builtin_ia32_vpcomuq: 5151 case X86::BI__builtin_ia32_vpcomb: 5152 case X86::BI__builtin_ia32_vpcomw: 5153 case X86::BI__builtin_ia32_vpcomd: 5154 case X86::BI__builtin_ia32_vpcomq: 5155 case X86::BI__builtin_ia32_vec_set_v8hi: 5156 case X86::BI__builtin_ia32_vec_set_v8si: 5157 i = 2; l = 0; u = 7; 5158 break; 5159 case X86::BI__builtin_ia32_vpermilpd256: 5160 case X86::BI__builtin_ia32_roundps: 5161 case X86::BI__builtin_ia32_roundpd: 5162 case X86::BI__builtin_ia32_roundps256: 5163 case X86::BI__builtin_ia32_roundpd256: 5164 case X86::BI__builtin_ia32_getmantpd128_mask: 5165 case X86::BI__builtin_ia32_getmantpd256_mask: 5166 case X86::BI__builtin_ia32_getmantps128_mask: 5167 case X86::BI__builtin_ia32_getmantps256_mask: 5168 case X86::BI__builtin_ia32_getmantpd512_mask: 5169 case X86::BI__builtin_ia32_getmantps512_mask: 5170 case X86::BI__builtin_ia32_getmantph128_mask: 5171 case X86::BI__builtin_ia32_getmantph256_mask: 5172 case X86::BI__builtin_ia32_getmantph512_mask: 5173 case X86::BI__builtin_ia32_vec_ext_v16qi: 5174 case X86::BI__builtin_ia32_vec_ext_v16hi: 5175 i = 1; l = 0; u = 15; 5176 break; 5177 case X86::BI__builtin_ia32_pblendd128: 5178 case X86::BI__builtin_ia32_blendps: 5179 case X86::BI__builtin_ia32_blendpd256: 5180 case X86::BI__builtin_ia32_shufpd256: 5181 case X86::BI__builtin_ia32_roundss: 5182 case X86::BI__builtin_ia32_roundsd: 5183 case 
X86::BI__builtin_ia32_rangepd128_mask: 5184 case X86::BI__builtin_ia32_rangepd256_mask: 5185 case X86::BI__builtin_ia32_rangepd512_mask: 5186 case X86::BI__builtin_ia32_rangeps128_mask: 5187 case X86::BI__builtin_ia32_rangeps256_mask: 5188 case X86::BI__builtin_ia32_rangeps512_mask: 5189 case X86::BI__builtin_ia32_getmantsd_round_mask: 5190 case X86::BI__builtin_ia32_getmantss_round_mask: 5191 case X86::BI__builtin_ia32_getmantsh_round_mask: 5192 case X86::BI__builtin_ia32_vec_set_v16qi: 5193 case X86::BI__builtin_ia32_vec_set_v16hi: 5194 i = 2; l = 0; u = 15; 5195 break; 5196 case X86::BI__builtin_ia32_vec_ext_v32qi: 5197 i = 1; l = 0; u = 31; 5198 break; 5199 case X86::BI__builtin_ia32_cmpps: 5200 case X86::BI__builtin_ia32_cmpss: 5201 case X86::BI__builtin_ia32_cmppd: 5202 case X86::BI__builtin_ia32_cmpsd: 5203 case X86::BI__builtin_ia32_cmpps256: 5204 case X86::BI__builtin_ia32_cmppd256: 5205 case X86::BI__builtin_ia32_cmpps128_mask: 5206 case X86::BI__builtin_ia32_cmppd128_mask: 5207 case X86::BI__builtin_ia32_cmpps256_mask: 5208 case X86::BI__builtin_ia32_cmppd256_mask: 5209 case X86::BI__builtin_ia32_cmpps512_mask: 5210 case X86::BI__builtin_ia32_cmppd512_mask: 5211 case X86::BI__builtin_ia32_cmpsd_mask: 5212 case X86::BI__builtin_ia32_cmpss_mask: 5213 case X86::BI__builtin_ia32_vec_set_v32qi: 5214 i = 2; l = 0; u = 31; 5215 break; 5216 case X86::BI__builtin_ia32_permdf256: 5217 case X86::BI__builtin_ia32_permdi256: 5218 case X86::BI__builtin_ia32_permdf512: 5219 case X86::BI__builtin_ia32_permdi512: 5220 case X86::BI__builtin_ia32_vpermilps: 5221 case X86::BI__builtin_ia32_vpermilps256: 5222 case X86::BI__builtin_ia32_vpermilpd512: 5223 case X86::BI__builtin_ia32_vpermilps512: 5224 case X86::BI__builtin_ia32_pshufd: 5225 case X86::BI__builtin_ia32_pshufd256: 5226 case X86::BI__builtin_ia32_pshufd512: 5227 case X86::BI__builtin_ia32_pshufhw: 5228 case X86::BI__builtin_ia32_pshufhw256: 5229 case X86::BI__builtin_ia32_pshufhw512: 5230 case X86::BI__builtin_ia32_pshuflw: 5231 case X86::BI__builtin_ia32_pshuflw256: 5232 case X86::BI__builtin_ia32_pshuflw512: 5233 case X86::BI__builtin_ia32_vcvtps2ph: 5234 case X86::BI__builtin_ia32_vcvtps2ph_mask: 5235 case X86::BI__builtin_ia32_vcvtps2ph256: 5236 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 5237 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 5238 case X86::BI__builtin_ia32_rndscaleps_128_mask: 5239 case X86::BI__builtin_ia32_rndscalepd_128_mask: 5240 case X86::BI__builtin_ia32_rndscaleps_256_mask: 5241 case X86::BI__builtin_ia32_rndscalepd_256_mask: 5242 case X86::BI__builtin_ia32_rndscaleps_mask: 5243 case X86::BI__builtin_ia32_rndscalepd_mask: 5244 case X86::BI__builtin_ia32_rndscaleph_mask: 5245 case X86::BI__builtin_ia32_reducepd128_mask: 5246 case X86::BI__builtin_ia32_reducepd256_mask: 5247 case X86::BI__builtin_ia32_reducepd512_mask: 5248 case X86::BI__builtin_ia32_reduceps128_mask: 5249 case X86::BI__builtin_ia32_reduceps256_mask: 5250 case X86::BI__builtin_ia32_reduceps512_mask: 5251 case X86::BI__builtin_ia32_reduceph128_mask: 5252 case X86::BI__builtin_ia32_reduceph256_mask: 5253 case X86::BI__builtin_ia32_reduceph512_mask: 5254 case X86::BI__builtin_ia32_prold512: 5255 case X86::BI__builtin_ia32_prolq512: 5256 case X86::BI__builtin_ia32_prold128: 5257 case X86::BI__builtin_ia32_prold256: 5258 case X86::BI__builtin_ia32_prolq128: 5259 case X86::BI__builtin_ia32_prolq256: 5260 case X86::BI__builtin_ia32_prord512: 5261 case X86::BI__builtin_ia32_prorq512: 5262 case X86::BI__builtin_ia32_prord128: 5263 case 
X86::BI__builtin_ia32_prord256: 5264 case X86::BI__builtin_ia32_prorq128: 5265 case X86::BI__builtin_ia32_prorq256: 5266 case X86::BI__builtin_ia32_fpclasspd128_mask: 5267 case X86::BI__builtin_ia32_fpclasspd256_mask: 5268 case X86::BI__builtin_ia32_fpclassps128_mask: 5269 case X86::BI__builtin_ia32_fpclassps256_mask: 5270 case X86::BI__builtin_ia32_fpclassps512_mask: 5271 case X86::BI__builtin_ia32_fpclasspd512_mask: 5272 case X86::BI__builtin_ia32_fpclassph128_mask: 5273 case X86::BI__builtin_ia32_fpclassph256_mask: 5274 case X86::BI__builtin_ia32_fpclassph512_mask: 5275 case X86::BI__builtin_ia32_fpclasssd_mask: 5276 case X86::BI__builtin_ia32_fpclassss_mask: 5277 case X86::BI__builtin_ia32_fpclasssh_mask: 5278 case X86::BI__builtin_ia32_pslldqi128_byteshift: 5279 case X86::BI__builtin_ia32_pslldqi256_byteshift: 5280 case X86::BI__builtin_ia32_pslldqi512_byteshift: 5281 case X86::BI__builtin_ia32_psrldqi128_byteshift: 5282 case X86::BI__builtin_ia32_psrldqi256_byteshift: 5283 case X86::BI__builtin_ia32_psrldqi512_byteshift: 5284 case X86::BI__builtin_ia32_kshiftliqi: 5285 case X86::BI__builtin_ia32_kshiftlihi: 5286 case X86::BI__builtin_ia32_kshiftlisi: 5287 case X86::BI__builtin_ia32_kshiftlidi: 5288 case X86::BI__builtin_ia32_kshiftriqi: 5289 case X86::BI__builtin_ia32_kshiftrihi: 5290 case X86::BI__builtin_ia32_kshiftrisi: 5291 case X86::BI__builtin_ia32_kshiftridi: 5292 i = 1; l = 0; u = 255; 5293 break; 5294 case X86::BI__builtin_ia32_vperm2f128_pd256: 5295 case X86::BI__builtin_ia32_vperm2f128_ps256: 5296 case X86::BI__builtin_ia32_vperm2f128_si256: 5297 case X86::BI__builtin_ia32_permti256: 5298 case X86::BI__builtin_ia32_pblendw128: 5299 case X86::BI__builtin_ia32_pblendw256: 5300 case X86::BI__builtin_ia32_blendps256: 5301 case X86::BI__builtin_ia32_pblendd256: 5302 case X86::BI__builtin_ia32_palignr128: 5303 case X86::BI__builtin_ia32_palignr256: 5304 case X86::BI__builtin_ia32_palignr512: 5305 case X86::BI__builtin_ia32_alignq512: 5306 case X86::BI__builtin_ia32_alignd512: 5307 case X86::BI__builtin_ia32_alignd128: 5308 case X86::BI__builtin_ia32_alignd256: 5309 case X86::BI__builtin_ia32_alignq128: 5310 case X86::BI__builtin_ia32_alignq256: 5311 case X86::BI__builtin_ia32_vcomisd: 5312 case X86::BI__builtin_ia32_vcomiss: 5313 case X86::BI__builtin_ia32_shuf_f32x4: 5314 case X86::BI__builtin_ia32_shuf_f64x2: 5315 case X86::BI__builtin_ia32_shuf_i32x4: 5316 case X86::BI__builtin_ia32_shuf_i64x2: 5317 case X86::BI__builtin_ia32_shufpd512: 5318 case X86::BI__builtin_ia32_shufps: 5319 case X86::BI__builtin_ia32_shufps256: 5320 case X86::BI__builtin_ia32_shufps512: 5321 case X86::BI__builtin_ia32_dbpsadbw128: 5322 case X86::BI__builtin_ia32_dbpsadbw256: 5323 case X86::BI__builtin_ia32_dbpsadbw512: 5324 case X86::BI__builtin_ia32_vpshldd128: 5325 case X86::BI__builtin_ia32_vpshldd256: 5326 case X86::BI__builtin_ia32_vpshldd512: 5327 case X86::BI__builtin_ia32_vpshldq128: 5328 case X86::BI__builtin_ia32_vpshldq256: 5329 case X86::BI__builtin_ia32_vpshldq512: 5330 case X86::BI__builtin_ia32_vpshldw128: 5331 case X86::BI__builtin_ia32_vpshldw256: 5332 case X86::BI__builtin_ia32_vpshldw512: 5333 case X86::BI__builtin_ia32_vpshrdd128: 5334 case X86::BI__builtin_ia32_vpshrdd256: 5335 case X86::BI__builtin_ia32_vpshrdd512: 5336 case X86::BI__builtin_ia32_vpshrdq128: 5337 case X86::BI__builtin_ia32_vpshrdq256: 5338 case X86::BI__builtin_ia32_vpshrdq512: 5339 case X86::BI__builtin_ia32_vpshrdw128: 5340 case X86::BI__builtin_ia32_vpshrdw256: 5341 case X86::BI__builtin_ia32_vpshrdw512: 5342 i 
= 2; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have
  // out-of-range values. Such code still needs to make it through code
  // generation, but it doesn't necessarily need to make sense. We use a
  // warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
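  // For example, on a non-static member function annotated with
  // __attribute__((format(printf, 2, 3))), GCC counts the implicit 'this' as
  // argument 1, so index 2 names the first explicit parameter; shifting both
  // indices down by one makes them line up with Clang's parameter lists.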
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability =
          Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
              dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
                dyn_cast<InitListExpr>(CLE->getInitializer()))
          Expr = ILE->getInit(0);
  }

  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
}

bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as a format string to a formatting method.
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  } else
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
          dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
        << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
        << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
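/// For example, a parameter of type 'int * _Nonnull' carries such an
/// annotation, whereas a plain 'int *' does not.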
5514 static bool isNonNullType(ASTContext &ctx, QualType type) { 5515 if (auto nullability = type->getNullability(ctx)) 5516 return *nullability == NullabilityKind::NonNull; 5517 5518 return false; 5519 } 5520 5521 static void CheckNonNullArguments(Sema &S, 5522 const NamedDecl *FDecl, 5523 const FunctionProtoType *Proto, 5524 ArrayRef<const Expr *> Args, 5525 SourceLocation CallSiteLoc) { 5526 assert((FDecl || Proto) && "Need a function declaration or prototype"); 5527 5528 // Already checked by by constant evaluator. 5529 if (S.isConstantEvaluated()) 5530 return; 5531 // Check the attributes attached to the method/function itself. 5532 llvm::SmallBitVector NonNullArgs; 5533 if (FDecl) { 5534 // Handle the nonnull attribute on the function/method declaration itself. 5535 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 5536 if (!NonNull->args_size()) { 5537 // Easy case: all pointer arguments are nonnull. 5538 for (const auto *Arg : Args) 5539 if (S.isValidPointerAttrType(Arg->getType())) 5540 CheckNonNullArgument(S, Arg, CallSiteLoc); 5541 return; 5542 } 5543 5544 for (const ParamIdx &Idx : NonNull->args()) { 5545 unsigned IdxAST = Idx.getASTIndex(); 5546 if (IdxAST >= Args.size()) 5547 continue; 5548 if (NonNullArgs.empty()) 5549 NonNullArgs.resize(Args.size()); 5550 NonNullArgs.set(IdxAST); 5551 } 5552 } 5553 } 5554 5555 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 5556 // Handle the nonnull attribute on the parameters of the 5557 // function/method. 5558 ArrayRef<ParmVarDecl*> parms; 5559 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 5560 parms = FD->parameters(); 5561 else 5562 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 5563 5564 unsigned ParamIndex = 0; 5565 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 5566 I != E; ++I, ++ParamIndex) { 5567 const ParmVarDecl *PVD = *I; 5568 if (PVD->hasAttr<NonNullAttr>() || 5569 isNonNullType(S.Context, PVD->getType())) { 5570 if (NonNullArgs.empty()) 5571 NonNullArgs.resize(Args.size()); 5572 5573 NonNullArgs.set(ParamIndex); 5574 } 5575 } 5576 } else { 5577 // If we have a non-function, non-method declaration but no 5578 // function prototype, try to dig out the function prototype. 5579 if (!Proto) { 5580 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 5581 QualType type = VD->getType().getNonReferenceType(); 5582 if (auto pointerType = type->getAs<PointerType>()) 5583 type = pointerType->getPointeeType(); 5584 else if (auto blockType = type->getAs<BlockPointerType>()) 5585 type = blockType->getPointeeType(); 5586 // FIXME: data member pointers? 5587 5588 // Dig out the function prototype, if there is one. 5589 Proto = type->getAs<FunctionProtoType>(); 5590 } 5591 } 5592 5593 // Fill in non-null argument information from the nullability 5594 // information on the parameter types (if we have them). 5595 if (Proto) { 5596 unsigned Index = 0; 5597 for (auto paramType : Proto->getParamTypes()) { 5598 if (isNonNullType(S.Context, paramType)) { 5599 if (NonNullArgs.empty()) 5600 NonNullArgs.resize(Args.size()); 5601 5602 NonNullArgs.set(Index); 5603 } 5604 5605 ++Index; 5606 } 5607 } 5608 } 5609 5610 // Check for non-null arguments. 
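  // Every argument index recorded above is now tested, and arguments that are
  // provably null are diagnosed via CheckNonNullArgument.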
5611 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 5612 ArgIndex != ArgIndexEnd; ++ArgIndex) { 5613 if (NonNullArgs[ArgIndex]) 5614 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 5615 } 5616 } 5617 5618 /// Warn if a pointer or reference argument passed to a function points to an 5619 /// object that is less aligned than the parameter. This can happen when 5620 /// creating a typedef with a lower alignment than the original type and then 5621 /// calling functions defined in terms of the original type. 5622 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 5623 StringRef ParamName, QualType ArgTy, 5624 QualType ParamTy) { 5625 5626 // If a function accepts a pointer or reference type 5627 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 5628 return; 5629 5630 // If the parameter is a pointer type, get the pointee type for the 5631 // argument too. If the parameter is a reference type, don't try to get 5632 // the pointee type for the argument. 5633 if (ParamTy->isPointerType()) 5634 ArgTy = ArgTy->getPointeeType(); 5635 5636 // Remove reference or pointer 5637 ParamTy = ParamTy->getPointeeType(); 5638 5639 // Find expected alignment, and the actual alignment of the passed object. 5640 // getTypeAlignInChars requires complete types 5641 if (ArgTy.isNull() || ParamTy->isIncompleteType() || 5642 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() || 5643 ArgTy->isUndeducedType()) 5644 return; 5645 5646 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 5647 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 5648 5649 // If the argument is less aligned than the parameter, there is a 5650 // potential alignment issue. 5651 if (ArgAlign < ParamAlign) 5652 Diag(Loc, diag::warn_param_mismatched_alignment) 5653 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 5654 << ParamName << (FDecl != nullptr) << FDecl; 5655 } 5656 5657 /// Handles the checks for format strings, non-POD arguments to vararg 5658 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 5659 /// attributes. 5660 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 5661 const Expr *ThisArg, ArrayRef<const Expr *> Args, 5662 bool IsMemberFunction, SourceLocation Loc, 5663 SourceRange Range, VariadicCallType CallType) { 5664 // FIXME: We should check as much as we can in the template definition. 5665 if (CurContext->isDependentContext()) 5666 return; 5667 5668 // Printf and scanf checking. 5669 llvm::SmallBitVector CheckedVarArgs; 5670 if (FDecl) { 5671 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5672 // Only create vector if there are format attributes. 5673 CheckedVarArgs.resize(Args.size()); 5674 5675 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 5676 CheckedVarArgs); 5677 } 5678 } 5679 5680 // Refuse POD arguments that weren't caught by the format string 5681 // checks above. 5682 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 5683 if (CallType != VariadicDoesNotApply && 5684 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 5685 unsigned NumParams = Proto ? Proto->getNumParams() 5686 : FDecl && isa<FunctionDecl>(FDecl) 5687 ? cast<FunctionDecl>(FDecl)->getNumParams() 5688 : FDecl && isa<ObjCMethodDecl>(FDecl) 5689 ? cast<ObjCMethodDecl>(FDecl)->param_size() 5690 : 0; 5691 5692 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 5693 // Args[ArgIdx] can be null in malformed code. 
5694 if (const Expr *Arg = Args[ArgIdx]) { 5695 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 5696 checkVariadicArgument(Arg, CallType); 5697 } 5698 } 5699 } 5700 5701 if (FDecl || Proto) { 5702 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 5703 5704 // Type safety checking. 5705 if (FDecl) { 5706 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 5707 CheckArgumentWithTypeTag(I, Args, Loc); 5708 } 5709 } 5710 5711 // Check that passed arguments match the alignment of original arguments. 5712 // Try to get the missing prototype from the declaration. 5713 if (!Proto && FDecl) { 5714 const auto *FT = FDecl->getFunctionType(); 5715 if (isa_and_nonnull<FunctionProtoType>(FT)) 5716 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 5717 } 5718 if (Proto) { 5719 // For variadic functions, we may have more args than parameters. 5720 // For some K&R functions, we may have less args than parameters. 5721 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 5722 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 5723 // Args[ArgIdx] can be null in malformed code. 5724 if (const Expr *Arg = Args[ArgIdx]) { 5725 if (Arg->containsErrors()) 5726 continue; 5727 5728 QualType ParamTy = Proto->getParamType(ArgIdx); 5729 QualType ArgTy = Arg->getType(); 5730 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 5731 ArgTy, ParamTy); 5732 } 5733 } 5734 } 5735 5736 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 5737 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 5738 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 5739 if (!Arg->isValueDependent()) { 5740 Expr::EvalResult Align; 5741 if (Arg->EvaluateAsInt(Align, Context)) { 5742 const llvm::APSInt &I = Align.Val.getInt(); 5743 if (!I.isPowerOf2()) 5744 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 5745 << Arg->getSourceRange(); 5746 5747 if (I > Sema::MaximumAlignment) 5748 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 5749 << Arg->getSourceRange() << Sema::MaximumAlignment; 5750 } 5751 } 5752 } 5753 5754 if (FD) 5755 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 5756 } 5757 5758 /// CheckConstructorCall - Check a constructor call for correctness and safety 5759 /// properties not enforced by the C type system. 5760 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 5761 ArrayRef<const Expr *> Args, 5762 const FunctionProtoType *Proto, 5763 SourceLocation Loc) { 5764 VariadicCallType CallType = 5765 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 5766 5767 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 5768 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 5769 Context.getPointerType(Ctor->getThisObjectType())); 5770 5771 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 5772 Loc, SourceRange(), CallType); 5773 } 5774 5775 /// CheckFunctionCall - Check a direct function call for various correctness 5776 /// and safety properties not strictly enforced by the C type system. 
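/// This covers, among other things, format-string checking, null arguments
/// passed to nonnull parameters, argument alignment, and misuse of the
/// standard memory and string functions (memcpy, strlcpy, strncat, free, ...).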
5777 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 5778 const FunctionProtoType *Proto) { 5779 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5780 isa<CXXMethodDecl>(FDecl); 5781 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5782 IsMemberOperatorCall; 5783 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5784 TheCall->getCallee()); 5785 Expr** Args = TheCall->getArgs(); 5786 unsigned NumArgs = TheCall->getNumArgs(); 5787 5788 Expr *ImplicitThis = nullptr; 5789 if (IsMemberOperatorCall) { 5790 // If this is a call to a member operator, hide the first argument 5791 // from checkCall. 5792 // FIXME: Our choice of AST representation here is less than ideal. 5793 ImplicitThis = Args[0]; 5794 ++Args; 5795 --NumArgs; 5796 } else if (IsMemberFunction) 5797 ImplicitThis = 5798 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5799 5800 if (ImplicitThis) { 5801 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5802 // used. 5803 QualType ThisType = ImplicitThis->getType(); 5804 if (!ThisType->isPointerType()) { 5805 assert(!ThisType->isReferenceType()); 5806 ThisType = Context.getPointerType(ThisType); 5807 } 5808 5809 QualType ThisTypeFromDecl = 5810 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5811 5812 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5813 ThisTypeFromDecl); 5814 } 5815 5816 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5817 IsMemberFunction, TheCall->getRParenLoc(), 5818 TheCall->getCallee()->getSourceRange(), CallType); 5819 5820 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5821 // None of the checks below are needed for functions that don't have 5822 // simple names (e.g., C++ conversion functions). 5823 if (!FnInfo) 5824 return false; 5825 5826 // Enforce TCB except for builtin calls, which are always allowed. 5827 if (FDecl->getBuiltinID() == 0) 5828 CheckTCBEnforcement(TheCall->getExprLoc(), FDecl); 5829 5830 CheckAbsoluteValueFunction(TheCall, FDecl); 5831 CheckMaxUnsignedZero(TheCall, FDecl); 5832 5833 if (getLangOpts().ObjC) 5834 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5835 5836 unsigned CMId = FDecl->getMemoryFunctionKind(); 5837 5838 // Handle memory setting and copying functions. 5839 switch (CMId) { 5840 case 0: 5841 return false; 5842 case Builtin::BIstrlcpy: // fallthrough 5843 case Builtin::BIstrlcat: 5844 CheckStrlcpycatArguments(TheCall, FnInfo); 5845 break; 5846 case Builtin::BIstrncat: 5847 CheckStrncatArguments(TheCall, FnInfo); 5848 break; 5849 case Builtin::BIfree: 5850 CheckFreeArguments(TheCall); 5851 break; 5852 default: 5853 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5854 } 5855 5856 return false; 5857 } 5858 5859 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5860 ArrayRef<const Expr *> Args) { 5861 VariadicCallType CallType = 5862 Method->isVariadic() ? 
VariadicMethod : VariadicDoesNotApply; 5863 5864 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5865 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5866 CallType); 5867 5868 CheckTCBEnforcement(lbrac, Method); 5869 5870 return false; 5871 } 5872 5873 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5874 const FunctionProtoType *Proto) { 5875 QualType Ty; 5876 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5877 Ty = V->getType().getNonReferenceType(); 5878 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5879 Ty = F->getType().getNonReferenceType(); 5880 else 5881 return false; 5882 5883 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5884 !Ty->isFunctionProtoType()) 5885 return false; 5886 5887 VariadicCallType CallType; 5888 if (!Proto || !Proto->isVariadic()) { 5889 CallType = VariadicDoesNotApply; 5890 } else if (Ty->isBlockPointerType()) { 5891 CallType = VariadicBlock; 5892 } else { // Ty->isFunctionPointerType() 5893 CallType = VariadicFunction; 5894 } 5895 5896 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5897 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5898 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5899 TheCall->getCallee()->getSourceRange(), CallType); 5900 5901 return false; 5902 } 5903 5904 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5905 /// such as function pointers returned from functions. 5906 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 5907 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 5908 TheCall->getCallee()); 5909 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 5910 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5911 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5912 TheCall->getCallee()->getSourceRange(), CallType); 5913 5914 return false; 5915 } 5916 5917 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 5918 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 5919 return false; 5920 5921 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 5922 switch (Op) { 5923 case AtomicExpr::AO__c11_atomic_init: 5924 case AtomicExpr::AO__opencl_atomic_init: 5925 llvm_unreachable("There is no ordering argument for an init"); 5926 5927 case AtomicExpr::AO__c11_atomic_load: 5928 case AtomicExpr::AO__opencl_atomic_load: 5929 case AtomicExpr::AO__hip_atomic_load: 5930 case AtomicExpr::AO__atomic_load_n: 5931 case AtomicExpr::AO__atomic_load: 5932 return OrderingCABI != llvm::AtomicOrderingCABI::release && 5933 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5934 5935 case AtomicExpr::AO__c11_atomic_store: 5936 case AtomicExpr::AO__opencl_atomic_store: 5937 case AtomicExpr::AO__hip_atomic_store: 5938 case AtomicExpr::AO__atomic_store: 5939 case AtomicExpr::AO__atomic_store_n: 5940 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 5941 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 5942 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5943 5944 default: 5945 return true; 5946 } 5947 } 5948 5949 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 5950 AtomicExpr::AtomicOp Op) { 5951 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 5952 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5953 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 5954 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 5955 DRE->getSourceRange(), 
TheCall->getRParenLoc(), Args, 5956 Op); 5957 } 5958 5959 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 5960 SourceLocation RParenLoc, MultiExprArg Args, 5961 AtomicExpr::AtomicOp Op, 5962 AtomicArgumentOrder ArgOrder) { 5963 // All the non-OpenCL operations take one of the following forms. 5964 // The OpenCL operations take the __c11 forms with one extra argument for 5965 // synchronization scope. 5966 enum { 5967 // C __c11_atomic_init(A *, C) 5968 Init, 5969 5970 // C __c11_atomic_load(A *, int) 5971 Load, 5972 5973 // void __atomic_load(A *, CP, int) 5974 LoadCopy, 5975 5976 // void __atomic_store(A *, CP, int) 5977 Copy, 5978 5979 // C __c11_atomic_add(A *, M, int) 5980 Arithmetic, 5981 5982 // C __atomic_exchange_n(A *, CP, int) 5983 Xchg, 5984 5985 // void __atomic_exchange(A *, C *, CP, int) 5986 GNUXchg, 5987 5988 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 5989 C11CmpXchg, 5990 5991 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 5992 GNUCmpXchg 5993 } Form = Init; 5994 5995 const unsigned NumForm = GNUCmpXchg + 1; 5996 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 5997 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 5998 // where: 5999 // C is an appropriate type, 6000 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 6001 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 6002 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 6003 // the int parameters are for orderings. 6004 6005 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 6006 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 6007 "need to update code for modified forms"); 6008 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 6009 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 6010 AtomicExpr::AO__atomic_load, 6011 "need to update code for modified C11 atomics"); 6012 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 6013 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 6014 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 6015 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 6016 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 6017 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 6018 IsOpenCL; 6019 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 6020 Op == AtomicExpr::AO__atomic_store_n || 6021 Op == AtomicExpr::AO__atomic_exchange_n || 6022 Op == AtomicExpr::AO__atomic_compare_exchange_n; 6023 bool IsAddSub = false; 6024 6025 switch (Op) { 6026 case AtomicExpr::AO__c11_atomic_init: 6027 case AtomicExpr::AO__opencl_atomic_init: 6028 Form = Init; 6029 break; 6030 6031 case AtomicExpr::AO__c11_atomic_load: 6032 case AtomicExpr::AO__opencl_atomic_load: 6033 case AtomicExpr::AO__hip_atomic_load: 6034 case AtomicExpr::AO__atomic_load_n: 6035 Form = Load; 6036 break; 6037 6038 case AtomicExpr::AO__atomic_load: 6039 Form = LoadCopy; 6040 break; 6041 6042 case AtomicExpr::AO__c11_atomic_store: 6043 case AtomicExpr::AO__opencl_atomic_store: 6044 case AtomicExpr::AO__hip_atomic_store: 6045 case AtomicExpr::AO__atomic_store: 6046 case AtomicExpr::AO__atomic_store_n: 6047 Form = Copy; 6048 break; 6049 case AtomicExpr::AO__hip_atomic_fetch_add: 6050 case AtomicExpr::AO__hip_atomic_fetch_min: 6051 case AtomicExpr::AO__hip_atomic_fetch_max: 6052 case AtomicExpr::AO__c11_atomic_fetch_add: 6053 case AtomicExpr::AO__c11_atomic_fetch_sub: 6054 case AtomicExpr::AO__opencl_atomic_fetch_add: 6055 case AtomicExpr::AO__opencl_atomic_fetch_sub: 6056 case 
AtomicExpr::AO__atomic_fetch_add: 6057 case AtomicExpr::AO__atomic_fetch_sub: 6058 case AtomicExpr::AO__atomic_add_fetch: 6059 case AtomicExpr::AO__atomic_sub_fetch: 6060 IsAddSub = true; 6061 Form = Arithmetic; 6062 break; 6063 case AtomicExpr::AO__c11_atomic_fetch_and: 6064 case AtomicExpr::AO__c11_atomic_fetch_or: 6065 case AtomicExpr::AO__c11_atomic_fetch_xor: 6066 case AtomicExpr::AO__hip_atomic_fetch_and: 6067 case AtomicExpr::AO__hip_atomic_fetch_or: 6068 case AtomicExpr::AO__hip_atomic_fetch_xor: 6069 case AtomicExpr::AO__c11_atomic_fetch_nand: 6070 case AtomicExpr::AO__opencl_atomic_fetch_and: 6071 case AtomicExpr::AO__opencl_atomic_fetch_or: 6072 case AtomicExpr::AO__opencl_atomic_fetch_xor: 6073 case AtomicExpr::AO__atomic_fetch_and: 6074 case AtomicExpr::AO__atomic_fetch_or: 6075 case AtomicExpr::AO__atomic_fetch_xor: 6076 case AtomicExpr::AO__atomic_fetch_nand: 6077 case AtomicExpr::AO__atomic_and_fetch: 6078 case AtomicExpr::AO__atomic_or_fetch: 6079 case AtomicExpr::AO__atomic_xor_fetch: 6080 case AtomicExpr::AO__atomic_nand_fetch: 6081 Form = Arithmetic; 6082 break; 6083 case AtomicExpr::AO__c11_atomic_fetch_min: 6084 case AtomicExpr::AO__c11_atomic_fetch_max: 6085 case AtomicExpr::AO__opencl_atomic_fetch_min: 6086 case AtomicExpr::AO__opencl_atomic_fetch_max: 6087 case AtomicExpr::AO__atomic_min_fetch: 6088 case AtomicExpr::AO__atomic_max_fetch: 6089 case AtomicExpr::AO__atomic_fetch_min: 6090 case AtomicExpr::AO__atomic_fetch_max: 6091 Form = Arithmetic; 6092 break; 6093 6094 case AtomicExpr::AO__c11_atomic_exchange: 6095 case AtomicExpr::AO__hip_atomic_exchange: 6096 case AtomicExpr::AO__opencl_atomic_exchange: 6097 case AtomicExpr::AO__atomic_exchange_n: 6098 Form = Xchg; 6099 break; 6100 6101 case AtomicExpr::AO__atomic_exchange: 6102 Form = GNUXchg; 6103 break; 6104 6105 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 6106 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 6107 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 6108 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 6109 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 6110 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 6111 Form = C11CmpXchg; 6112 break; 6113 6114 case AtomicExpr::AO__atomic_compare_exchange: 6115 case AtomicExpr::AO__atomic_compare_exchange_n: 6116 Form = GNUCmpXchg; 6117 break; 6118 } 6119 6120 unsigned AdjustedNumArgs = NumArgs[Form]; 6121 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 6122 ++AdjustedNumArgs; 6123 // Check we have the right number of arguments. 6124 if (Args.size() < AdjustedNumArgs) { 6125 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 6126 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 6127 << ExprRange; 6128 return ExprError(); 6129 } else if (Args.size() > AdjustedNumArgs) { 6130 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 6131 diag::err_typecheck_call_too_many_args) 6132 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 6133 << ExprRange; 6134 return ExprError(); 6135 } 6136 6137 // Inspect the first argument of the atomic operation. 
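  // Illustrative caller shapes (assumed examples, not from this file) for the
  // forms classified above; the first argument is always a pointer:
  //   _Atomic(int) AI; int I, Out;
  //   __c11_atomic_load(&AI, __ATOMIC_SEQ_CST);     // Load: pointer to _Atomic
  //   __atomic_load(&I, &Out, __ATOMIC_SEQ_CST);    // LoadCopy: plain pointers
  //   __atomic_fetch_add(&I, 1, __ATOMIC_SEQ_CST);  // Arithmetic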
6138 Expr *Ptr = Args[0]; 6139 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 6140 if (ConvertedPtr.isInvalid()) 6141 return ExprError(); 6142 6143 Ptr = ConvertedPtr.get(); 6144 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 6145 if (!pointerType) { 6146 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 6147 << Ptr->getType() << Ptr->getSourceRange(); 6148 return ExprError(); 6149 } 6150 6151 // For a __c11 builtin, this should be a pointer to an _Atomic type. 6152 QualType AtomTy = pointerType->getPointeeType(); // 'A' 6153 QualType ValType = AtomTy; // 'C' 6154 if (IsC11) { 6155 if (!AtomTy->isAtomicType()) { 6156 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 6157 << Ptr->getType() << Ptr->getSourceRange(); 6158 return ExprError(); 6159 } 6160 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 6161 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 6162 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 6163 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 6164 << Ptr->getSourceRange(); 6165 return ExprError(); 6166 } 6167 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 6168 } else if (Form != Load && Form != LoadCopy) { 6169 if (ValType.isConstQualified()) { 6170 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 6171 << Ptr->getType() << Ptr->getSourceRange(); 6172 return ExprError(); 6173 } 6174 } 6175 6176 // For an arithmetic operation, the implied arithmetic must be well-formed. 6177 if (Form == Arithmetic) { 6178 // GCC does not enforce these rules for GNU atomics, but we do to help catch 6179 // trivial type errors. 6180 auto IsAllowedValueType = [&](QualType ValType) { 6181 if (ValType->isIntegerType()) 6182 return true; 6183 if (ValType->isPointerType()) 6184 return true; 6185 if (!ValType->isFloatingType()) 6186 return false; 6187 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 6188 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 6189 &Context.getTargetInfo().getLongDoubleFormat() == 6190 &llvm::APFloat::x87DoubleExtended()) 6191 return false; 6192 return true; 6193 }; 6194 if (IsAddSub && !IsAllowedValueType(ValType)) { 6195 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 6196 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6197 return ExprError(); 6198 } 6199 if (!IsAddSub && !ValType->isIntegerType()) { 6200 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 6201 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6202 return ExprError(); 6203 } 6204 if (IsC11 && ValType->isPointerType() && 6205 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 6206 diag::err_incomplete_type)) { 6207 return ExprError(); 6208 } 6209 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 6210 // For __atomic_*_n operations, the value type must be a scalar integral or 6211 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 6212 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 6213 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6214 return ExprError(); 6215 } 6216 6217 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 6218 !AtomTy->isScalarType()) { 6219 // For GNU atomics, require a trivially-copyable type. This is not part of 6220 // the GNU atomics specification but we enforce it for consistency with 6221 // other atomics which generally all require a trivially-copyable type. 
This 6222 // is because atomics just copy bits. 6223 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 6224 << Ptr->getType() << Ptr->getSourceRange(); 6225 return ExprError(); 6226 } 6227 6228 switch (ValType.getObjCLifetime()) { 6229 case Qualifiers::OCL_None: 6230 case Qualifiers::OCL_ExplicitNone: 6231 // okay 6232 break; 6233 6234 case Qualifiers::OCL_Weak: 6235 case Qualifiers::OCL_Strong: 6236 case Qualifiers::OCL_Autoreleasing: 6237 // FIXME: Can this happen? By this point, ValType should be known 6238 // to be trivially copyable. 6239 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 6240 << ValType << Ptr->getSourceRange(); 6241 return ExprError(); 6242 } 6243 6244 // All atomic operations have an overload which takes a pointer to a volatile 6245 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 6246 // into the result or the other operands. Similarly atomic_load takes a 6247 // pointer to a const 'A'. 6248 ValType.removeLocalVolatile(); 6249 ValType.removeLocalConst(); 6250 QualType ResultType = ValType; 6251 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 6252 Form == Init) 6253 ResultType = Context.VoidTy; 6254 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 6255 ResultType = Context.BoolTy; 6256 6257 // The type of a parameter passed 'by value'. In the GNU atomics, such 6258 // arguments are actually passed as pointers. 6259 QualType ByValType = ValType; // 'CP' 6260 bool IsPassedByAddress = false; 6261 if (!IsC11 && !IsHIP && !IsN) { 6262 ByValType = Ptr->getType(); 6263 IsPassedByAddress = true; 6264 } 6265 6266 SmallVector<Expr *, 5> APIOrderedArgs; 6267 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 6268 APIOrderedArgs.push_back(Args[0]); 6269 switch (Form) { 6270 case Init: 6271 case Load: 6272 APIOrderedArgs.push_back(Args[1]); // Val1/Order 6273 break; 6274 case LoadCopy: 6275 case Copy: 6276 case Arithmetic: 6277 case Xchg: 6278 APIOrderedArgs.push_back(Args[2]); // Val1 6279 APIOrderedArgs.push_back(Args[1]); // Order 6280 break; 6281 case GNUXchg: 6282 APIOrderedArgs.push_back(Args[2]); // Val1 6283 APIOrderedArgs.push_back(Args[3]); // Val2 6284 APIOrderedArgs.push_back(Args[1]); // Order 6285 break; 6286 case C11CmpXchg: 6287 APIOrderedArgs.push_back(Args[2]); // Val1 6288 APIOrderedArgs.push_back(Args[4]); // Val2 6289 APIOrderedArgs.push_back(Args[1]); // Order 6290 APIOrderedArgs.push_back(Args[3]); // OrderFail 6291 break; 6292 case GNUCmpXchg: 6293 APIOrderedArgs.push_back(Args[2]); // Val1 6294 APIOrderedArgs.push_back(Args[4]); // Val2 6295 APIOrderedArgs.push_back(Args[5]); // Weak 6296 APIOrderedArgs.push_back(Args[1]); // Order 6297 APIOrderedArgs.push_back(Args[3]); // OrderFail 6298 break; 6299 } 6300 } else 6301 APIOrderedArgs.append(Args.begin(), Args.end()); 6302 6303 // The first argument's non-CV pointer type is used to deduce the type of 6304 // subsequent arguments, except for: 6305 // - weak flag (always converted to bool) 6306 // - memory order (always converted to int) 6307 // - scope (always converted to int) 6308 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 6309 QualType Ty; 6310 if (i < NumVals[Form] + 1) { 6311 switch (i) { 6312 case 0: 6313 // The first argument is always a pointer. It has a fixed type. 6314 // It is always dereferenced, a nullptr is undefined. 6315 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6316 // Nothing else to do: we already know all we want about this pointer. 
6317 continue; 6318 case 1: 6319 // The second argument is the non-atomic operand. For arithmetic, this 6320 // is always passed by value, and for a compare_exchange it is always 6321 // passed by address. For the rest, GNU uses by-address and C11 uses 6322 // by-value. 6323 assert(Form != Load); 6324 if (Form == Arithmetic && ValType->isPointerType()) 6325 Ty = Context.getPointerDiffType(); 6326 else if (Form == Init || Form == Arithmetic) 6327 Ty = ValType; 6328 else if (Form == Copy || Form == Xchg) { 6329 if (IsPassedByAddress) { 6330 // The value pointer is always dereferenced, a nullptr is undefined. 6331 CheckNonNullArgument(*this, APIOrderedArgs[i], 6332 ExprRange.getBegin()); 6333 } 6334 Ty = ByValType; 6335 } else { 6336 Expr *ValArg = APIOrderedArgs[i]; 6337 // The value pointer is always dereferenced, a nullptr is undefined. 6338 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 6339 LangAS AS = LangAS::Default; 6340 // Keep address space of non-atomic pointer type. 6341 if (const PointerType *PtrTy = 6342 ValArg->getType()->getAs<PointerType>()) { 6343 AS = PtrTy->getPointeeType().getAddressSpace(); 6344 } 6345 Ty = Context.getPointerType( 6346 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 6347 } 6348 break; 6349 case 2: 6350 // The third argument to compare_exchange / GNU exchange is the desired 6351 // value, either by-value (for the C11 and *_n variant) or as a pointer. 6352 if (IsPassedByAddress) 6353 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6354 Ty = ByValType; 6355 break; 6356 case 3: 6357 // The fourth argument to GNU compare_exchange is a 'weak' flag. 6358 Ty = Context.BoolTy; 6359 break; 6360 } 6361 } else { 6362 // The order(s) and scope are always converted to int. 6363 Ty = Context.IntTy; 6364 } 6365 6366 InitializedEntity Entity = 6367 InitializedEntity::InitializeParameter(Context, Ty, false); 6368 ExprResult Arg = APIOrderedArgs[i]; 6369 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6370 if (Arg.isInvalid()) 6371 return true; 6372 APIOrderedArgs[i] = Arg.get(); 6373 } 6374 6375 // Permute the arguments into a 'consistent' order. 6376 SmallVector<Expr*, 5> SubExprs; 6377 SubExprs.push_back(Ptr); 6378 switch (Form) { 6379 case Init: 6380 // Note, AtomicExpr::getVal1() has a special case for this atomic. 6381 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6382 break; 6383 case Load: 6384 SubExprs.push_back(APIOrderedArgs[1]); // Order 6385 break; 6386 case LoadCopy: 6387 case Copy: 6388 case Arithmetic: 6389 case Xchg: 6390 SubExprs.push_back(APIOrderedArgs[2]); // Order 6391 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6392 break; 6393 case GNUXchg: 6394 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
6395 SubExprs.push_back(APIOrderedArgs[3]); // Order 6396 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6397 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6398 break; 6399 case C11CmpXchg: 6400 SubExprs.push_back(APIOrderedArgs[3]); // Order 6401 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6402 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 6403 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6404 break; 6405 case GNUCmpXchg: 6406 SubExprs.push_back(APIOrderedArgs[4]); // Order 6407 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6408 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 6409 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6410 SubExprs.push_back(APIOrderedArgs[3]); // Weak 6411 break; 6412 } 6413 6414 if (SubExprs.size() >= 2 && Form != Init) { 6415 if (Optional<llvm::APSInt> Result = 6416 SubExprs[1]->getIntegerConstantExpr(Context)) 6417 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 6418 Diag(SubExprs[1]->getBeginLoc(), 6419 diag::warn_atomic_op_has_invalid_memory_order) 6420 << SubExprs[1]->getSourceRange(); 6421 } 6422 6423 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 6424 auto *Scope = Args[Args.size() - 1]; 6425 if (Optional<llvm::APSInt> Result = 6426 Scope->getIntegerConstantExpr(Context)) { 6427 if (!ScopeModel->isValid(Result->getZExtValue())) 6428 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 6429 << Scope->getSourceRange(); 6430 } 6431 SubExprs.push_back(Scope); 6432 } 6433 6434 AtomicExpr *AE = new (Context) 6435 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 6436 6437 if ((Op == AtomicExpr::AO__c11_atomic_load || 6438 Op == AtomicExpr::AO__c11_atomic_store || 6439 Op == AtomicExpr::AO__opencl_atomic_load || 6440 Op == AtomicExpr::AO__hip_atomic_load || 6441 Op == AtomicExpr::AO__opencl_atomic_store || 6442 Op == AtomicExpr::AO__hip_atomic_store) && 6443 Context.AtomicUsesUnsupportedLibcall(AE)) 6444 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 6445 << ((Op == AtomicExpr::AO__c11_atomic_load || 6446 Op == AtomicExpr::AO__opencl_atomic_load || 6447 Op == AtomicExpr::AO__hip_atomic_load) 6448 ? 0 6449 : 1); 6450 6451 if (ValType->isBitIntType()) { 6452 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 6453 return ExprError(); 6454 } 6455 6456 return AE; 6457 } 6458 6459 /// checkBuiltinArgument - Given a call to a builtin function, perform 6460 /// normal type-checking on the given argument, updating the call in 6461 /// place. This is useful when a builtin function requires custom 6462 /// type-checking for some of its arguments but not necessarily all of 6463 /// them. 6464 /// 6465 /// Returns true on error. 6466 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6467 FunctionDecl *Fn = E->getDirectCallee(); 6468 assert(Fn && "builtin call without direct callee!"); 6469 6470 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6471 InitializedEntity Entity = 6472 InitializedEntity::InitializeParameter(S.Context, Param); 6473 6474 ExprResult Arg = E->getArg(ArgIndex); 6475 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6476 if (Arg.isInvalid()) 6477 return true; 6478 6479 E->setArg(ArgIndex, Arg.get()); 6480 return false; 6481 } 6482 6483 /// We have a call to a function like __sync_fetch_and_add, which is an 6484 /// overloaded function based on the pointer type of its first argument. 
6485 /// The main BuildCallExpr routines have already promoted the types of 6486 /// arguments because all of these calls are prototyped as void(...). 6487 /// 6488 /// This function goes through and does final semantic checking for these 6489 /// builtins, as well as generating any warnings. 6490 ExprResult 6491 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6492 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6493 Expr *Callee = TheCall->getCallee(); 6494 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6495 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6496 6497 // Ensure that we have at least one argument to do type inference from. 6498 if (TheCall->getNumArgs() < 1) { 6499 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6500 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6501 return ExprError(); 6502 } 6503 6504 // Inspect the first argument of the atomic builtin. This should always be 6505 // a pointer type, whose element is an integral scalar or pointer type. 6506 // Because it is a pointer type, we don't have to worry about any implicit 6507 // casts here. 6508 // FIXME: We don't allow floating point scalars as input. 6509 Expr *FirstArg = TheCall->getArg(0); 6510 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6511 if (FirstArgResult.isInvalid()) 6512 return ExprError(); 6513 FirstArg = FirstArgResult.get(); 6514 TheCall->setArg(0, FirstArg); 6515 6516 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6517 if (!pointerType) { 6518 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6519 << FirstArg->getType() << FirstArg->getSourceRange(); 6520 return ExprError(); 6521 } 6522 6523 QualType ValType = pointerType->getPointeeType(); 6524 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6525 !ValType->isBlockPointerType()) { 6526 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6527 << FirstArg->getType() << FirstArg->getSourceRange(); 6528 return ExprError(); 6529 } 6530 6531 if (ValType.isConstQualified()) { 6532 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6533 << FirstArg->getType() << FirstArg->getSourceRange(); 6534 return ExprError(); 6535 } 6536 6537 switch (ValType.getObjCLifetime()) { 6538 case Qualifiers::OCL_None: 6539 case Qualifiers::OCL_ExplicitNone: 6540 // okay 6541 break; 6542 6543 case Qualifiers::OCL_Weak: 6544 case Qualifiers::OCL_Strong: 6545 case Qualifiers::OCL_Autoreleasing: 6546 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6547 << ValType << FirstArg->getSourceRange(); 6548 return ExprError(); 6549 } 6550 6551 // Strip any qualifiers off ValType. 6552 ValType = ValType.getUnqualifiedType(); 6553 6554 // The majority of builtins return a value, but a few have special return 6555 // types, so allow them to override appropriately below. 6556 QualType ResultType = ValType; 6557 6558 // We need to figure out which concrete builtin this maps onto. For example, 6559 // __sync_fetch_and_add with a 2 byte object turns into 6560 // __sync_fetch_and_add_2. 
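// Illustrative sketch of that mapping (assumed caller code, not from this
// file):
//   short Counter;
//   __sync_fetch_and_add(&Counter, 1); // sizeof(short) == 2, so the call
//                                      // resolves to __sync_fetch_and_add_2.
// The BuiltinIndices table below supplies the _1/_2/_4/_8/_16 fan-out for each
// overloaded __sync_* builtin.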
6561 #define BUILTIN_ROW(x) \ 6562 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6563 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6564 6565 static const unsigned BuiltinIndices[][5] = { 6566 BUILTIN_ROW(__sync_fetch_and_add), 6567 BUILTIN_ROW(__sync_fetch_and_sub), 6568 BUILTIN_ROW(__sync_fetch_and_or), 6569 BUILTIN_ROW(__sync_fetch_and_and), 6570 BUILTIN_ROW(__sync_fetch_and_xor), 6571 BUILTIN_ROW(__sync_fetch_and_nand), 6572 6573 BUILTIN_ROW(__sync_add_and_fetch), 6574 BUILTIN_ROW(__sync_sub_and_fetch), 6575 BUILTIN_ROW(__sync_and_and_fetch), 6576 BUILTIN_ROW(__sync_or_and_fetch), 6577 BUILTIN_ROW(__sync_xor_and_fetch), 6578 BUILTIN_ROW(__sync_nand_and_fetch), 6579 6580 BUILTIN_ROW(__sync_val_compare_and_swap), 6581 BUILTIN_ROW(__sync_bool_compare_and_swap), 6582 BUILTIN_ROW(__sync_lock_test_and_set), 6583 BUILTIN_ROW(__sync_lock_release), 6584 BUILTIN_ROW(__sync_swap) 6585 }; 6586 #undef BUILTIN_ROW 6587 6588 // Determine the index of the size. 6589 unsigned SizeIndex; 6590 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6591 case 1: SizeIndex = 0; break; 6592 case 2: SizeIndex = 1; break; 6593 case 4: SizeIndex = 2; break; 6594 case 8: SizeIndex = 3; break; 6595 case 16: SizeIndex = 4; break; 6596 default: 6597 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6598 << FirstArg->getType() << FirstArg->getSourceRange(); 6599 return ExprError(); 6600 } 6601 6602 // Each of these builtins has one pointer argument, followed by some number of 6603 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 6604 // that we ignore. Find out which row of BuiltinIndices to read from as well 6605 // as the number of fixed args. 6606 unsigned BuiltinID = FDecl->getBuiltinID(); 6607 unsigned BuiltinIndex, NumFixed = 1; 6608 bool WarnAboutSemanticsChange = false; 6609 switch (BuiltinID) { 6610 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6611 case Builtin::BI__sync_fetch_and_add: 6612 case Builtin::BI__sync_fetch_and_add_1: 6613 case Builtin::BI__sync_fetch_and_add_2: 6614 case Builtin::BI__sync_fetch_and_add_4: 6615 case Builtin::BI__sync_fetch_and_add_8: 6616 case Builtin::BI__sync_fetch_and_add_16: 6617 BuiltinIndex = 0; 6618 break; 6619 6620 case Builtin::BI__sync_fetch_and_sub: 6621 case Builtin::BI__sync_fetch_and_sub_1: 6622 case Builtin::BI__sync_fetch_and_sub_2: 6623 case Builtin::BI__sync_fetch_and_sub_4: 6624 case Builtin::BI__sync_fetch_and_sub_8: 6625 case Builtin::BI__sync_fetch_and_sub_16: 6626 BuiltinIndex = 1; 6627 break; 6628 6629 case Builtin::BI__sync_fetch_and_or: 6630 case Builtin::BI__sync_fetch_and_or_1: 6631 case Builtin::BI__sync_fetch_and_or_2: 6632 case Builtin::BI__sync_fetch_and_or_4: 6633 case Builtin::BI__sync_fetch_and_or_8: 6634 case Builtin::BI__sync_fetch_and_or_16: 6635 BuiltinIndex = 2; 6636 break; 6637 6638 case Builtin::BI__sync_fetch_and_and: 6639 case Builtin::BI__sync_fetch_and_and_1: 6640 case Builtin::BI__sync_fetch_and_and_2: 6641 case Builtin::BI__sync_fetch_and_and_4: 6642 case Builtin::BI__sync_fetch_and_and_8: 6643 case Builtin::BI__sync_fetch_and_and_16: 6644 BuiltinIndex = 3; 6645 break; 6646 6647 case Builtin::BI__sync_fetch_and_xor: 6648 case Builtin::BI__sync_fetch_and_xor_1: 6649 case Builtin::BI__sync_fetch_and_xor_2: 6650 case Builtin::BI__sync_fetch_and_xor_4: 6651 case Builtin::BI__sync_fetch_and_xor_8: 6652 case Builtin::BI__sync_fetch_and_xor_16: 6653 BuiltinIndex = 4; 6654 break; 6655 6656 case Builtin::BI__sync_fetch_and_nand: 6657 case 
Builtin::BI__sync_fetch_and_nand_1: 6658 case Builtin::BI__sync_fetch_and_nand_2: 6659 case Builtin::BI__sync_fetch_and_nand_4: 6660 case Builtin::BI__sync_fetch_and_nand_8: 6661 case Builtin::BI__sync_fetch_and_nand_16: 6662 BuiltinIndex = 5; 6663 WarnAboutSemanticsChange = true; 6664 break; 6665 6666 case Builtin::BI__sync_add_and_fetch: 6667 case Builtin::BI__sync_add_and_fetch_1: 6668 case Builtin::BI__sync_add_and_fetch_2: 6669 case Builtin::BI__sync_add_and_fetch_4: 6670 case Builtin::BI__sync_add_and_fetch_8: 6671 case Builtin::BI__sync_add_and_fetch_16: 6672 BuiltinIndex = 6; 6673 break; 6674 6675 case Builtin::BI__sync_sub_and_fetch: 6676 case Builtin::BI__sync_sub_and_fetch_1: 6677 case Builtin::BI__sync_sub_and_fetch_2: 6678 case Builtin::BI__sync_sub_and_fetch_4: 6679 case Builtin::BI__sync_sub_and_fetch_8: 6680 case Builtin::BI__sync_sub_and_fetch_16: 6681 BuiltinIndex = 7; 6682 break; 6683 6684 case Builtin::BI__sync_and_and_fetch: 6685 case Builtin::BI__sync_and_and_fetch_1: 6686 case Builtin::BI__sync_and_and_fetch_2: 6687 case Builtin::BI__sync_and_and_fetch_4: 6688 case Builtin::BI__sync_and_and_fetch_8: 6689 case Builtin::BI__sync_and_and_fetch_16: 6690 BuiltinIndex = 8; 6691 break; 6692 6693 case Builtin::BI__sync_or_and_fetch: 6694 case Builtin::BI__sync_or_and_fetch_1: 6695 case Builtin::BI__sync_or_and_fetch_2: 6696 case Builtin::BI__sync_or_and_fetch_4: 6697 case Builtin::BI__sync_or_and_fetch_8: 6698 case Builtin::BI__sync_or_and_fetch_16: 6699 BuiltinIndex = 9; 6700 break; 6701 6702 case Builtin::BI__sync_xor_and_fetch: 6703 case Builtin::BI__sync_xor_and_fetch_1: 6704 case Builtin::BI__sync_xor_and_fetch_2: 6705 case Builtin::BI__sync_xor_and_fetch_4: 6706 case Builtin::BI__sync_xor_and_fetch_8: 6707 case Builtin::BI__sync_xor_and_fetch_16: 6708 BuiltinIndex = 10; 6709 break; 6710 6711 case Builtin::BI__sync_nand_and_fetch: 6712 case Builtin::BI__sync_nand_and_fetch_1: 6713 case Builtin::BI__sync_nand_and_fetch_2: 6714 case Builtin::BI__sync_nand_and_fetch_4: 6715 case Builtin::BI__sync_nand_and_fetch_8: 6716 case Builtin::BI__sync_nand_and_fetch_16: 6717 BuiltinIndex = 11; 6718 WarnAboutSemanticsChange = true; 6719 break; 6720 6721 case Builtin::BI__sync_val_compare_and_swap: 6722 case Builtin::BI__sync_val_compare_and_swap_1: 6723 case Builtin::BI__sync_val_compare_and_swap_2: 6724 case Builtin::BI__sync_val_compare_and_swap_4: 6725 case Builtin::BI__sync_val_compare_and_swap_8: 6726 case Builtin::BI__sync_val_compare_and_swap_16: 6727 BuiltinIndex = 12; 6728 NumFixed = 2; 6729 break; 6730 6731 case Builtin::BI__sync_bool_compare_and_swap: 6732 case Builtin::BI__sync_bool_compare_and_swap_1: 6733 case Builtin::BI__sync_bool_compare_and_swap_2: 6734 case Builtin::BI__sync_bool_compare_and_swap_4: 6735 case Builtin::BI__sync_bool_compare_and_swap_8: 6736 case Builtin::BI__sync_bool_compare_and_swap_16: 6737 BuiltinIndex = 13; 6738 NumFixed = 2; 6739 ResultType = Context.BoolTy; 6740 break; 6741 6742 case Builtin::BI__sync_lock_test_and_set: 6743 case Builtin::BI__sync_lock_test_and_set_1: 6744 case Builtin::BI__sync_lock_test_and_set_2: 6745 case Builtin::BI__sync_lock_test_and_set_4: 6746 case Builtin::BI__sync_lock_test_and_set_8: 6747 case Builtin::BI__sync_lock_test_and_set_16: 6748 BuiltinIndex = 14; 6749 break; 6750 6751 case Builtin::BI__sync_lock_release: 6752 case Builtin::BI__sync_lock_release_1: 6753 case Builtin::BI__sync_lock_release_2: 6754 case Builtin::BI__sync_lock_release_4: 6755 case Builtin::BI__sync_lock_release_8: 6756 case 
Builtin::BI__sync_lock_release_16: 6757 BuiltinIndex = 15; 6758 NumFixed = 0; 6759 ResultType = Context.VoidTy; 6760 break; 6761 6762 case Builtin::BI__sync_swap: 6763 case Builtin::BI__sync_swap_1: 6764 case Builtin::BI__sync_swap_2: 6765 case Builtin::BI__sync_swap_4: 6766 case Builtin::BI__sync_swap_8: 6767 case Builtin::BI__sync_swap_16: 6768 BuiltinIndex = 16; 6769 break; 6770 } 6771 6772 // Now that we know how many fixed arguments we expect, first check that we 6773 // have at least that many. 6774 if (TheCall->getNumArgs() < 1+NumFixed) { 6775 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6776 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6777 << Callee->getSourceRange(); 6778 return ExprError(); 6779 } 6780 6781 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6782 << Callee->getSourceRange(); 6783 6784 if (WarnAboutSemanticsChange) { 6785 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6786 << Callee->getSourceRange(); 6787 } 6788 6789 // Get the decl for the concrete builtin from this, we can tell what the 6790 // concrete integer type we should convert to is. 6791 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6792 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6793 FunctionDecl *NewBuiltinDecl; 6794 if (NewBuiltinID == BuiltinID) 6795 NewBuiltinDecl = FDecl; 6796 else { 6797 // Perform builtin lookup to avoid redeclaring it. 6798 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6799 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6800 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6801 assert(Res.getFoundDecl()); 6802 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6803 if (!NewBuiltinDecl) 6804 return ExprError(); 6805 } 6806 6807 // The first argument --- the pointer --- has a fixed type; we 6808 // deduce the types of the rest of the arguments accordingly. Walk 6809 // the remaining arguments, converting them to the deduced value type. 6810 for (unsigned i = 0; i != NumFixed; ++i) { 6811 ExprResult Arg = TheCall->getArg(i+1); 6812 6813 // GCC does an implicit conversion to the pointer or integer ValType. This 6814 // can fail in some cases (1i -> int**), check for this error case now. 6815 // Initialize the argument. 6816 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6817 ValType, /*consume*/ false); 6818 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6819 if (Arg.isInvalid()) 6820 return ExprError(); 6821 6822 // Okay, we have something that *can* be converted to the right type. Check 6823 // to see if there is a potentially weird extension going on here. This can 6824 // happen when you do an atomic operation on something like an char* and 6825 // pass in 42. The 42 gets converted to char. This is even more strange 6826 // for things like 45.123 -> char, etc. 6827 // FIXME: Do this check. 6828 TheCall->setArg(i+1, Arg.get()); 6829 } 6830 6831 // Create a new DeclRefExpr to refer to the new decl. 6832 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6833 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6834 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6835 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6836 6837 // Set the callee in the CallExpr. 6838 // FIXME: This loses syntactic information. 
6839 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 6840 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 6841 CK_BuiltinFnToFnPtr); 6842 TheCall->setCallee(PromotedCall.get()); 6843 6844 // Change the result type of the call to match the original value type. This 6845 // is arbitrary, but the codegen for these builtins is designed to handle it 6846 // gracefully. 6847 TheCall->setType(ResultType); 6848 6849 // Prohibit problematic uses of bit-precise integer types with atomic 6850 // builtins. The arguments would have already been converted to the first 6851 // argument's type, so we only need to check the first argument. 6852 const auto *BitIntValType = ValType->getAs<BitIntType>(); 6853 if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) { 6854 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 6855 return ExprError(); 6856 } 6857 6858 return TheCallResult; 6859 } 6860 6861 /// SemaBuiltinNontemporalOverloaded - We have a call to 6862 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 6863 /// overloaded function based on the pointer type of its last argument. 6864 /// 6865 /// This function goes through and does final semantic checking for these 6866 /// builtins. 6867 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 6868 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 6869 DeclRefExpr *DRE = 6870 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6871 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6872 unsigned BuiltinID = FDecl->getBuiltinID(); 6873 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 6874 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 6875 "Unexpected nontemporal load/store builtin!"); 6876 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 6877 unsigned numArgs = isStore ? 2 : 1; 6878 6879 // Ensure that we have the proper number of arguments. 6880 if (checkArgCount(*this, TheCall, numArgs)) 6881 return ExprError(); 6882 6883 // Inspect the last argument of the nontemporal builtin. This should always 6884 // be a pointer type, from which we infer the type of the memory access. 6885 // Because it is a pointer type, we don't have to worry about any implicit 6886 // casts here. 6887 Expr *PointerArg = TheCall->getArg(numArgs - 1); 6888 ExprResult PointerArgResult = 6889 DefaultFunctionArrayLvalueConversion(PointerArg); 6890 6891 if (PointerArgResult.isInvalid()) 6892 return ExprError(); 6893 PointerArg = PointerArgResult.get(); 6894 TheCall->setArg(numArgs - 1, PointerArg); 6895 6896 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 6897 if (!pointerType) { 6898 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 6899 << PointerArg->getType() << PointerArg->getSourceRange(); 6900 return ExprError(); 6901 } 6902 6903 QualType ValType = pointerType->getPointeeType(); 6904 6905 // Strip any qualifiers off ValType.
6906 ValType = ValType.getUnqualifiedType(); 6907 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6908 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6909 !ValType->isVectorType()) { 6910 Diag(DRE->getBeginLoc(), 6911 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6912 << PointerArg->getType() << PointerArg->getSourceRange(); 6913 return ExprError(); 6914 } 6915 6916 if (!isStore) { 6917 TheCall->setType(ValType); 6918 return TheCallResult; 6919 } 6920 6921 ExprResult ValArg = TheCall->getArg(0); 6922 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6923 Context, ValType, /*consume*/ false); 6924 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 6925 if (ValArg.isInvalid()) 6926 return ExprError(); 6927 6928 TheCall->setArg(0, ValArg.get()); 6929 TheCall->setType(Context.VoidTy); 6930 return TheCallResult; 6931 } 6932 6933 /// CheckObjCString - Checks that the argument to the builtin 6934 /// CFString constructor is correct. 6935 /// Note: It might also make sense to do the UTF-16 conversion here (would 6936 /// simplify the backend). 6937 bool Sema::CheckObjCString(Expr *Arg) { 6938 Arg = Arg->IgnoreParenCasts(); 6939 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6940 6941 if (!Literal || !Literal->isAscii()) { 6942 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6943 << Arg->getSourceRange(); 6944 return true; 6945 } 6946 6947 if (Literal->containsNonAsciiOrNull()) { 6948 StringRef String = Literal->getString(); 6949 unsigned NumBytes = String.size(); 6950 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 6951 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 6952 llvm::UTF16 *ToPtr = &ToBuf[0]; 6953 6954 llvm::ConversionResult Result = 6955 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 6956 ToPtr + NumBytes, llvm::strictConversion); 6957 // Check for conversion failure. 6958 if (Result != llvm::conversionOK) 6959 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 6960 << Arg->getSourceRange(); 6961 } 6962 return false; 6963 } 6964 6965 /// CheckOSLogFormatStringArg - Checks that the format string argument to the os_log() 6966 /// and os_trace() functions is correct, and converts it to const char *. 6967 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 6968 Arg = Arg->IgnoreParenCasts(); 6969 auto *Literal = dyn_cast<StringLiteral>(Arg); 6970 if (!Literal) { 6971 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 6972 Literal = ObjcLiteral->getString(); 6973 } 6974 } 6975 6976 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 6977 return ExprError( 6978 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 6979 << Arg->getSourceRange()); 6980 } 6981 6982 ExprResult Result(Literal); 6983 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 6984 InitializedEntity Entity = 6985 InitializedEntity::InitializeParameter(Context, ResultTy, false); 6986 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 6987 return Result; 6988 } 6989 6990 /// Check that the user is calling the appropriate va_start builtin for the 6991 /// target and calling convention.
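/// For example (illustrative): __builtin_ms_va_start used inside a function
/// compiled with the System V x86-64 calling convention is diagnosed, and a
/// plain __builtin_va_start inside a Win64-ABI function on a non-Windows
/// x86-64 or AArch64 target is likewise rejected.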
6992 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 6993 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 6994 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 6995 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 6996 TT.getArch() == llvm::Triple::aarch64_32); 6997 bool IsWindows = TT.isOSWindows(); 6998 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 6999 if (IsX64 || IsAArch64) { 7000 CallingConv CC = CC_C; 7001 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 7002 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 7003 if (IsMSVAStart) { 7004 // Don't allow this in System V ABI functions. 7005 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 7006 return S.Diag(Fn->getBeginLoc(), 7007 diag::err_ms_va_start_used_in_sysv_function); 7008 } else { 7009 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 7010 // On x64 Windows, don't allow this in System V ABI functions. 7011 // (Yes, that means there's no corresponding way to support variadic 7012 // System V ABI functions on Windows.) 7013 if ((IsWindows && CC == CC_X86_64SysV) || 7014 (!IsWindows && CC == CC_Win64)) 7015 return S.Diag(Fn->getBeginLoc(), 7016 diag::err_va_start_used_in_wrong_abi_function) 7017 << !IsWindows; 7018 } 7019 return false; 7020 } 7021 7022 if (IsMSVAStart) 7023 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 7024 return false; 7025 } 7026 7027 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 7028 ParmVarDecl **LastParam = nullptr) { 7029 // Determine whether the current function, block, or obj-c method is variadic 7030 // and get its parameter list. 7031 bool IsVariadic = false; 7032 ArrayRef<ParmVarDecl *> Params; 7033 DeclContext *Caller = S.CurContext; 7034 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 7035 IsVariadic = Block->isVariadic(); 7036 Params = Block->parameters(); 7037 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 7038 IsVariadic = FD->isVariadic(); 7039 Params = FD->parameters(); 7040 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 7041 IsVariadic = MD->isVariadic(); 7042 // FIXME: This isn't correct for methods (results in bogus warning). 7043 Params = MD->parameters(); 7044 } else if (isa<CapturedDecl>(Caller)) { 7045 // We don't support va_start in a CapturedDecl. 7046 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 7047 return true; 7048 } else { 7049 // This must be some other declcontext that parses exprs. 7050 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 7051 return true; 7052 } 7053 7054 if (!IsVariadic) { 7055 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 7056 return true; 7057 } 7058 7059 if (LastParam) 7060 *LastParam = Params.empty() ? nullptr : Params.back(); 7061 7062 return false; 7063 } 7064 7065 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 7066 /// for validity. Emit an error and return true on failure; return false 7067 /// on success. 7068 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 7069 Expr *Fn = TheCall->getCallee(); 7070 7071 if (checkVAStartABI(*this, BuiltinID, Fn)) 7072 return true; 7073 7074 if (checkArgCount(*this, TheCall, 2)) 7075 return true; 7076 7077 // Type-check the first argument normally. 7078 if (checkBuiltinArgument(*this, TheCall, 0)) 7079 return true; 7080 7081 // Check that the current function is variadic, and get its last parameter. 
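  // Illustrative caller this expects (assumed example, not from this file):
  //   void f(int Last, ...) { va_list AP; __builtin_va_start(AP, Last); }
  // Passing something other than the last named parameter as the second
  // argument is only warned about below, not rejected.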
7082 ParmVarDecl *LastParam; 7083 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 7084 return true; 7085 7086 // Verify that the second argument to the builtin is the last argument of the 7087 // current function or method. 7088 bool SecondArgIsLastNamedArgument = false; 7089 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 7090 7091 // These are valid if SecondArgIsLastNamedArgument is false after the next 7092 // block. 7093 QualType Type; 7094 SourceLocation ParamLoc; 7095 bool IsCRegister = false; 7096 7097 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 7098 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 7099 SecondArgIsLastNamedArgument = PV == LastParam; 7100 7101 Type = PV->getType(); 7102 ParamLoc = PV->getLocation(); 7103 IsCRegister = 7104 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 7105 } 7106 } 7107 7108 if (!SecondArgIsLastNamedArgument) 7109 Diag(TheCall->getArg(1)->getBeginLoc(), 7110 diag::warn_second_arg_of_va_start_not_last_named_param); 7111 else if (IsCRegister || Type->isReferenceType() || 7112 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 7113 // Promotable integers are UB, but enumerations need a bit of 7114 // extra checking to see what their promotable type actually is. 7115 if (!Type->isPromotableIntegerType()) 7116 return false; 7117 if (!Type->isEnumeralType()) 7118 return true; 7119 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 7120 return !(ED && 7121 Context.typesAreCompatible(ED->getPromotionType(), Type)); 7122 }()) { 7123 unsigned Reason = 0; 7124 if (Type->isReferenceType()) Reason = 1; 7125 else if (IsCRegister) Reason = 2; 7126 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 7127 Diag(ParamLoc, diag::note_parameter_type) << Type; 7128 } 7129 7130 TheCall->setType(Context.VoidTy); 7131 return false; 7132 } 7133 7134 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 7135 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 7136 const LangOptions &LO = getLangOpts(); 7137 7138 if (LO.CPlusPlus) 7139 return Arg->getType() 7140 .getCanonicalType() 7141 .getTypePtr() 7142 ->getPointeeType() 7143 .withoutLocalFastQualifiers() == Context.CharTy; 7144 7145 // In C, allow aliasing through `char *`, this is required for AArch64 at 7146 // least. 7147 return true; 7148 }; 7149 7150 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 7151 // const char *named_addr); 7152 7153 Expr *Func = Call->getCallee(); 7154 7155 if (Call->getNumArgs() < 3) 7156 return Diag(Call->getEndLoc(), 7157 diag::err_typecheck_call_too_few_args_at_least) 7158 << 0 /*function call*/ << 3 << Call->getNumArgs(); 7159 7160 // Type-check the first argument normally. 7161 if (checkBuiltinArgument(*this, Call, 0)) 7162 return true; 7163 7164 // Check that the current function is variadic. 
7165 if (checkVAStartIsInVariadicFunction(*this, Func)) 7166 return true; 7167 7168 // __va_start on Windows does not validate the parameter qualifiers 7169 7170 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 7171 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 7172 7173 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 7174 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 7175 7176 const QualType &ConstCharPtrTy = 7177 Context.getPointerType(Context.CharTy.withConst()); 7178 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1)) 7179 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 7180 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 7181 << 0 /* qualifier difference */ 7182 << 3 /* parameter mismatch */ 7183 << 2 << Arg1->getType() << ConstCharPtrTy; 7184 7185 const QualType SizeTy = Context.getSizeType(); 7186 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 7187 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 7188 << Arg2->getType() << SizeTy << 1 /* different class */ 7189 << 0 /* qualifier difference */ 7190 << 3 /* parameter mismatch */ 7191 << 3 << Arg2->getType() << SizeTy; 7192 7193 return false; 7194 } 7195 7196 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 7197 /// friends. This is declared to take (...), so we have to check everything. 7198 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 7199 if (checkArgCount(*this, TheCall, 2)) 7200 return true; 7201 7202 ExprResult OrigArg0 = TheCall->getArg(0); 7203 ExprResult OrigArg1 = TheCall->getArg(1); 7204 7205 // Do standard promotions between the two arguments, returning their common 7206 // type. 7207 QualType Res = UsualArithmeticConversions( 7208 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison); 7209 if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) 7210 return true; 7211 7212 // Make sure any conversions are pushed back into the call; this is 7213 // type safe since unordered compare builtins are declared as "_Bool 7214 // foo(...)". 7215 TheCall->setArg(0, OrigArg0.get()); 7216 TheCall->setArg(1, OrigArg1.get()); 7217 7218 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) 7219 return false; 7220 7221 // If the common type isn't a real floating type, then the arguments were 7222 // invalid for this operation. 7223 if (Res.isNull() || !Res->isRealFloatingType()) 7224 return Diag(OrigArg0.get()->getBeginLoc(), 7225 diag::err_typecheck_call_invalid_ordered_compare) 7226 << OrigArg0.get()->getType() << OrigArg1.get()->getType() 7227 << SourceRange(OrigArg0.get()->getBeginLoc(), 7228 OrigArg1.get()->getEndLoc()); 7229 7230 return false; 7231 } 7232 7233 /// SemaBuiltinFPClassification - Handle functions like 7234 /// __builtin_isnan and friends. This is declared to take (...), so we have 7235 /// to check everything. We expect the last argument to be a floating point 7236 /// value. 7237 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 7238 if (checkArgCount(*this, TheCall, NumArgs)) 7239 return true; 7240 7241 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count 7242 // on all preceding parameters just being int. Try all of those.
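  // Illustrative call shape (assumed example): only the trailing argument is
  // the floating-point operand, the leading five are plain ints:
  //   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
  //                        FP_ZERO, X);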
7243 for (unsigned i = 0; i < NumArgs - 1; ++i) { 7244 Expr *Arg = TheCall->getArg(i); 7245 7246 if (Arg->isTypeDependent()) 7247 return false; 7248 7249 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 7250 7251 if (Res.isInvalid()) 7252 return true; 7253 TheCall->setArg(i, Res.get()); 7254 } 7255 7256 Expr *OrigArg = TheCall->getArg(NumArgs-1); 7257 7258 if (OrigArg->isTypeDependent()) 7259 return false; 7260 7261 // Usual Unary Conversions will convert half to float, which we want for 7262 // machines that use fp16 conversion intrinsics. Otherwise, we want to leave 7263 // the type as it is, but do normal L->Rvalue conversions. 7264 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 7265 OrigArg = UsualUnaryConversions(OrigArg).get(); 7266 else 7267 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 7268 TheCall->setArg(NumArgs - 1, OrigArg); 7269 7270 // This operation requires a non-_Complex floating-point number. 7271 if (!OrigArg->getType()->isRealFloatingType()) 7272 return Diag(OrigArg->getBeginLoc(), 7273 diag::err_typecheck_call_invalid_unary_fp) 7274 << OrigArg->getType() << OrigArg->getSourceRange(); 7275 7276 return false; 7277 } 7278 7279 /// Perform semantic analysis for a call to __builtin_complex. 7280 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 7281 if (checkArgCount(*this, TheCall, 2)) 7282 return true; 7283 7284 bool Dependent = false; 7285 for (unsigned I = 0; I != 2; ++I) { 7286 Expr *Arg = TheCall->getArg(I); 7287 QualType T = Arg->getType(); 7288 if (T->isDependentType()) { 7289 Dependent = true; 7290 continue; 7291 } 7292 7293 // Despite supporting _Complex int, GCC requires a real floating point type 7294 // for the operands of __builtin_complex. 7295 if (!T->isRealFloatingType()) { 7296 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 7297 << Arg->getType() << Arg->getSourceRange(); 7298 } 7299 7300 ExprResult Converted = DefaultLvalueConversion(Arg); 7301 if (Converted.isInvalid()) 7302 return true; 7303 TheCall->setArg(I, Converted.get()); 7304 } 7305 7306 if (Dependent) { 7307 TheCall->setType(Context.DependentTy); 7308 return false; 7309 } 7310 7311 Expr *Real = TheCall->getArg(0); 7312 Expr *Imag = TheCall->getArg(1); 7313 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 7314 return Diag(Real->getBeginLoc(), 7315 diag::err_typecheck_call_different_arg_types) 7316 << Real->getType() << Imag->getType() 7317 << Real->getSourceRange() << Imag->getSourceRange(); 7318 } 7319 7320 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 7321 // don't allow this builtin to form those types either. 7322 // FIXME: Should we allow these types? 7323 if (Real->getType()->isFloat16Type()) 7324 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 7325 << "_Float16"; 7326 if (Real->getType()->isHalfType()) 7327 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 7328 << "half"; 7329 7330 TheCall->setType(Context.getComplexType(Real->getType())); 7331 return false; 7332 } 7333 7334 // Customized Sema checking for VSX builtins that have the following signature: 7335 // vector [...] builtinName(vector [...], vector [...], const int); 7336 // These take the same type of vectors (any legal vector type) for the first 7337 // two arguments and a compile-time constant for the third argument.
7338 // Example builtins are : 7339 // vector double vec_xxpermdi(vector double, vector double, int); 7340 // vector short vec_xxsldwi(vector short, vector short, int); 7341 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 7342 unsigned ExpectedNumArgs = 3; 7343 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 7344 return true; 7345 7346 // Check the third argument is a compile time constant 7347 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 7348 return Diag(TheCall->getBeginLoc(), 7349 diag::err_vsx_builtin_nonconstant_argument) 7350 << 3 /* argument index */ << TheCall->getDirectCallee() 7351 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 7352 TheCall->getArg(2)->getEndLoc()); 7353 7354 QualType Arg1Ty = TheCall->getArg(0)->getType(); 7355 QualType Arg2Ty = TheCall->getArg(1)->getType(); 7356 7357 // Check the type of argument 1 and argument 2 are vectors. 7358 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 7359 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 7360 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 7361 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 7362 << TheCall->getDirectCallee() 7363 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7364 TheCall->getArg(1)->getEndLoc()); 7365 } 7366 7367 // Check the first two arguments are the same type. 7368 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 7369 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 7370 << TheCall->getDirectCallee() 7371 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7372 TheCall->getArg(1)->getEndLoc()); 7373 } 7374 7375 // When default clang type checking is turned off and the customized type 7376 // checking is used, the returning type of the function must be explicitly 7377 // set. Otherwise it is _Bool by default. 7378 TheCall->setType(Arg1Ty); 7379 7380 return false; 7381 } 7382 7383 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 7384 // This is declared to take (...), so we have to check everything. 7385 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 7386 if (TheCall->getNumArgs() < 2) 7387 return ExprError(Diag(TheCall->getEndLoc(), 7388 diag::err_typecheck_call_too_few_args_at_least) 7389 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 7390 << TheCall->getSourceRange()); 7391 7392 // Determine which of the following types of shufflevector we're checking: 7393 // 1) unary, vector mask: (lhs, mask) 7394 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 7395 QualType resType = TheCall->getArg(0)->getType(); 7396 unsigned numElements = 0; 7397 7398 if (!TheCall->getArg(0)->isTypeDependent() && 7399 !TheCall->getArg(1)->isTypeDependent()) { 7400 QualType LHSType = TheCall->getArg(0)->getType(); 7401 QualType RHSType = TheCall->getArg(1)->getType(); 7402 7403 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 7404 return ExprError( 7405 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 7406 << TheCall->getDirectCallee() 7407 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7408 TheCall->getArg(1)->getEndLoc())); 7409 7410 numElements = LHSType->castAs<VectorType>()->getNumElements(); 7411 unsigned numResElements = TheCall->getNumArgs() - 2; 7412 7413 // Check to see if we have a call with 2 vector arguments, the unary shuffle 7414 // with mask. If so, verify that RHS is an integer vector type with the 7415 // same number of elts as lhs. 
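    // Illustrative forms (assumed examples), where the arguments are vectors
    // of the same element count:
    //   __builtin_shufflevector(A, Mask);          // unary, vector mask
    //   __builtin_shufflevector(A, B, 0, 1, 4, 5); // binary, constant indices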
7416 if (TheCall->getNumArgs() == 2) { 7417 if (!RHSType->hasIntegerRepresentation() || 7418 RHSType->castAs<VectorType>()->getNumElements() != numElements) 7419 return ExprError(Diag(TheCall->getBeginLoc(), 7420 diag::err_vec_builtin_incompatible_vector) 7421 << TheCall->getDirectCallee() 7422 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 7423 TheCall->getArg(1)->getEndLoc())); 7424 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 7425 return ExprError(Diag(TheCall->getBeginLoc(), 7426 diag::err_vec_builtin_incompatible_vector) 7427 << TheCall->getDirectCallee() 7428 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7429 TheCall->getArg(1)->getEndLoc())); 7430 } else if (numElements != numResElements) { 7431 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 7432 resType = Context.getVectorType(eltType, numResElements, 7433 VectorType::GenericVector); 7434 } 7435 } 7436 7437 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 7438 if (TheCall->getArg(i)->isTypeDependent() || 7439 TheCall->getArg(i)->isValueDependent()) 7440 continue; 7441 7442 Optional<llvm::APSInt> Result; 7443 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 7444 return ExprError(Diag(TheCall->getBeginLoc(), 7445 diag::err_shufflevector_nonconstant_argument) 7446 << TheCall->getArg(i)->getSourceRange()); 7447 7448 // Allow -1 which will be translated to undef in the IR. 7449 if (Result->isSigned() && Result->isAllOnes()) 7450 continue; 7451 7452 if (Result->getActiveBits() > 64 || 7453 Result->getZExtValue() >= numElements * 2) 7454 return ExprError(Diag(TheCall->getBeginLoc(), 7455 diag::err_shufflevector_argument_too_large) 7456 << TheCall->getArg(i)->getSourceRange()); 7457 } 7458 7459 SmallVector<Expr*, 32> exprs; 7460 7461 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 7462 exprs.push_back(TheCall->getArg(i)); 7463 TheCall->setArg(i, nullptr); 7464 } 7465 7466 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 7467 TheCall->getCallee()->getBeginLoc(), 7468 TheCall->getRParenLoc()); 7469 } 7470 7471 /// SemaConvertVectorExpr - Handle __builtin_convertvector 7472 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 7473 SourceLocation BuiltinLoc, 7474 SourceLocation RParenLoc) { 7475 ExprValueKind VK = VK_PRValue; 7476 ExprObjectKind OK = OK_Ordinary; 7477 QualType DstTy = TInfo->getType(); 7478 QualType SrcTy = E->getType(); 7479 7480 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 7481 return ExprError(Diag(BuiltinLoc, 7482 diag::err_convertvector_non_vector) 7483 << E->getSourceRange()); 7484 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 7485 return ExprError(Diag(BuiltinLoc, 7486 diag::err_convertvector_non_vector_type)); 7487 7488 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 7489 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 7490 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 7491 if (SrcElts != DstElts) 7492 return ExprError(Diag(BuiltinLoc, 7493 diag::err_convertvector_incompatible_vector) 7494 << E->getSourceRange()); 7495 } 7496 7497 return new (Context) 7498 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 7499 } 7500 7501 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 7502 // This is declared to take (const void*, ...) and can take two 7503 // optional constant int args. 
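// Illustrative call shapes (examples, not taken from this file) that the
// checks in SemaBuiltinPrefetch below accept:
//   __builtin_prefetch(p);                     // address only
//   __builtin_prefetch(p, /*rw=*/1);           // second arg: constant 0 or 1
//   __builtin_prefetch(p, 0, /*locality=*/3);  // third arg: constant in [0,3]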
7504 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7505 unsigned NumArgs = TheCall->getNumArgs(); 7506 7507 if (NumArgs > 3) 7508 return Diag(TheCall->getEndLoc(), 7509 diag::err_typecheck_call_too_many_args_at_most) 7510 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7511 7512 // Argument 0 is checked for us and the remaining arguments must be 7513 // constant integers. 7514 for (unsigned i = 1; i != NumArgs; ++i) 7515 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7516 return true; 7517 7518 return false; 7519 } 7520 7521 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7522 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7523 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7524 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7525 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7526 if (checkArgCount(*this, TheCall, 1)) 7527 return true; 7528 Expr *Arg = TheCall->getArg(0); 7529 if (Arg->isInstantiationDependent()) 7530 return false; 7531 7532 QualType ArgTy = Arg->getType(); 7533 if (!ArgTy->hasFloatingRepresentation()) 7534 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7535 << ArgTy; 7536 if (Arg->isLValue()) { 7537 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7538 TheCall->setArg(0, FirstArg.get()); 7539 } 7540 TheCall->setType(TheCall->getArg(0)->getType()); 7541 return false; 7542 } 7543 7544 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7545 // __assume does not evaluate its arguments, and should warn if its argument 7546 // has side effects. 7547 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7548 Expr *Arg = TheCall->getArg(0); 7549 if (Arg->isInstantiationDependent()) return false; 7550 7551 if (Arg->HasSideEffects(Context)) 7552 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7553 << Arg->getSourceRange() 7554 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7555 7556 return false; 7557 } 7558 7559 /// Handle __builtin_alloca_with_align. This is declared 7560 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7561 /// than 8. 7562 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7563 // The alignment must be a constant integer. 7564 Expr *Arg = TheCall->getArg(1); 7565 7566 // We can't check the value of a dependent argument. 7567 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7568 if (const auto *UE = 7569 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7570 if (UE->getKind() == UETT_AlignOf || 7571 UE->getKind() == UETT_PreferredAlignOf) 7572 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7573 << Arg->getSourceRange(); 7574 7575 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7576 7577 if (!Result.isPowerOf2()) 7578 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7579 << Arg->getSourceRange(); 7580 7581 if (Result < Context.getCharWidth()) 7582 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7583 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7584 7585 if (Result > std::numeric_limits<int32_t>::max()) 7586 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7587 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7588 } 7589 7590 return false; 7591 } 7592 7593 /// Handle __builtin_assume_aligned. This is declared 7594 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
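/// Illustrative call shapes (examples, not taken from this file) that pass
/// the checks in SemaBuiltinAssumeAligned below:
///   p = __builtin_assume_aligned(p, 64);          // alignment: constant power of 2
///   p = __builtin_assume_aligned(p, 64, offset);  // optional offset, converted to size_t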
7595 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 7596 unsigned NumArgs = TheCall->getNumArgs(); 7597 7598 if (NumArgs > 3) 7599 return Diag(TheCall->getEndLoc(), 7600 diag::err_typecheck_call_too_many_args_at_most) 7601 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7602 7603 // The alignment must be a constant integer. 7604 Expr *Arg = TheCall->getArg(1); 7605 7606 // We can't check the value of a dependent argument. 7607 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7608 llvm::APSInt Result; 7609 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7610 return true; 7611 7612 if (!Result.isPowerOf2()) 7613 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7614 << Arg->getSourceRange(); 7615 7616 if (Result > Sema::MaximumAlignment) 7617 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 7618 << Arg->getSourceRange() << Sema::MaximumAlignment; 7619 } 7620 7621 if (NumArgs > 2) { 7622 ExprResult Arg(TheCall->getArg(2)); 7623 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 7624 Context.getSizeType(), false); 7625 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7626 if (Arg.isInvalid()) return true; 7627 TheCall->setArg(2, Arg.get()); 7628 } 7629 7630 return false; 7631 } 7632 7633 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 7634 unsigned BuiltinID = 7635 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 7636 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 7637 7638 unsigned NumArgs = TheCall->getNumArgs(); 7639 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 7640 if (NumArgs < NumRequiredArgs) { 7641 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 7642 << 0 /* function call */ << NumRequiredArgs << NumArgs 7643 << TheCall->getSourceRange(); 7644 } 7645 if (NumArgs >= NumRequiredArgs + 0x100) { 7646 return Diag(TheCall->getEndLoc(), 7647 diag::err_typecheck_call_too_many_args_at_most) 7648 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 7649 << TheCall->getSourceRange(); 7650 } 7651 unsigned i = 0; 7652 7653 // For formatting call, check buffer arg. 7654 if (!IsSizeCall) { 7655 ExprResult Arg(TheCall->getArg(i)); 7656 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7657 Context, Context.VoidPtrTy, false); 7658 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7659 if (Arg.isInvalid()) 7660 return true; 7661 TheCall->setArg(i, Arg.get()); 7662 i++; 7663 } 7664 7665 // Check string literal arg. 7666 unsigned FormatIdx = i; 7667 { 7668 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 7669 if (Arg.isInvalid()) 7670 return true; 7671 TheCall->setArg(i, Arg.get()); 7672 i++; 7673 } 7674 7675 // Make sure variadic args are scalar. 7676 unsigned FirstDataArg = i; 7677 while (i < NumArgs) { 7678 ExprResult Arg = DefaultVariadicArgumentPromotion( 7679 TheCall->getArg(i), VariadicFunction, nullptr); 7680 if (Arg.isInvalid()) 7681 return true; 7682 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 7683 if (ArgSize.getQuantity() >= 0x100) { 7684 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 7685 << i << (int)ArgSize.getQuantity() << 0xff 7686 << TheCall->getSourceRange(); 7687 } 7688 TheCall->setArg(i, Arg.get()); 7689 i++; 7690 } 7691 7692 // Check formatting specifiers. NOTE: We're only doing this for the non-size 7693 // call to avoid duplicate diagnostics. 
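// Illustrative call shapes (examples, not taken from this file):
//   __builtin_os_log_format_buffer_size("%d: %s", x, s);  // size variant, format is arg 0
//   __builtin_os_log_format(buf, "%d: %s", x, s);          // formatting variant, buffer then format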
7694 if (!IsSizeCall) { 7695 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 7696 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 7697 bool Success = CheckFormatArguments( 7698 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 7699 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 7700 CheckedVarArgs); 7701 if (!Success) 7702 return true; 7703 } 7704 7705 if (IsSizeCall) { 7706 TheCall->setType(Context.getSizeType()); 7707 } else { 7708 TheCall->setType(Context.VoidPtrTy); 7709 } 7710 return false; 7711 } 7712 7713 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 7714 /// TheCall is a constant expression. 7715 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 7716 llvm::APSInt &Result) { 7717 Expr *Arg = TheCall->getArg(ArgNum); 7718 DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 7719 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 7720 7721 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 7722 7723 Optional<llvm::APSInt> R; 7724 if (!(R = Arg->getIntegerConstantExpr(Context))) 7725 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 7726 << FDecl->getDeclName() << Arg->getSourceRange(); 7727 Result = *R; 7728 return false; 7729 } 7730 7731 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 7732 /// TheCall is a constant expression in the range [Low, High]. 7733 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 7734 int Low, int High, bool RangeIsError) { 7735 if (isConstantEvaluated()) 7736 return false; 7737 llvm::APSInt Result; 7738 7739 // We can't check the value of a dependent argument. 7740 Expr *Arg = TheCall->getArg(ArgNum); 7741 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7742 return false; 7743 7744 // Check constant-ness first. 7745 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7746 return true; 7747 7748 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 7749 if (RangeIsError) 7750 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 7751 << toString(Result, 10) << Low << High << Arg->getSourceRange(); 7752 else 7753 // Defer the warning until we know if the code will be emitted so that 7754 // dead code can ignore this. 7755 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 7756 PDiag(diag::warn_argument_invalid_range) 7757 << toString(Result, 10) << Low << High 7758 << Arg->getSourceRange()); 7759 } 7760 7761 return false; 7762 } 7763 7764 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 7765 /// TheCall is a constant expression that is a multiple of Num. 7766 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 7767 unsigned Num) { 7768 llvm::APSInt Result; 7769 7770 // We can't check the value of a dependent argument. 7771 Expr *Arg = TheCall->getArg(ArgNum); 7772 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7773 return false; 7774 7775 // Check constant-ness first. 7776 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7777 return true; 7778 7779 if (Result.getSExtValue() % Num != 0) 7780 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 7781 << Num << Arg->getSourceRange(); 7782 7783 return false; 7784 } 7785 7786 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 7787 /// constant expression representing a power of 2.
7788 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 7789 llvm::APSInt Result; 7790 7791 // We can't check the value of a dependent argument. 7792 Expr *Arg = TheCall->getArg(ArgNum); 7793 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7794 return false; 7795 7796 // Check constant-ness first. 7797 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7798 return true; 7799 7800 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 7801 // and only if x is a power of 2. 7802 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 7803 return false; 7804 7805 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 7806 << Arg->getSourceRange(); 7807 } 7808 7809 static bool IsShiftedByte(llvm::APSInt Value) { 7810 if (Value.isNegative()) 7811 return false; 7812 7813 // Check if it's a shifted byte, by shifting it down 7814 while (true) { 7815 // If the value fits in the bottom byte, the check passes. 7816 if (Value < 0x100) 7817 return true; 7818 7819 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7820 // fails. 7821 if ((Value & 0xFF) != 0) 7822 return false; 7823 7824 // If the bottom 8 bits are all 0, but something above that is nonzero, 7825 // then shifting the value right by 8 bits won't affect whether it's a 7826 // shifted byte or not. So do that, and go round again. 7827 Value >>= 8; 7828 } 7829 } 7830 7831 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7832 /// a constant expression representing an arbitrary byte value shifted left by 7833 /// a multiple of 8 bits. 7834 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7835 unsigned ArgBits) { 7836 llvm::APSInt Result; 7837 7838 // We can't check the value of a dependent argument. 7839 Expr *Arg = TheCall->getArg(ArgNum); 7840 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7841 return false; 7842 7843 // Check constant-ness first. 7844 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7845 return true; 7846 7847 // Truncate to the given size. 7848 Result = Result.getLoBits(ArgBits); 7849 Result.setIsUnsigned(true); 7850 7851 if (IsShiftedByte(Result)) 7852 return false; 7853 7854 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7855 << Arg->getSourceRange(); 7856 } 7857 7858 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7859 /// TheCall is a constant expression representing either a shifted byte value, 7860 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7861 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7862 /// Arm MVE intrinsics. 7863 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7864 int ArgNum, 7865 unsigned ArgBits) { 7866 llvm::APSInt Result; 7867 7868 // We can't check the value of a dependent argument. 7869 Expr *Arg = TheCall->getArg(ArgNum); 7870 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7871 return false; 7872 7873 // Check constant-ness first. 7874 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7875 return true; 7876 7877 // Truncate to the given size. 7878 Result = Result.getLoBits(ArgBits); 7879 Result.setIsUnsigned(true); 7880 7881 // Check to see if it's in either of the required forms. 
7882 if (IsShiftedByte(Result) || 7883 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 7884 return false; 7885 7886 return Diag(TheCall->getBeginLoc(), 7887 diag::err_argument_not_shifted_byte_or_xxff) 7888 << Arg->getSourceRange(); 7889 } 7890 7891 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 7892 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 7893 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 7894 if (checkArgCount(*this, TheCall, 2)) 7895 return true; 7896 Expr *Arg0 = TheCall->getArg(0); 7897 Expr *Arg1 = TheCall->getArg(1); 7898 7899 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7900 if (FirstArg.isInvalid()) 7901 return true; 7902 QualType FirstArgType = FirstArg.get()->getType(); 7903 if (!FirstArgType->isAnyPointerType()) 7904 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7905 << "first" << FirstArgType << Arg0->getSourceRange(); 7906 TheCall->setArg(0, FirstArg.get()); 7907 7908 ExprResult SecArg = DefaultLvalueConversion(Arg1); 7909 if (SecArg.isInvalid()) 7910 return true; 7911 QualType SecArgType = SecArg.get()->getType(); 7912 if (!SecArgType->isIntegerType()) 7913 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7914 << "second" << SecArgType << Arg1->getSourceRange(); 7915 7916 // Derive the return type from the pointer argument. 7917 TheCall->setType(FirstArgType); 7918 return false; 7919 } 7920 7921 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 7922 if (checkArgCount(*this, TheCall, 2)) 7923 return true; 7924 7925 Expr *Arg0 = TheCall->getArg(0); 7926 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7927 if (FirstArg.isInvalid()) 7928 return true; 7929 QualType FirstArgType = FirstArg.get()->getType(); 7930 if (!FirstArgType->isAnyPointerType()) 7931 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7932 << "first" << FirstArgType << Arg0->getSourceRange(); 7933 TheCall->setArg(0, FirstArg.get()); 7934 7935 // Derive the return type from the pointer argument. 
7936 TheCall->setType(FirstArgType); 7937 7938 // Second arg must be a constant in range [0,15] 7939 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7940 } 7941 7942 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 7943 if (checkArgCount(*this, TheCall, 2)) 7944 return true; 7945 Expr *Arg0 = TheCall->getArg(0); 7946 Expr *Arg1 = TheCall->getArg(1); 7947 7948 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7949 if (FirstArg.isInvalid()) 7950 return true; 7951 QualType FirstArgType = FirstArg.get()->getType(); 7952 if (!FirstArgType->isAnyPointerType()) 7953 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7954 << "first" << FirstArgType << Arg0->getSourceRange(); 7955 7956 QualType SecArgType = Arg1->getType(); 7957 if (!SecArgType->isIntegerType()) 7958 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7959 << "second" << SecArgType << Arg1->getSourceRange(); 7960 TheCall->setType(Context.IntTy); 7961 return false; 7962 } 7963 7964 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 7965 BuiltinID == AArch64::BI__builtin_arm_stg) { 7966 if (checkArgCount(*this, TheCall, 1)) 7967 return true; 7968 Expr *Arg0 = TheCall->getArg(0); 7969 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7970 if (FirstArg.isInvalid()) 7971 return true; 7972 7973 QualType FirstArgType = FirstArg.get()->getType(); 7974 if (!FirstArgType->isAnyPointerType()) 7975 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7976 << "first" << FirstArgType << Arg0->getSourceRange(); 7977 TheCall->setArg(0, FirstArg.get()); 7978 7979 // Derive the return type from the pointer argument. 7980 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 7981 TheCall->setType(FirstArgType); 7982 return false; 7983 } 7984 7985 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 7986 Expr *ArgA = TheCall->getArg(0); 7987 Expr *ArgB = TheCall->getArg(1); 7988 7989 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 7990 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 7991 7992 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 7993 return true; 7994 7995 QualType ArgTypeA = ArgExprA.get()->getType(); 7996 QualType ArgTypeB = ArgExprB.get()->getType(); 7997 7998 auto isNull = [&] (Expr *E) -> bool { 7999 return E->isNullPointerConstant( 8000 Context, Expr::NPC_ValueDependentIsNotNull); }; 8001 8002 // argument should be either a pointer or null 8003 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 8004 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 8005 << "first" << ArgTypeA << ArgA->getSourceRange(); 8006 8007 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 8008 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 8009 << "second" << ArgTypeB << ArgB->getSourceRange(); 8010 8011 // Ensure Pointee types are compatible 8012 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 8013 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 8014 QualType pointeeA = ArgTypeA->getPointeeType(); 8015 QualType pointeeB = ArgTypeB->getPointeeType(); 8016 if (!Context.typesAreCompatible( 8017 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 8018 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 8019 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 8020 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 8021 << ArgB->getSourceRange(); 8022 } 8023 } 8024 8025 // at least one argument should be pointer type 8026 if
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 8027 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 8028 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 8029 8030 if (isNull(ArgA)) // adopt type of the other pointer 8031 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 8032 8033 if (isNull(ArgB)) 8034 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 8035 8036 TheCall->setArg(0, ArgExprA.get()); 8037 TheCall->setArg(1, ArgExprB.get()); 8038 TheCall->setType(Context.LongLongTy); 8039 return false; 8040 } 8041 assert(false && "Unhandled ARM MTE intrinsic"); 8042 return true; 8043 } 8044 8045 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 8046 /// TheCall is an ARM/AArch64 special register string literal. 8047 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 8048 int ArgNum, unsigned ExpectedFieldNum, 8049 bool AllowName) { 8050 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 8051 BuiltinID == ARM::BI__builtin_arm_wsr64 || 8052 BuiltinID == ARM::BI__builtin_arm_rsr || 8053 BuiltinID == ARM::BI__builtin_arm_rsrp || 8054 BuiltinID == ARM::BI__builtin_arm_wsr || 8055 BuiltinID == ARM::BI__builtin_arm_wsrp; 8056 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 8057 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 8058 BuiltinID == AArch64::BI__builtin_arm_rsr || 8059 BuiltinID == AArch64::BI__builtin_arm_rsrp || 8060 BuiltinID == AArch64::BI__builtin_arm_wsr || 8061 BuiltinID == AArch64::BI__builtin_arm_wsrp; 8062 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 8063 8064 // We can't check the value of a dependent argument. 8065 Expr *Arg = TheCall->getArg(ArgNum); 8066 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8067 return false; 8068 8069 // Check if the argument is a string literal. 8070 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 8071 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 8072 << Arg->getSourceRange(); 8073 8074 // Check the type of special register given. 8075 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 8076 SmallVector<StringRef, 6> Fields; 8077 Reg.split(Fields, ":"); 8078 8079 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 8080 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 8081 << Arg->getSourceRange(); 8082 8083 // If the string is the name of a register then we cannot check that it is 8084 // valid here but if the string is of one the forms described in ACLE then we 8085 // can check that the supplied fields are integers and within the valid 8086 // ranges. 8087 if (Fields.size() > 1) { 8088 bool FiveFields = Fields.size() == 5; 8089 8090 bool ValidString = true; 8091 if (IsARMBuiltin) { 8092 ValidString &= Fields[0].startswith_insensitive("cp") || 8093 Fields[0].startswith_insensitive("p"); 8094 if (ValidString) 8095 Fields[0] = Fields[0].drop_front( 8096 Fields[0].startswith_insensitive("cp") ? 2 : 1); 8097 8098 ValidString &= Fields[2].startswith_insensitive("c"); 8099 if (ValidString) 8100 Fields[2] = Fields[2].drop_front(1); 8101 8102 if (FiveFields) { 8103 ValidString &= Fields[3].startswith_insensitive("c"); 8104 if (ValidString) 8105 Fields[3] = Fields[3].drop_front(1); 8106 } 8107 } 8108 8109 SmallVector<int, 5> Ranges; 8110 if (FiveFields) 8111 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 8112 else 8113 Ranges.append({15, 7, 15}); 8114 8115 for (unsigned i=0; i<Fields.size(); ++i) { 8116 int IntField; 8117 ValidString &= !Fields[i].getAsInteger(10, IntField); 8118 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 8119 } 8120 8121 if (!ValidString) 8122 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 8123 << Arg->getSourceRange(); 8124 } else if (IsAArch64Builtin && Fields.size() == 1) { 8125 // If the register name is one of those that appear in the condition below 8126 // and the special register builtin being used is one of the write builtins, 8127 // then we require that the argument provided for writing to the register 8128 // is an integer constant expression. This is because it will be lowered to 8129 // an MSR (immediate) instruction, so we need to know the immediate at 8130 // compile time. 8131 if (TheCall->getNumArgs() != 2) 8132 return false; 8133 8134 std::string RegLower = Reg.lower(); 8135 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 8136 RegLower != "pan" && RegLower != "uao") 8137 return false; 8138 8139 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 8140 } 8141 8142 return false; 8143 } 8144 8145 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 8146 /// Emit an error and return true on failure; return false on success. 8147 /// TypeStr is a string containing the type descriptor of the value returned by 8148 /// the builtin and the descriptors of the expected type of the arguments. 8149 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, 8150 const char *TypeStr) { 8151 8152 assert((TypeStr[0] != '\0') && 8153 "Invalid types in PPC MMA builtin declaration"); 8154 8155 switch (BuiltinID) { 8156 default: 8157 // This function is called in CheckPPCBuiltinFunctionCall where the 8158 // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here 8159 // we are isolating the pair vector memop builtins that can be used with mma 8160 // off so the default case is every builtin that requires mma and paired 8161 // vector memops. 8162 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 8163 diag::err_ppc_builtin_only_on_arch, "10") || 8164 SemaFeatureCheck(*this, TheCall, "mma", 8165 diag::err_ppc_builtin_only_on_arch, "10")) 8166 return true; 8167 break; 8168 case PPC::BI__builtin_vsx_lxvp: 8169 case PPC::BI__builtin_vsx_stxvp: 8170 case PPC::BI__builtin_vsx_assemble_pair: 8171 case PPC::BI__builtin_vsx_disassemble_pair: 8172 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 8173 diag::err_ppc_builtin_only_on_arch, "10")) 8174 return true; 8175 break; 8176 } 8177 8178 unsigned Mask = 0; 8179 unsigned ArgNum = 0; 8180 8181 // The first type in TypeStr is the type of the value returned by the 8182 // builtin. So we first read that type and change the type of TheCall. 8183 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8184 TheCall->setType(type); 8185 8186 while (*TypeStr != '\0') { 8187 Mask = 0; 8188 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8189 if (ArgNum >= TheCall->getNumArgs()) { 8190 ArgNum++; 8191 break; 8192 } 8193 8194 Expr *Arg = TheCall->getArg(ArgNum); 8195 QualType PassedType = Arg->getType(); 8196 QualType StrippedRVType = PassedType.getCanonicalType(); 8197 8198 // Strip Restrict/Volatile qualifiers. 
8199 if (StrippedRVType.isRestrictQualified() || 8200 StrippedRVType.isVolatileQualified()) 8201 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType(); 8202 8203 // The only case where the argument type and expected type are allowed to 8204 // mismatch is if the argument type is a non-void pointer (or array) and 8205 // expected type is a void pointer. 8206 if (StrippedRVType != ExpectedType) 8207 if (!(ExpectedType->isVoidPointerType() && 8208 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType()))) 8209 return Diag(Arg->getBeginLoc(), 8210 diag::err_typecheck_convert_incompatible) 8211 << PassedType << ExpectedType << 1 << 0 << 0; 8212 8213 // If the value of the Mask is not 0, we have a constraint in the size of 8214 // the integer argument so here we ensure the argument is a constant that 8215 // is in the valid range. 8216 if (Mask != 0 && 8217 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 8218 return true; 8219 8220 ArgNum++; 8221 } 8222 8223 // In case we exited early from the previous loop, there are other types to 8224 // read from TypeStr. So we need to read them all to ensure we have the right 8225 // number of arguments in TheCall and if it is not the case, to display a 8226 // better error message. 8227 while (*TypeStr != '\0') { 8228 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8229 ArgNum++; 8230 } 8231 if (checkArgCount(*this, TheCall, ArgNum)) 8232 return true; 8233 8234 return false; 8235 } 8236 8237 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 8238 /// This checks that the target supports __builtin_longjmp and 8239 /// that val is a constant 1. 8240 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 8241 if (!Context.getTargetInfo().hasSjLjLowering()) 8242 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 8243 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8244 8245 Expr *Arg = TheCall->getArg(1); 8246 llvm::APSInt Result; 8247 8248 // TODO: This is less than ideal. Overload this to take a value. 8249 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 8250 return true; 8251 8252 if (Result != 1) 8253 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 8254 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 8255 8256 return false; 8257 } 8258 8259 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 8260 /// This checks that the target supports __builtin_setjmp. 8261 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 8262 if (!Context.getTargetInfo().hasSjLjLowering()) 8263 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 8264 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8265 return false; 8266 } 8267 8268 namespace { 8269 8270 class UncoveredArgHandler { 8271 enum { Unknown = -1, AllCovered = -2 }; 8272 8273 signed FirstUncoveredArg = Unknown; 8274 SmallVector<const Expr *, 4> DiagnosticExprs; 8275 8276 public: 8277 UncoveredArgHandler() = default; 8278 8279 bool hasUncoveredArg() const { 8280 return (FirstUncoveredArg >= 0); 8281 } 8282 8283 unsigned getUncoveredArg() const { 8284 assert(hasUncoveredArg() && "no uncovered argument"); 8285 return FirstUncoveredArg; 8286 } 8287 8288 void setAllCovered() { 8289 // A string has been found with all arguments covered, so clear out 8290 // the diagnostics. 
8291 DiagnosticExprs.clear(); 8292 FirstUncoveredArg = AllCovered; 8293 } 8294 8295 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 8296 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 8297 8298 // Don't update if a previous string covers all arguments. 8299 if (FirstUncoveredArg == AllCovered) 8300 return; 8301 8302 // UncoveredArgHandler tracks the highest uncovered argument index 8303 // and with it all the strings that match this index. 8304 if (NewFirstUncoveredArg == FirstUncoveredArg) 8305 DiagnosticExprs.push_back(StrExpr); 8306 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 8307 DiagnosticExprs.clear(); 8308 DiagnosticExprs.push_back(StrExpr); 8309 FirstUncoveredArg = NewFirstUncoveredArg; 8310 } 8311 } 8312 8313 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 8314 }; 8315 8316 enum StringLiteralCheckType { 8317 SLCT_NotALiteral, 8318 SLCT_UncheckedLiteral, 8319 SLCT_CheckedLiteral 8320 }; 8321 8322 } // namespace 8323 8324 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 8325 BinaryOperatorKind BinOpKind, 8326 bool AddendIsRight) { 8327 unsigned BitWidth = Offset.getBitWidth(); 8328 unsigned AddendBitWidth = Addend.getBitWidth(); 8329 // There might be negative interim results. 8330 if (Addend.isUnsigned()) { 8331 Addend = Addend.zext(++AddendBitWidth); 8332 Addend.setIsSigned(true); 8333 } 8334 // Adjust the bit width of the APSInts. 8335 if (AddendBitWidth > BitWidth) { 8336 Offset = Offset.sext(AddendBitWidth); 8337 BitWidth = AddendBitWidth; 8338 } else if (BitWidth > AddendBitWidth) { 8339 Addend = Addend.sext(BitWidth); 8340 } 8341 8342 bool Ov = false; 8343 llvm::APSInt ResOffset = Offset; 8344 if (BinOpKind == BO_Add) 8345 ResOffset = Offset.sadd_ov(Addend, Ov); 8346 else { 8347 assert(AddendIsRight && BinOpKind == BO_Sub && 8348 "operator must be add or sub with addend on the right"); 8349 ResOffset = Offset.ssub_ov(Addend, Ov); 8350 } 8351 8352 // We add an offset to a pointer here so we should support an offset as big as 8353 // possible. 8354 if (Ov) { 8355 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 8356 "index (intermediate) result too big"); 8357 Offset = Offset.sext(2 * BitWidth); 8358 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 8359 return; 8360 } 8361 8362 Offset = ResOffset; 8363 } 8364 8365 namespace { 8366 8367 // This is a wrapper class around StringLiteral to support offsetted string 8368 // literals as format strings. It takes the offset into account when returning 8369 // the string and its length or the source locations to display notes correctly. 
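// For example (illustrative, not from this file): for a call like
// printf("%d: %s" + 4, s), the checker wraps the literal with Offset = 4 so
// that only "%s" is analyzed, while reported byte locations still map back
// into the original literal.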
8370 class FormatStringLiteral { 8371 const StringLiteral *FExpr; 8372 int64_t Offset; 8373 8374 public: 8375 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 8376 : FExpr(fexpr), Offset(Offset) {} 8377 8378 StringRef getString() const { 8379 return FExpr->getString().drop_front(Offset); 8380 } 8381 8382 unsigned getByteLength() const { 8383 return FExpr->getByteLength() - getCharByteWidth() * Offset; 8384 } 8385 8386 unsigned getLength() const { return FExpr->getLength() - Offset; } 8387 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 8388 8389 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 8390 8391 QualType getType() const { return FExpr->getType(); } 8392 8393 bool isAscii() const { return FExpr->isAscii(); } 8394 bool isWide() const { return FExpr->isWide(); } 8395 bool isUTF8() const { return FExpr->isUTF8(); } 8396 bool isUTF16() const { return FExpr->isUTF16(); } 8397 bool isUTF32() const { return FExpr->isUTF32(); } 8398 bool isPascal() const { return FExpr->isPascal(); } 8399 8400 SourceLocation getLocationOfByte( 8401 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 8402 const TargetInfo &Target, unsigned *StartToken = nullptr, 8403 unsigned *StartTokenByteOffset = nullptr) const { 8404 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 8405 StartToken, StartTokenByteOffset); 8406 } 8407 8408 SourceLocation getBeginLoc() const LLVM_READONLY { 8409 return FExpr->getBeginLoc().getLocWithOffset(Offset); 8410 } 8411 8412 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 8413 }; 8414 8415 } // namespace 8416 8417 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8418 const Expr *OrigFormatExpr, 8419 ArrayRef<const Expr *> Args, 8420 bool HasVAListArg, unsigned format_idx, 8421 unsigned firstDataArg, 8422 Sema::FormatStringType Type, 8423 bool inFunctionCall, 8424 Sema::VariadicCallType CallType, 8425 llvm::SmallBitVector &CheckedVarArgs, 8426 UncoveredArgHandler &UncoveredArg, 8427 bool IgnoreStringsWithoutSpecifiers); 8428 8429 // Determine if an expression is a string literal or constant string. 8430 // If this function returns false on the arguments to a function expecting a 8431 // format string, we will usually need to emit a warning. 8432 // True string literals are then checked by CheckFormatString. 8433 static StringLiteralCheckType 8434 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 8435 bool HasVAListArg, unsigned format_idx, 8436 unsigned firstDataArg, Sema::FormatStringType Type, 8437 Sema::VariadicCallType CallType, bool InFunctionCall, 8438 llvm::SmallBitVector &CheckedVarArgs, 8439 UncoveredArgHandler &UncoveredArg, 8440 llvm::APSInt Offset, 8441 bool IgnoreStringsWithoutSpecifiers = false) { 8442 if (S.isConstantEvaluated()) 8443 return SLCT_NotALiteral; 8444 tryAgain: 8445 assert(Offset.isSigned() && "invalid offset"); 8446 8447 if (E->isTypeDependent() || E->isValueDependent()) 8448 return SLCT_NotALiteral; 8449 8450 E = E->IgnoreParenCasts(); 8451 8452 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 8453 // Technically -Wformat-nonliteral does not warn about this case. 8454 // The behavior of printf and friends in this case is implementation 8455 // dependent. Ideally if the format string cannot be null then 8456 // it should have a 'nonnull' attribute in the function prototype. 
8457 return SLCT_UncheckedLiteral; 8458 8459 switch (E->getStmtClass()) { 8460 case Stmt::BinaryConditionalOperatorClass: 8461 case Stmt::ConditionalOperatorClass: { 8462 // The expression is a literal if both sub-expressions were, and it was 8463 // completely checked only if both sub-expressions were checked. 8464 const AbstractConditionalOperator *C = 8465 cast<AbstractConditionalOperator>(E); 8466 8467 // Determine whether it is necessary to check both sub-expressions, for 8468 // example, because the condition expression is a constant that can be 8469 // evaluated at compile time. 8470 bool CheckLeft = true, CheckRight = true; 8471 8472 bool Cond; 8473 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 8474 S.isConstantEvaluated())) { 8475 if (Cond) 8476 CheckRight = false; 8477 else 8478 CheckLeft = false; 8479 } 8480 8481 // We need to maintain the offsets for the right and the left hand side 8482 // separately to check if every possible indexed expression is a valid 8483 // string literal. They might have different offsets for different string 8484 // literals in the end. 8485 StringLiteralCheckType Left; 8486 if (!CheckLeft) 8487 Left = SLCT_UncheckedLiteral; 8488 else { 8489 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 8490 HasVAListArg, format_idx, firstDataArg, 8491 Type, CallType, InFunctionCall, 8492 CheckedVarArgs, UncoveredArg, Offset, 8493 IgnoreStringsWithoutSpecifiers); 8494 if (Left == SLCT_NotALiteral || !CheckRight) { 8495 return Left; 8496 } 8497 } 8498 8499 StringLiteralCheckType Right = checkFormatStringExpr( 8500 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 8501 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8502 IgnoreStringsWithoutSpecifiers); 8503 8504 return (CheckLeft && Left < Right) ? Left : Right; 8505 } 8506 8507 case Stmt::ImplicitCastExprClass: 8508 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 8509 goto tryAgain; 8510 8511 case Stmt::OpaqueValueExprClass: 8512 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 8513 E = src; 8514 goto tryAgain; 8515 } 8516 return SLCT_NotALiteral; 8517 8518 case Stmt::PredefinedExprClass: 8519 // While __func__, etc., are technically not string literals, they 8520 // cannot contain format specifiers and thus are not a security 8521 // liability. 8522 return SLCT_UncheckedLiteral; 8523 8524 case Stmt::DeclRefExprClass: { 8525 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 8526 8527 // As an exception, do not flag errors for variables binding to 8528 // const string literals. 8529 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 8530 bool isConstant = false; 8531 QualType T = DR->getType(); 8532 8533 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 8534 isConstant = AT->getElementType().isConstant(S.Context); 8535 } else if (const PointerType *PT = T->getAs<PointerType>()) { 8536 isConstant = T.isConstant(S.Context) && 8537 PT->getPointeeType().isConstant(S.Context); 8538 } else if (T->isObjCObjectPointerType()) { 8539 // In ObjC, there is usually no "const ObjectPointer" type, 8540 // so don't check if the pointee type is constant. 
8541 isConstant = T.isConstant(S.Context); 8542 } 8543 8544 if (isConstant) { 8545 if (const Expr *Init = VD->getAnyInitializer()) { 8546 // Look through initializers like const char c[] = { "foo" } 8547 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 8548 if (InitList->isStringLiteralInit()) 8549 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 8550 } 8551 return checkFormatStringExpr(S, Init, Args, 8552 HasVAListArg, format_idx, 8553 firstDataArg, Type, CallType, 8554 /*InFunctionCall*/ false, CheckedVarArgs, 8555 UncoveredArg, Offset); 8556 } 8557 } 8558 8559 // For vprintf* functions (i.e., HasVAListArg==true), we add a 8560 // special check to see if the format string is a function parameter 8561 // of the function calling the printf function. If the function 8562 // has an attribute indicating it is a printf-like function, then we 8563 // should suppress warnings concerning non-literals being used in a call 8564 // to a vprintf function. For example: 8565 // 8566 // void 8567 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 8568 // va_list ap; 8569 // va_start(ap, fmt); 8570 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 8571 // ... 8572 // } 8573 if (HasVAListArg) { 8574 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 8575 if (const Decl *D = dyn_cast<Decl>(PV->getDeclContext())) { 8576 int PVIndex = PV->getFunctionScopeIndex() + 1; 8577 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 8578 // adjust for implicit parameter 8579 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) 8580 if (MD->isInstance()) 8581 ++PVIndex; 8582 // We also check if the formats are compatible. 8583 // We can't pass a 'scanf' string to a 'printf' function. 8584 if (PVIndex == PVFormat->getFormatIdx() && 8585 Type == S.GetFormatStringType(PVFormat)) 8586 return SLCT_UncheckedLiteral; 8587 } 8588 } 8589 } 8590 } 8591 } 8592 8593 return SLCT_NotALiteral; 8594 } 8595 8596 case Stmt::CallExprClass: 8597 case Stmt::CXXMemberCallExprClass: { 8598 const CallExpr *CE = cast<CallExpr>(E); 8599 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 8600 bool IsFirst = true; 8601 StringLiteralCheckType CommonResult; 8602 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 8603 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 8604 StringLiteralCheckType Result = checkFormatStringExpr( 8605 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8606 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8607 IgnoreStringsWithoutSpecifiers); 8608 if (IsFirst) { 8609 CommonResult = Result; 8610 IsFirst = false; 8611 } 8612 } 8613 if (!IsFirst) 8614 return CommonResult; 8615 8616 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 8617 unsigned BuiltinID = FD->getBuiltinID(); 8618 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 8619 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 8620 const Expr *Arg = CE->getArg(0); 8621 return checkFormatStringExpr(S, Arg, Args, 8622 HasVAListArg, format_idx, 8623 firstDataArg, Type, CallType, 8624 InFunctionCall, CheckedVarArgs, 8625 UncoveredArg, Offset, 8626 IgnoreStringsWithoutSpecifiers); 8627 } 8628 } 8629 } 8630 8631 return SLCT_NotALiteral; 8632 } 8633 case Stmt::ObjCMessageExprClass: { 8634 const auto *ME = cast<ObjCMessageExpr>(E); 8635 if (const auto *MD = ME->getMethodDecl()) { 8636 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 8637 // As a special case heuristic, if we're using 
the method -[NSBundle 8638 // localizedStringForKey:value:table:], ignore any key strings that lack 8639 // format specifiers. The idea is that if the key doesn't have any 8640 // format specifiers then it's probably just a key to map to the 8641 // localized strings. If it does have format specifiers though, then it's 8642 // likely that the text of the key is the format string in the 8643 // programmer's language, and should be checked. 8644 const ObjCInterfaceDecl *IFace; 8645 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 8646 IFace->getIdentifier()->isStr("NSBundle") && 8647 MD->getSelector().isKeywordSelector( 8648 {"localizedStringForKey", "value", "table"})) { 8649 IgnoreStringsWithoutSpecifiers = true; 8650 } 8651 8652 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 8653 return checkFormatStringExpr( 8654 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8655 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8656 IgnoreStringsWithoutSpecifiers); 8657 } 8658 } 8659 8660 return SLCT_NotALiteral; 8661 } 8662 case Stmt::ObjCStringLiteralClass: 8663 case Stmt::StringLiteralClass: { 8664 const StringLiteral *StrE = nullptr; 8665 8666 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 8667 StrE = ObjCFExpr->getString(); 8668 else 8669 StrE = cast<StringLiteral>(E); 8670 8671 if (StrE) { 8672 if (Offset.isNegative() || Offset > StrE->getLength()) { 8673 // TODO: It would be better to have an explicit warning for out of 8674 // bounds literals. 8675 return SLCT_NotALiteral; 8676 } 8677 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 8678 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 8679 firstDataArg, Type, InFunctionCall, CallType, 8680 CheckedVarArgs, UncoveredArg, 8681 IgnoreStringsWithoutSpecifiers); 8682 return SLCT_CheckedLiteral; 8683 } 8684 8685 return SLCT_NotALiteral; 8686 } 8687 case Stmt::BinaryOperatorClass: { 8688 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 8689 8690 // A string literal + an int offset is still a string literal.
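// For example (illustrative, not from this file): printf("%s: %d" + 4, n)
// arrives here as (literal + 4); the constant addend is folded into Offset
// via sumOffsets() and the walk continues on the string-literal operand.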
8691 if (BinOp->isAdditiveOp()) { 8692 Expr::EvalResult LResult, RResult; 8693 8694 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 8695 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8696 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 8697 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8698 8699 if (LIsInt != RIsInt) { 8700 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 8701 8702 if (LIsInt) { 8703 if (BinOpKind == BO_Add) { 8704 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8705 E = BinOp->getRHS(); 8706 goto tryAgain; 8707 } 8708 } else { 8709 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8710 E = BinOp->getLHS(); 8711 goto tryAgain; 8712 } 8713 } 8714 } 8715 8716 return SLCT_NotALiteral; 8717 } 8718 case Stmt::UnaryOperatorClass: { 8719 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8720 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8721 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8722 Expr::EvalResult IndexResult; 8723 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8724 Expr::SE_NoSideEffects, 8725 S.isConstantEvaluated())) { 8726 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8727 /*RHS is int*/ true); 8728 E = ASE->getBase(); 8729 goto tryAgain; 8730 } 8731 } 8732 8733 return SLCT_NotALiteral; 8734 } 8735 8736 default: 8737 return SLCT_NotALiteral; 8738 } 8739 } 8740 8741 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8742 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8743 .Case("scanf", FST_Scanf) 8744 .Cases("printf", "printf0", FST_Printf) 8745 .Cases("NSString", "CFString", FST_NSString) 8746 .Case("strftime", FST_Strftime) 8747 .Case("strfmon", FST_Strfmon) 8748 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8749 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8750 .Case("os_trace", FST_OSLog) 8751 .Case("os_log", FST_OSLog) 8752 .Default(FST_Unknown); 8753 } 8754 8755 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8756 /// functions) for correct use of format strings. 8757 /// Returns true if a format string has been fully checked. 8758 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8759 ArrayRef<const Expr *> Args, 8760 bool IsCXXMember, 8761 VariadicCallType CallType, 8762 SourceLocation Loc, SourceRange Range, 8763 llvm::SmallBitVector &CheckedVarArgs) { 8764 FormatStringInfo FSI; 8765 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 8766 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 8767 FSI.FirstDataArg, GetFormatStringType(Format), 8768 CallType, Loc, Range, CheckedVarArgs); 8769 return false; 8770 } 8771 8772 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8773 bool HasVAListArg, unsigned format_idx, 8774 unsigned firstDataArg, FormatStringType Type, 8775 VariadicCallType CallType, 8776 SourceLocation Loc, SourceRange Range, 8777 llvm::SmallBitVector &CheckedVarArgs) { 8778 // CHECK: printf/scanf-like function is called with no format string. 8779 if (format_idx >= Args.size()) { 8780 Diag(Loc, diag::warn_missing_format_string) << Range; 8781 return false; 8782 } 8783 8784 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8785 8786 // CHECK: format string is not a string literal. 8787 // 8788 // Dynamically generated format strings are difficult to 8789 // automatically vet at compile time. 
Requiring that format strings 8790 // are string literals: (1) permits the checking of format strings by 8791 // the compiler and thereby (2) can practically remove the source of 8792 // many format string exploits. 8793 8794 // Format string can be either ObjC string (e.g. @"%d") or 8795 // C string (e.g. "%d") 8796 // ObjC string uses the same format specifiers as C string, so we can use 8797 // the same format string checking logic for both ObjC and C strings. 8798 UncoveredArgHandler UncoveredArg; 8799 StringLiteralCheckType CT = 8800 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 8801 format_idx, firstDataArg, Type, CallType, 8802 /*IsFunctionCall*/ true, CheckedVarArgs, 8803 UncoveredArg, 8804 /*no string offset*/ llvm::APSInt(64, false) = 0); 8805 8806 // Generate a diagnostic where an uncovered argument is detected. 8807 if (UncoveredArg.hasUncoveredArg()) { 8808 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8809 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8810 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8811 } 8812 8813 if (CT != SLCT_NotALiteral) 8814 // Literal format string found, check done! 8815 return CT == SLCT_CheckedLiteral; 8816 8817 // Strftime is particular as it always uses a single 'time' argument, 8818 // so it is safe to pass a non-literal string. 8819 if (Type == FST_Strftime) 8820 return false; 8821 8822 // Do not emit diag when the string param is a macro expansion and the 8823 // format is either NSString or CFString. This is a hack to prevent 8824 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8825 // which are usually used in place of NS and CF string literals. 8826 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8827 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8828 return false; 8829 8830 // If there are no arguments specified, warn with -Wformat-security, otherwise 8831 // warn only with -Wformat-nonliteral. 8832 if (Args.size() == firstDataArg) { 8833 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8834 << OrigFormatExpr->getSourceRange(); 8835 switch (Type) { 8836 default: 8837 break; 8838 case FST_Kprintf: 8839 case FST_FreeBSDKPrintf: 8840 case FST_Printf: 8841 Diag(FormatLoc, diag::note_format_security_fixit) 8842 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8843 break; 8844 case FST_NSString: 8845 Diag(FormatLoc, diag::note_format_security_fixit) 8846 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8847 break; 8848 } 8849 } else { 8850 Diag(FormatLoc, diag::warn_format_nonliteral) 8851 << OrigFormatExpr->getSourceRange(); 8852 } 8853 return false; 8854 } 8855 8856 namespace { 8857 8858 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8859 protected: 8860 Sema &S; 8861 const FormatStringLiteral *FExpr; 8862 const Expr *OrigFormatExpr; 8863 const Sema::FormatStringType FSType; 8864 const unsigned FirstDataArg; 8865 const unsigned NumDataArgs; 8866 const char *Beg; // Start of format string. 
8867 const bool HasVAListArg; 8868 ArrayRef<const Expr *> Args; 8869 unsigned FormatIdx; 8870 llvm::SmallBitVector CoveredArgs; 8871 bool usesPositionalArgs = false; 8872 bool atFirstArg = true; 8873 bool inFunctionCall; 8874 Sema::VariadicCallType CallType; 8875 llvm::SmallBitVector &CheckedVarArgs; 8876 UncoveredArgHandler &UncoveredArg; 8877 8878 public: 8879 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8880 const Expr *origFormatExpr, 8881 const Sema::FormatStringType type, unsigned firstDataArg, 8882 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8883 ArrayRef<const Expr *> Args, unsigned formatIdx, 8884 bool inFunctionCall, Sema::VariadicCallType callType, 8885 llvm::SmallBitVector &CheckedVarArgs, 8886 UncoveredArgHandler &UncoveredArg) 8887 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8888 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8889 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8890 inFunctionCall(inFunctionCall), CallType(callType), 8891 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8892 CoveredArgs.resize(numDataArgs); 8893 CoveredArgs.reset(); 8894 } 8895 8896 void DoneProcessing(); 8897 8898 void HandleIncompleteSpecifier(const char *startSpecifier, 8899 unsigned specifierLen) override; 8900 8901 void HandleInvalidLengthModifier( 8902 const analyze_format_string::FormatSpecifier &FS, 8903 const analyze_format_string::ConversionSpecifier &CS, 8904 const char *startSpecifier, unsigned specifierLen, 8905 unsigned DiagID); 8906 8907 void HandleNonStandardLengthModifier( 8908 const analyze_format_string::FormatSpecifier &FS, 8909 const char *startSpecifier, unsigned specifierLen); 8910 8911 void HandleNonStandardConversionSpecifier( 8912 const analyze_format_string::ConversionSpecifier &CS, 8913 const char *startSpecifier, unsigned specifierLen); 8914 8915 void HandlePosition(const char *startPos, unsigned posLen) override; 8916 8917 void HandleInvalidPosition(const char *startSpecifier, 8918 unsigned specifierLen, 8919 analyze_format_string::PositionContext p) override; 8920 8921 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8922 8923 void HandleNullChar(const char *nullCharacter) override; 8924 8925 template <typename Range> 8926 static void 8927 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8928 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8929 bool IsStringLocation, Range StringRange, 8930 ArrayRef<FixItHint> Fixit = None); 8931 8932 protected: 8933 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8934 const char *startSpec, 8935 unsigned specifierLen, 8936 const char *csStart, unsigned csLen); 8937 8938 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8939 const char *startSpec, 8940 unsigned specifierLen); 8941 8942 SourceRange getFormatStringRange(); 8943 CharSourceRange getSpecifierRange(const char *startSpecifier, 8944 unsigned specifierLen); 8945 SourceLocation getLocationOfByte(const char *x); 8946 8947 const Expr *getDataArg(unsigned i) const; 8948 8949 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8950 const analyze_format_string::ConversionSpecifier &CS, 8951 const char *startSpecifier, unsigned specifierLen, 8952 unsigned argIndex); 8953 8954 template <typename Range> 8955 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8956 bool IsStringLocation, Range StringRange, 8957 ArrayRef<FixItHint> Fixit = None); 
8958 }; 8959 8960 } // namespace 8961 8962 SourceRange CheckFormatHandler::getFormatStringRange() { 8963 return OrigFormatExpr->getSourceRange(); 8964 } 8965 8966 CharSourceRange CheckFormatHandler:: 8967 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8968 SourceLocation Start = getLocationOfByte(startSpecifier); 8969 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8970 8971 // Advance the end SourceLocation by one due to half-open ranges. 8972 End = End.getLocWithOffset(1); 8973 8974 return CharSourceRange::getCharRange(Start, End); 8975 } 8976 8977 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 8978 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 8979 S.getLangOpts(), S.Context.getTargetInfo()); 8980 } 8981 8982 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 8983 unsigned specifierLen){ 8984 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 8985 getLocationOfByte(startSpecifier), 8986 /*IsStringLocation*/true, 8987 getSpecifierRange(startSpecifier, specifierLen)); 8988 } 8989 8990 void CheckFormatHandler::HandleInvalidLengthModifier( 8991 const analyze_format_string::FormatSpecifier &FS, 8992 const analyze_format_string::ConversionSpecifier &CS, 8993 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 8994 using namespace analyze_format_string; 8995 8996 const LengthModifier &LM = FS.getLengthModifier(); 8997 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8998 8999 // See if we know how to fix this length modifier. 9000 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9001 if (FixedLM) { 9002 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9003 getLocationOfByte(LM.getStart()), 9004 /*IsStringLocation*/true, 9005 getSpecifierRange(startSpecifier, specifierLen)); 9006 9007 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9008 << FixedLM->toString() 9009 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9010 9011 } else { 9012 FixItHint Hint; 9013 if (DiagID == diag::warn_format_nonsensical_length) 9014 Hint = FixItHint::CreateRemoval(LMRange); 9015 9016 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9017 getLocationOfByte(LM.getStart()), 9018 /*IsStringLocation*/true, 9019 getSpecifierRange(startSpecifier, specifierLen), 9020 Hint); 9021 } 9022 } 9023 9024 void CheckFormatHandler::HandleNonStandardLengthModifier( 9025 const analyze_format_string::FormatSpecifier &FS, 9026 const char *startSpecifier, unsigned specifierLen) { 9027 using namespace analyze_format_string; 9028 9029 const LengthModifier &LM = FS.getLengthModifier(); 9030 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9031 9032 // See if we know how to fix this length modifier. 
9033 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9034 if (FixedLM) { 9035 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9036 << LM.toString() << 0, 9037 getLocationOfByte(LM.getStart()), 9038 /*IsStringLocation*/true, 9039 getSpecifierRange(startSpecifier, specifierLen)); 9040 9041 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9042 << FixedLM->toString() 9043 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9044 9045 } else { 9046 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9047 << LM.toString() << 0, 9048 getLocationOfByte(LM.getStart()), 9049 /*IsStringLocation*/true, 9050 getSpecifierRange(startSpecifier, specifierLen)); 9051 } 9052 } 9053 9054 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 9055 const analyze_format_string::ConversionSpecifier &CS, 9056 const char *startSpecifier, unsigned specifierLen) { 9057 using namespace analyze_format_string; 9058 9059 // See if we know how to fix this conversion specifier. 9060 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 9061 if (FixedCS) { 9062 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9063 << CS.toString() << /*conversion specifier*/1, 9064 getLocationOfByte(CS.getStart()), 9065 /*IsStringLocation*/true, 9066 getSpecifierRange(startSpecifier, specifierLen)); 9067 9068 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 9069 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 9070 << FixedCS->toString() 9071 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 9072 } else { 9073 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9074 << CS.toString() << /*conversion specifier*/1, 9075 getLocationOfByte(CS.getStart()), 9076 /*IsStringLocation*/true, 9077 getSpecifierRange(startSpecifier, specifierLen)); 9078 } 9079 } 9080 9081 void CheckFormatHandler::HandlePosition(const char *startPos, 9082 unsigned posLen) { 9083 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 9084 getLocationOfByte(startPos), 9085 /*IsStringLocation*/true, 9086 getSpecifierRange(startPos, posLen)); 9087 } 9088 9089 void 9090 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 9091 analyze_format_string::PositionContext p) { 9092 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 9093 << (unsigned) p, 9094 getLocationOfByte(startPos), /*IsStringLocation*/true, 9095 getSpecifierRange(startPos, posLen)); 9096 } 9097 9098 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 9099 unsigned posLen) { 9100 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 9101 getLocationOfByte(startPos), 9102 /*IsStringLocation*/true, 9103 getSpecifierRange(startPos, posLen)); 9104 } 9105 9106 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 9107 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 9108 // The presence of a null character is likely an error. 9109 EmitFormatDiagnostic( 9110 S.PDiag(diag::warn_printf_format_string_contains_null_char), 9111 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 9112 getFormatStringRange()); 9113 } 9114 } 9115 9116 // Note that this may return NULL if there was an error parsing or building 9117 // one of the argument expressions. 
9118 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 9119 return Args[FirstDataArg + i]; 9120 } 9121 9122 void CheckFormatHandler::DoneProcessing() { 9123 // Does the number of data arguments exceed the number of 9124 // format conversions in the format string? 9125 if (!HasVAListArg) { 9126 // Find any arguments that weren't covered. 9127 CoveredArgs.flip(); 9128 signed notCoveredArg = CoveredArgs.find_first(); 9129 if (notCoveredArg >= 0) { 9130 assert((unsigned)notCoveredArg < NumDataArgs); 9131 UncoveredArg.Update(notCoveredArg, OrigFormatExpr); 9132 } else { 9133 UncoveredArg.setAllCovered(); 9134 } 9135 } 9136 } 9137 9138 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall, 9139 const Expr *ArgExpr) { 9140 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 && 9141 "Invalid state"); 9142 9143 if (!ArgExpr) 9144 return; 9145 9146 SourceLocation Loc = ArgExpr->getBeginLoc(); 9147 9148 if (S.getSourceManager().isInSystemMacro(Loc)) 9149 return; 9150 9151 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used); 9152 for (auto E : DiagnosticExprs) 9153 PDiag << E->getSourceRange(); 9154 9155 CheckFormatHandler::EmitFormatDiagnostic( 9156 S, IsFunctionCall, DiagnosticExprs[0], 9157 PDiag, Loc, /*IsStringLocation*/false, 9158 DiagnosticExprs[0]->getSourceRange()); 9159 } 9160 9161 bool 9162 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex, 9163 SourceLocation Loc, 9164 const char *startSpec, 9165 unsigned specifierLen, 9166 const char *csStart, 9167 unsigned csLen) { 9168 bool keepGoing = true; 9169 if (argIndex < NumDataArgs) { 9170 // Consider the argument covered, even though the specifier doesn't 9171 // make sense. 9172 CoveredArgs.set(argIndex); 9173 } 9174 else { 9175 // If argIndex exceeds the number of data arguments we 9176 // don't issue a warning because that is just a cascade of warnings (and 9177 // they may have intended '%%' anyway). We don't want to continue processing 9178 // the format string after this point, however, as we will likely just get 9179 // gibberish when trying to match arguments. 9180 keepGoing = false; 9181 } 9182 9183 StringRef Specifier(csStart, csLen); 9184 9185 // If the specifier is non-printable, it could be the first byte of a UTF-8 9186 // sequence. In that case, print the UTF-8 code point. If not, print the byte 9187 // hex value.
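  // Illustrative examples (assumed, not exhaustive): an unprintable specifier
  // byte such as 0x01 would be rendered below as "\x01", while the leading
  // byte of a valid multi-byte UTF-8 sequence is decoded and shown as
  // "\uXXXX" or "\UXXXXXXXX".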
9188 std::string CodePointStr; 9189 if (!llvm::sys::locale::isPrint(*csStart)) { 9190 llvm::UTF32 CodePoint; 9191 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 9192 const llvm::UTF8 *E = 9193 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 9194 llvm::ConversionResult Result = 9195 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 9196 9197 if (Result != llvm::conversionOK) { 9198 unsigned char FirstChar = *csStart; 9199 CodePoint = (llvm::UTF32)FirstChar; 9200 } 9201 9202 llvm::raw_string_ostream OS(CodePointStr); 9203 if (CodePoint < 256) 9204 OS << "\\x" << llvm::format("%02x", CodePoint); 9205 else if (CodePoint <= 0xFFFF) 9206 OS << "\\u" << llvm::format("%04x", CodePoint); 9207 else 9208 OS << "\\U" << llvm::format("%08x", CodePoint); 9209 OS.flush(); 9210 Specifier = CodePointStr; 9211 } 9212 9213 EmitFormatDiagnostic( 9214 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 9215 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 9216 9217 return keepGoing; 9218 } 9219 9220 void 9221 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 9222 const char *startSpec, 9223 unsigned specifierLen) { 9224 EmitFormatDiagnostic( 9225 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 9226 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 9227 } 9228 9229 bool 9230 CheckFormatHandler::CheckNumArgs( 9231 const analyze_format_string::FormatSpecifier &FS, 9232 const analyze_format_string::ConversionSpecifier &CS, 9233 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 9234 9235 if (argIndex >= NumDataArgs) { 9236 PartialDiagnostic PDiag = FS.usesPositionalArg() 9237 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 9238 << (argIndex+1) << NumDataArgs) 9239 : S.PDiag(diag::warn_printf_insufficient_data_args); 9240 EmitFormatDiagnostic( 9241 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 9242 getSpecifierRange(startSpecifier, specifierLen)); 9243 9244 // Since more arguments than conversion tokens are given, by extension 9245 // all arguments are covered, so mark this as so. 9246 UncoveredArg.setAllCovered(); 9247 return false; 9248 } 9249 return true; 9250 } 9251 9252 template<typename Range> 9253 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 9254 SourceLocation Loc, 9255 bool IsStringLocation, 9256 Range StringRange, 9257 ArrayRef<FixItHint> FixIt) { 9258 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 9259 Loc, IsStringLocation, StringRange, FixIt); 9260 } 9261 9262 /// If the format string is not within the function call, emit a note 9263 /// so that the function call and string are in diagnostic messages. 9264 /// 9265 /// \param InFunctionCall if true, the format string is within the function 9266 /// call and only one diagnostic message will be produced. Otherwise, an 9267 /// extra note will be emitted pointing to location of the format string. 9268 /// 9269 /// \param ArgumentExpr the expression that is passed as the format string 9270 /// argument in the function call. Used for getting locations when two 9271 /// diagnostics are emitted. 9272 /// 9273 /// \param PDiag the callee should already have provided any strings for the 9274 /// diagnostic message. This function only adds locations and fixits 9275 /// to diagnostics. 9276 /// 9277 /// \param Loc primary location for diagnostic. 
If two diagnostics are 9278 /// required, one will be at Loc and a new SourceLocation will be created for 9279 /// the other one. 9280 /// 9281 /// \param IsStringLocation if true, Loc points to the format string and should 9282 /// be used for the note. Otherwise, Loc points to the argument list and will 9283 /// be used with PDiag. 9284 /// 9285 /// \param StringRange some or all of the string to highlight. This is 9286 /// templated so it can accept either a CharSourceRange or a SourceRange. 9287 /// 9288 /// \param FixIt optional fix it hint for the format string. 9289 template <typename Range> 9290 void CheckFormatHandler::EmitFormatDiagnostic( 9291 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 9292 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 9293 Range StringRange, ArrayRef<FixItHint> FixIt) { 9294 if (InFunctionCall) { 9295 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 9296 D << StringRange; 9297 D << FixIt; 9298 } else { 9299 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 9300 << ArgumentExpr->getSourceRange(); 9301 9302 const Sema::SemaDiagnosticBuilder &Note = 9303 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 9304 diag::note_format_string_defined); 9305 9306 Note << StringRange; 9307 Note << FixIt; 9308 } 9309 } 9310 9311 //===--- CHECK: Printf format string checking ------------------------------===// 9312 9313 namespace { 9314 9315 class CheckPrintfHandler : public CheckFormatHandler { 9316 public: 9317 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 9318 const Expr *origFormatExpr, 9319 const Sema::FormatStringType type, unsigned firstDataArg, 9320 unsigned numDataArgs, bool isObjC, const char *beg, 9321 bool hasVAListArg, ArrayRef<const Expr *> Args, 9322 unsigned formatIdx, bool inFunctionCall, 9323 Sema::VariadicCallType CallType, 9324 llvm::SmallBitVector &CheckedVarArgs, 9325 UncoveredArgHandler &UncoveredArg) 9326 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 9327 numDataArgs, beg, hasVAListArg, Args, formatIdx, 9328 inFunctionCall, CallType, CheckedVarArgs, 9329 UncoveredArg) {} 9330 9331 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 9332 9333 /// Returns true if '%@' specifiers are allowed in the format string.
9334 bool allowsObjCArg() const { 9335 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 9336 FSType == Sema::FST_OSTrace; 9337 } 9338 9339 bool HandleInvalidPrintfConversionSpecifier( 9340 const analyze_printf::PrintfSpecifier &FS, 9341 const char *startSpecifier, 9342 unsigned specifierLen) override; 9343 9344 void handleInvalidMaskType(StringRef MaskType) override; 9345 9346 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 9347 const char *startSpecifier, unsigned specifierLen, 9348 const TargetInfo &Target) override; 9349 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9350 const char *StartSpecifier, 9351 unsigned SpecifierLen, 9352 const Expr *E); 9353 9354 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 9355 const char *startSpecifier, unsigned specifierLen); 9356 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 9357 const analyze_printf::OptionalAmount &Amt, 9358 unsigned type, 9359 const char *startSpecifier, unsigned specifierLen); 9360 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9361 const analyze_printf::OptionalFlag &flag, 9362 const char *startSpecifier, unsigned specifierLen); 9363 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 9364 const analyze_printf::OptionalFlag &ignoredFlag, 9365 const analyze_printf::OptionalFlag &flag, 9366 const char *startSpecifier, unsigned specifierLen); 9367 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 9368 const Expr *E); 9369 9370 void HandleEmptyObjCModifierFlag(const char *startFlag, 9371 unsigned flagLen) override; 9372 9373 void HandleInvalidObjCModifierFlag(const char *startFlag, 9374 unsigned flagLen) override; 9375 9376 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 9377 const char *flagsEnd, 9378 const char *conversionPosition) 9379 override; 9380 }; 9381 9382 } // namespace 9383 9384 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 9385 const analyze_printf::PrintfSpecifier &FS, 9386 const char *startSpecifier, 9387 unsigned specifierLen) { 9388 const analyze_printf::PrintfConversionSpecifier &CS = 9389 FS.getConversionSpecifier(); 9390 9391 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 9392 getLocationOfByte(CS.getStart()), 9393 startSpecifier, specifierLen, 9394 CS.getStart(), CS.getLength()); 9395 } 9396 9397 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 9398 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 9399 } 9400 9401 bool CheckPrintfHandler::HandleAmount( 9402 const analyze_format_string::OptionalAmount &Amt, 9403 unsigned k, const char *startSpecifier, 9404 unsigned specifierLen) { 9405 if (Amt.hasDataArgument()) { 9406 if (!HasVAListArg) { 9407 unsigned argIndex = Amt.getArgIndex(); 9408 if (argIndex >= NumDataArgs) { 9409 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 9410 << k, 9411 getLocationOfByte(Amt.getStart()), 9412 /*IsStringLocation*/true, 9413 getSpecifierRange(startSpecifier, specifierLen)); 9414 // Don't do any more checking. We will just emit 9415 // spurious errors. 9416 return false; 9417 } 9418 9419 // Type check the data argument. It should be an 'int'. 9420 // Although not in conformance with C99, we also allow the argument to be 9421 // an 'unsigned int' as that is a reasonably safe case. GCC also 9422 // doesn't emit a warning for that case. 
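      // Illustrative case: in printf("%*d", width, x) the 'width' argument is
      // expected to be an 'int' (or, as noted above, an 'unsigned int');
      // passing, say, a 'long' there is what the type check below diagnoses.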
9423 CoveredArgs.set(argIndex); 9424 const Expr *Arg = getDataArg(argIndex); 9425 if (!Arg) 9426 return false; 9427 9428 QualType T = Arg->getType(); 9429 9430 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 9431 assert(AT.isValid()); 9432 9433 if (!AT.matchesType(S.Context, T)) { 9434 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 9435 << k << AT.getRepresentativeTypeName(S.Context) 9436 << T << Arg->getSourceRange(), 9437 getLocationOfByte(Amt.getStart()), 9438 /*IsStringLocation*/true, 9439 getSpecifierRange(startSpecifier, specifierLen)); 9440 // Don't do any more checking. We will just emit 9441 // spurious errors. 9442 return false; 9443 } 9444 } 9445 } 9446 return true; 9447 } 9448 9449 void CheckPrintfHandler::HandleInvalidAmount( 9450 const analyze_printf::PrintfSpecifier &FS, 9451 const analyze_printf::OptionalAmount &Amt, 9452 unsigned type, 9453 const char *startSpecifier, 9454 unsigned specifierLen) { 9455 const analyze_printf::PrintfConversionSpecifier &CS = 9456 FS.getConversionSpecifier(); 9457 9458 FixItHint fixit = 9459 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 9460 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 9461 Amt.getConstantLength())) 9462 : FixItHint(); 9463 9464 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 9465 << type << CS.toString(), 9466 getLocationOfByte(Amt.getStart()), 9467 /*IsStringLocation*/true, 9468 getSpecifierRange(startSpecifier, specifierLen), 9469 fixit); 9470 } 9471 9472 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9473 const analyze_printf::OptionalFlag &flag, 9474 const char *startSpecifier, 9475 unsigned specifierLen) { 9476 // Warn about pointless flag with a fixit removal. 9477 const analyze_printf::PrintfConversionSpecifier &CS = 9478 FS.getConversionSpecifier(); 9479 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 9480 << flag.toString() << CS.toString(), 9481 getLocationOfByte(flag.getPosition()), 9482 /*IsStringLocation*/true, 9483 getSpecifierRange(startSpecifier, specifierLen), 9484 FixItHint::CreateRemoval( 9485 getSpecifierRange(flag.getPosition(), 1))); 9486 } 9487 9488 void CheckPrintfHandler::HandleIgnoredFlag( 9489 const analyze_printf::PrintfSpecifier &FS, 9490 const analyze_printf::OptionalFlag &ignoredFlag, 9491 const analyze_printf::OptionalFlag &flag, 9492 const char *startSpecifier, 9493 unsigned specifierLen) { 9494 // Warn about ignored flag with a fixit removal. 9495 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 9496 << ignoredFlag.toString() << flag.toString(), 9497 getLocationOfByte(ignoredFlag.getPosition()), 9498 /*IsStringLocation*/true, 9499 getSpecifierRange(startSpecifier, specifierLen), 9500 FixItHint::CreateRemoval( 9501 getSpecifierRange(ignoredFlag.getPosition(), 1))); 9502 } 9503 9504 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 9505 unsigned flagLen) { 9506 // Warn about an empty flag. 9507 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 9508 getLocationOfByte(startFlag), 9509 /*IsStringLocation*/true, 9510 getSpecifierRange(startFlag, flagLen)); 9511 } 9512 9513 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 9514 unsigned flagLen) { 9515 // Warn about an invalid flag. 
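  // Illustrative case (hypothetical flag name): an unrecognized modifier such
  // as "%[badflag]@" in a format that accepts Objective-C modifier flags
  // reaches this handler, and the fix-it below removes the offending flag.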
9516 auto Range = getSpecifierRange(startFlag, flagLen); 9517 StringRef flag(startFlag, flagLen); 9518 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 9519 getLocationOfByte(startFlag), 9520 /*IsStringLocation*/true, 9521 Range, FixItHint::CreateRemoval(Range)); 9522 } 9523 9524 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 9525 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 9526 // Warn about using '[...]' without a '@' conversion. 9527 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 9528 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 9529 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 9530 getLocationOfByte(conversionPosition), 9531 /*IsStringLocation*/true, 9532 Range, FixItHint::CreateRemoval(Range)); 9533 } 9534 9535 // Determines if the specified type is a C++ class or struct containing 9536 // a member with the specified name and kind (e.g. a CXXMethodDecl named 9537 // "c_str()"). 9538 template<typename MemberKind> 9539 static llvm::SmallPtrSet<MemberKind*, 1> 9540 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 9541 const RecordType *RT = Ty->getAs<RecordType>(); 9542 llvm::SmallPtrSet<MemberKind*, 1> Results; 9543 9544 if (!RT) 9545 return Results; 9546 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 9547 if (!RD || !RD->getDefinition()) 9548 return Results; 9549 9550 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 9551 Sema::LookupMemberName); 9552 R.suppressDiagnostics(); 9553 9554 // We just need to include all members of the right kind turned up by the 9555 // filter, at this point. 9556 if (S.LookupQualifiedName(R, RT->getDecl())) 9557 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 9558 NamedDecl *decl = (*I)->getUnderlyingDecl(); 9559 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 9560 Results.insert(FK); 9561 } 9562 return Results; 9563 } 9564 9565 /// Check if we could call '.c_str()' on an object. 9566 /// 9567 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 9568 /// allow the call, or if it would be ambiguous). 9569 bool Sema::hasCStrMethod(const Expr *E) { 9570 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9571 9572 MethodSet Results = 9573 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 9574 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9575 MI != ME; ++MI) 9576 if ((*MI)->getMinRequiredArguments() == 0) 9577 return true; 9578 return false; 9579 } 9580 9581 // Check if a (w)string was passed when a (w)char* was needed, and offer a 9582 // better diagnostic if so. AT is assumed to be valid. 9583 // Returns true when a c_str() conversion method is found. 9584 bool CheckPrintfHandler::checkForCStrMembers( 9585 const analyze_printf::ArgType &AT, const Expr *E) { 9586 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9587 9588 MethodSet Results = 9589 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 9590 9591 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9592 MI != ME; ++MI) { 9593 const CXXMethodDecl *Method = *MI; 9594 if (Method->getMinRequiredArguments() == 0 && 9595 AT.matchesType(S.Context, Method->getReturnType())) { 9596 // FIXME: Suggest parens if the expression needs them.
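      // Illustrative case: printf("%s", Str) where 'Str' is a std::string; the
      // note emitted below suggests rewriting the argument as Str.c_str().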
9597 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 9598 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 9599 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 9600 return true; 9601 } 9602 } 9603 9604 return false; 9605 } 9606 9607 bool CheckPrintfHandler::HandlePrintfSpecifier( 9608 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, 9609 unsigned specifierLen, const TargetInfo &Target) { 9610 using namespace analyze_format_string; 9611 using namespace analyze_printf; 9612 9613 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 9614 9615 if (FS.consumesDataArgument()) { 9616 if (atFirstArg) { 9617 atFirstArg = false; 9618 usesPositionalArgs = FS.usesPositionalArg(); 9619 } 9620 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9621 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9622 startSpecifier, specifierLen); 9623 return false; 9624 } 9625 } 9626 9627 // First check if the field width, precision, and conversion specifier 9628 // have matching data arguments. 9629 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 9630 startSpecifier, specifierLen)) { 9631 return false; 9632 } 9633 9634 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 9635 startSpecifier, specifierLen)) { 9636 return false; 9637 } 9638 9639 if (!CS.consumesDataArgument()) { 9640 // FIXME: Technically specifying a precision or field width here 9641 // makes no sense. Worth issuing a warning at some point. 9642 return true; 9643 } 9644 9645 // Consume the argument. 9646 unsigned argIndex = FS.getArgIndex(); 9647 if (argIndex < NumDataArgs) { 9648 // The check to see if the argIndex is valid will come later. 9649 // We set the bit here because we may exit early from this 9650 // function if we encounter some other error. 9651 CoveredArgs.set(argIndex); 9652 } 9653 9654 // FreeBSD kernel extensions. 9655 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 9656 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 9657 // We need at least two arguments. 9658 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 9659 return false; 9660 9661 // Claim the second argument. 9662 CoveredArgs.set(argIndex + 1); 9663 9664 // Type check the first argument (int for %b, pointer for %D) 9665 const Expr *Ex = getDataArg(argIndex); 9666 const analyze_printf::ArgType &AT = 9667 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 9668 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 9669 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 9670 EmitFormatDiagnostic( 9671 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9672 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 9673 << false << Ex->getSourceRange(), 9674 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9675 getSpecifierRange(startSpecifier, specifierLen)); 9676 9677 // Type check the second argument (char * for both %b and %D) 9678 Ex = getDataArg(argIndex + 1); 9679 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 9680 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 9681 EmitFormatDiagnostic( 9682 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9683 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 9684 << false << Ex->getSourceRange(), 9685 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9686 getSpecifierRange(startSpecifier, specifierLen)); 9687 9688 return true; 9689 } 9690 9691 // Check for using an Objective-C specific conversion specifier 9692 // in a non-ObjC literal. 
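  // Illustrative case: "%@" appearing in a plain printf format string (rather
  // than an NSString, os_log, or os_trace format) is rejected here as an
  // invalid conversion specifier.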
9693 if (!allowsObjCArg() && CS.isObjCArg()) { 9694 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9695 specifierLen); 9696 } 9697 9698 // %P can only be used with os_log. 9699 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 9700 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9701 specifierLen); 9702 } 9703 9704 // %n is not allowed with os_log. 9705 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 9706 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 9707 getLocationOfByte(CS.getStart()), 9708 /*IsStringLocation*/ false, 9709 getSpecifierRange(startSpecifier, specifierLen)); 9710 9711 return true; 9712 } 9713 9714 // Only scalars are allowed for os_trace. 9715 if (FSType == Sema::FST_OSTrace && 9716 (CS.getKind() == ConversionSpecifier::PArg || 9717 CS.getKind() == ConversionSpecifier::sArg || 9718 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 9719 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9720 specifierLen); 9721 } 9722 9723 // Check for use of public/private annotation outside of os_log(). 9724 if (FSType != Sema::FST_OSLog) { 9725 if (FS.isPublic().isSet()) { 9726 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9727 << "public", 9728 getLocationOfByte(FS.isPublic().getPosition()), 9729 /*IsStringLocation*/ false, 9730 getSpecifierRange(startSpecifier, specifierLen)); 9731 } 9732 if (FS.isPrivate().isSet()) { 9733 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9734 << "private", 9735 getLocationOfByte(FS.isPrivate().getPosition()), 9736 /*IsStringLocation*/ false, 9737 getSpecifierRange(startSpecifier, specifierLen)); 9738 } 9739 } 9740 9741 const llvm::Triple &Triple = Target.getTriple(); 9742 if (CS.getKind() == ConversionSpecifier::nArg && 9743 (Triple.isAndroid() || Triple.isOSFuchsia())) { 9744 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported), 9745 getLocationOfByte(CS.getStart()), 9746 /*IsStringLocation*/ false, 9747 getSpecifierRange(startSpecifier, specifierLen)); 9748 } 9749 9750 // Check for invalid use of field width 9751 if (!FS.hasValidFieldWidth()) { 9752 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 9753 startSpecifier, specifierLen); 9754 } 9755 9756 // Check for invalid use of precision 9757 if (!FS.hasValidPrecision()) { 9758 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 9759 startSpecifier, specifierLen); 9760 } 9761 9762 // Precision is mandatory for %P specifier. 9763 if (CS.getKind() == ConversionSpecifier::PArg && 9764 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 9765 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 9766 getLocationOfByte(startSpecifier), 9767 /*IsStringLocation*/ false, 9768 getSpecifierRange(startSpecifier, specifierLen)); 9769 } 9770 9771 // Check each flag does not conflict with any other component. 
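  // Illustrative cases (assumed): in printf("%#d", x) the '#' flag is not
  // valid with 'd', and in printf("%-08d", x) the '0' flag is ignored because
  // '-' is also given; the checks below emit removal fix-its for such flags.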
9772 if (!FS.hasValidThousandsGroupingPrefix()) 9773 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 9774 if (!FS.hasValidLeadingZeros()) 9775 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 9776 if (!FS.hasValidPlusPrefix()) 9777 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 9778 if (!FS.hasValidSpacePrefix()) 9779 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 9780 if (!FS.hasValidAlternativeForm()) 9781 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 9782 if (!FS.hasValidLeftJustified()) 9783 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 9784 9785 // Check that flags are not ignored by another flag 9786 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 9787 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 9788 startSpecifier, specifierLen); 9789 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 9790 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 9791 startSpecifier, specifierLen); 9792 9793 // Check the length modifier is valid with the given conversion specifier. 9794 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9795 S.getLangOpts())) 9796 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9797 diag::warn_format_nonsensical_length); 9798 else if (!FS.hasStandardLengthModifier()) 9799 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9800 else if (!FS.hasStandardLengthConversionCombination()) 9801 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9802 diag::warn_format_non_standard_conversion_spec); 9803 9804 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9805 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9806 9807 // The remaining checks depend on the data arguments. 9808 if (HasVAListArg) 9809 return true; 9810 9811 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9812 return false; 9813 9814 const Expr *Arg = getDataArg(argIndex); 9815 if (!Arg) 9816 return true; 9817 9818 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 9819 } 9820 9821 static bool requiresParensToAddCast(const Expr *E) { 9822 // FIXME: We should have a general way to reason about operator 9823 // precedence and whether parens are actually needed here. 9824 // Take care of a few common cases where they aren't. 
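  // Illustrative intent: a suggested cast can be prepended directly to simple
  // expressions such as a DeclRefExpr ("(long)n"), whereas something like
  // "a + b" would additionally need parentheses: "(long)(a + b)".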
9825 const Expr *Inside = E->IgnoreImpCasts(); 9826 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 9827 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 9828 9829 switch (Inside->getStmtClass()) { 9830 case Stmt::ArraySubscriptExprClass: 9831 case Stmt::CallExprClass: 9832 case Stmt::CharacterLiteralClass: 9833 case Stmt::CXXBoolLiteralExprClass: 9834 case Stmt::DeclRefExprClass: 9835 case Stmt::FloatingLiteralClass: 9836 case Stmt::IntegerLiteralClass: 9837 case Stmt::MemberExprClass: 9838 case Stmt::ObjCArrayLiteralClass: 9839 case Stmt::ObjCBoolLiteralExprClass: 9840 case Stmt::ObjCBoxedExprClass: 9841 case Stmt::ObjCDictionaryLiteralClass: 9842 case Stmt::ObjCEncodeExprClass: 9843 case Stmt::ObjCIvarRefExprClass: 9844 case Stmt::ObjCMessageExprClass: 9845 case Stmt::ObjCPropertyRefExprClass: 9846 case Stmt::ObjCStringLiteralClass: 9847 case Stmt::ObjCSubscriptRefExprClass: 9848 case Stmt::ParenExprClass: 9849 case Stmt::StringLiteralClass: 9850 case Stmt::UnaryOperatorClass: 9851 return false; 9852 default: 9853 return true; 9854 } 9855 } 9856 9857 static std::pair<QualType, StringRef> 9858 shouldNotPrintDirectly(const ASTContext &Context, 9859 QualType IntendedTy, 9860 const Expr *E) { 9861 // Use a 'while' to peel off layers of typedefs. 9862 QualType TyTy = IntendedTy; 9863 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 9864 StringRef Name = UserTy->getDecl()->getName(); 9865 QualType CastTy = llvm::StringSwitch<QualType>(Name) 9866 .Case("CFIndex", Context.getNSIntegerType()) 9867 .Case("NSInteger", Context.getNSIntegerType()) 9868 .Case("NSUInteger", Context.getNSUIntegerType()) 9869 .Case("SInt32", Context.IntTy) 9870 .Case("UInt32", Context.UnsignedIntTy) 9871 .Default(QualType()); 9872 9873 if (!CastTy.isNull()) 9874 return std::make_pair(CastTy, Name); 9875 9876 TyTy = UserTy->desugar(); 9877 } 9878 9879 // Strip parens if necessary. 9880 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 9881 return shouldNotPrintDirectly(Context, 9882 PE->getSubExpr()->getType(), 9883 PE->getSubExpr()); 9884 9885 // If this is a conditional expression, then its result type is constructed 9886 // via usual arithmetic conversions and thus there might be no necessary 9887 // typedef sugar there. Recurse to operands to check for NSInteger & 9888 // Co. usage condition. 9889 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 9890 QualType TrueTy, FalseTy; 9891 StringRef TrueName, FalseName; 9892 9893 std::tie(TrueTy, TrueName) = 9894 shouldNotPrintDirectly(Context, 9895 CO->getTrueExpr()->getType(), 9896 CO->getTrueExpr()); 9897 std::tie(FalseTy, FalseName) = 9898 shouldNotPrintDirectly(Context, 9899 CO->getFalseExpr()->getType(), 9900 CO->getFalseExpr()); 9901 9902 if (TrueTy == FalseTy) 9903 return std::make_pair(TrueTy, TrueName); 9904 else if (TrueTy.isNull()) 9905 return std::make_pair(FalseTy, FalseName); 9906 else if (FalseTy.isNull()) 9907 return std::make_pair(TrueTy, TrueName); 9908 } 9909 9910 return std::make_pair(QualType(), StringRef()); 9911 } 9912 9913 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 9914 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 9915 /// type do not count. 9916 static bool 9917 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 9918 QualType From = ICE->getSubExpr()->getType(); 9919 QualType To = ICE->getType(); 9920 // It's an integer promotion if the destination type is the promoted 9921 // source type. 
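  // Illustrative case: passing a 'short' to a variadic function promotes it
  // to 'int', so that CK_IntegralCast satisfies the test below; a bit-field
  // 'promotion' to a lower-ranked type does not.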
9922 if (ICE->getCastKind() == CK_IntegralCast && 9923 From->isPromotableIntegerType() && 9924 S.Context.getPromotedIntegerType(From) == To) 9925 return true; 9926 // Look through vector types, since we do default argument promotion for 9927 // those in OpenCL. 9928 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9929 From = VecTy->getElementType(); 9930 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9931 To = VecTy->getElementType(); 9932 // It's a floating promotion if the source type is a lower rank. 9933 return ICE->getCastKind() == CK_FloatingCast && 9934 S.Context.getFloatingTypeOrder(From, To) < 0; 9935 } 9936 9937 bool 9938 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9939 const char *StartSpecifier, 9940 unsigned SpecifierLen, 9941 const Expr *E) { 9942 using namespace analyze_format_string; 9943 using namespace analyze_printf; 9944 9945 // Now type check the data expression that matches the 9946 // format specifier. 9947 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9948 if (!AT.isValid()) 9949 return true; 9950 9951 QualType ExprTy = E->getType(); 9952 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9953 ExprTy = TET->getUnderlyingExpr()->getType(); 9954 } 9955 9956 // Diagnose attempts to print a boolean value as a character. Unlike other 9957 // -Wformat diagnostics, this is fine from a type perspective, but it still 9958 // doesn't make sense. 9959 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9960 E->isKnownToHaveBooleanValue()) { 9961 const CharSourceRange &CSR = 9962 getSpecifierRange(StartSpecifier, SpecifierLen); 9963 SmallString<4> FSString; 9964 llvm::raw_svector_ostream os(FSString); 9965 FS.toString(os); 9966 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9967 << FSString, 9968 E->getExprLoc(), false, CSR); 9969 return true; 9970 } 9971 9972 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9973 if (Match == analyze_printf::ArgType::Match) 9974 return true; 9975 9976 // Look through argument promotions for our error message's reported type. 9977 // This includes the integral and floating promotions, but excludes array 9978 // and function pointer decay (seeing that an argument intended to be a 9979 // string has type 'char [6]' is probably more confusing than 'char *') and 9980 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 9981 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 9982 if (isArithmeticArgumentPromotion(S, ICE)) { 9983 E = ICE->getSubExpr(); 9984 ExprTy = E->getType(); 9985 9986 // Check if we didn't match because of an implicit cast from a 'char' 9987 // or 'short' to an 'int'. This is done because printf is a varargs 9988 // function. 9989 if (ICE->getType() == S.Context.IntTy || 9990 ICE->getType() == S.Context.UnsignedIntTy) { 9991 // All further checking is done on the subexpression 9992 const analyze_printf::ArgType::MatchKind ImplicitMatch = 9993 AT.matchesType(S.Context, ExprTy); 9994 if (ImplicitMatch == analyze_printf::ArgType::Match) 9995 return true; 9996 if (ImplicitMatch == ArgType::NoMatchPedantic || 9997 ImplicitMatch == ArgType::NoMatchTypeConfusion) 9998 Match = ImplicitMatch; 9999 } 10000 } 10001 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 10002 // Special case for 'a', which has type 'int' in C. 
10003 // Note, however, that we do /not/ want to treat multibyte constants like 10004 // 'MooV' as characters! This form is deprecated but still exists. In 10005 // addition, don't treat expressions as of type 'char' if one byte length 10006 // modifier is provided. 10007 if (ExprTy == S.Context.IntTy && 10008 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 10009 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 10010 ExprTy = S.Context.CharTy; 10011 } 10012 10013 // Look through enums to their underlying type. 10014 bool IsEnum = false; 10015 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 10016 ExprTy = EnumTy->getDecl()->getIntegerType(); 10017 IsEnum = true; 10018 } 10019 10020 // %C in an Objective-C context prints a unichar, not a wchar_t. 10021 // If the argument is an integer of some kind, believe the %C and suggest 10022 // a cast instead of changing the conversion specifier. 10023 QualType IntendedTy = ExprTy; 10024 if (isObjCContext() && 10025 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 10026 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 10027 !ExprTy->isCharType()) { 10028 // 'unichar' is defined as a typedef of unsigned short, but we should 10029 // prefer using the typedef if it is visible. 10030 IntendedTy = S.Context.UnsignedShortTy; 10031 10032 // While we are here, check if the value is an IntegerLiteral that happens 10033 // to be within the valid range. 10034 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 10035 const llvm::APInt &V = IL->getValue(); 10036 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 10037 return true; 10038 } 10039 10040 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 10041 Sema::LookupOrdinaryName); 10042 if (S.LookupName(Result, S.getCurScope())) { 10043 NamedDecl *ND = Result.getFoundDecl(); 10044 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 10045 if (TD->getUnderlyingType() == IntendedTy) 10046 IntendedTy = S.Context.getTypedefType(TD); 10047 } 10048 } 10049 } 10050 10051 // Special-case some of Darwin's platform-independence types by suggesting 10052 // casts to primitive types that are known to be large enough. 10053 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 10054 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 10055 QualType CastTy; 10056 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 10057 if (!CastTy.isNull()) { 10058 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 10059 // (long in ASTContext). Only complain to pedants. 10060 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 10061 (AT.isSizeT() || AT.isPtrdiffT()) && 10062 AT.matchesType(S.Context, CastTy)) 10063 Match = ArgType::NoMatchPedantic; 10064 IntendedTy = CastTy; 10065 ShouldNotPrintDirectly = true; 10066 } 10067 } 10068 10069 // We may be able to offer a FixItHint if it is a supported type. 
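  // Illustrative case: for printf("%d", SomeLongValue), fixType() below can
  // rewrite the specifier to "%ld", which is then attached to the mismatch
  // diagnostic as a fix-it.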
10070 PrintfSpecifier fixedFS = FS; 10071 bool Success = 10072 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 10073 10074 if (Success) { 10075 // Get the fix string from the fixed format specifier 10076 SmallString<16> buf; 10077 llvm::raw_svector_ostream os(buf); 10078 fixedFS.toString(os); 10079 10080 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 10081 10082 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 10083 unsigned Diag; 10084 switch (Match) { 10085 case ArgType::Match: llvm_unreachable("expected non-matching"); 10086 case ArgType::NoMatchPedantic: 10087 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10088 break; 10089 case ArgType::NoMatchTypeConfusion: 10090 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10091 break; 10092 case ArgType::NoMatch: 10093 Diag = diag::warn_format_conversion_argument_type_mismatch; 10094 break; 10095 } 10096 10097 // In this case, the specifier is wrong and should be changed to match 10098 // the argument. 10099 EmitFormatDiagnostic(S.PDiag(Diag) 10100 << AT.getRepresentativeTypeName(S.Context) 10101 << IntendedTy << IsEnum << E->getSourceRange(), 10102 E->getBeginLoc(), 10103 /*IsStringLocation*/ false, SpecRange, 10104 FixItHint::CreateReplacement(SpecRange, os.str())); 10105 } else { 10106 // The canonical type for formatting this value is different from the 10107 // actual type of the expression. (This occurs, for example, with Darwin's 10108 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 10109 // should be printed as 'long' for 64-bit compatibility.) 10110 // Rather than emitting a normal format/argument mismatch, we want to 10111 // add a cast to the recommended type (and correct the format string 10112 // if necessary). 10113 SmallString<16> CastBuf; 10114 llvm::raw_svector_ostream CastFix(CastBuf); 10115 CastFix << "("; 10116 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 10117 CastFix << ")"; 10118 10119 SmallVector<FixItHint,4> Hints; 10120 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 10121 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 10122 10123 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 10124 // If there's already a cast present, just replace it. 10125 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 10126 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 10127 10128 } else if (!requiresParensToAddCast(E)) { 10129 // If the expression has high enough precedence, 10130 // just write the C-style cast. 10131 Hints.push_back( 10132 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10133 } else { 10134 // Otherwise, add parens around the expression as well as the cast. 10135 CastFix << "("; 10136 Hints.push_back( 10137 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10138 10139 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 10140 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 10141 } 10142 10143 if (ShouldNotPrintDirectly) { 10144 // The expression has a type that should not be printed directly. 10145 // We extract the name from the typedef because we don't want to show 10146 // the underlying type in the diagnostic. 
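      // Illustrative case (Darwin-specific, assumed): printf("%d", N) with 'N'
      // of type NSInteger on a 64-bit target is reported against the name
      // 'NSInteger' and offered a "%ld" specifier plus a "(long)" cast.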
10147 StringRef Name; 10148 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 10149 Name = TypedefTy->getDecl()->getName(); 10150 else 10151 Name = CastTyName; 10152 unsigned Diag = Match == ArgType::NoMatchPedantic 10153 ? diag::warn_format_argument_needs_cast_pedantic 10154 : diag::warn_format_argument_needs_cast; 10155 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 10156 << E->getSourceRange(), 10157 E->getBeginLoc(), /*IsStringLocation=*/false, 10158 SpecRange, Hints); 10159 } else { 10160 // In this case, the expression could be printed using a different 10161 // specifier, but we've decided that the specifier is probably correct 10162 // and we should cast instead. Just use the normal warning message. 10163 EmitFormatDiagnostic( 10164 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10165 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 10166 << E->getSourceRange(), 10167 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 10168 } 10169 } 10170 } else { 10171 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 10172 SpecifierLen); 10173 // Since the warning for passing non-POD types to variadic functions 10174 // was deferred until now, we emit a warning for non-POD 10175 // arguments here. 10176 switch (S.isValidVarArgType(ExprTy)) { 10177 case Sema::VAK_Valid: 10178 case Sema::VAK_ValidInCXX11: { 10179 unsigned Diag; 10180 switch (Match) { 10181 case ArgType::Match: llvm_unreachable("expected non-matching"); 10182 case ArgType::NoMatchPedantic: 10183 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10184 break; 10185 case ArgType::NoMatchTypeConfusion: 10186 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10187 break; 10188 case ArgType::NoMatch: 10189 Diag = diag::warn_format_conversion_argument_type_mismatch; 10190 break; 10191 } 10192 10193 EmitFormatDiagnostic( 10194 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 10195 << IsEnum << CSR << E->getSourceRange(), 10196 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10197 break; 10198 } 10199 case Sema::VAK_Undefined: 10200 case Sema::VAK_MSVCUndefined: 10201 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 10202 << S.getLangOpts().CPlusPlus11 << ExprTy 10203 << CallType 10204 << AT.getRepresentativeTypeName(S.Context) << CSR 10205 << E->getSourceRange(), 10206 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10207 checkForCStrMembers(AT, E); 10208 break; 10209 10210 case Sema::VAK_Invalid: 10211 if (ExprTy->isObjCObjectType()) 10212 EmitFormatDiagnostic( 10213 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 10214 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 10215 << AT.getRepresentativeTypeName(S.Context) << CSR 10216 << E->getSourceRange(), 10217 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10218 else 10219 // FIXME: If this is an initializer list, suggest removing the braces 10220 // or inserting a cast to the target type. 
10221 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 10222 << isa<InitListExpr>(E) << ExprTy << CallType 10223 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 10224 break; 10225 } 10226 10227 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 10228 "format string specifier index out of range"); 10229 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 10230 } 10231 10232 return true; 10233 } 10234 10235 //===--- CHECK: Scanf format string checking ------------------------------===// 10236 10237 namespace { 10238 10239 class CheckScanfHandler : public CheckFormatHandler { 10240 public: 10241 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 10242 const Expr *origFormatExpr, Sema::FormatStringType type, 10243 unsigned firstDataArg, unsigned numDataArgs, 10244 const char *beg, bool hasVAListArg, 10245 ArrayRef<const Expr *> Args, unsigned formatIdx, 10246 bool inFunctionCall, Sema::VariadicCallType CallType, 10247 llvm::SmallBitVector &CheckedVarArgs, 10248 UncoveredArgHandler &UncoveredArg) 10249 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 10250 numDataArgs, beg, hasVAListArg, Args, formatIdx, 10251 inFunctionCall, CallType, CheckedVarArgs, 10252 UncoveredArg) {} 10253 10254 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 10255 const char *startSpecifier, 10256 unsigned specifierLen) override; 10257 10258 bool HandleInvalidScanfConversionSpecifier( 10259 const analyze_scanf::ScanfSpecifier &FS, 10260 const char *startSpecifier, 10261 unsigned specifierLen) override; 10262 10263 void HandleIncompleteScanList(const char *start, const char *end) override; 10264 }; 10265 10266 } // namespace 10267 10268 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 10269 const char *end) { 10270 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 10271 getLocationOfByte(end), /*IsStringLocation*/true, 10272 getSpecifierRange(start, end - start)); 10273 } 10274 10275 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 10276 const analyze_scanf::ScanfSpecifier &FS, 10277 const char *startSpecifier, 10278 unsigned specifierLen) { 10279 const analyze_scanf::ScanfConversionSpecifier &CS = 10280 FS.getConversionSpecifier(); 10281 10282 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 10283 getLocationOfByte(CS.getStart()), 10284 startSpecifier, specifierLen, 10285 CS.getStart(), CS.getLength()); 10286 } 10287 10288 bool CheckScanfHandler::HandleScanfSpecifier( 10289 const analyze_scanf::ScanfSpecifier &FS, 10290 const char *startSpecifier, 10291 unsigned specifierLen) { 10292 using namespace analyze_scanf; 10293 using namespace analyze_format_string; 10294 10295 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 10296 10297 // Handle case where '%' and '*' don't consume an argument. These shouldn't 10298 // be used to decide if we are using positional arguments consistently. 10299 if (FS.consumesDataArgument()) { 10300 if (atFirstArg) { 10301 atFirstArg = false; 10302 usesPositionalArgs = FS.usesPositionalArg(); 10303 } 10304 else if (usesPositionalArgs != FS.usesPositionalArg()) { 10305 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 10306 startSpecifier, specifierLen); 10307 return false; 10308 } 10309 } 10310 10311 // Check if the field width is non-zero.
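  // Illustrative case: a zero field width, as in scanf("%0d", &i), is not
  // meaningful; the check below warns and offers a fix-it removing the "0".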
10312 const OptionalAmount &Amt = FS.getFieldWidth(); 10313 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 10314 if (Amt.getConstantAmount() == 0) { 10315 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 10316 Amt.getConstantLength()); 10317 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 10318 getLocationOfByte(Amt.getStart()), 10319 /*IsStringLocation*/true, R, 10320 FixItHint::CreateRemoval(R)); 10321 } 10322 } 10323 10324 if (!FS.consumesDataArgument()) { 10325 // FIXME: Technically specifying a precision or field width here 10326 // makes no sense. Worth issuing a warning at some point. 10327 return true; 10328 } 10329 10330 // Consume the argument. 10331 unsigned argIndex = FS.getArgIndex(); 10332 if (argIndex < NumDataArgs) { 10333 // The check to see if the argIndex is valid will come later. 10334 // We set the bit here because we may exit early from this 10335 // function if we encounter some other error. 10336 CoveredArgs.set(argIndex); 10337 } 10338 10339 // Check the length modifier is valid with the given conversion specifier. 10340 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 10341 S.getLangOpts())) 10342 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10343 diag::warn_format_nonsensical_length); 10344 else if (!FS.hasStandardLengthModifier()) 10345 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 10346 else if (!FS.hasStandardLengthConversionCombination()) 10347 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10348 diag::warn_format_non_standard_conversion_spec); 10349 10350 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 10351 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 10352 10353 // The remaining checks depend on the data arguments. 10354 if (HasVAListArg) 10355 return true; 10356 10357 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 10358 return false; 10359 10360 // Check that the argument type matches the format specifier. 10361 const Expr *Ex = getDataArg(argIndex); 10362 if (!Ex) 10363 return true; 10364 10365 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 10366 10367 if (!AT.isValid()) { 10368 return true; 10369 } 10370 10371 analyze_format_string::ArgType::MatchKind Match = 10372 AT.matchesType(S.Context, Ex->getType()); 10373 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 10374 if (Match == analyze_format_string::ArgType::Match) 10375 return true; 10376 10377 ScanfSpecifier fixedFS = FS; 10378 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 10379 S.getLangOpts(), S.Context); 10380 10381 unsigned Diag = 10382 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 10383 : diag::warn_format_conversion_argument_type_mismatch; 10384 10385 if (Success) { 10386 // Get the fix string from the fixed format specifier. 
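    // Illustrative case: scanf("%d", &L) where 'L' is a 'long'; fixType()
    // rewrites the specifier so the fix-it below suggests "%ld".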
10387 SmallString<128> buf; 10388 llvm::raw_svector_ostream os(buf); 10389 fixedFS.toString(os); 10390 10391 EmitFormatDiagnostic( 10392 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 10393 << Ex->getType() << false << Ex->getSourceRange(), 10394 Ex->getBeginLoc(), 10395 /*IsStringLocation*/ false, 10396 getSpecifierRange(startSpecifier, specifierLen), 10397 FixItHint::CreateReplacement( 10398 getSpecifierRange(startSpecifier, specifierLen), os.str())); 10399 } else { 10400 EmitFormatDiagnostic(S.PDiag(Diag) 10401 << AT.getRepresentativeTypeName(S.Context) 10402 << Ex->getType() << false << Ex->getSourceRange(), 10403 Ex->getBeginLoc(), 10404 /*IsStringLocation*/ false, 10405 getSpecifierRange(startSpecifier, specifierLen)); 10406 } 10407 10408 return true; 10409 } 10410 10411 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 10412 const Expr *OrigFormatExpr, 10413 ArrayRef<const Expr *> Args, 10414 bool HasVAListArg, unsigned format_idx, 10415 unsigned firstDataArg, 10416 Sema::FormatStringType Type, 10417 bool inFunctionCall, 10418 Sema::VariadicCallType CallType, 10419 llvm::SmallBitVector &CheckedVarArgs, 10420 UncoveredArgHandler &UncoveredArg, 10421 bool IgnoreStringsWithoutSpecifiers) { 10422 // CHECK: is the format string a wide literal? 10423 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 10424 CheckFormatHandler::EmitFormatDiagnostic( 10425 S, inFunctionCall, Args[format_idx], 10426 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 10427 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10428 return; 10429 } 10430 10431 // Str - The format string. NOTE: this is NOT null-terminated! 10432 StringRef StrRef = FExpr->getString(); 10433 const char *Str = StrRef.data(); 10434 // Account for cases where the string literal is truncated in a declaration. 10435 const ConstantArrayType *T = 10436 S.Context.getAsConstantArrayType(FExpr->getType()); 10437 assert(T && "String literal not of constant array type!"); 10438 size_t TypeSize = T->getSize().getZExtValue(); 10439 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10440 const unsigned numDataArgs = Args.size() - firstDataArg; 10441 10442 if (IgnoreStringsWithoutSpecifiers && 10443 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 10444 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10445 return; 10446 10447 // Emit a warning if the string literal is truncated and does not contain an 10448 // embedded null character. 10449 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 10450 CheckFormatHandler::EmitFormatDiagnostic( 10451 S, inFunctionCall, Args[format_idx], 10452 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 10453 FExpr->getBeginLoc(), 10454 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 10455 return; 10456 } 10457 10458 // CHECK: empty format string? 
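  // Illustrative case: printf("", value) supplies a data argument but an
  // empty format string, which the branch below diagnoses.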
10459 if (StrLen == 0 && numDataArgs > 0) {
10460 CheckFormatHandler::EmitFormatDiagnostic(
10461 S, inFunctionCall, Args[format_idx],
10462 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
10463 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
10464 return;
10465 }
10466
10467 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
10468 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
10469 Type == Sema::FST_OSTrace) {
10470 CheckPrintfHandler H(
10471 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
10472 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
10473 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
10474 CheckedVarArgs, UncoveredArg);
10475
10476 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
10477 S.getLangOpts(),
10478 S.Context.getTargetInfo(),
10479 Type == Sema::FST_FreeBSDKPrintf))
10480 H.DoneProcessing();
10481 } else if (Type == Sema::FST_Scanf) {
10482 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
10483 numDataArgs, Str, HasVAListArg, Args, format_idx,
10484 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
10485
10486 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
10487 S.getLangOpts(),
10488 S.Context.getTargetInfo()))
10489 H.DoneProcessing();
10490 } // TODO: handle other formats
10491 }
10492
10493 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
10494 // Str - The format string. NOTE: this is NOT null-terminated!
10495 StringRef StrRef = FExpr->getString();
10496 const char *Str = StrRef.data();
10497 // Account for cases where the string literal is truncated in a declaration.
10498 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
10499 assert(T && "String literal not of constant array type!");
10500 size_t TypeSize = T->getSize().getZExtValue();
10501 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
10502 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
10503 getLangOpts(),
10504 Context.getTargetInfo());
10505 }
10506
10507 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
10508
10509 // Returns the related absolute value function that is larger, or 0 if one
10510 // does not exist.
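// For example: __builtin_abs -> __builtin_labs -> __builtin_llabs, which has
// no larger sibling and therefore maps to 0.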
10511 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 10512 switch (AbsFunction) { 10513 default: 10514 return 0; 10515 10516 case Builtin::BI__builtin_abs: 10517 return Builtin::BI__builtin_labs; 10518 case Builtin::BI__builtin_labs: 10519 return Builtin::BI__builtin_llabs; 10520 case Builtin::BI__builtin_llabs: 10521 return 0; 10522 10523 case Builtin::BI__builtin_fabsf: 10524 return Builtin::BI__builtin_fabs; 10525 case Builtin::BI__builtin_fabs: 10526 return Builtin::BI__builtin_fabsl; 10527 case Builtin::BI__builtin_fabsl: 10528 return 0; 10529 10530 case Builtin::BI__builtin_cabsf: 10531 return Builtin::BI__builtin_cabs; 10532 case Builtin::BI__builtin_cabs: 10533 return Builtin::BI__builtin_cabsl; 10534 case Builtin::BI__builtin_cabsl: 10535 return 0; 10536 10537 case Builtin::BIabs: 10538 return Builtin::BIlabs; 10539 case Builtin::BIlabs: 10540 return Builtin::BIllabs; 10541 case Builtin::BIllabs: 10542 return 0; 10543 10544 case Builtin::BIfabsf: 10545 return Builtin::BIfabs; 10546 case Builtin::BIfabs: 10547 return Builtin::BIfabsl; 10548 case Builtin::BIfabsl: 10549 return 0; 10550 10551 case Builtin::BIcabsf: 10552 return Builtin::BIcabs; 10553 case Builtin::BIcabs: 10554 return Builtin::BIcabsl; 10555 case Builtin::BIcabsl: 10556 return 0; 10557 } 10558 } 10559 10560 // Returns the argument type of the absolute value function. 10561 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 10562 unsigned AbsType) { 10563 if (AbsType == 0) 10564 return QualType(); 10565 10566 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 10567 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 10568 if (Error != ASTContext::GE_None) 10569 return QualType(); 10570 10571 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 10572 if (!FT) 10573 return QualType(); 10574 10575 if (FT->getNumParams() != 1) 10576 return QualType(); 10577 10578 return FT->getParamType(0); 10579 } 10580 10581 // Returns the best absolute value function, or zero, based on type and 10582 // current absolute value function. 10583 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 10584 unsigned AbsFunctionKind) { 10585 unsigned BestKind = 0; 10586 uint64_t ArgSize = Context.getTypeSize(ArgType); 10587 for (unsigned Kind = AbsFunctionKind; Kind != 0; 10588 Kind = getLargerAbsoluteValueFunction(Kind)) { 10589 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 10590 if (Context.getTypeSize(ParamType) >= ArgSize) { 10591 if (BestKind == 0) 10592 BestKind = Kind; 10593 else if (Context.hasSameType(ParamType, ArgType)) { 10594 BestKind = Kind; 10595 break; 10596 } 10597 } 10598 } 10599 return BestKind; 10600 } 10601 10602 enum AbsoluteValueKind { 10603 AVK_Integer, 10604 AVK_Floating, 10605 AVK_Complex 10606 }; 10607 10608 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 10609 if (T->isIntegralOrEnumerationType()) 10610 return AVK_Integer; 10611 if (T->isRealFloatingType()) 10612 return AVK_Floating; 10613 if (T->isAnyComplexType()) 10614 return AVK_Complex; 10615 10616 llvm_unreachable("Type not integer, floating, or complex"); 10617 } 10618 10619 // Changes the absolute value function to a different type. Preserves whether 10620 // the function is a builtin. 
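// For example, requesting AVK_Floating for __builtin_labs yields
// __builtin_fabsf (and labs yields fabsf); callers then widen the result as
// needed via getBestAbsFunction.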
10621 static unsigned changeAbsFunction(unsigned AbsKind, 10622 AbsoluteValueKind ValueKind) { 10623 switch (ValueKind) { 10624 case AVK_Integer: 10625 switch (AbsKind) { 10626 default: 10627 return 0; 10628 case Builtin::BI__builtin_fabsf: 10629 case Builtin::BI__builtin_fabs: 10630 case Builtin::BI__builtin_fabsl: 10631 case Builtin::BI__builtin_cabsf: 10632 case Builtin::BI__builtin_cabs: 10633 case Builtin::BI__builtin_cabsl: 10634 return Builtin::BI__builtin_abs; 10635 case Builtin::BIfabsf: 10636 case Builtin::BIfabs: 10637 case Builtin::BIfabsl: 10638 case Builtin::BIcabsf: 10639 case Builtin::BIcabs: 10640 case Builtin::BIcabsl: 10641 return Builtin::BIabs; 10642 } 10643 case AVK_Floating: 10644 switch (AbsKind) { 10645 default: 10646 return 0; 10647 case Builtin::BI__builtin_abs: 10648 case Builtin::BI__builtin_labs: 10649 case Builtin::BI__builtin_llabs: 10650 case Builtin::BI__builtin_cabsf: 10651 case Builtin::BI__builtin_cabs: 10652 case Builtin::BI__builtin_cabsl: 10653 return Builtin::BI__builtin_fabsf; 10654 case Builtin::BIabs: 10655 case Builtin::BIlabs: 10656 case Builtin::BIllabs: 10657 case Builtin::BIcabsf: 10658 case Builtin::BIcabs: 10659 case Builtin::BIcabsl: 10660 return Builtin::BIfabsf; 10661 } 10662 case AVK_Complex: 10663 switch (AbsKind) { 10664 default: 10665 return 0; 10666 case Builtin::BI__builtin_abs: 10667 case Builtin::BI__builtin_labs: 10668 case Builtin::BI__builtin_llabs: 10669 case Builtin::BI__builtin_fabsf: 10670 case Builtin::BI__builtin_fabs: 10671 case Builtin::BI__builtin_fabsl: 10672 return Builtin::BI__builtin_cabsf; 10673 case Builtin::BIabs: 10674 case Builtin::BIlabs: 10675 case Builtin::BIllabs: 10676 case Builtin::BIfabsf: 10677 case Builtin::BIfabs: 10678 case Builtin::BIfabsl: 10679 return Builtin::BIcabsf; 10680 } 10681 } 10682 llvm_unreachable("Unable to convert function"); 10683 } 10684 10685 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10686 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 10687 if (!FnInfo) 10688 return 0; 10689 10690 switch (FDecl->getBuiltinID()) { 10691 default: 10692 return 0; 10693 case Builtin::BI__builtin_abs: 10694 case Builtin::BI__builtin_fabs: 10695 case Builtin::BI__builtin_fabsf: 10696 case Builtin::BI__builtin_fabsl: 10697 case Builtin::BI__builtin_labs: 10698 case Builtin::BI__builtin_llabs: 10699 case Builtin::BI__builtin_cabs: 10700 case Builtin::BI__builtin_cabsf: 10701 case Builtin::BI__builtin_cabsl: 10702 case Builtin::BIabs: 10703 case Builtin::BIlabs: 10704 case Builtin::BIllabs: 10705 case Builtin::BIfabs: 10706 case Builtin::BIfabsf: 10707 case Builtin::BIfabsl: 10708 case Builtin::BIcabs: 10709 case Builtin::BIcabsf: 10710 case Builtin::BIcabsl: 10711 return FDecl->getBuiltinID(); 10712 } 10713 llvm_unreachable("Unknown Builtin type"); 10714 } 10715 10716 // If the replacement is valid, emit a note with replacement function. 10717 // Additionally, suggest including the proper header if not already included. 
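// In C++ the suggested replacement is std::abs (with <cstdlib> or <cmath>
// recommended when no suitable overload is already visible); otherwise the
// builtin's library name and, when known, its header are suggested.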
10718 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 10719 unsigned AbsKind, QualType ArgType) { 10720 bool EmitHeaderHint = true; 10721 const char *HeaderName = nullptr; 10722 const char *FunctionName = nullptr; 10723 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 10724 FunctionName = "std::abs"; 10725 if (ArgType->isIntegralOrEnumerationType()) { 10726 HeaderName = "cstdlib"; 10727 } else if (ArgType->isRealFloatingType()) { 10728 HeaderName = "cmath"; 10729 } else { 10730 llvm_unreachable("Invalid Type"); 10731 } 10732 10733 // Lookup all std::abs 10734 if (NamespaceDecl *Std = S.getStdNamespace()) { 10735 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 10736 R.suppressDiagnostics(); 10737 S.LookupQualifiedName(R, Std); 10738 10739 for (const auto *I : R) { 10740 const FunctionDecl *FDecl = nullptr; 10741 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 10742 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 10743 } else { 10744 FDecl = dyn_cast<FunctionDecl>(I); 10745 } 10746 if (!FDecl) 10747 continue; 10748 10749 // Found std::abs(), check that they are the right ones. 10750 if (FDecl->getNumParams() != 1) 10751 continue; 10752 10753 // Check that the parameter type can handle the argument. 10754 QualType ParamType = FDecl->getParamDecl(0)->getType(); 10755 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 10756 S.Context.getTypeSize(ArgType) <= 10757 S.Context.getTypeSize(ParamType)) { 10758 // Found a function, don't need the header hint. 10759 EmitHeaderHint = false; 10760 break; 10761 } 10762 } 10763 } 10764 } else { 10765 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 10766 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 10767 10768 if (HeaderName) { 10769 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 10770 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 10771 R.suppressDiagnostics(); 10772 S.LookupName(R, S.getCurScope()); 10773 10774 if (R.isSingleResult()) { 10775 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 10776 if (FD && FD->getBuiltinID() == AbsKind) { 10777 EmitHeaderHint = false; 10778 } else { 10779 return; 10780 } 10781 } else if (!R.empty()) { 10782 return; 10783 } 10784 } 10785 } 10786 10787 S.Diag(Loc, diag::note_replace_abs_function) 10788 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 10789 10790 if (!HeaderName) 10791 return; 10792 10793 if (!EmitHeaderHint) 10794 return; 10795 10796 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 10797 << FunctionName; 10798 } 10799 10800 template <std::size_t StrLen> 10801 static bool IsStdFunction(const FunctionDecl *FDecl, 10802 const char (&Str)[StrLen]) { 10803 if (!FDecl) 10804 return false; 10805 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 10806 return false; 10807 if (!FDecl->isInStdNamespace()) 10808 return false; 10809 10810 return true; 10811 } 10812 10813 // Warn when using the wrong abs() function. 
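// This covers, for example: abs() applied to an unsigned value (suggest
// removing the call), to a pointer or array, to an argument wider than the
// parameter (suggest a larger variant), or an integer abs() applied to a
// floating-point value (suggest the floating-point variant).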
10814 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 10815 const FunctionDecl *FDecl) { 10816 if (Call->getNumArgs() != 1) 10817 return; 10818 10819 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 10820 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 10821 if (AbsKind == 0 && !IsStdAbs) 10822 return; 10823 10824 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10825 QualType ParamType = Call->getArg(0)->getType(); 10826 10827 // Unsigned types cannot be negative. Suggest removing the absolute value 10828 // function call. 10829 if (ArgType->isUnsignedIntegerType()) { 10830 const char *FunctionName = 10831 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 10832 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 10833 Diag(Call->getExprLoc(), diag::note_remove_abs) 10834 << FunctionName 10835 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 10836 return; 10837 } 10838 10839 // Taking the absolute value of a pointer is very suspicious, they probably 10840 // wanted to index into an array, dereference a pointer, call a function, etc. 10841 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 10842 unsigned DiagType = 0; 10843 if (ArgType->isFunctionType()) 10844 DiagType = 1; 10845 else if (ArgType->isArrayType()) 10846 DiagType = 2; 10847 10848 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 10849 return; 10850 } 10851 10852 // std::abs has overloads which prevent most of the absolute value problems 10853 // from occurring. 10854 if (IsStdAbs) 10855 return; 10856 10857 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 10858 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 10859 10860 // The argument and parameter are the same kind. Check if they are the right 10861 // size. 10862 if (ArgValueKind == ParamValueKind) { 10863 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 10864 return; 10865 10866 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 10867 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 10868 << FDecl << ArgType << ParamType; 10869 10870 if (NewAbsKind == 0) 10871 return; 10872 10873 emitReplacement(*this, Call->getExprLoc(), 10874 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10875 return; 10876 } 10877 10878 // ArgValueKind != ParamValueKind 10879 // The wrong type of absolute value function was used. Attempt to find the 10880 // proper one. 10881 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 10882 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 10883 if (NewAbsKind == 0) 10884 return; 10885 10886 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 10887 << FDecl << ParamValueKind << ArgValueKind; 10888 10889 emitReplacement(*this, Call->getExprLoc(), 10890 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10891 } 10892 10893 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 10894 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 10895 const FunctionDecl *FDecl) { 10896 if (!Call || !FDecl) return; 10897 10898 // Ignore template specializations and macros. 
10899 if (inTemplateInstantiation()) return; 10900 if (Call->getExprLoc().isMacroID()) return; 10901 10902 // Only care about the one template argument, two function parameter std::max 10903 if (Call->getNumArgs() != 2) return; 10904 if (!IsStdFunction(FDecl, "max")) return; 10905 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 10906 if (!ArgList) return; 10907 if (ArgList->size() != 1) return; 10908 10909 // Check that template type argument is unsigned integer. 10910 const auto& TA = ArgList->get(0); 10911 if (TA.getKind() != TemplateArgument::Type) return; 10912 QualType ArgType = TA.getAsType(); 10913 if (!ArgType->isUnsignedIntegerType()) return; 10914 10915 // See if either argument is a literal zero. 10916 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 10917 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 10918 if (!MTE) return false; 10919 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 10920 if (!Num) return false; 10921 if (Num->getValue() != 0) return false; 10922 return true; 10923 }; 10924 10925 const Expr *FirstArg = Call->getArg(0); 10926 const Expr *SecondArg = Call->getArg(1); 10927 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 10928 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 10929 10930 // Only warn when exactly one argument is zero. 10931 if (IsFirstArgZero == IsSecondArgZero) return; 10932 10933 SourceRange FirstRange = FirstArg->getSourceRange(); 10934 SourceRange SecondRange = SecondArg->getSourceRange(); 10935 10936 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 10937 10938 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 10939 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 10940 10941 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 10942 SourceRange RemovalRange; 10943 if (IsFirstArgZero) { 10944 RemovalRange = SourceRange(FirstRange.getBegin(), 10945 SecondRange.getBegin().getLocWithOffset(-1)); 10946 } else { 10947 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 10948 SecondRange.getEnd()); 10949 } 10950 10951 Diag(Call->getExprLoc(), diag::note_remove_max_call) 10952 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 10953 << FixItHint::CreateRemoval(RemovalRange); 10954 } 10955 10956 //===--- CHECK: Standard memory functions ---------------------------------===// 10957 10958 /// Takes the expression passed to the size_t parameter of functions 10959 /// such as memcmp, strncat, etc and warns if it's a comparison. 10960 /// 10961 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 
10962 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 10963 IdentifierInfo *FnName, 10964 SourceLocation FnLoc, 10965 SourceLocation RParenLoc) { 10966 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 10967 if (!Size) 10968 return false; 10969 10970 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 10971 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 10972 return false; 10973 10974 SourceRange SizeRange = Size->getSourceRange(); 10975 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 10976 << SizeRange << FnName; 10977 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 10978 << FnName 10979 << FixItHint::CreateInsertion( 10980 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 10981 << FixItHint::CreateRemoval(RParenLoc); 10982 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 10983 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 10984 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 10985 ")"); 10986 10987 return true; 10988 } 10989 10990 /// Determine whether the given type is or contains a dynamic class type 10991 /// (e.g., whether it has a vtable). 10992 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 10993 bool &IsContained) { 10994 // Look through array types while ignoring qualifiers. 10995 const Type *Ty = T->getBaseElementTypeUnsafe(); 10996 IsContained = false; 10997 10998 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 10999 RD = RD ? RD->getDefinition() : nullptr; 11000 if (!RD || RD->isInvalidDecl()) 11001 return nullptr; 11002 11003 if (RD->isDynamicClass()) 11004 return RD; 11005 11006 // Check all the fields. If any bases were dynamic, the class is dynamic. 11007 // It's impossible for a class to transitively contain itself by value, so 11008 // infinite recursion is impossible. 11009 for (auto *FD : RD->fields()) { 11010 bool SubContained; 11011 if (const CXXRecordDecl *ContainedRD = 11012 getContainedDynamicClass(FD->getType(), SubContained)) { 11013 IsContained = true; 11014 return ContainedRD; 11015 } 11016 } 11017 11018 return nullptr; 11019 } 11020 11021 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 11022 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 11023 if (Unary->getKind() == UETT_SizeOf) 11024 return Unary; 11025 return nullptr; 11026 } 11027 11028 /// If E is a sizeof expression, returns its argument expression, 11029 /// otherwise returns NULL. 11030 static const Expr *getSizeOfExprArg(const Expr *E) { 11031 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 11032 if (!SizeOf->isArgumentType()) 11033 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 11034 return nullptr; 11035 } 11036 11037 /// If E is a sizeof expression, returns its argument type. 
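/// For example, both 'sizeof(int)' and 'sizeof x' (with x an int) yield
/// 'int'; any expression that is not a sizeof yields a null QualType.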
11038 static QualType getSizeOfArgType(const Expr *E) { 11039 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 11040 return SizeOf->getTypeOfArgument(); 11041 return QualType(); 11042 } 11043 11044 namespace { 11045 11046 struct SearchNonTrivialToInitializeField 11047 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 11048 using Super = 11049 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 11050 11051 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 11052 11053 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 11054 SourceLocation SL) { 11055 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 11056 asDerived().visitArray(PDIK, AT, SL); 11057 return; 11058 } 11059 11060 Super::visitWithKind(PDIK, FT, SL); 11061 } 11062 11063 void visitARCStrong(QualType FT, SourceLocation SL) { 11064 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 11065 } 11066 void visitARCWeak(QualType FT, SourceLocation SL) { 11067 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 11068 } 11069 void visitStruct(QualType FT, SourceLocation SL) { 11070 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 11071 visit(FD->getType(), FD->getLocation()); 11072 } 11073 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 11074 const ArrayType *AT, SourceLocation SL) { 11075 visit(getContext().getBaseElementType(AT), SL); 11076 } 11077 void visitTrivial(QualType FT, SourceLocation SL) {} 11078 11079 static void diag(QualType RT, const Expr *E, Sema &S) { 11080 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 11081 } 11082 11083 ASTContext &getContext() { return S.getASTContext(); } 11084 11085 const Expr *E; 11086 Sema &S; 11087 }; 11088 11089 struct SearchNonTrivialToCopyField 11090 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 11091 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 11092 11093 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 11094 11095 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 11096 SourceLocation SL) { 11097 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 11098 asDerived().visitArray(PCK, AT, SL); 11099 return; 11100 } 11101 11102 Super::visitWithKind(PCK, FT, SL); 11103 } 11104 11105 void visitARCStrong(QualType FT, SourceLocation SL) { 11106 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 11107 } 11108 void visitARCWeak(QualType FT, SourceLocation SL) { 11109 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 11110 } 11111 void visitStruct(QualType FT, SourceLocation SL) { 11112 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 11113 visit(FD->getType(), FD->getLocation()); 11114 } 11115 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 11116 SourceLocation SL) { 11117 visit(getContext().getBaseElementType(AT), SL); 11118 } 11119 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 11120 SourceLocation SL) {} 11121 void visitTrivial(QualType FT, SourceLocation SL) {} 11122 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 11123 11124 static void diag(QualType RT, const Expr *E, Sema &S) { 11125 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 11126 } 11127 11128 ASTContext &getContext() { return S.getASTContext(); } 11129 11130 const Expr *E; 11131 Sema &S; 11132 
}; 11133 11134 } 11135 11136 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 11137 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 11138 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 11139 11140 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 11141 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 11142 return false; 11143 11144 return doesExprLikelyComputeSize(BO->getLHS()) || 11145 doesExprLikelyComputeSize(BO->getRHS()); 11146 } 11147 11148 return getAsSizeOfExpr(SizeofExpr) != nullptr; 11149 } 11150 11151 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 11152 /// 11153 /// \code 11154 /// #define MACRO 0 11155 /// foo(MACRO); 11156 /// foo(0); 11157 /// \endcode 11158 /// 11159 /// This should return true for the first call to foo, but not for the second 11160 /// (regardless of whether foo is a macro or function). 11161 static bool isArgumentExpandedFromMacro(SourceManager &SM, 11162 SourceLocation CallLoc, 11163 SourceLocation ArgLoc) { 11164 if (!CallLoc.isMacroID()) 11165 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 11166 11167 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 11168 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 11169 } 11170 11171 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 11172 /// last two arguments transposed. 11173 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 11174 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 11175 return; 11176 11177 const Expr *SizeArg = 11178 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 11179 11180 auto isLiteralZero = [](const Expr *E) { 11181 return (isa<IntegerLiteral>(E) && 11182 cast<IntegerLiteral>(E)->getValue() == 0) || 11183 (isa<CharacterLiteral>(E) && 11184 cast<CharacterLiteral>(E)->getValue() == 0); 11185 }; 11186 11187 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 11188 SourceLocation CallLoc = Call->getRParenLoc(); 11189 SourceManager &SM = S.getSourceManager(); 11190 if (isLiteralZero(SizeArg) && 11191 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 11192 11193 SourceLocation DiagLoc = SizeArg->getExprLoc(); 11194 11195 // Some platforms #define bzero to __builtin_memset. See if this is the 11196 // case, and if so, emit a better diagnostic. 11197 if (BId == Builtin::BIbzero || 11198 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 11199 CallLoc, SM, S.getLangOpts()) == "bzero")) { 11200 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 11201 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 11202 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 11203 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 11204 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 11205 } 11206 return; 11207 } 11208 11209 // If the second argument to a memset is a sizeof expression and the third 11210 // isn't, this is also likely an error. This should catch 11211 // 'memset(buf, sizeof(buf), 0xff)'. 
11212 if (BId == Builtin::BImemset && 11213 doesExprLikelyComputeSize(Call->getArg(1)) && 11214 !doesExprLikelyComputeSize(Call->getArg(2))) { 11215 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 11216 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 11217 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 11218 return; 11219 } 11220 } 11221 11222 /// Check for dangerous or invalid arguments to memset(). 11223 /// 11224 /// This issues warnings on known problematic, dangerous or unspecified 11225 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 11226 /// function calls. 11227 /// 11228 /// \param Call The call expression to diagnose. 11229 void Sema::CheckMemaccessArguments(const CallExpr *Call, 11230 unsigned BId, 11231 IdentifierInfo *FnName) { 11232 assert(BId != 0); 11233 11234 // It is possible to have a non-standard definition of memset. Validate 11235 // we have enough arguments, and if not, abort further checking. 11236 unsigned ExpectedNumArgs = 11237 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 11238 if (Call->getNumArgs() < ExpectedNumArgs) 11239 return; 11240 11241 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 11242 BId == Builtin::BIstrndup ? 1 : 2); 11243 unsigned LenArg = 11244 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 11245 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 11246 11247 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 11248 Call->getBeginLoc(), Call->getRParenLoc())) 11249 return; 11250 11251 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 11252 CheckMemaccessSize(*this, BId, Call); 11253 11254 // We have special checking when the length is a sizeof expression. 11255 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 11256 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 11257 llvm::FoldingSetNodeID SizeOfArgID; 11258 11259 // Although widely used, 'bzero' is not a standard function. Be more strict 11260 // with the argument types before allowing diagnostics and only allow the 11261 // form bzero(ptr, sizeof(...)). 11262 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 11263 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 11264 return; 11265 11266 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 11267 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 11268 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 11269 11270 QualType DestTy = Dest->getType(); 11271 QualType PointeeTy; 11272 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 11273 PointeeTy = DestPtrTy->getPointeeType(); 11274 11275 // Never warn about void type pointers. This can be used to suppress 11276 // false positives. 11277 if (PointeeTy->isVoidType()) 11278 continue; 11279 11280 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 11281 // actually comparing the expressions for equality. Because computing the 11282 // expression IDs can be expensive, we only do this if the diagnostic is 11283 // enabled. 11284 if (SizeOfArg && 11285 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 11286 SizeOfArg->getExprLoc())) { 11287 // We only compute IDs for expressions if the warning is enabled, and 11288 // cache the sizeof arg's ID. 
11289 if (SizeOfArgID == llvm::FoldingSetNodeID()) 11290 SizeOfArg->Profile(SizeOfArgID, Context, true); 11291 llvm::FoldingSetNodeID DestID; 11292 Dest->Profile(DestID, Context, true); 11293 if (DestID == SizeOfArgID) { 11294 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 11295 // over sizeof(src) as well. 11296 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 11297 StringRef ReadableName = FnName->getName(); 11298 11299 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 11300 if (UnaryOp->getOpcode() == UO_AddrOf) 11301 ActionIdx = 1; // If its an address-of operator, just remove it. 11302 if (!PointeeTy->isIncompleteType() && 11303 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 11304 ActionIdx = 2; // If the pointee's size is sizeof(char), 11305 // suggest an explicit length. 11306 11307 // If the function is defined as a builtin macro, do not show macro 11308 // expansion. 11309 SourceLocation SL = SizeOfArg->getExprLoc(); 11310 SourceRange DSR = Dest->getSourceRange(); 11311 SourceRange SSR = SizeOfArg->getSourceRange(); 11312 SourceManager &SM = getSourceManager(); 11313 11314 if (SM.isMacroArgExpansion(SL)) { 11315 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 11316 SL = SM.getSpellingLoc(SL); 11317 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 11318 SM.getSpellingLoc(DSR.getEnd())); 11319 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 11320 SM.getSpellingLoc(SSR.getEnd())); 11321 } 11322 11323 DiagRuntimeBehavior(SL, SizeOfArg, 11324 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 11325 << ReadableName 11326 << PointeeTy 11327 << DestTy 11328 << DSR 11329 << SSR); 11330 DiagRuntimeBehavior(SL, SizeOfArg, 11331 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 11332 << ActionIdx 11333 << SSR); 11334 11335 break; 11336 } 11337 } 11338 11339 // Also check for cases where the sizeof argument is the exact same 11340 // type as the memory argument, and where it points to a user-defined 11341 // record type. 11342 if (SizeOfArgTy != QualType()) { 11343 if (PointeeTy->isRecordType() && 11344 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 11345 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 11346 PDiag(diag::warn_sizeof_pointer_type_memaccess) 11347 << FnName << SizeOfArgTy << ArgIdx 11348 << PointeeTy << Dest->getSourceRange() 11349 << LenExpr->getSourceRange()); 11350 break; 11351 } 11352 } 11353 } else if (DestTy->isArrayType()) { 11354 PointeeTy = DestTy; 11355 } 11356 11357 if (PointeeTy == QualType()) 11358 continue; 11359 11360 // Always complain about dynamic classes. 11361 bool IsContained; 11362 if (const CXXRecordDecl *ContainedRD = 11363 getContainedDynamicClass(PointeeTy, IsContained)) { 11364 11365 unsigned OperationType = 0; 11366 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 11367 // "overwritten" if we're warning about the destination for any call 11368 // but memcmp; otherwise a verb appropriate to the call. 11369 if (ArgIdx != 0 || IsCmp) { 11370 if (BId == Builtin::BImemcpy) 11371 OperationType = 1; 11372 else if(BId == Builtin::BImemmove) 11373 OperationType = 2; 11374 else if (IsCmp) 11375 OperationType = 3; 11376 } 11377 11378 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11379 PDiag(diag::warn_dyn_class_memaccess) 11380 << (IsCmp ? 
ArgIdx + 2 : ArgIdx) << FnName 11381 << IsContained << ContainedRD << OperationType 11382 << Call->getCallee()->getSourceRange()); 11383 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 11384 BId != Builtin::BImemset) 11385 DiagRuntimeBehavior( 11386 Dest->getExprLoc(), Dest, 11387 PDiag(diag::warn_arc_object_memaccess) 11388 << ArgIdx << FnName << PointeeTy 11389 << Call->getCallee()->getSourceRange()); 11390 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 11391 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 11392 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 11393 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11394 PDiag(diag::warn_cstruct_memaccess) 11395 << ArgIdx << FnName << PointeeTy << 0); 11396 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 11397 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 11398 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 11399 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11400 PDiag(diag::warn_cstruct_memaccess) 11401 << ArgIdx << FnName << PointeeTy << 1); 11402 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 11403 } else { 11404 continue; 11405 } 11406 } else 11407 continue; 11408 11409 DiagRuntimeBehavior( 11410 Dest->getExprLoc(), Dest, 11411 PDiag(diag::note_bad_memaccess_silence) 11412 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 11413 break; 11414 } 11415 } 11416 11417 // A little helper routine: ignore addition and subtraction of integer literals. 11418 // This intentionally does not ignore all integer constant expressions because 11419 // we don't want to remove sizeof(). 11420 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 11421 Ex = Ex->IgnoreParenCasts(); 11422 11423 while (true) { 11424 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 11425 if (!BO || !BO->isAdditiveOp()) 11426 break; 11427 11428 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 11429 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 11430 11431 if (isa<IntegerLiteral>(RHS)) 11432 Ex = LHS; 11433 else if (isa<IntegerLiteral>(LHS)) 11434 Ex = RHS; 11435 else 11436 break; 11437 } 11438 11439 return Ex; 11440 } 11441 11442 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 11443 ASTContext &Context) { 11444 // Only handle constant-sized or VLAs, but not flexible members. 11445 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11446 // Only issue the FIXIT for arrays of size > 1. 11447 if (CAT->getSize().getSExtValue() <= 1) 11448 return false; 11449 } else if (!Ty->isVariableArrayType()) { 11450 return false; 11451 } 11452 return true; 11453 } 11454 11455 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11456 // be the size of the source, instead of the destination. 
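// For example, 'strlcpy(dst, src, sizeof(src))' or
// 'strlcpy(dst, src, strlen(src))'; when the destination is an array, the
// note below suggests 'sizeof(dst)' instead.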
11457 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11458 IdentifierInfo *FnName) { 11459 11460 // Don't crash if the user has the wrong number of arguments 11461 unsigned NumArgs = Call->getNumArgs(); 11462 if ((NumArgs != 3) && (NumArgs != 4)) 11463 return; 11464 11465 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11466 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11467 const Expr *CompareWithSrc = nullptr; 11468 11469 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11470 Call->getBeginLoc(), Call->getRParenLoc())) 11471 return; 11472 11473 // Look for 'strlcpy(dst, x, sizeof(x))' 11474 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11475 CompareWithSrc = Ex; 11476 else { 11477 // Look for 'strlcpy(dst, x, strlen(x))' 11478 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11479 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11480 SizeCall->getNumArgs() == 1) 11481 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11482 } 11483 } 11484 11485 if (!CompareWithSrc) 11486 return; 11487 11488 // Determine if the argument to sizeof/strlen is equal to the source 11489 // argument. In principle there's all kinds of things you could do 11490 // here, for instance creating an == expression and evaluating it with 11491 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11492 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11493 if (!SrcArgDRE) 11494 return; 11495 11496 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11497 if (!CompareWithSrcDRE || 11498 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11499 return; 11500 11501 const Expr *OriginalSizeArg = Call->getArg(2); 11502 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11503 << OriginalSizeArg->getSourceRange() << FnName; 11504 11505 // Output a FIXIT hint if the destination is an array (rather than a 11506 // pointer to an array). This could be enhanced to handle some 11507 // pointers if we know the actual size, like if DstArg is 'array+2' 11508 // we could say 'sizeof(array)-2'. 11509 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 11510 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 11511 return; 11512 11513 SmallString<128> sizeString; 11514 llvm::raw_svector_ostream OS(sizeString); 11515 OS << "sizeof("; 11516 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11517 OS << ")"; 11518 11519 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 11520 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 11521 OS.str()); 11522 } 11523 11524 /// Check if two expressions refer to the same declaration. 11525 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 11526 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 11527 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 11528 return D1->getDecl() == D2->getDecl(); 11529 return false; 11530 } 11531 11532 static const Expr *getStrlenExprArg(const Expr *E) { 11533 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 11534 const FunctionDecl *FD = CE->getDirectCallee(); 11535 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 11536 return nullptr; 11537 return CE->getArg(0)->IgnoreParenCasts(); 11538 } 11539 return nullptr; 11540 } 11541 11542 // Warn on anti-patterns as the 'size' argument to strncat. 
11543 // The correct size argument should look like following: 11544 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 11545 void Sema::CheckStrncatArguments(const CallExpr *CE, 11546 IdentifierInfo *FnName) { 11547 // Don't crash if the user has the wrong number of arguments. 11548 if (CE->getNumArgs() < 3) 11549 return; 11550 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 11551 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 11552 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 11553 11554 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 11555 CE->getRParenLoc())) 11556 return; 11557 11558 // Identify common expressions, which are wrongly used as the size argument 11559 // to strncat and may lead to buffer overflows. 11560 unsigned PatternType = 0; 11561 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 11562 // - sizeof(dst) 11563 if (referToTheSameDecl(SizeOfArg, DstArg)) 11564 PatternType = 1; 11565 // - sizeof(src) 11566 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 11567 PatternType = 2; 11568 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 11569 if (BE->getOpcode() == BO_Sub) { 11570 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 11571 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 11572 // - sizeof(dst) - strlen(dst) 11573 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 11574 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 11575 PatternType = 1; 11576 // - sizeof(src) - (anything) 11577 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 11578 PatternType = 2; 11579 } 11580 } 11581 11582 if (PatternType == 0) 11583 return; 11584 11585 // Generate the diagnostic. 11586 SourceLocation SL = LenArg->getBeginLoc(); 11587 SourceRange SR = LenArg->getSourceRange(); 11588 SourceManager &SM = getSourceManager(); 11589 11590 // If the function is defined as a builtin macro, do not show macro expansion. 11591 if (SM.isMacroArgExpansion(SL)) { 11592 SL = SM.getSpellingLoc(SL); 11593 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 11594 SM.getSpellingLoc(SR.getEnd())); 11595 } 11596 11597 // Check if the destination is an array (rather than a pointer to an array). 
11598 QualType DstTy = DstArg->getType(); 11599 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 11600 Context); 11601 if (!isKnownSizeArray) { 11602 if (PatternType == 1) 11603 Diag(SL, diag::warn_strncat_wrong_size) << SR; 11604 else 11605 Diag(SL, diag::warn_strncat_src_size) << SR; 11606 return; 11607 } 11608 11609 if (PatternType == 1) 11610 Diag(SL, diag::warn_strncat_large_size) << SR; 11611 else 11612 Diag(SL, diag::warn_strncat_src_size) << SR; 11613 11614 SmallString<128> sizeString; 11615 llvm::raw_svector_ostream OS(sizeString); 11616 OS << "sizeof("; 11617 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11618 OS << ") - "; 11619 OS << "strlen("; 11620 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11621 OS << ") - 1"; 11622 11623 Diag(SL, diag::note_strncat_wrong_size) 11624 << FixItHint::CreateReplacement(SR, OS.str()); 11625 } 11626 11627 namespace { 11628 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 11629 const UnaryOperator *UnaryExpr, const Decl *D) { 11630 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 11631 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 11632 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 11633 return; 11634 } 11635 } 11636 11637 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 11638 const UnaryOperator *UnaryExpr) { 11639 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 11640 const Decl *D = Lvalue->getDecl(); 11641 if (isa<DeclaratorDecl>(D)) 11642 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 11643 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 11644 } 11645 11646 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 11647 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 11648 Lvalue->getMemberDecl()); 11649 } 11650 11651 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 11652 const UnaryOperator *UnaryExpr) { 11653 const auto *Lambda = dyn_cast<LambdaExpr>( 11654 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 11655 if (!Lambda) 11656 return; 11657 11658 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 11659 << CalleeName << 2 /*object: lambda expression*/; 11660 } 11661 11662 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 11663 const DeclRefExpr *Lvalue) { 11664 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 11665 if (Var == nullptr) 11666 return; 11667 11668 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 11669 << CalleeName << 0 /*object: */ << Var; 11670 } 11671 11672 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 11673 const CastExpr *Cast) { 11674 SmallString<128> SizeString; 11675 llvm::raw_svector_ostream OS(SizeString); 11676 11677 clang::CastKind Kind = Cast->getCastKind(); 11678 if (Kind == clang::CK_BitCast && 11679 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 11680 return; 11681 if (Kind == clang::CK_IntegralToPointer && 11682 !isa<IntegerLiteral>( 11683 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 11684 return; 11685 11686 switch (Cast->getCastKind()) { 11687 case clang::CK_BitCast: 11688 case clang::CK_IntegralToPointer: 11689 case clang::CK_FunctionToPointerDecay: 11690 OS << '\''; 11691 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 11692 OS << '\''; 11693 break; 11694 default: 11695 return; 11696 } 11697 11698 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
11699 << CalleeName << 0 /*object: */ << OS.str(); 11700 } 11701 } // namespace 11702 11703 /// Alerts the user that they are attempting to free a non-malloc'd object. 11704 void Sema::CheckFreeArguments(const CallExpr *E) { 11705 const std::string CalleeName = 11706 cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 11707 11708 { // Prefer something that doesn't involve a cast to make things simpler. 11709 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 11710 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 11711 switch (UnaryExpr->getOpcode()) { 11712 case UnaryOperator::Opcode::UO_AddrOf: 11713 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 11714 case UnaryOperator::Opcode::UO_Plus: 11715 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 11716 default: 11717 break; 11718 } 11719 11720 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 11721 if (Lvalue->getType()->isArrayType()) 11722 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 11723 11724 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 11725 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 11726 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 11727 return; 11728 } 11729 11730 if (isa<BlockExpr>(Arg)) { 11731 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 11732 << CalleeName << 1 /*object: block*/; 11733 return; 11734 } 11735 } 11736 // Maybe the cast was important, check after the other cases. 11737 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 11738 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 11739 } 11740 11741 void 11742 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 11743 SourceLocation ReturnLoc, 11744 bool isObjCMethod, 11745 const AttrVec *Attrs, 11746 const FunctionDecl *FD) { 11747 // Check if the return value is null but should not be. 11748 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 11749 (!isObjCMethod && isNonNullType(Context, lhsType))) && 11750 CheckNonNullExpr(*this, RetValExp)) 11751 Diag(ReturnLoc, diag::warn_null_ret) 11752 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 11753 11754 // C++11 [basic.stc.dynamic.allocation]p4: 11755 // If an allocation function declared with a non-throwing 11756 // exception-specification fails to allocate storage, it shall return 11757 // a null pointer. Any other allocation function that fails to allocate 11758 // storage shall indicate failure only by throwing an exception [...] 11759 if (FD) { 11760 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 11761 if (Op == OO_New || Op == OO_Array_New) { 11762 const FunctionProtoType *Proto 11763 = FD->getType()->castAs<FunctionProtoType>(); 11764 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 11765 CheckNonNullExpr(*this, RetValExp)) 11766 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 11767 << FD << getLangOpts().CPlusPlus11; 11768 } 11769 } 11770 11771 // PPC MMA non-pointer types are not allowed as return type. Checking the type 11772 // here prevent the user from using a PPC MMA type as trailing return type. 11773 if (Context.getTargetInfo().getTriple().isPPC64()) 11774 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 11775 } 11776 11777 /// Check for comparisons of floating-point values using == and !=. Issue a 11778 /// warning if the comparison is not likely to do what the programmer intended. 
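/// For example, 'f == 0.1' with 'f' of type float is always false because 0.1
/// is not exactly representable as a float. More generally, == and != on
/// floating-point operands are flagged unless one side is an exact literal, a
/// builtin call, or both sides name the same variable.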
11779 void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS, 11780 BinaryOperatorKind Opcode) { 11781 // Match and capture subexpressions such as "(float) X == 0.1". 11782 FloatingLiteral *FPLiteral; 11783 CastExpr *FPCast; 11784 auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) { 11785 FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens()); 11786 FPCast = dyn_cast<CastExpr>(R->IgnoreParens()); 11787 return FPLiteral && FPCast; 11788 }; 11789 11790 if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) { 11791 auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>(); 11792 auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>(); 11793 if (SourceTy && TargetTy && SourceTy->isFloatingPoint() && 11794 TargetTy->isFloatingPoint()) { 11795 bool Lossy; 11796 llvm::APFloat TargetC = FPLiteral->getValue(); 11797 TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)), 11798 llvm::APFloat::rmNearestTiesToEven, &Lossy); 11799 if (Lossy) { 11800 // If the literal cannot be represented in the source type, then a 11801 // check for == is always false and check for != is always true. 11802 Diag(Loc, diag::warn_float_compare_literal) 11803 << (Opcode == BO_EQ) << QualType(SourceTy, 0) 11804 << LHS->getSourceRange() << RHS->getSourceRange(); 11805 return; 11806 } 11807 } 11808 } 11809 11810 // Match a more general floating-point equality comparison (-Wfloat-equal). 11811 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 11812 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 11813 11814 // Special case: check for x == x (which is OK). 11815 // Do not emit warnings for such cases. 11816 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 11817 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 11818 if (DRL->getDecl() == DRR->getDecl()) 11819 return; 11820 11821 // Special case: check for comparisons against literals that can be exactly 11822 // represented by APFloat. In such cases, do not emit a warning. This 11823 // is a heuristic: often comparison against such literals are used to 11824 // detect if a value in a variable has not changed. This clearly can 11825 // lead to false negatives. 11826 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 11827 if (FLL->isExact()) 11828 return; 11829 } else 11830 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 11831 if (FLR->isExact()) 11832 return; 11833 11834 // Check for comparisons with builtin types. 11835 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 11836 if (CL->getBuiltinCallee()) 11837 return; 11838 11839 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 11840 if (CR->getBuiltinCallee()) 11841 return; 11842 11843 // Emit the diagnostic. 11844 Diag(Loc, diag::warn_floatingpoint_eq) 11845 << LHS->getSourceRange() << RHS->getSourceRange(); 11846 } 11847 11848 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 11849 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 11850 11851 namespace { 11852 11853 /// Structure recording the 'active' range of an integer-valued 11854 /// expression. 11855 struct IntRange { 11856 /// The number of bits active in the int. Note that this includes exactly one 11857 /// sign bit if !NonNegative. 11858 unsigned Width; 11859 11860 /// True if the int is known not to have negative values. 
If so, all leading 11861 /// bits before Width are known zero, otherwise they are known to be the 11862 /// same as the MSB within Width. 11863 bool NonNegative; 11864 11865 IntRange(unsigned Width, bool NonNegative) 11866 : Width(Width), NonNegative(NonNegative) {} 11867 11868 /// Number of bits excluding the sign bit. 11869 unsigned valueBits() const { 11870 return NonNegative ? Width : Width - 1; 11871 } 11872 11873 /// Returns the range of the bool type. 11874 static IntRange forBoolType() { 11875 return IntRange(1, true); 11876 } 11877 11878 /// Returns the range of an opaque value of the given integral type. 11879 static IntRange forValueOfType(ASTContext &C, QualType T) { 11880 return forValueOfCanonicalType(C, 11881 T->getCanonicalTypeInternal().getTypePtr()); 11882 } 11883 11884 /// Returns the range of an opaque value of a canonical integral type. 11885 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 11886 assert(T->isCanonicalUnqualified()); 11887 11888 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11889 T = VT->getElementType().getTypePtr(); 11890 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11891 T = CT->getElementType().getTypePtr(); 11892 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11893 T = AT->getValueType().getTypePtr(); 11894 11895 if (!C.getLangOpts().CPlusPlus) { 11896 // For enum types in C code, use the underlying datatype. 11897 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11898 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 11899 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 11900 // For enum types in C++, use the known bit width of the enumerators. 11901 EnumDecl *Enum = ET->getDecl(); 11902 // In C++11, enums can have a fixed underlying type. Use this type to 11903 // compute the range. 11904 if (Enum->isFixed()) { 11905 return IntRange(C.getIntWidth(QualType(T, 0)), 11906 !ET->isSignedIntegerOrEnumerationType()); 11907 } 11908 11909 unsigned NumPositive = Enum->getNumPositiveBits(); 11910 unsigned NumNegative = Enum->getNumNegativeBits(); 11911 11912 if (NumNegative == 0) 11913 return IntRange(NumPositive, true/*NonNegative*/); 11914 else 11915 return IntRange(std::max(NumPositive + 1, NumNegative), 11916 false/*NonNegative*/); 11917 } 11918 11919 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11920 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11921 11922 const BuiltinType *BT = cast<BuiltinType>(T); 11923 assert(BT->isInteger()); 11924 11925 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11926 } 11927 11928 /// Returns the "target" range of a canonical integral type, i.e. 11929 /// the range of values expressible in the type. 11930 /// 11931 /// This matches forValueOfCanonicalType except that enums have the 11932 /// full range of their type, not the range of their enumerators. 
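/// For example, for 'enum E { A, B, C };' in C++, forValueOfCanonicalType
/// reports a 2-bit non-negative range derived from the enumerators, while this
/// returns the full range of the enum's underlying integer type.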
11933 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 11934 assert(T->isCanonicalUnqualified()); 11935 11936 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11937 T = VT->getElementType().getTypePtr(); 11938 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11939 T = CT->getElementType().getTypePtr(); 11940 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11941 T = AT->getValueType().getTypePtr(); 11942 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11943 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 11944 11945 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11946 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11947 11948 const BuiltinType *BT = cast<BuiltinType>(T); 11949 assert(BT->isInteger()); 11950 11951 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11952 } 11953 11954 /// Returns the supremum of two ranges: i.e. their conservative merge. 11955 static IntRange join(IntRange L, IntRange R) { 11956 bool Unsigned = L.NonNegative && R.NonNegative; 11957 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 11958 L.NonNegative && R.NonNegative); 11959 } 11960 11961 /// Return the range of a bitwise-AND of the two ranges. 11962 static IntRange bit_and(IntRange L, IntRange R) { 11963 unsigned Bits = std::max(L.Width, R.Width); 11964 bool NonNegative = false; 11965 if (L.NonNegative) { 11966 Bits = std::min(Bits, L.Width); 11967 NonNegative = true; 11968 } 11969 if (R.NonNegative) { 11970 Bits = std::min(Bits, R.Width); 11971 NonNegative = true; 11972 } 11973 return IntRange(Bits, NonNegative); 11974 } 11975 11976 /// Return the range of a sum of the two ranges. 11977 static IntRange sum(IntRange L, IntRange R) { 11978 bool Unsigned = L.NonNegative && R.NonNegative; 11979 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 11980 Unsigned); 11981 } 11982 11983 /// Return the range of a difference of the two ranges. 11984 static IntRange difference(IntRange L, IntRange R) { 11985 // We need a 1-bit-wider range if: 11986 // 1) LHS can be negative: least value can be reduced. 11987 // 2) RHS can be negative: greatest value can be increased. 11988 bool CanWiden = !L.NonNegative || !R.NonNegative; 11989 bool Unsigned = L.NonNegative && R.Width == 0; 11990 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 11991 !Unsigned, 11992 Unsigned); 11993 } 11994 11995 /// Return the range of a product of the two ranges. 11996 static IntRange product(IntRange L, IntRange R) { 11997 // If both LHS and RHS can be negative, we can form 11998 // -2^L * -2^R = 2^(L + R) 11999 // which requires L + R + 1 value bits to represent. 12000 bool CanWiden = !L.NonNegative && !R.NonNegative; 12001 bool Unsigned = L.NonNegative && R.NonNegative; 12002 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 12003 Unsigned); 12004 } 12005 12006 /// Return the range of a remainder operation between the two ranges. 12007 static IntRange rem(IntRange L, IntRange R) { 12008 // The result of a remainder can't be larger than the result of 12009 // either side. The sign of the result is the sign of the LHS. 
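// For example, combining an 8-bit non-negative LHS range with a 32-bit signed
// RHS range yields an 8-bit non-negative result.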
12010 bool Unsigned = L.NonNegative; 12011 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 12012 Unsigned); 12013 } 12014 }; 12015 12016 } // namespace 12017 12018 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 12019 unsigned MaxWidth) { 12020 if (value.isSigned() && value.isNegative()) 12021 return IntRange(value.getMinSignedBits(), false); 12022 12023 if (value.getBitWidth() > MaxWidth) 12024 value = value.trunc(MaxWidth); 12025 12026 // isNonNegative() just checks the sign bit without considering 12027 // signedness. 12028 return IntRange(value.getActiveBits(), true); 12029 } 12030 12031 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 12032 unsigned MaxWidth) { 12033 if (result.isInt()) 12034 return GetValueRange(C, result.getInt(), MaxWidth); 12035 12036 if (result.isVector()) { 12037 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 12038 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 12039 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 12040 R = IntRange::join(R, El); 12041 } 12042 return R; 12043 } 12044 12045 if (result.isComplexInt()) { 12046 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 12047 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 12048 return IntRange::join(R, I); 12049 } 12050 12051 // This can happen with lossless casts to intptr_t of "based" lvalues. 12052 // Assume it might use arbitrary bits. 12053 // FIXME: The only reason we need to pass the type in here is to get 12054 // the sign right on this one case. It would be nice if APValue 12055 // preserved this. 12056 assert(result.isLValue() || result.isAddrLabelDiff()); 12057 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 12058 } 12059 12060 static QualType GetExprType(const Expr *E) { 12061 QualType Ty = E->getType(); 12062 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 12063 Ty = AtomicRHS->getValueType(); 12064 return Ty; 12065 } 12066 12067 /// Pseudo-evaluate the given integer expression, estimating the 12068 /// range of values it might take. 12069 /// 12070 /// \param MaxWidth The width to which the value will be truncated. 12071 /// \param Approximate If \c true, return a likely range for the result: in 12072 /// particular, assume that arithmetic on narrower types doesn't leave 12073 /// those types. If \c false, return a range including all possible 12074 /// result values. 12075 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 12076 bool InConstantContext, bool Approximate) { 12077 E = E->IgnoreParens(); 12078 12079 // Try a full evaluation first. 12080 Expr::EvalResult result; 12081 if (E->EvaluateAsRValue(result, C, InConstantContext)) 12082 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 12083 12084 // I think we only want to look through implicit casts here; if the 12085 // user has an explicit widening cast, we should treat the value as 12086 // being of the new, wider type. 
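// For example, in 'c + 1' with a plain 8-bit 'char' c, the implicit promotion to 'int' still contributes only char's 8-bit range to the sum, whereas an explicit '(int)c' is treated as having the full range of 'int'.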
12087 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 12088 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 12089 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 12090 Approximate); 12091 12092 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 12093 12094 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 12095 CE->getCastKind() == CK_BooleanToSignedIntegral; 12096 12097 // Assume that non-integer casts can span the full range of the type. 12098 if (!isIntegerCast) 12099 return OutputTypeRange; 12100 12101 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 12102 std::min(MaxWidth, OutputTypeRange.Width), 12103 InConstantContext, Approximate); 12104 12105 // Bail out if the subexpr's range is as wide as the cast type. 12106 if (SubRange.Width >= OutputTypeRange.Width) 12107 return OutputTypeRange; 12108 12109 // Otherwise, we take the smaller width, and we're non-negative if 12110 // either the output type or the subexpr is. 12111 return IntRange(SubRange.Width, 12112 SubRange.NonNegative || OutputTypeRange.NonNegative); 12113 } 12114 12115 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 12116 // If we can fold the condition, just take that operand. 12117 bool CondResult; 12118 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 12119 return GetExprRange(C, 12120 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 12121 MaxWidth, InConstantContext, Approximate); 12122 12123 // Otherwise, conservatively merge. 12124 // GetExprRange requires an integer expression, but a throw expression 12125 // results in a void type. 12126 Expr *E = CO->getTrueExpr(); 12127 IntRange L = E->getType()->isVoidType() 12128 ? IntRange{0, true} 12129 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 12130 E = CO->getFalseExpr(); 12131 IntRange R = E->getType()->isVoidType() 12132 ? IntRange{0, true} 12133 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 12134 return IntRange::join(L, R); 12135 } 12136 12137 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 12138 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 12139 12140 switch (BO->getOpcode()) { 12141 case BO_Cmp: 12142 llvm_unreachable("builtin <=> should have class type"); 12143 12144 // Boolean-valued operations are single-bit and positive. 12145 case BO_LAnd: 12146 case BO_LOr: 12147 case BO_LT: 12148 case BO_GT: 12149 case BO_LE: 12150 case BO_GE: 12151 case BO_EQ: 12152 case BO_NE: 12153 return IntRange::forBoolType(); 12154 12155 // The type of the assignments is the type of the LHS, so the RHS 12156 // is not necessarily the same type. 12157 case BO_MulAssign: 12158 case BO_DivAssign: 12159 case BO_RemAssign: 12160 case BO_AddAssign: 12161 case BO_SubAssign: 12162 case BO_XorAssign: 12163 case BO_OrAssign: 12164 // TODO: bitfields? 12165 return IntRange::forValueOfType(C, GetExprType(E)); 12166 12167 // Simple assignments just pass through the RHS, which will have 12168 // been coerced to the LHS type. 12169 case BO_Assign: 12170 // TODO: bitfields? 12171 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 12172 Approximate); 12173 12174 // Operations with opaque sources are black-listed. 12175 case BO_PtrMemD: 12176 case BO_PtrMemI: 12177 return IntRange::forValueOfType(C, GetExprType(E)); 12178 12179 // Bitwise-and uses the *infimum* of the two source ranges.
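// For example, 'x & 0xFF' with a 32-bit signed 'x' is given at most 8 non-negative value bits, because the non-negative constant's range [0, 255] bounds the result from above.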
12180 case BO_And: 12181 case BO_AndAssign: 12182 Combine = IntRange::bit_and; 12183 break; 12184 12185 // Left shift gets black-listed based on a judgement call. 12186 case BO_Shl: 12187 // ...except that we want to treat '1 << (blah)' as logically 12188 // positive. It's an important idiom. 12189 if (IntegerLiteral *I 12190 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 12191 if (I->getValue() == 1) { 12192 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 12193 return IntRange(R.Width, /*NonNegative*/ true); 12194 } 12195 } 12196 LLVM_FALLTHROUGH; 12197 12198 case BO_ShlAssign: 12199 return IntRange::forValueOfType(C, GetExprType(E)); 12200 12201 // Right shift by a constant can narrow its left argument. 12202 case BO_Shr: 12203 case BO_ShrAssign: { 12204 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 12205 Approximate); 12206 12207 // If the shift amount is a positive constant, drop the width by 12208 // that much. 12209 if (Optional<llvm::APSInt> shift = 12210 BO->getRHS()->getIntegerConstantExpr(C)) { 12211 if (shift->isNonNegative()) { 12212 unsigned zext = shift->getZExtValue(); 12213 if (zext >= L.Width) 12214 L.Width = (L.NonNegative ? 0 : 1); 12215 else 12216 L.Width -= zext; 12217 } 12218 } 12219 12220 return L; 12221 } 12222 12223 // Comma acts as its right operand. 12224 case BO_Comma: 12225 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 12226 Approximate); 12227 12228 case BO_Add: 12229 if (!Approximate) 12230 Combine = IntRange::sum; 12231 break; 12232 12233 case BO_Sub: 12234 if (BO->getLHS()->getType()->isPointerType()) 12235 return IntRange::forValueOfType(C, GetExprType(E)); 12236 if (!Approximate) 12237 Combine = IntRange::difference; 12238 break; 12239 12240 case BO_Mul: 12241 if (!Approximate) 12242 Combine = IntRange::product; 12243 break; 12244 12245 // The width of a division result is mostly determined by the size 12246 // of the LHS. 12247 case BO_Div: { 12248 // Don't 'pre-truncate' the operands. 12249 unsigned opWidth = C.getIntWidth(GetExprType(E)); 12250 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 12251 Approximate); 12252 12253 // If the divisor is constant, use that. 12254 if (Optional<llvm::APSInt> divisor = 12255 BO->getRHS()->getIntegerConstantExpr(C)) { 12256 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 12257 if (log2 >= L.Width) 12258 L.Width = (L.NonNegative ? 0 : 1); 12259 else 12260 L.Width = std::min(L.Width - log2, MaxWidth); 12261 return L; 12262 } 12263 12264 // Otherwise, just use the LHS's width. 12265 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 12266 // could be -1. 12267 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 12268 Approximate); 12269 return IntRange(L.Width, L.NonNegative && R.NonNegative); 12270 } 12271 12272 case BO_Rem: 12273 Combine = IntRange::rem; 12274 break; 12275 12276 // The default behavior is okay for these. 12277 case BO_Xor: 12278 case BO_Or: 12279 break; 12280 } 12281 12282 // Combine the two ranges, but limit the result to the type in which we 12283 // performed the computation. 
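// For example, when Approximate is false the sum of two 8-bit non-negative operands is given 9 value bits; with Approximate set, 'join' is used instead and the result is assumed to stay within 8 bits. Either way the width is clamped to MaxWidth below.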
12284 QualType T = GetExprType(E); 12285 unsigned opWidth = C.getIntWidth(T); 12286 IntRange L = 12287 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 12288 IntRange R = 12289 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 12290 IntRange C = Combine(L, R); 12291 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 12292 C.Width = std::min(C.Width, MaxWidth); 12293 return C; 12294 } 12295 12296 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 12297 switch (UO->getOpcode()) { 12298 // Boolean-valued operations are white-listed. 12299 case UO_LNot: 12300 return IntRange::forBoolType(); 12301 12302 // Operations with opaque sources are black-listed. 12303 case UO_Deref: 12304 case UO_AddrOf: // should be impossible 12305 return IntRange::forValueOfType(C, GetExprType(E)); 12306 12307 default: 12308 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 12309 Approximate); 12310 } 12311 } 12312 12313 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12314 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 12315 Approximate); 12316 12317 if (const auto *BitField = E->getSourceBitField()) 12318 return IntRange(BitField->getBitWidthValue(C), 12319 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 12320 12321 return IntRange::forValueOfType(C, GetExprType(E)); 12322 } 12323 12324 static IntRange GetExprRange(ASTContext &C, const Expr *E, 12325 bool InConstantContext, bool Approximate) { 12326 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 12327 Approximate); 12328 } 12329 12330 /// Checks whether the given value, which currently has the given 12331 /// source semantics, has the same value when coerced through the 12332 /// target semantics. 12333 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 12334 const llvm::fltSemantics &Src, 12335 const llvm::fltSemantics &Tgt) { 12336 llvm::APFloat truncated = value; 12337 12338 bool ignored; 12339 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 12340 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 12341 12342 return truncated.bitwiseIsEqual(value); 12343 } 12344 12345 /// Checks whether the given value, which currently has the given 12346 /// source semantics, has the same value when coerced through the 12347 /// target semantics. 12348 /// 12349 /// The value might be a vector of floats (or a complex number). 12350 static bool IsSameFloatAfterCast(const APValue &value, 12351 const llvm::fltSemantics &Src, 12352 const llvm::fltSemantics &Tgt) { 12353 if (value.isFloat()) 12354 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 12355 12356 if (value.isVector()) { 12357 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 12358 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 12359 return false; 12360 return true; 12361 } 12362 12363 assert(value.isComplexFloat()); 12364 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 12365 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 12366 } 12367 12368 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 12369 bool IsListInit = false); 12370 12371 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 12372 // Suppress cases where we are comparing against an enum constant. 
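// For example, a range check like 'index <= kLastMode', where 'kLastMode' is an enumerator used as a symbolic bound, should not be flagged merely because no value of 'index' can exceed it.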
12373 if (const DeclRefExpr *DR = 12374 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 12375 if (isa<EnumConstantDecl>(DR->getDecl())) 12376 return true; 12377 12378 // Suppress cases where the value is expanded from a macro, unless that macro 12379 // is how a language represents a boolean literal. This is the case in both C 12380 // and Objective-C. 12381 SourceLocation BeginLoc = E->getBeginLoc(); 12382 if (BeginLoc.isMacroID()) { 12383 StringRef MacroName = Lexer::getImmediateMacroName( 12384 BeginLoc, S.getSourceManager(), S.getLangOpts()); 12385 return MacroName != "YES" && MacroName != "NO" && 12386 MacroName != "true" && MacroName != "false"; 12387 } 12388 12389 return false; 12390 } 12391 12392 static bool isKnownToHaveUnsignedValue(Expr *E) { 12393 return E->getType()->isIntegerType() && 12394 (!E->getType()->isSignedIntegerType() || 12395 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 12396 } 12397 12398 namespace { 12399 /// The promoted range of values of a type. In general this has the 12400 /// following structure: 12401 /// 12402 /// |-----------| . . . |-----------| 12403 /// ^ ^ ^ ^ 12404 /// Min HoleMin HoleMax Max 12405 /// 12406 /// ... where there is only a hole if a signed type is promoted to unsigned 12407 /// (in which case Min and Max are the smallest and largest representable 12408 /// values). 12409 struct PromotedRange { 12410 // Min, or HoleMax if there is a hole. 12411 llvm::APSInt PromotedMin; 12412 // Max, or HoleMin if there is a hole. 12413 llvm::APSInt PromotedMax; 12414 12415 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 12416 if (R.Width == 0) 12417 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 12418 else if (R.Width >= BitWidth && !Unsigned) { 12419 // Promotion made the type *narrower*. This happens when promoting 12420 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 12421 // Treat all values of 'signed int' as being in range for now. 12422 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 12423 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 12424 } else { 12425 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 12426 .extOrTrunc(BitWidth); 12427 PromotedMin.setIsUnsigned(Unsigned); 12428 12429 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 12430 .extOrTrunc(BitWidth); 12431 PromotedMax.setIsUnsigned(Unsigned); 12432 } 12433 } 12434 12435 // Determine whether this range is contiguous (has no hole). 12436 bool isContiguous() const { return PromotedMin <= PromotedMax; } 12437 12438 // Where a constant value is within the range. 
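// For example, if the promoted range of the other operand is [0, 255] (an 'unsigned char' promoted to 'int'), the constant 256 compares as Greater, and constantValue() below folds 'x < 256' to "true" and 'x > 256' to "false".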
12439 enum ComparisonResult { 12440 LT = 0x1, 12441 LE = 0x2, 12442 GT = 0x4, 12443 GE = 0x8, 12444 EQ = 0x10, 12445 NE = 0x20, 12446 InRangeFlag = 0x40, 12447 12448 Less = LE | LT | NE, 12449 Min = LE | InRangeFlag, 12450 InRange = InRangeFlag, 12451 Max = GE | InRangeFlag, 12452 Greater = GE | GT | NE, 12453 12454 OnlyValue = LE | GE | EQ | InRangeFlag, 12455 InHole = NE 12456 }; 12457 12458 ComparisonResult compare(const llvm::APSInt &Value) const { 12459 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 12460 Value.isUnsigned() == PromotedMin.isUnsigned()); 12461 if (!isContiguous()) { 12462 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 12463 if (Value.isMinValue()) return Min; 12464 if (Value.isMaxValue()) return Max; 12465 if (Value >= PromotedMin) return InRange; 12466 if (Value <= PromotedMax) return InRange; 12467 return InHole; 12468 } 12469 12470 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 12471 case -1: return Less; 12472 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 12473 case 1: 12474 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 12475 case -1: return InRange; 12476 case 0: return Max; 12477 case 1: return Greater; 12478 } 12479 } 12480 12481 llvm_unreachable("impossible compare result"); 12482 } 12483 12484 static llvm::Optional<StringRef> 12485 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 12486 if (Op == BO_Cmp) { 12487 ComparisonResult LTFlag = LT, GTFlag = GT; 12488 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 12489 12490 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 12491 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 12492 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 12493 return llvm::None; 12494 } 12495 12496 ComparisonResult TrueFlag, FalseFlag; 12497 if (Op == BO_EQ) { 12498 TrueFlag = EQ; 12499 FalseFlag = NE; 12500 } else if (Op == BO_NE) { 12501 TrueFlag = NE; 12502 FalseFlag = EQ; 12503 } else { 12504 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 12505 TrueFlag = LT; 12506 FalseFlag = GE; 12507 } else { 12508 TrueFlag = GT; 12509 FalseFlag = LE; 12510 } 12511 if (Op == BO_GE || Op == BO_LE) 12512 std::swap(TrueFlag, FalseFlag); 12513 } 12514 if (R & TrueFlag) 12515 return StringRef("true"); 12516 if (R & FalseFlag) 12517 return StringRef("false"); 12518 return llvm::None; 12519 } 12520 }; 12521 } 12522 12523 static bool HasEnumType(Expr *E) { 12524 // Strip off implicit integral promotions. 12525 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 12526 if (ICE->getCastKind() != CK_IntegralCast && 12527 ICE->getCastKind() != CK_NoOp) 12528 break; 12529 E = ICE->getSubExpr(); 12530 } 12531 12532 return E->getType()->isEnumeralType(); 12533 } 12534 12535 static int classifyConstantValue(Expr *Constant) { 12536 // The values of this enumeration are used in the diagnostics 12537 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 12538 enum ConstantValueKind { 12539 Miscellaneous = 0, 12540 LiteralTrue, 12541 LiteralFalse 12542 }; 12543 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 12544 return BL->getValue() ? 
ConstantValueKind::LiteralTrue 12545 : ConstantValueKind::LiteralFalse; 12546 return ConstantValueKind::Miscellaneous; 12547 } 12548 12549 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 12550 Expr *Constant, Expr *Other, 12551 const llvm::APSInt &Value, 12552 bool RhsConstant) { 12553 if (S.inTemplateInstantiation()) 12554 return false; 12555 12556 Expr *OriginalOther = Other; 12557 12558 Constant = Constant->IgnoreParenImpCasts(); 12559 Other = Other->IgnoreParenImpCasts(); 12560 12561 // Suppress warnings on tautological comparisons between values of the same 12562 // enumeration type. There are only two ways we could warn on this: 12563 // - If the constant is outside the range of representable values of 12564 // the enumeration. In such a case, we should warn about the cast 12565 // to enumeration type, not about the comparison. 12566 // - If the constant is the maximum / minimum in-range value. For an 12567 // enumeration type, such comparisons can be meaningful and useful. 12568 if (Constant->getType()->isEnumeralType() && 12569 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 12570 return false; 12571 12572 IntRange OtherValueRange = GetExprRange( 12573 S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false); 12574 12575 QualType OtherT = Other->getType(); 12576 if (const auto *AT = OtherT->getAs<AtomicType>()) 12577 OtherT = AT->getValueType(); 12578 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT); 12579 12580 // Special case for ObjC BOOL on targets where it's a typedef for a signed char 12581 // (namely, macOS). FIXME: IntRange::forValueOfType should do this. 12582 bool IsObjCSignedCharBool = S.getLangOpts().ObjC && 12583 S.NSAPIObj->isObjCBOOLType(OtherT) && 12584 OtherT->isSpecificBuiltinType(BuiltinType::SChar); 12585 12586 // Whether we're treating Other as being a bool because of the form of 12587 // expression despite it having another type (typically 'int' in C). 12588 bool OtherIsBooleanDespiteType = 12589 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue(); 12590 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool) 12591 OtherTypeRange = OtherValueRange = IntRange::forBoolType(); 12592 12593 // Check if all values in the range of possible values of this expression 12594 // lead to the same comparison outcome. 12595 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(), 12596 Value.isUnsigned()); 12597 auto Cmp = OtherPromotedValueRange.compare(Value); 12598 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant); 12599 if (!Result) 12600 return false; 12601 12602 // Also consider the range determined by the type alone. This allows us to 12603 // classify the warning under the proper diagnostic group. 12604 bool TautologicalTypeCompare = false; 12605 { 12606 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(), 12607 Value.isUnsigned()); 12608 auto TypeCmp = OtherPromotedTypeRange.compare(Value); 12609 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp, 12610 RhsConstant)) { 12611 TautologicalTypeCompare = true; 12612 Cmp = TypeCmp; 12613 Result = TypeResult; 12614 } 12615 } 12616 12617 // Don't warn if the non-constant operand actually always evaluates to the 12618 // same value. 12619 if (!TautologicalTypeCompare && OtherValueRange.Width == 0) 12620 return false; 12621 12622 // Suppress the diagnostic for an in-range comparison if the constant comes 12623 // from a macro or enumerator.
We don't want to diagnose 12624 // 12625 // some_long_value <= INT_MAX 12626 // 12627 // when sizeof(int) == sizeof(long). 12628 bool InRange = Cmp & PromotedRange::InRangeFlag; 12629 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 12630 return false; 12631 12632 // A comparison of an unsigned bit-field against 0 is really a type problem, 12633 // even though at the type level the bit-field might promote to 'signed int'. 12634 if (Other->refersToBitField() && InRange && Value == 0 && 12635 Other->getType()->isUnsignedIntegerOrEnumerationType()) 12636 TautologicalTypeCompare = true; 12637 12638 // If this is a comparison to an enum constant, include that 12639 // constant in the diagnostic. 12640 const EnumConstantDecl *ED = nullptr; 12641 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 12642 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 12643 12644 // Should be enough for uint128 (39 decimal digits) 12645 SmallString<64> PrettySourceValue; 12646 llvm::raw_svector_ostream OS(PrettySourceValue); 12647 if (ED) { 12648 OS << '\'' << *ED << "' (" << Value << ")"; 12649 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 12650 Constant->IgnoreParenImpCasts())) { 12651 OS << (BL->getValue() ? "YES" : "NO"); 12652 } else { 12653 OS << Value; 12654 } 12655 12656 if (!TautologicalTypeCompare) { 12657 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 12658 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 12659 << E->getOpcodeStr() << OS.str() << *Result 12660 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12661 return true; 12662 } 12663 12664 if (IsObjCSignedCharBool) { 12665 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12666 S.PDiag(diag::warn_tautological_compare_objc_bool) 12667 << OS.str() << *Result); 12668 return true; 12669 } 12670 12671 // FIXME: We use a somewhat different formatting for the in-range cases and 12672 // cases involving boolean values for historical reasons. We should pick a 12673 // consistent way of presenting these diagnostics. 12674 if (!InRange || Other->isKnownToHaveBooleanValue()) { 12675 12676 S.DiagRuntimeBehavior( 12677 E->getOperatorLoc(), E, 12678 S.PDiag(!InRange ? diag::warn_out_of_range_compare 12679 : diag::warn_tautological_bool_compare) 12680 << OS.str() << classifyConstantValue(Constant) << OtherT 12681 << OtherIsBooleanDespiteType << *Result 12682 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 12683 } else { 12684 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 12685 unsigned Diag = 12686 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 12687 ? (HasEnumType(OriginalOther) 12688 ? diag::warn_unsigned_enum_always_true_comparison 12689 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 12690 : diag::warn_unsigned_always_true_comparison) 12691 : diag::warn_tautological_constant_compare; 12692 12693 S.Diag(E->getOperatorLoc(), Diag) 12694 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 12695 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12696 } 12697 12698 return true; 12699 } 12700 12701 /// Analyze the operands of the given comparison. Implements the 12702 /// fallback case from AnalyzeComparison. 12703 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 12704 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12705 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12706 } 12707 12708 /// Implements -Wsign-compare. 
12709 /// 12710 /// \param E the binary operator to check for warnings 12711 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 12712 // The type the comparison is being performed in. 12713 QualType T = E->getLHS()->getType(); 12714 12715 // Only analyze comparison operators where both sides have been converted to 12716 // the same type. 12717 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 12718 return AnalyzeImpConvsInComparison(S, E); 12719 12720 // Don't analyze value-dependent comparisons directly. 12721 if (E->isValueDependent()) 12722 return AnalyzeImpConvsInComparison(S, E); 12723 12724 Expr *LHS = E->getLHS(); 12725 Expr *RHS = E->getRHS(); 12726 12727 if (T->isIntegralType(S.Context)) { 12728 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 12729 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 12730 12731 // We don't care about expressions whose result is a constant. 12732 if (RHSValue && LHSValue) 12733 return AnalyzeImpConvsInComparison(S, E); 12734 12735 // We only care about expressions where just one side is literal 12736 if ((bool)RHSValue ^ (bool)LHSValue) { 12737 // Is the constant on the RHS or LHS? 12738 const bool RhsConstant = (bool)RHSValue; 12739 Expr *Const = RhsConstant ? RHS : LHS; 12740 Expr *Other = RhsConstant ? LHS : RHS; 12741 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12742 12743 // Check whether an integer constant comparison results in a value 12744 // of 'true' or 'false'. 12745 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12746 return AnalyzeImpConvsInComparison(S, E); 12747 } 12748 } 12749 12750 if (!T->hasUnsignedIntegerRepresentation()) { 12751 // We don't do anything special if this isn't an unsigned integral 12752 // comparison: we're only interested in integral comparisons, and 12753 // signed comparisons only happen in cases we don't care to warn about. 12754 return AnalyzeImpConvsInComparison(S, E); 12755 } 12756 12757 LHS = LHS->IgnoreParenImpCasts(); 12758 RHS = RHS->IgnoreParenImpCasts(); 12759 12760 if (!S.getLangOpts().CPlusPlus) { 12761 // Avoid warning about comparison of integers with different signs when 12762 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12763 // the type of `E`. 12764 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12765 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12766 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12767 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12768 } 12769 12770 // Check to see if one of the (unmodified) operands is of different 12771 // signedness. 12772 Expr *signedOperand, *unsignedOperand; 12773 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12774 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12775 "unsigned comparison between two signed integer expressions?"); 12776 signedOperand = LHS; 12777 unsignedOperand = RHS; 12778 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12779 signedOperand = RHS; 12780 unsignedOperand = LHS; 12781 } else { 12782 return AnalyzeImpConvsInComparison(S, E); 12783 } 12784 12785 // Otherwise, calculate the effective range of the signed operand. 12786 IntRange signedRange = GetExprRange( 12787 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12788 12789 // Go ahead and analyze implicit conversions in the operands. Note 12790 // that we skip the implicit conversions on both sides. 
12791 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 12792 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 12793 12794 // If the signed range is non-negative, -Wsign-compare won't fire. 12795 if (signedRange.NonNegative) 12796 return; 12797 12798 // For (in)equality comparisons, if the unsigned operand is a 12799 // constant which cannot collide with an overflowed signed operand, 12800 // then reinterpreting the signed operand as unsigned will not 12801 // change the result of the comparison. 12802 if (E->isEqualityOp()) { 12803 unsigned comparisonWidth = S.Context.getIntWidth(T); 12804 IntRange unsignedRange = 12805 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 12806 /*Approximate*/ true); 12807 12808 // We should never be unable to prove that the unsigned operand is 12809 // non-negative. 12810 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 12811 12812 if (unsignedRange.Width < comparisonWidth) 12813 return; 12814 } 12815 12816 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12817 S.PDiag(diag::warn_mixed_sign_comparison) 12818 << LHS->getType() << RHS->getType() 12819 << LHS->getSourceRange() << RHS->getSourceRange()); 12820 } 12821 12822 /// Analyzes an attempt to assign the given value to a bitfield. 12823 /// 12824 /// Returns true if there was something fishy about the attempt. 12825 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 12826 SourceLocation InitLoc) { 12827 assert(Bitfield->isBitField()); 12828 if (Bitfield->isInvalidDecl()) 12829 return false; 12830 12831 // White-list bool bitfields. 12832 QualType BitfieldType = Bitfield->getType(); 12833 if (BitfieldType->isBooleanType()) 12834 return false; 12835 12836 if (BitfieldType->isEnumeralType()) { 12837 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 12838 // If the underlying enum type was not explicitly specified as an unsigned 12839 // type and the enum contains only positive values, MSVC++ will cause an 12840 // inconsistency by storing this as a signed type. 12841 if (S.getLangOpts().CPlusPlus11 && 12842 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 12843 BitfieldEnumDecl->getNumPositiveBits() > 0 && 12844 BitfieldEnumDecl->getNumNegativeBits() == 0) { 12845 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 12846 << BitfieldEnumDecl; 12847 } 12848 } 12849 12850 if (Bitfield->getType()->isBooleanType()) 12851 return false; 12852 12853 // Ignore value- or type-dependent expressions. 12854 if (Bitfield->getBitWidth()->isValueDependent() || 12855 Bitfield->getBitWidth()->isTypeDependent() || 12856 Init->isValueDependent() || 12857 Init->isTypeDependent()) 12858 return false; 12859 12860 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 12861 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 12862 12863 Expr::EvalResult Result; 12864 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 12865 Expr::SE_AllowSideEffects)) { 12866 // The RHS is not constant. If the RHS has an enum type, make sure the 12867 // bitfield is wide enough to hold all the values of the enum without 12868 // truncation. 12869 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 12870 EnumDecl *ED = EnumTy->getDecl(); 12871 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 12872 12873 // Enum types are implicitly signed on Windows, so check if there are any 12874 // negative enumerators to see if the enum was intended to be signed or 12875 // not.
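// For example, storing an enumerator of 'enum E { A, B, C, D }' into a signed two-bit bit-field silently turns C (2) and D (3) into negative values, even though the enum itself has no negative enumerators.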
12876 bool SignedEnum = ED->getNumNegativeBits() > 0; 12877 12878 // Check for surprising sign changes when assigning enum values to a 12879 // bitfield of different signedness. If the bitfield is signed and we 12880 // have exactly the right number of bits to store this unsigned enum, 12881 // suggest changing the enum to an unsigned type. This typically happens 12882 // on Windows where unfixed enums always use an underlying type of 'int'. 12883 unsigned DiagID = 0; 12884 if (SignedEnum && !SignedBitfield) { 12885 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 12886 } else if (SignedBitfield && !SignedEnum && 12887 ED->getNumPositiveBits() == FieldWidth) { 12888 DiagID = diag::warn_signed_bitfield_enum_conversion; 12889 } 12890 12891 if (DiagID) { 12892 S.Diag(InitLoc, DiagID) << Bitfield << ED; 12893 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 12894 SourceRange TypeRange = 12895 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 12896 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 12897 << SignedEnum << TypeRange; 12898 } 12899 12900 // Compute the required bitwidth. If the enum has negative values, we need 12901 // one more bit than the normal number of positive bits to represent the 12902 // sign bit. 12903 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 12904 ED->getNumNegativeBits()) 12905 : ED->getNumPositiveBits(); 12906 12907 // Check the bitwidth. 12908 if (BitsNeeded > FieldWidth) { 12909 Expr *WidthExpr = Bitfield->getBitWidth(); 12910 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 12911 << Bitfield << ED; 12912 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 12913 << BitsNeeded << ED << WidthExpr->getSourceRange(); 12914 } 12915 } 12916 12917 return false; 12918 } 12919 12920 llvm::APSInt Value = Result.Val.getInt(); 12921 12922 unsigned OriginalWidth = Value.getBitWidth(); 12923 12924 if (!Value.isSigned() || Value.isNegative()) 12925 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 12926 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 12927 OriginalWidth = Value.getMinSignedBits(); 12928 12929 if (OriginalWidth <= FieldWidth) 12930 return false; 12931 12932 // Compute the value which the bitfield will contain. 12933 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 12934 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 12935 12936 // Check whether the stored value is equal to the original value. 12937 TruncatedValue = TruncatedValue.extend(OriginalWidth); 12938 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 12939 return false; 12940 12941 // Special-case bitfields of width 1: booleans are naturally 0/1, and 12942 // therefore don't strictly fit into a signed bitfield of width 1. 12943 if (FieldWidth == 1 && Value == 1) 12944 return false; 12945 12946 std::string PrettyValue = toString(Value, 10); 12947 std::string PrettyTrunc = toString(TruncatedValue, 10); 12948 12949 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 12950 << PrettyValue << PrettyTrunc << OriginalInit->getType() 12951 << Init->getSourceRange(); 12952 12953 return true; 12954 } 12955 12956 /// Analyze the given simple or compound assignment for warning-worthy 12957 /// operations. 12958 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 12959 // Just recurse on the LHS. 
12960 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12961 12962 // We want to recurse on the RHS as normal unless we're assigning to 12963 // a bitfield. 12964 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 12965 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 12966 E->getOperatorLoc())) { 12967 // Recurse, ignoring any implicit conversions on the RHS. 12968 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 12969 E->getOperatorLoc()); 12970 } 12971 } 12972 12973 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12974 12975 // Diagnose implicitly sequentially-consistent atomic assignment. 12976 if (E->getLHS()->getType()->isAtomicType()) 12977 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12978 } 12979 12980 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12981 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 12982 SourceLocation CContext, unsigned diag, 12983 bool pruneControlFlow = false) { 12984 if (pruneControlFlow) { 12985 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12986 S.PDiag(diag) 12987 << SourceType << T << E->getSourceRange() 12988 << SourceRange(CContext)); 12989 return; 12990 } 12991 S.Diag(E->getExprLoc(), diag) 12992 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 12993 } 12994 12995 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12996 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 12997 SourceLocation CContext, 12998 unsigned diag, bool pruneControlFlow = false) { 12999 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 13000 } 13001 13002 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 13003 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 13004 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 13005 } 13006 13007 static void adornObjCBoolConversionDiagWithTernaryFixit( 13008 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 13009 Expr *Ignored = SourceExpr->IgnoreImplicit(); 13010 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 13011 Ignored = OVE->getSourceExpr(); 13012 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 13013 isa<BinaryOperator>(Ignored) || 13014 isa<CXXOperatorCallExpr>(Ignored); 13015 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 13016 if (NeedsParens) 13017 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 13018 << FixItHint::CreateInsertion(EndLoc, ")"); 13019 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 13020 } 13021 13022 /// Diagnose an implicit cast from a floating point value to an integer value. 
13023 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 13024 SourceLocation CContext) { 13025 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 13026 const bool PruneWarnings = S.inTemplateInstantiation(); 13027 13028 Expr *InnerE = E->IgnoreParenImpCasts(); 13029 // We also want to warn on, e.g., "int i = -1.234" 13030 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 13031 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 13032 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 13033 13034 const bool IsLiteral = 13035 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 13036 13037 llvm::APFloat Value(0.0); 13038 bool IsConstant = 13039 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 13040 if (!IsConstant) { 13041 if (isObjCSignedCharBool(S, T)) { 13042 return adornObjCBoolConversionDiagWithTernaryFixit( 13043 S, E, 13044 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 13045 << E->getType()); 13046 } 13047 13048 return DiagnoseImpCast(S, E, T, CContext, 13049 diag::warn_impcast_float_integer, PruneWarnings); 13050 } 13051 13052 bool isExact = false; 13053 13054 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 13055 T->hasUnsignedIntegerRepresentation()); 13056 llvm::APFloat::opStatus Result = Value.convertToInteger( 13057 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 13058 13059 // FIXME: Force the precision of the source value down so we don't print 13060 // digits which are usually useless (we don't really care here if we 13061 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 13062 // would automatically print the shortest representation, but it's a bit 13063 // tricky to implement. 13064 SmallString<16> PrettySourceValue; 13065 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 13066 precision = (precision * 59 + 195) / 196; 13067 Value.toString(PrettySourceValue, precision); 13068 13069 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 13070 return adornObjCBoolConversionDiagWithTernaryFixit( 13071 S, E, 13072 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 13073 << PrettySourceValue); 13074 } 13075 13076 if (Result == llvm::APFloat::opOK && isExact) { 13077 if (IsLiteral) return; 13078 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 13079 PruneWarnings); 13080 } 13081 13082 // Conversion of a floating-point value to a non-bool integer where the 13083 // integral part cannot be represented by the integer type is undefined. 13084 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 13085 return DiagnoseImpCast( 13086 S, E, T, CContext, 13087 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 13088 : diag::warn_impcast_float_to_integer_out_of_range, 13089 PruneWarnings); 13090 13091 unsigned DiagID = 0; 13092 if (IsLiteral) { 13093 // Warn on floating point literal to integer. 13094 DiagID = diag::warn_impcast_literal_float_to_integer; 13095 } else if (IntegerValue == 0) { 13096 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 13097 return DiagnoseImpCast(S, E, T, CContext, 13098 diag::warn_impcast_float_integer, PruneWarnings); 13099 } 13100 // Warn on non-zero to zero conversion. 
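// For example, 'int i = 1.0 / 3.0;' has a non-zero source value that truncates to 0.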
13101 DiagID = diag::warn_impcast_float_to_integer_zero; 13102 } else { 13103 if (IntegerValue.isUnsigned()) { 13104 if (!IntegerValue.isMaxValue()) { 13105 return DiagnoseImpCast(S, E, T, CContext, 13106 diag::warn_impcast_float_integer, PruneWarnings); 13107 } 13108 } else { // IntegerValue.isSigned() 13109 if (!IntegerValue.isMaxSignedValue() && 13110 !IntegerValue.isMinSignedValue()) { 13111 return DiagnoseImpCast(S, E, T, CContext, 13112 diag::warn_impcast_float_integer, PruneWarnings); 13113 } 13114 } 13115 // Warn on evaluatable floating point expression to integer conversion. 13116 DiagID = diag::warn_impcast_float_to_integer; 13117 } 13118 13119 SmallString<16> PrettyTargetValue; 13120 if (IsBool) 13121 PrettyTargetValue = Value.isZero() ? "false" : "true"; 13122 else 13123 IntegerValue.toString(PrettyTargetValue); 13124 13125 if (PruneWarnings) { 13126 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13127 S.PDiag(DiagID) 13128 << E->getType() << T.getUnqualifiedType() 13129 << PrettySourceValue << PrettyTargetValue 13130 << E->getSourceRange() << SourceRange(CContext)); 13131 } else { 13132 S.Diag(E->getExprLoc(), DiagID) 13133 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 13134 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 13135 } 13136 } 13137 13138 /// Analyze the given compound assignment for the possible losing of 13139 /// floating-point precision. 13140 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 13141 assert(isa<CompoundAssignOperator>(E) && 13142 "Must be compound assignment operation"); 13143 // Recurse on the LHS and RHS in here 13144 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 13145 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 13146 13147 if (E->getLHS()->getType()->isAtomicType()) 13148 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 13149 13150 // Now check the outermost expression 13151 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 13152 const auto *RBT = cast<CompoundAssignOperator>(E) 13153 ->getComputationResultType() 13154 ->getAs<BuiltinType>(); 13155 13156 // The below checks assume source is floating point. 13157 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 13158 13159 // If source is floating point but target is an integer. 13160 if (ResultBT->isInteger()) 13161 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 13162 E->getExprLoc(), diag::warn_impcast_float_integer); 13163 13164 if (!ResultBT->isFloatingPoint()) 13165 return; 13166 13167 // If both source and target are floating points, warn about losing precision. 13168 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13169 QualType(ResultBT, 0), QualType(RBT, 0)); 13170 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 13171 // warn about dropping FP rank. 
13172 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 13173 diag::warn_impcast_float_result_precision); 13174 } 13175 13176 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 13177 IntRange Range) { 13178 if (!Range.Width) return "0"; 13179 13180 llvm::APSInt ValueInRange = Value; 13181 ValueInRange.setIsSigned(!Range.NonNegative); 13182 ValueInRange = ValueInRange.trunc(Range.Width); 13183 return toString(ValueInRange, 10); 13184 } 13185 13186 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 13187 if (!isa<ImplicitCastExpr>(Ex)) 13188 return false; 13189 13190 Expr *InnerE = Ex->IgnoreParenImpCasts(); 13191 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 13192 const Type *Source = 13193 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 13194 if (Target->isDependentType()) 13195 return false; 13196 13197 const BuiltinType *FloatCandidateBT = 13198 dyn_cast<BuiltinType>(ToBool ? Source : Target); 13199 const Type *BoolCandidateType = ToBool ? Target : Source; 13200 13201 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 13202 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 13203 } 13204 13205 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 13206 SourceLocation CC) { 13207 unsigned NumArgs = TheCall->getNumArgs(); 13208 for (unsigned i = 0; i < NumArgs; ++i) { 13209 Expr *CurrA = TheCall->getArg(i); 13210 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 13211 continue; 13212 13213 bool IsSwapped = ((i > 0) && 13214 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 13215 IsSwapped |= ((i < (NumArgs - 1)) && 13216 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 13217 if (IsSwapped) { 13218 // Warn on this floating-point to bool conversion. 13219 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 13220 CurrA->getType(), CC, 13221 diag::warn_impcast_floating_point_to_bool); 13222 } 13223 } 13224 } 13225 13226 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 13227 SourceLocation CC) { 13228 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 13229 E->getExprLoc())) 13230 return; 13231 13232 // Don't warn on functions which have return type nullptr_t. 13233 if (isa<CallExpr>(E)) 13234 return; 13235 13236 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 13237 const Expr::NullPointerConstantKind NullKind = 13238 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 13239 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 13240 return; 13241 13242 // Return if target type is a safe conversion. 13243 if (T->isAnyPointerType() || T->isBlockPointerType() || 13244 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 13245 return; 13246 13247 SourceLocation Loc = E->getSourceRange().getBegin(); 13248 13249 // Venture through the macro stacks to get to the source of macro arguments. 13250 // The new location is a better location than the complete location that was 13251 // passed in. 13252 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 13253 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 13254 13255 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
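// For example, for 'int fd = NULL;' in C++ we want the warning and the '0' fix-it to point at the 'NULL' the user wrote, not at '__null' inside the system header.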
13256 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 13257 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 13258 Loc, S.SourceMgr, S.getLangOpts()); 13259 if (MacroName == "NULL") 13260 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 13261 } 13262 13263 // Only warn if the null and context location are in the same macro expansion. 13264 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 13265 return; 13266 13267 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 13268 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 13269 << FixItHint::CreateReplacement(Loc, 13270 S.getFixItZeroLiteralForType(T, Loc)); 13271 } 13272 13273 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13274 ObjCArrayLiteral *ArrayLiteral); 13275 13276 static void 13277 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 13278 ObjCDictionaryLiteral *DictionaryLiteral); 13279 13280 /// Check a single element within a collection literal against the 13281 /// target element type. 13282 static void checkObjCCollectionLiteralElement(Sema &S, 13283 QualType TargetElementType, 13284 Expr *Element, 13285 unsigned ElementKind) { 13286 // Skip a bitcast to 'id' or qualified 'id'. 13287 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 13288 if (ICE->getCastKind() == CK_BitCast && 13289 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 13290 Element = ICE->getSubExpr(); 13291 } 13292 13293 QualType ElementType = Element->getType(); 13294 ExprResult ElementResult(Element); 13295 if (ElementType->getAs<ObjCObjectPointerType>() && 13296 S.CheckSingleAssignmentConstraints(TargetElementType, 13297 ElementResult, 13298 false, false) 13299 != Sema::Compatible) { 13300 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 13301 << ElementType << ElementKind << TargetElementType 13302 << Element->getSourceRange(); 13303 } 13304 13305 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 13306 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 13307 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 13308 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 13309 } 13310 13311 /// Check an Objective-C array literal being converted to the given 13312 /// target type. 13313 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13314 ObjCArrayLiteral *ArrayLiteral) { 13315 if (!S.NSArrayDecl) 13316 return; 13317 13318 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13319 if (!TargetObjCPtr) 13320 return; 13321 13322 if (TargetObjCPtr->isUnspecialized() || 13323 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13324 != S.NSArrayDecl->getCanonicalDecl()) 13325 return; 13326 13327 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13328 if (TypeArgs.size() != 1) 13329 return; 13330 13331 QualType TargetElementType = TypeArgs[0]; 13332 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 13333 checkObjCCollectionLiteralElement(S, TargetElementType, 13334 ArrayLiteral->getElement(I), 13335 0); 13336 } 13337 } 13338 13339 /// Check an Objective-C dictionary literal being converted to the given 13340 /// target type. 
13341 static void 13342 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 13343 ObjCDictionaryLiteral *DictionaryLiteral) { 13344 if (!S.NSDictionaryDecl) 13345 return; 13346 13347 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13348 if (!TargetObjCPtr) 13349 return; 13350 13351 if (TargetObjCPtr->isUnspecialized() || 13352 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13353 != S.NSDictionaryDecl->getCanonicalDecl()) 13354 return; 13355 13356 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13357 if (TypeArgs.size() != 2) 13358 return; 13359 13360 QualType TargetKeyType = TypeArgs[0]; 13361 QualType TargetObjectType = TypeArgs[1]; 13362 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 13363 auto Element = DictionaryLiteral->getKeyValueElement(I); 13364 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 13365 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 13366 } 13367 } 13368 13369 // Helper function to filter out cases for constant width constant conversion. 13370 // Don't warn on char array initialization or for non-decimal values. 13371 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 13372 SourceLocation CC) { 13373 // If initializing from a constant, and the constant starts with '0', 13374 // then it is a binary, octal, or hexadecimal. Allow these constants 13375 // to fill all the bits, even if there is a sign change. 13376 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 13377 const char FirstLiteralCharacter = 13378 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 13379 if (FirstLiteralCharacter == '0') 13380 return false; 13381 } 13382 13383 // If the CC location points to a '{', and the type is char, then 13384 // assume it is an array initialization. 13385 if (CC.isValid() && T->isCharType()) { 13386 const char FirstContextCharacter = 13387 S.getSourceManager().getCharacterData(CC)[0]; 13388 if (FirstContextCharacter == '{') 13389 return false; 13390 } 13391 13392 return true; 13393 } 13394 13395 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 13396 const auto *IL = dyn_cast<IntegerLiteral>(E); 13397 if (!IL) { 13398 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 13399 if (UO->getOpcode() == UO_Minus) 13400 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 13401 } 13402 } 13403 13404 return IL; 13405 } 13406 13407 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 13408 E = E->IgnoreParenImpCasts(); 13409 SourceLocation ExprLoc = E->getExprLoc(); 13410 13411 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 13412 BinaryOperator::Opcode Opc = BO->getOpcode(); 13413 Expr::EvalResult Result; 13414 // Do not diagnose unsigned shifts.
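// For example, in a boolean context '1 << 4' is always true and '0 << n' is always false, so both are worth flagging as constant results.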
13415 if (Opc == BO_Shl) { 13416 const auto *LHS = getIntegerLiteral(BO->getLHS()); 13417 const auto *RHS = getIntegerLiteral(BO->getRHS()); 13418 if (LHS && LHS->getValue() == 0) 13419 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 13420 else if (!E->isValueDependent() && LHS && RHS && 13421 RHS->getValue().isNonNegative() && 13422 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 13423 S.Diag(ExprLoc, diag::warn_left_shift_always) 13424 << (Result.Val.getInt() != 0); 13425 else if (E->getType()->isSignedIntegerType()) 13426 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 13427 } 13428 } 13429 13430 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 13431 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 13432 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 13433 if (!LHS || !RHS) 13434 return; 13435 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 13436 (RHS->getValue() == 0 || RHS->getValue() == 1)) 13437 // Do not diagnose common idioms. 13438 return; 13439 if (LHS->getValue() != 0 && RHS->getValue() != 0) 13440 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 13441 } 13442 } 13443 13444 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 13445 SourceLocation CC, 13446 bool *ICContext = nullptr, 13447 bool IsListInit = false) { 13448 if (E->isTypeDependent() || E->isValueDependent()) return; 13449 13450 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 13451 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 13452 if (Source == Target) return; 13453 if (Target->isDependentType()) return; 13454 13455 // If the conversion context location is invalid, don't complain. We also 13456 // don't want to emit a warning if the issue occurs from the expansion of 13457 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 13458 // delay this check as long as possible. Once we detect we are in that 13459 // scenario, we just return. 13460 if (CC.isInvalid()) 13461 return; 13462 13463 if (Source->isAtomicType()) 13464 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 13465 13466 // Diagnose implicit casts to bool. 13467 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 13468 if (isa<StringLiteral>(E)) 13469 // Warn on string literal to bool. Checks for string literals in logical 13470 // AND expressions, for instance, assert(0 && "error here"), are 13471 // prevented by a check in AnalyzeImplicitConversions(). 13472 return DiagnoseImpCast(S, E, T, CC, 13473 diag::warn_impcast_string_literal_to_bool); 13474 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 13475 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 13476 // This covers the literal expressions that evaluate to Objective-C 13477 // objects. 13478 return DiagnoseImpCast(S, E, T, CC, 13479 diag::warn_impcast_objective_c_literal_to_bool); 13480 } 13481 if (Source->isPointerType() || Source->canDecayToPointerType()) { 13482 // Warn on pointer to bool conversion that is always true. 13483 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 13484 SourceRange(CC)); 13485 } 13486 } 13487 13488 // If we're converting a constant to an ObjC BOOL on a platform where BOOL 13489 // is a typedef for signed char (macOS), then that constant value has to be 1 13490 // or 0.
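// For example, 'BOOL b = 2;' stores a value that is neither YES (1) nor NO (0) into the signed char, so we emit the warning with a ternary fix-it suggesting '2 ? YES : NO'.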
13491 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13492 Expr::EvalResult Result; 13493 if (E->EvaluateAsInt(Result, S.getASTContext(), 13494 Expr::SE_AllowSideEffects)) { 13495 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13496 adornObjCBoolConversionDiagWithTernaryFixit( 13497 S, E, 13498 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13499 << toString(Result.Val.getInt(), 10)); 13500 } 13501 return; 13502 } 13503 } 13504 13505 // Check implicit casts from Objective-C collection literals to specialized 13506 // collection types, e.g., NSArray<NSString *> *. 13507 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13508 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13509 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13510 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13511 13512 // Strip vector types. 13513 if (isa<VectorType>(Source)) { 13514 if (Target->isVLSTBuiltinType() && 13515 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13516 QualType(Source, 0)) || 13517 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13518 QualType(Source, 0)))) 13519 return; 13520 13521 if (!isa<VectorType>(Target)) { 13522 if (S.SourceMgr.isInSystemMacro(CC)) 13523 return; 13524 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13525 } 13526 13527 // If the vector cast is cast between two vectors of the same size, it is 13528 // a bitcast, not a conversion. 13529 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13530 return; 13531 13532 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13533 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13534 } 13535 if (auto VecTy = dyn_cast<VectorType>(Target)) 13536 Target = VecTy->getElementType().getTypePtr(); 13537 13538 // Strip complex types. 13539 if (isa<ComplexType>(Source)) { 13540 if (!isa<ComplexType>(Target)) { 13541 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13542 return; 13543 13544 return DiagnoseImpCast(S, E, T, CC, 13545 S.getLangOpts().CPlusPlus 13546 ? diag::err_impcast_complex_scalar 13547 : diag::warn_impcast_complex_scalar); 13548 } 13549 13550 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13551 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13552 } 13553 13554 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13555 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13556 13557 // Strip SVE vector types 13558 if (SourceBT && SourceBT->isVLSTBuiltinType()) { 13559 // Need the original target type for vector type checks 13560 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr(); 13561 // Handle conversion from scalable to fixed when msve-vector-bits is 13562 // specified 13563 if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0), 13564 QualType(Source, 0)) || 13565 S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0), 13566 QualType(Source, 0))) 13567 return; 13568 13569 // If the vector cast is cast between two vectors of the same size, it is 13570 // a bitcast, not a conversion. 13571 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13572 return; 13573 13574 Source = SourceBT->getSveEltType(S.Context).getTypePtr(); 13575 } 13576 13577 if (TargetBT && TargetBT->isVLSTBuiltinType()) 13578 Target = TargetBT->getSveEltType(S.Context).getTypePtr(); 13579 13580 // If the source is floating point... 
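// For instance:
//   float f = 0.1;        // double constant that changes value as a float
//   double d = f + 1.0f;  // float result silently promoted to double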
13581 if (SourceBT && SourceBT->isFloatingPoint()) { 13582 // ...and the target is floating point... 13583 if (TargetBT && TargetBT->isFloatingPoint()) { 13584 // ...then warn if we're dropping FP rank. 13585 13586 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13587 QualType(SourceBT, 0), QualType(TargetBT, 0)); 13588 if (Order > 0) { 13589 // Don't warn about float constants that are precisely 13590 // representable in the target type. 13591 Expr::EvalResult result; 13592 if (E->EvaluateAsRValue(result, S.Context)) { 13593 // Value might be a float, a float vector, or a float complex. 13594 if (IsSameFloatAfterCast(result.Val, 13595 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 13596 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 13597 return; 13598 } 13599 13600 if (S.SourceMgr.isInSystemMacro(CC)) 13601 return; 13602 13603 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 13604 } 13605 // ... or possibly if we're increasing rank, too 13606 else if (Order < 0) { 13607 if (S.SourceMgr.isInSystemMacro(CC)) 13608 return; 13609 13610 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 13611 } 13612 return; 13613 } 13614 13615 // If the target is integral, always warn. 13616 if (TargetBT && TargetBT->isInteger()) { 13617 if (S.SourceMgr.isInSystemMacro(CC)) 13618 return; 13619 13620 DiagnoseFloatingImpCast(S, E, T, CC); 13621 } 13622 13623 // Detect the case where a call result is converted from floating-point to 13624 // to bool, and the final argument to the call is converted from bool, to 13625 // discover this typo: 13626 // 13627 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 13628 // 13629 // FIXME: This is an incredibly special case; is there some more general 13630 // way to detect this class of misplaced-parentheses bug? 13631 if (Target->isBooleanType() && isa<CallExpr>(E)) { 13632 // Check last argument of function call to see if it is an 13633 // implicit cast from a type matching the type the result 13634 // is being cast to. 13635 CallExpr *CEx = cast<CallExpr>(E); 13636 if (unsigned NumArgs = CEx->getNumArgs()) { 13637 Expr *LastA = CEx->getArg(NumArgs - 1); 13638 Expr *InnerE = LastA->IgnoreParenImpCasts(); 13639 if (isa<ImplicitCastExpr>(LastA) && 13640 InnerE->getType()->isBooleanType()) { 13641 // Warn on this floating-point to bool conversion 13642 DiagnoseImpCast(S, E, T, CC, 13643 diag::warn_impcast_floating_point_to_bool); 13644 } 13645 } 13646 } 13647 return; 13648 } 13649 13650 // Valid casts involving fixed point types should be accounted for here. 
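// For instance, with the Embedded-C fixed-point extension enabled
// (illustrative only):
//   short _Fract sf = 2.5k;  // 2.5 lies outside short _Fract's [-1, 1) range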
13651 if (Source->isFixedPointType()) { 13652 if (Target->isUnsaturatedFixedPointType()) { 13653 Expr::EvalResult Result; 13654 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 13655 S.isConstantEvaluated())) { 13656 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 13657 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 13658 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 13659 if (Value > MaxVal || Value < MinVal) { 13660 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13661 S.PDiag(diag::warn_impcast_fixed_point_range) 13662 << Value.toString() << T 13663 << E->getSourceRange() 13664 << clang::SourceRange(CC)); 13665 return; 13666 } 13667 } 13668 } else if (Target->isIntegerType()) { 13669 Expr::EvalResult Result; 13670 if (!S.isConstantEvaluated() && 13671 E->EvaluateAsFixedPoint(Result, S.Context, 13672 Expr::SE_AllowSideEffects)) { 13673 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 13674 13675 bool Overflowed; 13676 llvm::APSInt IntResult = FXResult.convertToInt( 13677 S.Context.getIntWidth(T), 13678 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 13679 13680 if (Overflowed) { 13681 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13682 S.PDiag(diag::warn_impcast_fixed_point_range) 13683 << FXResult.toString() << T 13684 << E->getSourceRange() 13685 << clang::SourceRange(CC)); 13686 return; 13687 } 13688 } 13689 } 13690 } else if (Target->isUnsaturatedFixedPointType()) { 13691 if (Source->isIntegerType()) { 13692 Expr::EvalResult Result; 13693 if (!S.isConstantEvaluated() && 13694 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13695 llvm::APSInt Value = Result.Val.getInt(); 13696 13697 bool Overflowed; 13698 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13699 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13700 13701 if (Overflowed) { 13702 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13703 S.PDiag(diag::warn_impcast_fixed_point_range) 13704 << toString(Value, /*Radix=*/10) << T 13705 << E->getSourceRange() 13706 << clang::SourceRange(CC)); 13707 return; 13708 } 13709 } 13710 } 13711 } 13712 13713 // If we are casting an integer type to a floating point type without 13714 // initialization-list syntax, we might lose accuracy if the floating 13715 // point type has a narrower significand than the integer type. 13716 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13717 TargetBT->isFloatingType() && !IsListInit) { 13718 // Determine the number of precision bits in the source integer type. 13719 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13720 /*Approximate*/ true); 13721 unsigned int SourcePrecision = SourceRange.Width; 13722 13723 // Determine the number of precision bits in the 13724 // target floating point type. 13725 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13726 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13727 13728 if (SourcePrecision > 0 && TargetPrecision > 0 && 13729 SourcePrecision > TargetPrecision) { 13730 13731 if (Optional<llvm::APSInt> SourceInt = 13732 E->getIntegerConstantExpr(S.Context)) { 13733 // If the source integer is a constant, convert it to the target 13734 // floating point type. Issue a warning if the value changes 13735 // during the whole conversion. 
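// For example, "float f = 16777217;" is diagnosed here: the nearest
// float to 2^24 + 1 is 16777216, so the constant changes value.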
13736 llvm::APFloat TargetFloatValue( 13737 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13738 llvm::APFloat::opStatus ConversionStatus = 13739 TargetFloatValue.convertFromAPInt( 13740 *SourceInt, SourceBT->isSignedInteger(), 13741 llvm::APFloat::rmNearestTiesToEven); 13742 13743 if (ConversionStatus != llvm::APFloat::opOK) { 13744 SmallString<32> PrettySourceValue; 13745 SourceInt->toString(PrettySourceValue, 10); 13746 SmallString<32> PrettyTargetValue; 13747 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13748 13749 S.DiagRuntimeBehavior( 13750 E->getExprLoc(), E, 13751 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13752 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13753 << E->getSourceRange() << clang::SourceRange(CC)); 13754 } 13755 } else { 13756 // Otherwise, the implicit conversion may lose precision. 13757 DiagnoseImpCast(S, E, T, CC, 13758 diag::warn_impcast_integer_float_precision); 13759 } 13760 } 13761 } 13762 13763 DiagnoseNullConversion(S, E, T, CC); 13764 13765 S.DiscardMisalignedMemberAddress(Target, E); 13766 13767 if (Target->isBooleanType()) 13768 DiagnoseIntInBoolContext(S, E); 13769 13770 if (!Source->isIntegerType() || !Target->isIntegerType()) 13771 return; 13772 13773 // TODO: remove this early return once the false positives for constant->bool 13774 // in templates, macros, etc, are reduced or removed. 13775 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13776 return; 13777 13778 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13779 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13780 return adornObjCBoolConversionDiagWithTernaryFixit( 13781 S, E, 13782 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13783 << E->getType()); 13784 } 13785 13786 IntRange SourceTypeRange = 13787 IntRange::forTargetOfCanonicalType(S.Context, Source); 13788 IntRange LikelySourceRange = 13789 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13790 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13791 13792 if (LikelySourceRange.Width > TargetRange.Width) { 13793 // If the source is a constant, use a default-on diagnostic. 13794 // TODO: this should happen for bitfield stores, too. 13795 Expr::EvalResult Result; 13796 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13797 S.isConstantEvaluated())) { 13798 llvm::APSInt Value(32); 13799 Value = Result.Val.getInt(); 13800 13801 if (S.SourceMgr.isInSystemMacro(CC)) 13802 return; 13803 13804 std::string PrettySourceValue = toString(Value, 10); 13805 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13806 13807 S.DiagRuntimeBehavior( 13808 E->getExprLoc(), E, 13809 S.PDiag(diag::warn_impcast_integer_precision_constant) 13810 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13811 << E->getSourceRange() << SourceRange(CC)); 13812 return; 13813 } 13814 13815 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
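// For instance, assigning a "long long" value to an "int" is reported
// under -Wshorten-64-to-32 below, while other narrowings fall under the
// general -Wconversion precision diagnostic.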
13816 if (S.SourceMgr.isInSystemMacro(CC))
13817 return;
13818
13819 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
13820 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
13821 /* pruneControlFlow */ true);
13822 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
13823 }
13824
13825 if (TargetRange.Width > SourceTypeRange.Width) {
13826 if (auto *UO = dyn_cast<UnaryOperator>(E))
13827 if (UO->getOpcode() == UO_Minus)
13828 if (Source->isUnsignedIntegerType()) {
13829 if (Target->isUnsignedIntegerType())
13830 return DiagnoseImpCast(S, E, T, CC,
13831 diag::warn_impcast_high_order_zero_bits);
13832 if (Target->isSignedIntegerType())
13833 return DiagnoseImpCast(S, E, T, CC,
13834 diag::warn_impcast_nonnegative_result);
13835 }
13836 }
13837
13838 if (TargetRange.Width == LikelySourceRange.Width &&
13839 !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
13840 Source->isSignedIntegerType()) {
13841 // This is a signed-to-signed conversion of equal widths: warn if the
13842 // positive source value needs the full width of the target type, since
13843 // storing it will flip it to a negative value.
13844
13845 Expr::EvalResult Result;
13846 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
13847 !S.SourceMgr.isInSystemMacro(CC)) {
13848 llvm::APSInt Value = Result.Val.getInt();
13849 if (isSameWidthConstantConversion(S, E, T, CC)) {
13850 std::string PrettySourceValue = toString(Value, 10);
13851 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
13852
13853 S.DiagRuntimeBehavior(
13854 E->getExprLoc(), E,
13855 S.PDiag(diag::warn_impcast_integer_precision_constant)
13856 << PrettySourceValue << PrettyTargetValue << E->getType() << T
13857 << E->getSourceRange() << SourceRange(CC));
13858 return;
13859 }
13860 }
13861
13862 // Fall through for non-constants to give a sign conversion warning.
13863 }
13864
13865 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) &&
13866 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
13867 (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
13868 LikelySourceRange.Width == TargetRange.Width))) {
13869 if (S.SourceMgr.isInSystemMacro(CC))
13870 return;
13871
13872 unsigned DiagID = diag::warn_impcast_integer_sign;
13873
13874 // Traditionally, gcc has warned about this under -Wsign-compare.
13875 // We also want to warn about it in -Wconversion.
13876 // So if -Wconversion is off, use a completely identical diagnostic
13877 // in the sign-compare group.
13878 // The conditional-checking code will then decide which warning applies.
13879 if (ICContext) {
13880 DiagID = diag::warn_impcast_integer_sign_conditional;
13881 *ICContext = true;
13882 }
13883
13884 return DiagnoseImpCast(S, E, T, CC, DiagID);
13885 }
13886
13887 // Diagnose conversions between different enumeration types.
13888 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
13889 // type, to give us better diagnostics.
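// Illustrative C example of the enum-to-enum case handled next:
//   enum Fruit { Apple };  enum Color { Red };
//   enum Color c = Apple;  /* implicit conversion between different enums */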
13890 QualType SourceType = E->getType(); 13891 if (!S.getLangOpts().CPlusPlus) { 13892 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13893 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 13894 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 13895 SourceType = S.Context.getTypeDeclType(Enum); 13896 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 13897 } 13898 } 13899 13900 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 13901 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 13902 if (SourceEnum->getDecl()->hasNameForLinkage() && 13903 TargetEnum->getDecl()->hasNameForLinkage() && 13904 SourceEnum != TargetEnum) { 13905 if (S.SourceMgr.isInSystemMacro(CC)) 13906 return; 13907 13908 return DiagnoseImpCast(S, E, SourceType, T, CC, 13909 diag::warn_impcast_different_enum_types); 13910 } 13911 } 13912 13913 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13914 SourceLocation CC, QualType T); 13915 13916 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13917 SourceLocation CC, bool &ICContext) { 13918 E = E->IgnoreParenImpCasts(); 13919 13920 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13921 return CheckConditionalOperator(S, CO, CC, T); 13922 13923 AnalyzeImplicitConversions(S, E, CC); 13924 if (E->getType() != T) 13925 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13926 } 13927 13928 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13929 SourceLocation CC, QualType T) { 13930 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13931 13932 Expr *TrueExpr = E->getTrueExpr(); 13933 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13934 TrueExpr = BCO->getCommon(); 13935 13936 bool Suspicious = false; 13937 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13938 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13939 13940 if (T->isBooleanType()) 13941 DiagnoseIntInBoolContext(S, E); 13942 13943 // If -Wconversion would have warned about either of the candidates 13944 // for a signedness conversion to the context type... 13945 if (!Suspicious) return; 13946 13947 // ...but it's currently ignored... 13948 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13949 return; 13950 13951 // ...then check whether it would have warned about either of the 13952 // candidates for a signedness conversion to the condition type. 13953 if (E->getType() == T) return; 13954 13955 Suspicious = false; 13956 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13957 E->getType(), CC, &Suspicious); 13958 if (!Suspicious) 13959 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13960 E->getType(), CC, &Suspicious); 13961 } 13962 13963 /// Check conversion of given expression to boolean. 13964 /// Input argument E is a logical expression. 13965 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 13966 if (S.getLangOpts().Bool) 13967 return; 13968 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 13969 return; 13970 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 13971 } 13972 13973 namespace { 13974 struct AnalyzeImplicitConversionsWorkItem { 13975 Expr *E; 13976 SourceLocation CC; 13977 bool IsListInit; 13978 }; 13979 } 13980 13981 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 13982 /// that should be visited are added to WorkList. 
13983 static void AnalyzeImplicitConversions( 13984 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 13985 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 13986 Expr *OrigE = Item.E; 13987 SourceLocation CC = Item.CC; 13988 13989 QualType T = OrigE->getType(); 13990 Expr *E = OrigE->IgnoreParenImpCasts(); 13991 13992 // Propagate whether we are in a C++ list initialization expression. 13993 // If so, we do not issue warnings for implicit int-float conversion 13994 // precision loss, because C++11 narrowing already handles it. 13995 bool IsListInit = Item.IsListInit || 13996 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 13997 13998 if (E->isTypeDependent() || E->isValueDependent()) 13999 return; 14000 14001 Expr *SourceExpr = E; 14002 // Examine, but don't traverse into the source expression of an 14003 // OpaqueValueExpr, since it may have multiple parents and we don't want to 14004 // emit duplicate diagnostics. Its fine to examine the form or attempt to 14005 // evaluate it in the context of checking the specific conversion to T though. 14006 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 14007 if (auto *Src = OVE->getSourceExpr()) 14008 SourceExpr = Src; 14009 14010 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 14011 if (UO->getOpcode() == UO_Not && 14012 UO->getSubExpr()->isKnownToHaveBooleanValue()) 14013 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 14014 << OrigE->getSourceRange() << T->isBooleanType() 14015 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 14016 14017 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr)) 14018 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) && 14019 BO->getLHS()->isKnownToHaveBooleanValue() && 14020 BO->getRHS()->isKnownToHaveBooleanValue() && 14021 BO->getLHS()->HasSideEffects(S.Context) && 14022 BO->getRHS()->HasSideEffects(S.Context)) { 14023 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical) 14024 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange() 14025 << FixItHint::CreateReplacement( 14026 BO->getOperatorLoc(), 14027 (BO->getOpcode() == BO_And ? "&&" : "||")); 14028 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int); 14029 } 14030 14031 // For conditional operators, we analyze the arguments as if they 14032 // were being fed directly into the output. 14033 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 14034 CheckConditionalOperator(S, CO, CC, T); 14035 return; 14036 } 14037 14038 // Check implicit argument conversions for function calls. 14039 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 14040 CheckImplicitArgumentConversions(S, Call, CC); 14041 14042 // Go ahead and check any implicit conversions we might have skipped. 14043 // The non-canonical typecheck is just an optimization; 14044 // CheckImplicitConversion will filter out dead implicit conversions. 14045 if (SourceExpr->getType() != T) 14046 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 14047 14048 // Now continue drilling into this expression. 14049 14050 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 14051 // The bound subexpressions in a PseudoObjectExpr are not reachable 14052 // as transitive children. 14053 // FIXME: Use a more uniform representation for this. 14054 for (auto *SE : POE->semantics()) 14055 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 14056 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 14057 } 14058 14059 // Skip past explicit casts. 
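// For instance, given "_Atomic int ai;", the explicit cast "(int)ai" still
// performs an implicitly sequentially-consistent load, which the branch
// below points out.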
14060 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 14061 E = CE->getSubExpr()->IgnoreParenImpCasts(); 14062 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 14063 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 14064 WorkList.push_back({E, CC, IsListInit}); 14065 return; 14066 } 14067 14068 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14069 // Do a somewhat different check with comparison operators. 14070 if (BO->isComparisonOp()) 14071 return AnalyzeComparison(S, BO); 14072 14073 // And with simple assignments. 14074 if (BO->getOpcode() == BO_Assign) 14075 return AnalyzeAssignment(S, BO); 14076 // And with compound assignments. 14077 if (BO->isAssignmentOp()) 14078 return AnalyzeCompoundAssignment(S, BO); 14079 } 14080 14081 // These break the otherwise-useful invariant below. Fortunately, 14082 // we don't really need to recurse into them, because any internal 14083 // expressions should have been analyzed already when they were 14084 // built into statements. 14085 if (isa<StmtExpr>(E)) return; 14086 14087 // Don't descend into unevaluated contexts. 14088 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 14089 14090 // Now just recurse over the expression's children. 14091 CC = E->getExprLoc(); 14092 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 14093 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 14094 for (Stmt *SubStmt : E->children()) { 14095 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 14096 if (!ChildExpr) 14097 continue; 14098 14099 if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E)) 14100 if (ChildExpr == CSE->getOperand()) 14101 // Do not recurse over a CoroutineSuspendExpr's operand. 14102 // The operand is also a subexpression of getCommonExpr(), and 14103 // recursing into it directly would produce duplicate diagnostics. 14104 continue; 14105 14106 if (IsLogicalAndOperator && 14107 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 14108 // Ignore checking string literals that are in logical and operators. 14109 // This is a common pattern for asserts. 14110 continue; 14111 WorkList.push_back({ChildExpr, CC, IsListInit}); 14112 } 14113 14114 if (BO && BO->isLogicalOp()) { 14115 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 14116 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14117 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14118 14119 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 14120 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14121 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14122 } 14123 14124 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 14125 if (U->getOpcode() == UO_LNot) { 14126 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 14127 } else if (U->getOpcode() != UO_AddrOf) { 14128 if (U->getSubExpr()->getType()->isAtomicType()) 14129 S.Diag(U->getSubExpr()->getBeginLoc(), 14130 diag::warn_atomic_implicit_seq_cst); 14131 } 14132 } 14133 } 14134 14135 /// AnalyzeImplicitConversions - Find and report any interesting 14136 /// implicit conversions in the given expression. There are a couple 14137 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
14138 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
14139 bool IsListInit/*= false*/) {
14140 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
14141 WorkList.push_back({OrigE, CC, IsListInit});
14142 while (!WorkList.empty())
14143 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
14144 }
14145
14146 /// Require an integer argument; for integers, also diagnose implicit conversions to \p IntT.
14147 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
14148 // Taking into account implicit conversions,
14149 // allow any integer.
14150 if (!E->getType()->isIntegerType()) {
14151 S.Diag(E->getBeginLoc(),
14152 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
14153 return true;
14154 }
14155 // Potentially emit standard warnings for implicit conversions if enabled
14156 // using -Wconversion.
14157 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
14158 return false;
14159 }
14160
14161 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
14162 // Returns true when emitting a warning about taking the address of a reference.
14163 static bool CheckForReference(Sema &SemaRef, const Expr *E,
14164 const PartialDiagnostic &PD) {
14165 E = E->IgnoreParenImpCasts();
14166
14167 const FunctionDecl *FD = nullptr;
14168
14169 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
14170 if (!DRE->getDecl()->getType()->isReferenceType())
14171 return false;
14172 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
14173 if (!M->getMemberDecl()->getType()->isReferenceType())
14174 return false;
14175 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
14176 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
14177 return false;
14178 FD = Call->getDirectCallee();
14179 } else {
14180 return false;
14181 }
14182
14183 SemaRef.Diag(E->getExprLoc(), PD);
14184
14185 // If possible, point to location of function.
14186 if (FD) {
14187 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
14188 }
14189
14190 return true;
14191 }
14192
14193 // Returns true if the SourceLocation is expanded from any macro body.
14194 // Returns false if the SourceLocation is invalid, is not in a macro
14195 // expansion, or is expanded from a top-level macro argument.
14196 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
14197 if (Loc.isInvalid())
14198 return false;
14199
14200 while (Loc.isMacroID()) {
14201 if (SM.isMacroBodyExpansion(Loc))
14202 return true;
14203 Loc = SM.getImmediateMacroCallerLoc(Loc);
14204 }
14205
14206 return false;
14207 }
14208
14209 /// Diagnose pointers that are always non-null.
14210 /// \param E the expression containing the pointer
14211 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
14212 /// compared to a null pointer
14213 /// \param IsEqual True when the comparison is equal to a null pointer
14214 /// \param Range Extra SourceRange to highlight in the diagnostic
14215 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
14216 Expr::NullPointerConstantKind NullKind,
14217 bool IsEqual, SourceRange Range) {
14218 if (!E)
14219 return;
14220
14221 // Don't warn inside macros.
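// Typical patterns flagged here (illustrative), unless they come from a
// macro body as checked next:
//   int arr[4];     if (arr) { ... }     // array decays; never null
//   void f(int &r)  { if (&r) { ... } }  // address of a reference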
14222 if (E->getExprLoc().isMacroID()) { 14223 const SourceManager &SM = getSourceManager(); 14224 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 14225 IsInAnyMacroBody(SM, Range.getBegin())) 14226 return; 14227 } 14228 E = E->IgnoreImpCasts(); 14229 14230 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 14231 14232 if (isa<CXXThisExpr>(E)) { 14233 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 14234 : diag::warn_this_bool_conversion; 14235 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 14236 return; 14237 } 14238 14239 bool IsAddressOf = false; 14240 14241 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14242 if (UO->getOpcode() != UO_AddrOf) 14243 return; 14244 IsAddressOf = true; 14245 E = UO->getSubExpr(); 14246 } 14247 14248 if (IsAddressOf) { 14249 unsigned DiagID = IsCompare 14250 ? diag::warn_address_of_reference_null_compare 14251 : diag::warn_address_of_reference_bool_conversion; 14252 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 14253 << IsEqual; 14254 if (CheckForReference(*this, E, PD)) { 14255 return; 14256 } 14257 } 14258 14259 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 14260 bool IsParam = isa<NonNullAttr>(NonnullAttr); 14261 std::string Str; 14262 llvm::raw_string_ostream S(Str); 14263 E->printPretty(S, nullptr, getPrintingPolicy()); 14264 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 14265 : diag::warn_cast_nonnull_to_bool; 14266 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 14267 << E->getSourceRange() << Range << IsEqual; 14268 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 14269 }; 14270 14271 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 14272 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 14273 if (auto *Callee = Call->getDirectCallee()) { 14274 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 14275 ComplainAboutNonnullParamOrCall(A); 14276 return; 14277 } 14278 } 14279 } 14280 14281 // Expect to find a single Decl. Skip anything more complicated. 14282 ValueDecl *D = nullptr; 14283 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 14284 D = R->getDecl(); 14285 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 14286 D = M->getMemberDecl(); 14287 } 14288 14289 // Weak Decls can be null. 14290 if (!D || D->isWeak()) 14291 return; 14292 14293 // Check for parameter decl with nonnull attribute 14294 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 14295 if (getCurFunction() && 14296 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 14297 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 14298 ComplainAboutNonnullParamOrCall(A); 14299 return; 14300 } 14301 14302 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 14303 // Skip function template not specialized yet. 
14304 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
14305 return;
14306 auto ParamIter = llvm::find(FD->parameters(), PV);
14307 assert(ParamIter != FD->param_end());
14308 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
14309
14310 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
14311 if (!NonNull->args_size()) {
14312 ComplainAboutNonnullParamOrCall(NonNull);
14313 return;
14314 }
14315
14316 for (const ParamIdx &ArgNo : NonNull->args()) {
14317 if (ArgNo.getASTIndex() == ParamNo) {
14318 ComplainAboutNonnullParamOrCall(NonNull);
14319 return;
14320 }
14321 }
14322 }
14323 }
14324 }
14325 }
14326
14327 QualType T = D->getType();
14328 const bool IsArray = T->isArrayType();
14329 const bool IsFunction = T->isFunctionType();
14330
14331 // Taking the address of a function silences the function warning.
14332 if (IsAddressOf && IsFunction) {
14333 return;
14334 }
14335
14336 // Found nothing.
14337 if (!IsAddressOf && !IsFunction && !IsArray)
14338 return;
14339
14340 // Pretty print the expression for the diagnostic.
14341 std::string Str;
14342 llvm::raw_string_ostream S(Str);
14343 E->printPretty(S, nullptr, getPrintingPolicy());
14344
14345 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
14346 : diag::warn_impcast_pointer_to_bool;
14347 enum {
14348 AddressOf,
14349 FunctionPointer,
14350 ArrayPointer
14351 } DiagType;
14352 if (IsAddressOf)
14353 DiagType = AddressOf;
14354 else if (IsFunction)
14355 DiagType = FunctionPointer;
14356 else if (IsArray)
14357 DiagType = ArrayPointer;
14358 else
14359 llvm_unreachable("Could not determine diagnostic.");
14360 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
14361 << Range << IsEqual;
14362
14363 if (!IsFunction)
14364 return;
14365
14366 // Suggest '&' to silence the function warning.
14367 Diag(E->getExprLoc(), diag::note_function_warning_silence)
14368 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
14369
14370 // Check to see if '()' fixit should be emitted.
14371 QualType ReturnType;
14372 UnresolvedSet<4> NonTemplateOverloads;
14373 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
14374 if (ReturnType.isNull())
14375 return;
14376
14377 if (IsCompare) {
14378 // There are two cases here. If the null is a genuine null pointer
14379 // constant, only suggest the fixit for a pointer return type. If the
14380 // null is a 0, suggest it if the return type is a pointer or an integer type.
14381 if (!ReturnType->isPointerType()) {
14382 if (NullKind == Expr::NPCK_ZeroExpression ||
14383 NullKind == Expr::NPCK_ZeroLiteral) {
14384 if (!ReturnType->isIntegerType())
14385 return;
14386 } else {
14387 return;
14388 }
14389 }
14390 } else { // !IsCompare
14391 // For function to bool, only suggest if the function pointer has bool
14392 // return type.
14393 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
14394 return;
14395 }
14396 Diag(E->getExprLoc(), diag::note_function_to_function_call)
14397 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
14398 }
14399
14400 /// Diagnoses "dangerous" implicit conversions within the given
14401 /// expression (which is a full expression). Implements -Wconversion
14402 /// and -Wsign-compare.
14403 ///
14404 /// \param CC the "context" location of the implicit conversion, i.e.
14405 /// the location of the syntactic entity that requires the implicit
14406 /// conversion
14407 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
14408 // Don't diagnose in unevaluated contexts.
14409 if (isUnevaluatedContext()) 14410 return; 14411 14412 // Don't diagnose for value- or type-dependent expressions. 14413 if (E->isTypeDependent() || E->isValueDependent()) 14414 return; 14415 14416 // Check for array bounds violations in cases where the check isn't triggered 14417 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 14418 // ArraySubscriptExpr is on the RHS of a variable initialization. 14419 CheckArrayAccess(E); 14420 14421 // This is not the right CC for (e.g.) a variable initialization. 14422 AnalyzeImplicitConversions(*this, E, CC); 14423 } 14424 14425 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 14426 /// Input argument E is a logical expression. 14427 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 14428 ::CheckBoolLikeConversion(*this, E, CC); 14429 } 14430 14431 /// Diagnose when expression is an integer constant expression and its evaluation 14432 /// results in integer overflow 14433 void Sema::CheckForIntOverflow (Expr *E) { 14434 // Use a work list to deal with nested struct initializers. 14435 SmallVector<Expr *, 2> Exprs(1, E); 14436 14437 do { 14438 Expr *OriginalE = Exprs.pop_back_val(); 14439 Expr *E = OriginalE->IgnoreParenCasts(); 14440 14441 if (isa<BinaryOperator>(E)) { 14442 E->EvaluateForOverflow(Context); 14443 continue; 14444 } 14445 14446 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 14447 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 14448 else if (isa<ObjCBoxedExpr>(OriginalE)) 14449 E->EvaluateForOverflow(Context); 14450 else if (auto Call = dyn_cast<CallExpr>(E)) 14451 Exprs.append(Call->arg_begin(), Call->arg_end()); 14452 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 14453 Exprs.append(Message->arg_begin(), Message->arg_end()); 14454 } while (!Exprs.empty()); 14455 } 14456 14457 namespace { 14458 14459 /// Visitor for expressions which looks for unsequenced operations on the 14460 /// same object. 14461 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 14462 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 14463 14464 /// A tree of sequenced regions within an expression. Two regions are 14465 /// unsequenced if one is an ancestor or a descendent of the other. When we 14466 /// finish processing an expression with sequencing, such as a comma 14467 /// expression, we fold its tree nodes into its parent, since they are 14468 /// unsequenced with respect to nodes we will visit later. 14469 class SequenceTree { 14470 struct Value { 14471 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 14472 unsigned Parent : 31; 14473 unsigned Merged : 1; 14474 }; 14475 SmallVector<Value, 8> Values; 14476 14477 public: 14478 /// A region within an expression which may be sequenced with respect 14479 /// to some other region. 14480 class Seq { 14481 friend class SequenceTree; 14482 14483 unsigned Index; 14484 14485 explicit Seq(unsigned N) : Index(N) {} 14486 14487 public: 14488 Seq() : Index(0) {} 14489 }; 14490 14491 SequenceTree() { Values.push_back(Value(0)); } 14492 Seq root() const { return Seq(0); } 14493 14494 /// Create a new sequence of operations, which is an unsequenced 14495 /// subset of \p Parent. This sequence of operations is sequenced with 14496 /// respect to other children of \p Parent. 14497 Seq allocate(Seq Parent) { 14498 Values.push_back(Value(Parent.Index)); 14499 return Seq(Values.size() - 1); 14500 } 14501 14502 /// Merge a sequence of operations into its parent. 
14503 void merge(Seq S) { 14504 Values[S.Index].Merged = true; 14505 } 14506 14507 /// Determine whether two operations are unsequenced. This operation 14508 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 14509 /// should have been merged into its parent as appropriate. 14510 bool isUnsequenced(Seq Cur, Seq Old) { 14511 unsigned C = representative(Cur.Index); 14512 unsigned Target = representative(Old.Index); 14513 while (C >= Target) { 14514 if (C == Target) 14515 return true; 14516 C = Values[C].Parent; 14517 } 14518 return false; 14519 } 14520 14521 private: 14522 /// Pick a representative for a sequence. 14523 unsigned representative(unsigned K) { 14524 if (Values[K].Merged) 14525 // Perform path compression as we go. 14526 return Values[K].Parent = representative(Values[K].Parent); 14527 return K; 14528 } 14529 }; 14530 14531 /// An object for which we can track unsequenced uses. 14532 using Object = const NamedDecl *; 14533 14534 /// Different flavors of object usage which we track. We only track the 14535 /// least-sequenced usage of each kind. 14536 enum UsageKind { 14537 /// A read of an object. Multiple unsequenced reads are OK. 14538 UK_Use, 14539 14540 /// A modification of an object which is sequenced before the value 14541 /// computation of the expression, such as ++n in C++. 14542 UK_ModAsValue, 14543 14544 /// A modification of an object which is not sequenced before the value 14545 /// computation of the expression, such as n++. 14546 UK_ModAsSideEffect, 14547 14548 UK_Count = UK_ModAsSideEffect + 1 14549 }; 14550 14551 /// Bundle together a sequencing region and the expression corresponding 14552 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 14553 struct Usage { 14554 const Expr *UsageExpr; 14555 SequenceTree::Seq Seq; 14556 14557 Usage() : UsageExpr(nullptr) {} 14558 }; 14559 14560 struct UsageInfo { 14561 Usage Uses[UK_Count]; 14562 14563 /// Have we issued a diagnostic for this object already? 14564 bool Diagnosed; 14565 14566 UsageInfo() : Diagnosed(false) {} 14567 }; 14568 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 14569 14570 Sema &SemaRef; 14571 14572 /// Sequenced regions within the expression. 14573 SequenceTree Tree; 14574 14575 /// Declaration modifications and references which we have seen. 14576 UsageInfoMap UsageMap; 14577 14578 /// The region we are currently within. 14579 SequenceTree::Seq Region; 14580 14581 /// Filled in with declarations which were modified as a side-effect 14582 /// (that is, post-increment operations). 14583 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 14584 14585 /// Expressions to check later. We defer checking these to reduce 14586 /// stack usage. 14587 SmallVectorImpl<const Expr *> &WorkList; 14588 14589 /// RAII object wrapping the visitation of a sequenced subexpression of an 14590 /// expression. At the end of this process, the side-effects of the evaluation 14591 /// become sequenced with respect to the value computation of the result, so 14592 /// we downgrade any UK_ModAsSideEffect within the evaluation to 14593 /// UK_ModAsValue. 
14594 struct SequencedSubexpression { 14595 SequencedSubexpression(SequenceChecker &Self) 14596 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 14597 Self.ModAsSideEffect = &ModAsSideEffect; 14598 } 14599 14600 ~SequencedSubexpression() { 14601 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 14602 // Add a new usage with usage kind UK_ModAsValue, and then restore 14603 // the previous usage with UK_ModAsSideEffect (thus clearing it if 14604 // the previous one was empty). 14605 UsageInfo &UI = Self.UsageMap[M.first]; 14606 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 14607 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 14608 SideEffectUsage = M.second; 14609 } 14610 Self.ModAsSideEffect = OldModAsSideEffect; 14611 } 14612 14613 SequenceChecker &Self; 14614 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 14615 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 14616 }; 14617 14618 /// RAII object wrapping the visitation of a subexpression which we might 14619 /// choose to evaluate as a constant. If any subexpression is evaluated and 14620 /// found to be non-constant, this allows us to suppress the evaluation of 14621 /// the outer expression. 14622 class EvaluationTracker { 14623 public: 14624 EvaluationTracker(SequenceChecker &Self) 14625 : Self(Self), Prev(Self.EvalTracker) { 14626 Self.EvalTracker = this; 14627 } 14628 14629 ~EvaluationTracker() { 14630 Self.EvalTracker = Prev; 14631 if (Prev) 14632 Prev->EvalOK &= EvalOK; 14633 } 14634 14635 bool evaluate(const Expr *E, bool &Result) { 14636 if (!EvalOK || E->isValueDependent()) 14637 return false; 14638 EvalOK = E->EvaluateAsBooleanCondition( 14639 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 14640 return EvalOK; 14641 } 14642 14643 private: 14644 SequenceChecker &Self; 14645 EvaluationTracker *Prev; 14646 bool EvalOK = true; 14647 } *EvalTracker = nullptr; 14648 14649 /// Find the object which is produced by the specified expression, 14650 /// if any. 14651 Object getObject(const Expr *E, bool Mod) const { 14652 E = E->IgnoreParenCasts(); 14653 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14654 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 14655 return getObject(UO->getSubExpr(), Mod); 14656 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14657 if (BO->getOpcode() == BO_Comma) 14658 return getObject(BO->getRHS(), Mod); 14659 if (Mod && BO->isAssignmentOp()) 14660 return getObject(BO->getLHS(), Mod); 14661 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 14662 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 14663 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 14664 return ME->getMemberDecl(); 14665 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 14666 // FIXME: If this is a reference, map through to its value. 14667 return DRE->getDecl(); 14668 return nullptr; 14669 } 14670 14671 /// Note that an object \p O was modified or used by an expression 14672 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 14673 /// the object \p O as obtained via the \p UsageMap. 14674 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 14675 // Get the old usage for the given object and usage kind. 
14676 Usage &U = UI.Uses[UK];
14677 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
14678 // If we have a modification as side effect and are in a sequenced
14679 // subexpression, save the old Usage so that we can restore it later
14680 // in SequencedSubexpression::~SequencedSubexpression.
14681 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
14682 ModAsSideEffect->push_back(std::make_pair(O, U));
14683 // Then record the new usage with the current sequencing region.
14684 U.UsageExpr = UsageExpr;
14685 U.Seq = Region;
14686 }
14687 }
14688
14689 /// Check whether a modification or use of an object \p O in an expression
14690 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
14691 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
14692 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
14693 /// usage and false when we are checking for a mod-use unsequenced usage.
14694 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
14695 UsageKind OtherKind, bool IsModMod) {
14696 if (UI.Diagnosed)
14697 return;
14698
14699 const Usage &U = UI.Uses[OtherKind];
14700 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
14701 return;
14702
14703 const Expr *Mod = U.UsageExpr;
14704 const Expr *ModOrUse = UsageExpr;
14705 if (OtherKind == UK_Use)
14706 std::swap(Mod, ModOrUse);
14707
14708 SemaRef.DiagRuntimeBehavior(
14709 Mod->getExprLoc(), {Mod, ModOrUse},
14710 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
14711 : diag::warn_unsequenced_mod_use)
14712 << O << SourceRange(ModOrUse->getExprLoc()));
14713 UI.Diagnosed = true;
14714 }
14715
14716 // A note on note{Pre, Post}{Use, Mod}:
14717 //
14718 // (It helps to follow the algorithm with an expression such as
14719 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
14720 // operations before C++17 and both are well-defined in C++17).
14721 //
14722 // When visiting a node which uses/modifies an object we first call notePreUse
14723 // or notePreMod before visiting its sub-expression(s). At this point the
14724 // children of the current node have not yet been visited and so the eventual
14725 // uses/modifications resulting from the children of the current node have not
14726 // been recorded yet.
14727 //
14728 // We then visit the children of the current node. After that notePostUse or
14729 // notePostMod is called. These will 1) detect an unsequenced modification
14730 // as side effect (as in "k++ + k") and 2) add a new usage with the
14731 // appropriate usage kind.
14732 //
14733 // We also have to be careful because some operations sequence the side-effect
14734 // modifications of one of their operands (for example: || or ,). To account
14735 // for this we wrap the visitation of such a sub-expression (for example: the
14736 // LHS of || or ,) with SequencedSubexpression. SequencedSubexpression is an
14737 // RAII object which records usages which are modifications as side effect,
14738 // and then downgrades them (or, more accurately, restores the previous usage
14739 // which was a modification as side effect) when exiting the scope of the
14740 // sequenced subexpression.
14741
14742 void notePreUse(Object O, const Expr *UseExpr) {
14743 UsageInfo &UI = UsageMap[O];
14744 // Uses conflict with other modifications.
14745 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 14746 } 14747 14748 void notePostUse(Object O, const Expr *UseExpr) { 14749 UsageInfo &UI = UsageMap[O]; 14750 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 14751 /*IsModMod=*/false); 14752 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 14753 } 14754 14755 void notePreMod(Object O, const Expr *ModExpr) { 14756 UsageInfo &UI = UsageMap[O]; 14757 // Modifications conflict with other modifications and with uses. 14758 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 14759 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 14760 } 14761 14762 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 14763 UsageInfo &UI = UsageMap[O]; 14764 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 14765 /*IsModMod=*/true); 14766 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 14767 } 14768 14769 public: 14770 SequenceChecker(Sema &S, const Expr *E, 14771 SmallVectorImpl<const Expr *> &WorkList) 14772 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 14773 Visit(E); 14774 // Silence a -Wunused-private-field since WorkList is now unused. 14775 // TODO: Evaluate if it can be used, and if not remove it. 14776 (void)this->WorkList; 14777 } 14778 14779 void VisitStmt(const Stmt *S) { 14780 // Skip all statements which aren't expressions for now. 14781 } 14782 14783 void VisitExpr(const Expr *E) { 14784 // By default, just recurse to evaluated subexpressions. 14785 Base::VisitStmt(E); 14786 } 14787 14788 void VisitCastExpr(const CastExpr *E) { 14789 Object O = Object(); 14790 if (E->getCastKind() == CK_LValueToRValue) 14791 O = getObject(E->getSubExpr(), false); 14792 14793 if (O) 14794 notePreUse(O, E); 14795 VisitExpr(E); 14796 if (O) 14797 notePostUse(O, E); 14798 } 14799 14800 void VisitSequencedExpressions(const Expr *SequencedBefore, 14801 const Expr *SequencedAfter) { 14802 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 14803 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 14804 SequenceTree::Seq OldRegion = Region; 14805 14806 { 14807 SequencedSubexpression SeqBefore(*this); 14808 Region = BeforeRegion; 14809 Visit(SequencedBefore); 14810 } 14811 14812 Region = AfterRegion; 14813 Visit(SequencedAfter); 14814 14815 Region = OldRegion; 14816 14817 Tree.merge(BeforeRegion); 14818 Tree.merge(AfterRegion); 14819 } 14820 14821 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 14822 // C++17 [expr.sub]p1: 14823 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 14824 // expression E1 is sequenced before the expression E2. 14825 if (SemaRef.getLangOpts().CPlusPlus17) 14826 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 14827 else { 14828 Visit(ASE->getLHS()); 14829 Visit(ASE->getRHS()); 14830 } 14831 } 14832 14833 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14834 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14835 void VisitBinPtrMem(const BinaryOperator *BO) { 14836 // C++17 [expr.mptr.oper]p4: 14837 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 14838 // the expression E1 is sequenced before the expression E2. 
14839 if (SemaRef.getLangOpts().CPlusPlus17) 14840 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14841 else { 14842 Visit(BO->getLHS()); 14843 Visit(BO->getRHS()); 14844 } 14845 } 14846 14847 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14848 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14849 void VisitBinShlShr(const BinaryOperator *BO) { 14850 // C++17 [expr.shift]p4: 14851 // The expression E1 is sequenced before the expression E2. 14852 if (SemaRef.getLangOpts().CPlusPlus17) 14853 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14854 else { 14855 Visit(BO->getLHS()); 14856 Visit(BO->getRHS()); 14857 } 14858 } 14859 14860 void VisitBinComma(const BinaryOperator *BO) { 14861 // C++11 [expr.comma]p1: 14862 // Every value computation and side effect associated with the left 14863 // expression is sequenced before every value computation and side 14864 // effect associated with the right expression. 14865 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14866 } 14867 14868 void VisitBinAssign(const BinaryOperator *BO) { 14869 SequenceTree::Seq RHSRegion; 14870 SequenceTree::Seq LHSRegion; 14871 if (SemaRef.getLangOpts().CPlusPlus17) { 14872 RHSRegion = Tree.allocate(Region); 14873 LHSRegion = Tree.allocate(Region); 14874 } else { 14875 RHSRegion = Region; 14876 LHSRegion = Region; 14877 } 14878 SequenceTree::Seq OldRegion = Region; 14879 14880 // C++11 [expr.ass]p1: 14881 // [...] the assignment is sequenced after the value computation 14882 // of the right and left operands, [...] 14883 // 14884 // so check it before inspecting the operands and update the 14885 // map afterwards. 14886 Object O = getObject(BO->getLHS(), /*Mod=*/true); 14887 if (O) 14888 notePreMod(O, BO); 14889 14890 if (SemaRef.getLangOpts().CPlusPlus17) { 14891 // C++17 [expr.ass]p1: 14892 // [...] The right operand is sequenced before the left operand. [...] 14893 { 14894 SequencedSubexpression SeqBefore(*this); 14895 Region = RHSRegion; 14896 Visit(BO->getRHS()); 14897 } 14898 14899 Region = LHSRegion; 14900 Visit(BO->getLHS()); 14901 14902 if (O && isa<CompoundAssignOperator>(BO)) 14903 notePostUse(O, BO); 14904 14905 } else { 14906 // C++11 does not specify any sequencing between the LHS and RHS. 14907 Region = LHSRegion; 14908 Visit(BO->getLHS()); 14909 14910 if (O && isa<CompoundAssignOperator>(BO)) 14911 notePostUse(O, BO); 14912 14913 Region = RHSRegion; 14914 Visit(BO->getRHS()); 14915 } 14916 14917 // C++11 [expr.ass]p1: 14918 // the assignment is sequenced [...] before the value computation of the 14919 // assignment expression. 14920 // C11 6.5.16/3 has no such rule. 14921 Region = OldRegion; 14922 if (O) 14923 notePostMod(O, BO, 14924 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 14925 : UK_ModAsSideEffect); 14926 if (SemaRef.getLangOpts().CPlusPlus17) { 14927 Tree.merge(RHSRegion); 14928 Tree.merge(LHSRegion); 14929 } 14930 } 14931 14932 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 14933 VisitBinAssign(CAO); 14934 } 14935 14936 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14937 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14938 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 14939 Object O = getObject(UO->getSubExpr(), true); 14940 if (!O) 14941 return VisitExpr(UO); 14942 14943 notePreMod(O, UO); 14944 Visit(UO->getSubExpr()); 14945 // C++11 [expr.pre.incr]p1: 14946 // the expression ++x is equivalent to x+=1 14947 notePostMod(O, UO, 14948 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 14949 : UK_ModAsSideEffect); 14950 } 14951 14952 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14953 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14954 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 14955 Object O = getObject(UO->getSubExpr(), true); 14956 if (!O) 14957 return VisitExpr(UO); 14958 14959 notePreMod(O, UO); 14960 Visit(UO->getSubExpr()); 14961 notePostMod(O, UO, UK_ModAsSideEffect); 14962 } 14963 14964 void VisitBinLOr(const BinaryOperator *BO) { 14965 // C++11 [expr.log.or]p2: 14966 // If the second expression is evaluated, every value computation and 14967 // side effect associated with the first expression is sequenced before 14968 // every value computation and side effect associated with the 14969 // second expression. 14970 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14971 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14972 SequenceTree::Seq OldRegion = Region; 14973 14974 EvaluationTracker Eval(*this); 14975 { 14976 SequencedSubexpression Sequenced(*this); 14977 Region = LHSRegion; 14978 Visit(BO->getLHS()); 14979 } 14980 14981 // C++11 [expr.log.or]p1: 14982 // [...] the second operand is not evaluated if the first operand 14983 // evaluates to true. 14984 bool EvalResult = false; 14985 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 14986 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 14987 if (ShouldVisitRHS) { 14988 Region = RHSRegion; 14989 Visit(BO->getRHS()); 14990 } 14991 14992 Region = OldRegion; 14993 Tree.merge(LHSRegion); 14994 Tree.merge(RHSRegion); 14995 } 14996 14997 void VisitBinLAnd(const BinaryOperator *BO) { 14998 // C++11 [expr.log.and]p2: 14999 // If the second expression is evaluated, every value computation and 15000 // side effect associated with the first expression is sequenced before 15001 // every value computation and side effect associated with the 15002 // second expression. 15003 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 15004 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 15005 SequenceTree::Seq OldRegion = Region; 15006 15007 EvaluationTracker Eval(*this); 15008 { 15009 SequencedSubexpression Sequenced(*this); 15010 Region = LHSRegion; 15011 Visit(BO->getLHS()); 15012 } 15013 15014 // C++11 [expr.log.and]p1: 15015 // [...] the second operand is not evaluated if the first operand is false. 
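// For instance, if the LHS folds to false (as in "0 && (i++ + i)"), the
// RHS is skipped below, so its would-be unsequenced operations are not
// analyzed or reported.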
15016 bool EvalResult = false;
15017 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
15018 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
15019 if (ShouldVisitRHS) {
15020 Region = RHSRegion;
15021 Visit(BO->getRHS());
15022 }
15023
15024 Region = OldRegion;
15025 Tree.merge(LHSRegion);
15026 Tree.merge(RHSRegion);
15027 }
15028
15029 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
15030 // C++11 [expr.cond]p1:
15031 // [...] Every value computation and side effect associated with the first
15032 // expression is sequenced before every value computation and side effect
15033 // associated with the second or third expression.
15034 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
15035
15036 // No sequencing is specified between the true and false expression.
15037 // However, since exactly one of the two is going to be evaluated, we can
15038 // consider them to be sequenced. This is needed to avoid warning on
15039 // something like "x ? y+= 1 : y += 2;" in the case where we will visit
15040 // both the true and false expressions because we can't evaluate x.
15041 // This will still allow us to detect an expression like (pre C++17)
15042 // "(x ? y += 1 : y += 2) = y".
15043 //
15044 // We don't wrap the visitation of the true and false expression with
15045 // SequencedSubexpression because we don't want to downgrade modifications
15046 // to side effects in the true and false expressions after the visitation
15047 // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
15048 // not warn between the two "y++", but we should warn between the "y++"
15049 // and the "y".)
15050 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
15051 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
15052 SequenceTree::Seq OldRegion = Region;
15053
15054 EvaluationTracker Eval(*this);
15055 {
15056 SequencedSubexpression Sequenced(*this);
15057 Region = ConditionRegion;
15058 Visit(CO->getCond());
15059 }
15060
15061 // C++11 [expr.cond]p1:
15062 // [...] The first expression is contextually converted to bool (Clause 4).
15063 // It is evaluated and if it is true, the result of the conditional
15064 // expression is the value of the second expression, otherwise that of the
15065 // third expression. Only one of the second and third expressions is
15066 // evaluated. [...]
15067 bool EvalResult = false;
15068 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
15069 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
15070 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
15071 if (ShouldVisitTrueExpr) {
15072 Region = TrueRegion;
15073 Visit(CO->getTrueExpr());
15074 }
15075 if (ShouldVisitFalseExpr) {
15076 Region = FalseRegion;
15077 Visit(CO->getFalseExpr());
15078 }
15079
15080 Region = OldRegion;
15081 Tree.merge(ConditionRegion);
15082 Tree.merge(TrueRegion);
15083 Tree.merge(FalseRegion);
15084 }
15085
15086 void VisitCallExpr(const CallExpr *CE) {
15087 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
15088
15089 if (CE->isUnevaluatedBuiltinCall(Context))
15090 return;
15091
15092 // C++11 [intro.execution]p15:
15093 // When calling a function [...], every value computation and side effect
15094 // associated with any argument expression, or with the postfix expression
15095 // designating the called function, is sequenced before execution of every
15096 // expression or statement in the body of the function [and thus before
15097 // the value computation of its result].
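    // Illustrative example: given "int i = 0; f(i++);", the increment of 'i'
    // is complete before the first statement of f's body executes, and thus
    // before the call's result is computed; the SequencedSubexpression below
    // models the arguments' side effects as finished once the call has been
    // visited.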
15098 SequencedSubexpression Sequenced(*this); 15099 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 15100 // C++17 [expr.call]p5 15101 // The postfix-expression is sequenced before each expression in the 15102 // expression-list and any default argument. [...] 15103 SequenceTree::Seq CalleeRegion; 15104 SequenceTree::Seq OtherRegion; 15105 if (SemaRef.getLangOpts().CPlusPlus17) { 15106 CalleeRegion = Tree.allocate(Region); 15107 OtherRegion = Tree.allocate(Region); 15108 } else { 15109 CalleeRegion = Region; 15110 OtherRegion = Region; 15111 } 15112 SequenceTree::Seq OldRegion = Region; 15113 15114 // Visit the callee expression first. 15115 Region = CalleeRegion; 15116 if (SemaRef.getLangOpts().CPlusPlus17) { 15117 SequencedSubexpression Sequenced(*this); 15118 Visit(CE->getCallee()); 15119 } else { 15120 Visit(CE->getCallee()); 15121 } 15122 15123 // Then visit the argument expressions. 15124 Region = OtherRegion; 15125 for (const Expr *Argument : CE->arguments()) 15126 Visit(Argument); 15127 15128 Region = OldRegion; 15129 if (SemaRef.getLangOpts().CPlusPlus17) { 15130 Tree.merge(CalleeRegion); 15131 Tree.merge(OtherRegion); 15132 } 15133 }); 15134 } 15135 15136 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 15137 // C++17 [over.match.oper]p2: 15138 // [...] the operator notation is first transformed to the equivalent 15139 // function-call notation as summarized in Table 12 (where @ denotes one 15140 // of the operators covered in the specified subclause). However, the 15141 // operands are sequenced in the order prescribed for the built-in 15142 // operator (Clause 8). 15143 // 15144 // From the above only overloaded binary operators and overloaded call 15145 // operators have sequencing rules in C++17 that we need to handle 15146 // separately. 15147 if (!SemaRef.getLangOpts().CPlusPlus17 || 15148 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 15149 return VisitCallExpr(CXXOCE); 15150 15151 enum { 15152 NoSequencing, 15153 LHSBeforeRHS, 15154 RHSBeforeLHS, 15155 LHSBeforeRest 15156 } SequencingKind; 15157 switch (CXXOCE->getOperator()) { 15158 case OO_Equal: 15159 case OO_PlusEqual: 15160 case OO_MinusEqual: 15161 case OO_StarEqual: 15162 case OO_SlashEqual: 15163 case OO_PercentEqual: 15164 case OO_CaretEqual: 15165 case OO_AmpEqual: 15166 case OO_PipeEqual: 15167 case OO_LessLessEqual: 15168 case OO_GreaterGreaterEqual: 15169 SequencingKind = RHSBeforeLHS; 15170 break; 15171 15172 case OO_LessLess: 15173 case OO_GreaterGreater: 15174 case OO_AmpAmp: 15175 case OO_PipePipe: 15176 case OO_Comma: 15177 case OO_ArrowStar: 15178 case OO_Subscript: 15179 SequencingKind = LHSBeforeRHS; 15180 break; 15181 15182 case OO_Call: 15183 SequencingKind = LHSBeforeRest; 15184 break; 15185 15186 default: 15187 SequencingKind = NoSequencing; 15188 break; 15189 } 15190 15191 if (SequencingKind == NoSequencing) 15192 return VisitCallExpr(CXXOCE); 15193 15194 // This is a call, so all subexpressions are sequenced before the result. 
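    // Illustrative example: with an overloaded operator<<, C++17 evaluates
    // "out << i++ << i" left-to-right just as for the built-in operator,
    // which is why OO_LessLess was mapped to LHSBeforeRHS above; before C++17
    // the code falls back to VisitCallExpr instead (see the early return
    // above).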
15195 SequencedSubexpression Sequenced(*this); 15196 15197 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 15198 assert(SemaRef.getLangOpts().CPlusPlus17 && 15199 "Should only get there with C++17 and above!"); 15200 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 15201 "Should only get there with an overloaded binary operator" 15202 " or an overloaded call operator!"); 15203 15204 if (SequencingKind == LHSBeforeRest) { 15205 assert(CXXOCE->getOperator() == OO_Call && 15206 "We should only have an overloaded call operator here!"); 15207 15208 // This is very similar to VisitCallExpr, except that we only have the 15209 // C++17 case. The postfix-expression is the first argument of the 15210 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 15211 // are in the following arguments. 15212 // 15213 // Note that we intentionally do not visit the callee expression since 15214 // it is just a decayed reference to a function. 15215 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 15216 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 15217 SequenceTree::Seq OldRegion = Region; 15218 15219 assert(CXXOCE->getNumArgs() >= 1 && 15220 "An overloaded call operator must have at least one argument" 15221 " for the postfix-expression!"); 15222 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 15223 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 15224 CXXOCE->getNumArgs() - 1); 15225 15226 // Visit the postfix-expression first. 15227 { 15228 Region = PostfixExprRegion; 15229 SequencedSubexpression Sequenced(*this); 15230 Visit(PostfixExpr); 15231 } 15232 15233 // Then visit the argument expressions. 15234 Region = ArgsRegion; 15235 for (const Expr *Arg : Args) 15236 Visit(Arg); 15237 15238 Region = OldRegion; 15239 Tree.merge(PostfixExprRegion); 15240 Tree.merge(ArgsRegion); 15241 } else { 15242 assert(CXXOCE->getNumArgs() == 2 && 15243 "Should only have two arguments here!"); 15244 assert((SequencingKind == LHSBeforeRHS || 15245 SequencingKind == RHSBeforeLHS) && 15246 "Unexpected sequencing kind!"); 15247 15248 // We do not visit the callee expression since it is just a decayed 15249 // reference to a function. 15250 const Expr *E1 = CXXOCE->getArg(0); 15251 const Expr *E2 = CXXOCE->getArg(1); 15252 if (SequencingKind == RHSBeforeLHS) 15253 std::swap(E1, E2); 15254 15255 return VisitSequencedExpressions(E1, E2); 15256 } 15257 }); 15258 } 15259 15260 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 15261 // This is a call, so all subexpressions are sequenced before the result. 15262 SequencedSubexpression Sequenced(*this); 15263 15264 if (!CCE->isListInitialization()) 15265 return VisitExpr(CCE); 15266 15267 // In C++11, list initializations are sequenced. 15268 SmallVector<SequenceTree::Seq, 32> Elts; 15269 SequenceTree::Seq Parent = Region; 15270 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 15271 E = CCE->arg_end(); 15272 I != E; ++I) { 15273 Region = Tree.allocate(Parent); 15274 Elts.push_back(Region); 15275 Visit(*I); 15276 } 15277 15278 // Forget that the initializers are sequenced. 15279 Region = Parent; 15280 for (unsigned I = 0; I < Elts.size(); ++I) 15281 Tree.merge(Elts[I]); 15282 } 15283 15284 void VisitInitListExpr(const InitListExpr *ILE) { 15285 if (!SemaRef.getLangOpts().CPlusPlus11) 15286 return VisitExpr(ILE); 15287 15288 // In C++11, list initializations are sequenced. 
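    // C++11 [dcl.init.list]p4: each initializer-clause is sequenced before
    // any initializer-clause that follows it, so e.g. "int a[2] = {i++, i++};"
    // is well-defined and must not be flagged.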
15289 SmallVector<SequenceTree::Seq, 32> Elts; 15290 SequenceTree::Seq Parent = Region; 15291 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 15292 const Expr *E = ILE->getInit(I); 15293 if (!E) 15294 continue; 15295 Region = Tree.allocate(Parent); 15296 Elts.push_back(Region); 15297 Visit(E); 15298 } 15299 15300 // Forget that the initializers are sequenced. 15301 Region = Parent; 15302 for (unsigned I = 0; I < Elts.size(); ++I) 15303 Tree.merge(Elts[I]); 15304 } 15305 }; 15306 15307 } // namespace 15308 15309 void Sema::CheckUnsequencedOperations(const Expr *E) { 15310 SmallVector<const Expr *, 8> WorkList; 15311 WorkList.push_back(E); 15312 while (!WorkList.empty()) { 15313 const Expr *Item = WorkList.pop_back_val(); 15314 SequenceChecker(*this, Item, WorkList); 15315 } 15316 } 15317 15318 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 15319 bool IsConstexpr) { 15320 llvm::SaveAndRestore<bool> ConstantContext( 15321 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 15322 CheckImplicitConversions(E, CheckLoc); 15323 if (!E->isInstantiationDependent()) 15324 CheckUnsequencedOperations(E); 15325 if (!IsConstexpr && !E->isValueDependent()) 15326 CheckForIntOverflow(E); 15327 DiagnoseMisalignedMembers(); 15328 } 15329 15330 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 15331 FieldDecl *BitField, 15332 Expr *Init) { 15333 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 15334 } 15335 15336 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 15337 SourceLocation Loc) { 15338 if (!PType->isVariablyModifiedType()) 15339 return; 15340 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 15341 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 15342 return; 15343 } 15344 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 15345 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 15346 return; 15347 } 15348 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 15349 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 15350 return; 15351 } 15352 15353 const ArrayType *AT = S.Context.getAsArrayType(PType); 15354 if (!AT) 15355 return; 15356 15357 if (AT->getSizeModifier() != ArrayType::Star) { 15358 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 15359 return; 15360 } 15361 15362 S.Diag(Loc, diag::err_array_star_in_function_definition); 15363 } 15364 15365 /// CheckParmsForFunctionDef - Check that the parameters of the given 15366 /// function are appropriate for the definition of a function. This 15367 /// takes care of any checks that cannot be performed on the 15368 /// declaration itself, e.g., that the types of each of the function 15369 /// parameters are complete. 15370 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 15371 bool CheckParameterNames) { 15372 bool HasInvalidParm = false; 15373 for (ParmVarDecl *Param : Parameters) { 15374 // C99 6.7.5.3p4: the parameters in a parameter type list in a 15375 // function declarator that is part of a function definition of 15376 // that function shall not have incomplete type. 15377 // 15378 // This is also C++ [dcl.fct]p6. 
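    // For example (illustrative):
    //   struct S;
    //   void f(struct S s) { }  // error: 's' has incomplete type
    // A mere declaration of 'f' could use the incomplete type; the definition
    // cannot.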
15379 if (!Param->isInvalidDecl() && 15380 RequireCompleteType(Param->getLocation(), Param->getType(), 15381 diag::err_typecheck_decl_incomplete_type)) { 15382 Param->setInvalidDecl(); 15383 HasInvalidParm = true; 15384 } 15385 15386 // C99 6.9.1p5: If the declarator includes a parameter type list, the 15387 // declaration of each parameter shall include an identifier. 15388 if (CheckParameterNames && Param->getIdentifier() == nullptr && 15389 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 15390 // Diagnose this as an extension in C17 and earlier. 15391 if (!getLangOpts().C2x) 15392 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 15393 } 15394 15395 // C99 6.7.5.3p12: 15396 // If the function declarator is not part of a definition of that 15397 // function, parameters may have incomplete type and may use the [*] 15398 // notation in their sequences of declarator specifiers to specify 15399 // variable length array types. 15400 QualType PType = Param->getOriginalType(); 15401 // FIXME: This diagnostic should point the '[*]' if source-location 15402 // information is added for it. 15403 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 15404 15405 // If the parameter is a c++ class type and it has to be destructed in the 15406 // callee function, declare the destructor so that it can be called by the 15407 // callee function. Do not perform any direct access check on the dtor here. 15408 if (!Param->isInvalidDecl()) { 15409 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 15410 if (!ClassDecl->isInvalidDecl() && 15411 !ClassDecl->hasIrrelevantDestructor() && 15412 !ClassDecl->isDependentContext() && 15413 ClassDecl->isParamDestroyedInCallee()) { 15414 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 15415 MarkFunctionReferenced(Param->getLocation(), Destructor); 15416 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 15417 } 15418 } 15419 } 15420 15421 // Parameters with the pass_object_size attribute only need to be marked 15422 // constant at function definitions. Because we lack information about 15423 // whether we're on a declaration or definition when we're instantiating the 15424 // attribute, we need to check for constness here. 15425 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 15426 if (!Param->getType().isConstQualified()) 15427 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 15428 << Attr->getSpelling() << 1; 15429 15430 // Check for parameter names shadowing fields from the class. 15431 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 15432 // The owning context for the parameter should be the function, but we 15433 // want to see if this function's declaration context is a record. 15434 DeclContext *DC = Param->getDeclContext(); 15435 if (DC && DC->isFunctionOrMethod()) { 15436 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 15437 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 15438 RD, /*DeclIsField*/ false); 15439 } 15440 } 15441 } 15442 15443 return HasInvalidParm; 15444 } 15445 15446 Optional<std::pair<CharUnits, CharUnits>> 15447 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 15448 15449 /// Compute the alignment and offset of the base class object given the 15450 /// derived-to-base cast expression and the alignment and offset of the derived 15451 /// class object. 
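/// Illustrative sketch: for "struct A { char c; }; struct B { double d; };
/// struct C : A, B {};", a cast from 'C *' to 'B *' adds the (ABI-dependent)
/// offset of the B subobject to the running offset; for a virtual base the
/// offset is unknown, so the result falls back to a conservative alignment
/// and an offset of zero.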
15452 static std::pair<CharUnits, CharUnits> 15453 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 15454 CharUnits BaseAlignment, CharUnits Offset, 15455 ASTContext &Ctx) { 15456 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 15457 ++PathI) { 15458 const CXXBaseSpecifier *Base = *PathI; 15459 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 15460 if (Base->isVirtual()) { 15461 // The complete object may have a lower alignment than the non-virtual 15462 // alignment of the base, in which case the base may be misaligned. Choose 15463 // the smaller of the non-virtual alignment and BaseAlignment, which is a 15464 // conservative lower bound of the complete object alignment. 15465 CharUnits NonVirtualAlignment = 15466 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 15467 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 15468 Offset = CharUnits::Zero(); 15469 } else { 15470 const ASTRecordLayout &RL = 15471 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 15472 Offset += RL.getBaseClassOffset(BaseDecl); 15473 } 15474 DerivedType = Base->getType(); 15475 } 15476 15477 return std::make_pair(BaseAlignment, Offset); 15478 } 15479 15480 /// Compute the alignment and offset of a binary additive operator. 15481 static Optional<std::pair<CharUnits, CharUnits>> 15482 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 15483 bool IsSub, ASTContext &Ctx) { 15484 QualType PointeeType = PtrE->getType()->getPointeeType(); 15485 15486 if (!PointeeType->isConstantSizeType()) 15487 return llvm::None; 15488 15489 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 15490 15491 if (!P) 15492 return llvm::None; 15493 15494 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 15495 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 15496 CharUnits Offset = EltSize * IdxRes->getExtValue(); 15497 if (IsSub) 15498 Offset = -Offset; 15499 return std::make_pair(P->first, P->second + Offset); 15500 } 15501 15502 // If the integer expression isn't a constant expression, compute the lower 15503 // bound of the alignment using the alignment and offset of the pointer 15504 // expression and the element size. 15505 return std::make_pair( 15506 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 15507 CharUnits::Zero()); 15508 } 15509 15510 /// This helper function takes an lvalue expression and returns the alignment of 15511 /// a VarDecl and a constant offset from the VarDecl. 
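/// For example (illustrative), for "S s; ... s.arr[2]" it can return the
/// alignment of 's' together with "offset of 'arr' in S + 2 * element size";
/// when no underlying VarDecl can be identified it returns None.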
15512 Optional<std::pair<CharUnits, CharUnits>> 15513 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 15514 E = E->IgnoreParens(); 15515 switch (E->getStmtClass()) { 15516 default: 15517 break; 15518 case Stmt::CStyleCastExprClass: 15519 case Stmt::CXXStaticCastExprClass: 15520 case Stmt::ImplicitCastExprClass: { 15521 auto *CE = cast<CastExpr>(E); 15522 const Expr *From = CE->getSubExpr(); 15523 switch (CE->getCastKind()) { 15524 default: 15525 break; 15526 case CK_NoOp: 15527 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15528 case CK_UncheckedDerivedToBase: 15529 case CK_DerivedToBase: { 15530 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15531 if (!P) 15532 break; 15533 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15534 P->second, Ctx); 15535 } 15536 } 15537 break; 15538 } 15539 case Stmt::ArraySubscriptExprClass: { 15540 auto *ASE = cast<ArraySubscriptExpr>(E); 15541 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15542 false, Ctx); 15543 } 15544 case Stmt::DeclRefExprClass: { 15545 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15546 // FIXME: If VD is captured by copy or is an escaping __block variable, 15547 // use the alignment of VD's type. 15548 if (!VD->getType()->isReferenceType()) 15549 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15550 if (VD->hasInit()) 15551 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15552 } 15553 break; 15554 } 15555 case Stmt::MemberExprClass: { 15556 auto *ME = cast<MemberExpr>(E); 15557 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15558 if (!FD || FD->getType()->isReferenceType() || 15559 FD->getParent()->isInvalidDecl()) 15560 break; 15561 Optional<std::pair<CharUnits, CharUnits>> P; 15562 if (ME->isArrow()) 15563 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15564 else 15565 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15566 if (!P) 15567 break; 15568 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15569 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 15570 return std::make_pair(P->first, 15571 P->second + CharUnits::fromQuantity(Offset)); 15572 } 15573 case Stmt::UnaryOperatorClass: { 15574 auto *UO = cast<UnaryOperator>(E); 15575 switch (UO->getOpcode()) { 15576 default: 15577 break; 15578 case UO_Deref: 15579 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 15580 } 15581 break; 15582 } 15583 case Stmt::BinaryOperatorClass: { 15584 auto *BO = cast<BinaryOperator>(E); 15585 auto Opcode = BO->getOpcode(); 15586 switch (Opcode) { 15587 default: 15588 break; 15589 case BO_Comma: 15590 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 15591 } 15592 break; 15593 } 15594 } 15595 return llvm::None; 15596 } 15597 15598 /// This helper function takes a pointer expression and returns the alignment of 15599 /// a VarDecl and a constant offset from the VarDecl. 
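/// For example (illustrative), "&s.x" is handled by deferring to the lvalue
/// variant above, while a bare "this" only contributes the class's
/// non-virtual alignment with an offset of zero.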
15600 Optional<std::pair<CharUnits, CharUnits>> 15601 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 15602 E = E->IgnoreParens(); 15603 switch (E->getStmtClass()) { 15604 default: 15605 break; 15606 case Stmt::CStyleCastExprClass: 15607 case Stmt::CXXStaticCastExprClass: 15608 case Stmt::ImplicitCastExprClass: { 15609 auto *CE = cast<CastExpr>(E); 15610 const Expr *From = CE->getSubExpr(); 15611 switch (CE->getCastKind()) { 15612 default: 15613 break; 15614 case CK_NoOp: 15615 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15616 case CK_ArrayToPointerDecay: 15617 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15618 case CK_UncheckedDerivedToBase: 15619 case CK_DerivedToBase: { 15620 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15621 if (!P) 15622 break; 15623 return getDerivedToBaseAlignmentAndOffset( 15624 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 15625 } 15626 } 15627 break; 15628 } 15629 case Stmt::CXXThisExprClass: { 15630 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 15631 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 15632 return std::make_pair(Alignment, CharUnits::Zero()); 15633 } 15634 case Stmt::UnaryOperatorClass: { 15635 auto *UO = cast<UnaryOperator>(E); 15636 if (UO->getOpcode() == UO_AddrOf) 15637 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 15638 break; 15639 } 15640 case Stmt::BinaryOperatorClass: { 15641 auto *BO = cast<BinaryOperator>(E); 15642 auto Opcode = BO->getOpcode(); 15643 switch (Opcode) { 15644 default: 15645 break; 15646 case BO_Add: 15647 case BO_Sub: { 15648 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 15649 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 15650 std::swap(LHS, RHS); 15651 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 15652 Ctx); 15653 } 15654 case BO_Comma: 15655 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 15656 } 15657 break; 15658 } 15659 } 15660 return llvm::None; 15661 } 15662 15663 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 15664 // See if we can compute the alignment of a VarDecl and an offset from it. 15665 Optional<std::pair<CharUnits, CharUnits>> P = 15666 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 15667 15668 if (P) 15669 return P->first.alignmentAtOffset(P->second); 15670 15671 // If that failed, return the type's alignment. 15672 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 15673 } 15674 15675 /// CheckCastAlign - Implements -Wcast-align, which warns when a 15676 /// pointer cast increases the alignment requirements. 15677 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 15678 // This is actually a lot of work to potentially be doing on every 15679 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 15680 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 15681 return; 15682 15683 // Ignore dependent types. 15684 if (T->isDependentType() || Op->getType()->isDependentType()) 15685 return; 15686 15687 // Require that the destination be a pointer type. 15688 const PointerType *DestPtr = T->getAs<PointerType>(); 15689 if (!DestPtr) return; 15690 15691 // If the destination has alignment 1, we're done. 
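  // (So casts to e.g. 'char *' never warn. A typical positive, illustrative
  // case is "char *p; ... (int *)p": the presumed alignment of the source
  // pointee is 1, which is smaller than what 'int' normally requires.)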
15692 QualType DestPointee = DestPtr->getPointeeType(); 15693 if (DestPointee->isIncompleteType()) return; 15694 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 15695 if (DestAlign.isOne()) return; 15696 15697 // Require that the source be a pointer type. 15698 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 15699 if (!SrcPtr) return; 15700 QualType SrcPointee = SrcPtr->getPointeeType(); 15701 15702 // Explicitly allow casts from cv void*. We already implicitly 15703 // allowed casts to cv void*, since they have alignment 1. 15704 // Also allow casts involving incomplete types, which implicitly 15705 // includes 'void'. 15706 if (SrcPointee->isIncompleteType()) return; 15707 15708 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 15709 15710 if (SrcAlign >= DestAlign) return; 15711 15712 Diag(TRange.getBegin(), diag::warn_cast_align) 15713 << Op->getType() << T 15714 << static_cast<unsigned>(SrcAlign.getQuantity()) 15715 << static_cast<unsigned>(DestAlign.getQuantity()) 15716 << TRange << Op->getSourceRange(); 15717 } 15718 15719 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 15720 const ArraySubscriptExpr *ASE, 15721 bool AllowOnePastEnd, bool IndexNegated) { 15722 // Already diagnosed by the constant evaluator. 15723 if (isConstantEvaluated()) 15724 return; 15725 15726 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 15727 if (IndexExpr->isValueDependent()) 15728 return; 15729 15730 const Type *EffectiveType = 15731 BaseExpr->getType()->getPointeeOrArrayElementType(); 15732 BaseExpr = BaseExpr->IgnoreParenCasts(); 15733 const ConstantArrayType *ArrayTy = 15734 Context.getAsConstantArrayType(BaseExpr->getType()); 15735 15736 const Type *BaseType = 15737 ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr(); 15738 bool IsUnboundedArray = (BaseType == nullptr); 15739 if (EffectiveType->isDependentType() || 15740 (!IsUnboundedArray && BaseType->isDependentType())) 15741 return; 15742 15743 Expr::EvalResult Result; 15744 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 15745 return; 15746 15747 llvm::APSInt index = Result.Val.getInt(); 15748 if (IndexNegated) { 15749 index.setIsUnsigned(false); 15750 index = -index; 15751 } 15752 15753 const NamedDecl *ND = nullptr; 15754 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15755 ND = DRE->getDecl(); 15756 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 15757 ND = ME->getMemberDecl(); 15758 15759 if (IsUnboundedArray) { 15760 if (EffectiveType->isFunctionType()) 15761 return; 15762 if (index.isUnsigned() || !index.isNegative()) { 15763 const auto &ASTC = getASTContext(); 15764 unsigned AddrBits = 15765 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 15766 EffectiveType->getCanonicalTypeInternal())); 15767 if (index.getBitWidth() < AddrBits) 15768 index = index.zext(AddrBits); 15769 Optional<CharUnits> ElemCharUnits = 15770 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 15771 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 15772 // pointer) bounds-checking isn't meaningful. 15773 if (!ElemCharUnits) 15774 return; 15775 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 15776 // If index has more active bits than address space, we already know 15777 // we have a bounds violation to warn about. Otherwise, compute 15778 // address of (index + 1)th element, and warn about bounds violation 15779 // only if that address exceeds address space. 
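    // Illustrative example, assuming a 32-bit address space and 4-byte int:
    // for "int *p; ... p[0x40000000]" the end of the accessed element would
    // sit at (0x40000000 + 1) * 4, which no longer fits in 32 bits, so the
    // "exceeds maximum addressable bounds" diagnostic below applies.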
15780 if (index.getActiveBits() <= AddrBits) { 15781 bool Overflow; 15782 llvm::APInt Product(index); 15783 Product += 1; 15784 Product = Product.umul_ov(ElemBytes, Overflow); 15785 if (!Overflow && Product.getActiveBits() <= AddrBits) 15786 return; 15787 } 15788 15789 // Need to compute max possible elements in address space, since that 15790 // is included in diag message. 15791 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 15792 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 15793 MaxElems += 1; 15794 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 15795 MaxElems = MaxElems.udiv(ElemBytes); 15796 15797 unsigned DiagID = 15798 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 15799 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 15800 15801 // Diag message shows element size in bits and in "bytes" (platform- 15802 // dependent CharUnits) 15803 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15804 PDiag(DiagID) 15805 << toString(index, 10, true) << AddrBits 15806 << (unsigned)ASTC.toBits(*ElemCharUnits) 15807 << toString(ElemBytes, 10, false) 15808 << toString(MaxElems, 10, false) 15809 << (unsigned)MaxElems.getLimitedValue(~0U) 15810 << IndexExpr->getSourceRange()); 15811 15812 if (!ND) { 15813 // Try harder to find a NamedDecl to point at in the note. 15814 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15815 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15816 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15817 ND = DRE->getDecl(); 15818 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15819 ND = ME->getMemberDecl(); 15820 } 15821 15822 if (ND) 15823 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15824 PDiag(diag::note_array_declared_here) << ND); 15825 } 15826 return; 15827 } 15828 15829 if (index.isUnsigned() || !index.isNegative()) { 15830 // It is possible that the type of the base expression after 15831 // IgnoreParenCasts is incomplete, even though the type of the base 15832 // expression before IgnoreParenCasts is complete (see PR39746 for an 15833 // example). In this case we have no information about whether the array 15834 // access exceeds the array bounds. However we can still diagnose an array 15835 // access which precedes the array bounds. 
15836 if (BaseType->isIncompleteType()) 15837 return; 15838 15839 llvm::APInt size = ArrayTy->getSize(); 15840 if (!size.isStrictlyPositive()) 15841 return; 15842 15843 if (BaseType != EffectiveType) { 15844 // Make sure we're comparing apples to apples when comparing index to size 15845 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 15846 uint64_t array_typesize = Context.getTypeSize(BaseType); 15847 // Handle ptrarith_typesize being zero, such as when casting to void* 15848 if (!ptrarith_typesize) ptrarith_typesize = 1; 15849 if (ptrarith_typesize != array_typesize) { 15850 // There's a cast to a different size type involved 15851 uint64_t ratio = array_typesize / ptrarith_typesize; 15852 // TODO: Be smarter about handling cases where array_typesize is not a 15853 // multiple of ptrarith_typesize 15854 if (ptrarith_typesize * ratio == array_typesize) 15855 size *= llvm::APInt(size.getBitWidth(), ratio); 15856 } 15857 } 15858 15859 if (size.getBitWidth() > index.getBitWidth()) 15860 index = index.zext(size.getBitWidth()); 15861 else if (size.getBitWidth() < index.getBitWidth()) 15862 size = size.zext(index.getBitWidth()); 15863 15864 // For array subscripting the index must be less than size, but for pointer 15865 // arithmetic also allow the index (offset) to be equal to size since 15866 // computing the next address after the end of the array is legal and 15867 // commonly done e.g. in C++ iterators and range-based for loops. 15868 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 15869 return; 15870 15871 // Also don't warn for flexible array members. 15872 if (BaseExpr->isFlexibleArrayMember(Context, 15873 getLangOpts().StrictFlexArrays)) 15874 return; 15875 15876 // Suppress the warning if the subscript expression (as identified by the 15877 // ']' location) and the index expression are both from macro expansions 15878 // within a system header. 15879 if (ASE) { 15880 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 15881 ASE->getRBracketLoc()); 15882 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 15883 SourceLocation IndexLoc = 15884 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 15885 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 15886 return; 15887 } 15888 } 15889 15890 unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds 15891 : diag::warn_ptr_arith_exceeds_bounds; 15892 15893 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15894 PDiag(DiagID) << toString(index, 10, true) 15895 << toString(size, 10, true) 15896 << (unsigned)size.getLimitedValue(~0U) 15897 << IndexExpr->getSourceRange()); 15898 } else { 15899 unsigned DiagID = diag::warn_array_index_precedes_bounds; 15900 if (!ASE) { 15901 DiagID = diag::warn_ptr_arith_precedes_bounds; 15902 if (index.isNegative()) index = -index; 15903 } 15904 15905 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15906 PDiag(DiagID) << toString(index, 10, true) 15907 << IndexExpr->getSourceRange()); 15908 } 15909 15910 if (!ND) { 15911 // Try harder to find a NamedDecl to point at in the note. 
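  // For example, for an access like "matrix[1][10]" the immediate base is
  // itself an ArraySubscriptExpr, so walk down to the DeclRefExpr (or
  // MemberExpr) naming 'matrix' and attach the "array declared here" note to
  // that declaration.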
15912 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15913 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15914 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15915 ND = DRE->getDecl(); 15916 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15917 ND = ME->getMemberDecl(); 15918 } 15919 15920 if (ND) 15921 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15922 PDiag(diag::note_array_declared_here) << ND); 15923 } 15924 15925 void Sema::CheckArrayAccess(const Expr *expr) { 15926 int AllowOnePastEnd = 0; 15927 while (expr) { 15928 expr = expr->IgnoreParenImpCasts(); 15929 switch (expr->getStmtClass()) { 15930 case Stmt::ArraySubscriptExprClass: { 15931 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 15932 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 15933 AllowOnePastEnd > 0); 15934 expr = ASE->getBase(); 15935 break; 15936 } 15937 case Stmt::MemberExprClass: { 15938 expr = cast<MemberExpr>(expr)->getBase(); 15939 break; 15940 } 15941 case Stmt::OMPArraySectionExprClass: { 15942 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 15943 if (ASE->getLowerBound()) 15944 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 15945 /*ASE=*/nullptr, AllowOnePastEnd > 0); 15946 return; 15947 } 15948 case Stmt::UnaryOperatorClass: { 15949 // Only unwrap the * and & unary operators 15950 const UnaryOperator *UO = cast<UnaryOperator>(expr); 15951 expr = UO->getSubExpr(); 15952 switch (UO->getOpcode()) { 15953 case UO_AddrOf: 15954 AllowOnePastEnd++; 15955 break; 15956 case UO_Deref: 15957 AllowOnePastEnd--; 15958 break; 15959 default: 15960 return; 15961 } 15962 break; 15963 } 15964 case Stmt::ConditionalOperatorClass: { 15965 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 15966 if (const Expr *lhs = cond->getLHS()) 15967 CheckArrayAccess(lhs); 15968 if (const Expr *rhs = cond->getRHS()) 15969 CheckArrayAccess(rhs); 15970 return; 15971 } 15972 case Stmt::CXXOperatorCallExprClass: { 15973 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 15974 for (const auto *Arg : OCE->arguments()) 15975 CheckArrayAccess(Arg); 15976 return; 15977 } 15978 default: 15979 return; 15980 } 15981 } 15982 } 15983 15984 //===--- CHECK: Objective-C retain cycles ----------------------------------// 15985 15986 namespace { 15987 15988 struct RetainCycleOwner { 15989 VarDecl *Variable = nullptr; 15990 SourceRange Range; 15991 SourceLocation Loc; 15992 bool Indirect = false; 15993 15994 RetainCycleOwner() = default; 15995 15996 void setLocsFrom(Expr *e) { 15997 Loc = e->getExprLoc(); 15998 Range = e->getSourceRange(); 15999 } 16000 }; 16001 16002 } // namespace 16003 16004 /// Consider whether capturing the given variable can possibly lead to 16005 /// a retain cycle. 16006 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 16007 // In ARC, it's captured strongly iff the variable has __strong 16008 // lifetime. In MRR, it's captured strongly if the variable is 16009 // __block and has an appropriate type. 
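  // Under ARC this means a __weak or __unsafe_unretained variable is never
  // treated as the owner of a potential cycle; only a __strong one passes the
  // check below.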
16010 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 16011 return false; 16012 16013 owner.Variable = var; 16014 if (ref) 16015 owner.setLocsFrom(ref); 16016 return true; 16017 } 16018 16019 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 16020 while (true) { 16021 e = e->IgnoreParens(); 16022 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 16023 switch (cast->getCastKind()) { 16024 case CK_BitCast: 16025 case CK_LValueBitCast: 16026 case CK_LValueToRValue: 16027 case CK_ARCReclaimReturnedObject: 16028 e = cast->getSubExpr(); 16029 continue; 16030 16031 default: 16032 return false; 16033 } 16034 } 16035 16036 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 16037 ObjCIvarDecl *ivar = ref->getDecl(); 16038 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 16039 return false; 16040 16041 // Try to find a retain cycle in the base. 16042 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 16043 return false; 16044 16045 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 16046 owner.Indirect = true; 16047 return true; 16048 } 16049 16050 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 16051 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 16052 if (!var) return false; 16053 return considerVariable(var, ref, owner); 16054 } 16055 16056 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 16057 if (member->isArrow()) return false; 16058 16059 // Don't count this as an indirect ownership. 16060 e = member->getBase(); 16061 continue; 16062 } 16063 16064 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 16065 // Only pay attention to pseudo-objects on property references. 16066 ObjCPropertyRefExpr *pre 16067 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 16068 ->IgnoreParens()); 16069 if (!pre) return false; 16070 if (pre->isImplicitProperty()) return false; 16071 ObjCPropertyDecl *property = pre->getExplicitProperty(); 16072 if (!property->isRetaining() && 16073 !(property->getPropertyIvarDecl() && 16074 property->getPropertyIvarDecl()->getType() 16075 .getObjCLifetime() == Qualifiers::OCL_Strong)) 16076 return false; 16077 16078 owner.Indirect = true; 16079 if (pre->isSuperReceiver()) { 16080 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 16081 if (!owner.Variable) 16082 return false; 16083 owner.Loc = pre->getLocation(); 16084 owner.Range = pre->getSourceRange(); 16085 return true; 16086 } 16087 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 16088 ->getSourceExpr()); 16089 continue; 16090 } 16091 16092 // Array ivars? 
16093 16094 return false; 16095 } 16096 } 16097 16098 namespace { 16099 16100 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 16101 ASTContext &Context; 16102 VarDecl *Variable; 16103 Expr *Capturer = nullptr; 16104 bool VarWillBeReased = false; 16105 16106 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 16107 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 16108 Context(Context), Variable(variable) {} 16109 16110 void VisitDeclRefExpr(DeclRefExpr *ref) { 16111 if (ref->getDecl() == Variable && !Capturer) 16112 Capturer = ref; 16113 } 16114 16115 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 16116 if (Capturer) return; 16117 Visit(ref->getBase()); 16118 if (Capturer && ref->isFreeIvar()) 16119 Capturer = ref; 16120 } 16121 16122 void VisitBlockExpr(BlockExpr *block) { 16123 // Look inside nested blocks 16124 if (block->getBlockDecl()->capturesVariable(Variable)) 16125 Visit(block->getBlockDecl()->getBody()); 16126 } 16127 16128 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 16129 if (Capturer) return; 16130 if (OVE->getSourceExpr()) 16131 Visit(OVE->getSourceExpr()); 16132 } 16133 16134 void VisitBinaryOperator(BinaryOperator *BinOp) { 16135 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 16136 return; 16137 Expr *LHS = BinOp->getLHS(); 16138 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 16139 if (DRE->getDecl() != Variable) 16140 return; 16141 if (Expr *RHS = BinOp->getRHS()) { 16142 RHS = RHS->IgnoreParenCasts(); 16143 Optional<llvm::APSInt> Value; 16144 VarWillBeReased = 16145 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 16146 *Value == 0); 16147 } 16148 } 16149 } 16150 }; 16151 16152 } // namespace 16153 16154 /// Check whether the given argument is a block which captures a 16155 /// variable. 16156 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 16157 assert(owner.Variable && owner.Loc.isValid()); 16158 16159 e = e->IgnoreParenCasts(); 16160 16161 // Look through [^{...} copy] and Block_copy(^{...}). 16162 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 16163 Selector Cmd = ME->getSelector(); 16164 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 16165 e = ME->getInstanceReceiver(); 16166 if (!e) 16167 return nullptr; 16168 e = e->IgnoreParenCasts(); 16169 } 16170 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 16171 if (CE->getNumArgs() == 1) { 16172 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 16173 if (Fn) { 16174 const IdentifierInfo *FnI = Fn->getIdentifier(); 16175 if (FnI && FnI->isStr("_Block_copy")) { 16176 e = CE->getArg(0)->IgnoreParenCasts(); 16177 } 16178 } 16179 } 16180 } 16181 16182 BlockExpr *block = dyn_cast<BlockExpr>(e); 16183 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 16184 return nullptr; 16185 16186 FindCaptureVisitor visitor(S.Context, owner.Variable); 16187 visitor.Visit(block->getBlockDecl()->getBody()); 16188 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 16189 } 16190 16191 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 16192 RetainCycleOwner &owner) { 16193 assert(capturer); 16194 assert(owner.Variable && owner.Loc.isValid()); 16195 16196 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 16197 << owner.Variable << capturer->getSourceRange(); 16198 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 16199 << owner.Indirect << owner.Range; 16200 } 16201 16202 /// Check for a keyword selector that starts with the word 'add' or 16203 /// 'set'. 16204 static bool isSetterLikeSelector(Selector sel) { 16205 if (sel.isUnarySelector()) return false; 16206 16207 StringRef str = sel.getNameForSlot(0); 16208 while (!str.empty() && str.front() == '_') str = str.substr(1); 16209 if (str.startswith("set")) 16210 str = str.substr(3); 16211 else if (str.startswith("add")) { 16212 // Specially allow 'addOperationWithBlock:'. 16213 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 16214 return false; 16215 str = str.substr(3); 16216 } 16217 else 16218 return false; 16219 16220 if (str.empty()) return true; 16221 return !isLowercase(str.front()); 16222 } 16223 16224 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 16225 ObjCMessageExpr *Message) { 16226 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 16227 Message->getReceiverInterface(), 16228 NSAPI::ClassId_NSMutableArray); 16229 if (!IsMutableArray) { 16230 return None; 16231 } 16232 16233 Selector Sel = Message->getSelector(); 16234 16235 Optional<NSAPI::NSArrayMethodKind> MKOpt = 16236 S.NSAPIObj->getNSArrayMethodKind(Sel); 16237 if (!MKOpt) { 16238 return None; 16239 } 16240 16241 NSAPI::NSArrayMethodKind MK = *MKOpt; 16242 16243 switch (MK) { 16244 case NSAPI::NSMutableArr_addObject: 16245 case NSAPI::NSMutableArr_insertObjectAtIndex: 16246 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 16247 return 0; 16248 case NSAPI::NSMutableArr_replaceObjectAtIndex: 16249 return 1; 16250 16251 default: 16252 return None; 16253 } 16254 16255 return None; 16256 } 16257 16258 static 16259 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 16260 ObjCMessageExpr *Message) { 16261 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 16262 Message->getReceiverInterface(), 16263 NSAPI::ClassId_NSMutableDictionary); 16264 if (!IsMutableDictionary) { 16265 return None; 16266 } 16267 16268 Selector Sel = Message->getSelector(); 16269 16270 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 16271 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 16272 if (!MKOpt) { 16273 return None; 16274 } 16275 16276 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 16277 16278 switch (MK) { 16279 case NSAPI::NSMutableDict_setObjectForKey: 16280 case NSAPI::NSMutableDict_setValueForKey: 16281 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 16282 return 0; 16283 16284 default: 16285 return None; 16286 } 16287 16288 return None; 16289 } 16290 16291 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 16292 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 16293 Message->getReceiverInterface(), 16294 NSAPI::ClassId_NSMutableSet); 16295 16296 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 16297 Message->getReceiverInterface(), 16298 NSAPI::ClassId_NSMutableOrderedSet); 16299 if (!IsMutableSet && !IsMutableOrderedSet) { 16300 return None; 16301 } 16302 16303 Selector Sel = Message->getSelector(); 16304 16305 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 16306 if (!MKOpt) 
{ 16307 return None; 16308 } 16309 16310 NSAPI::NSSetMethodKind MK = *MKOpt; 16311 16312 switch (MK) { 16313 case NSAPI::NSMutableSet_addObject: 16314 case NSAPI::NSOrderedSet_setObjectAtIndex: 16315 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 16316 case NSAPI::NSOrderedSet_insertObjectAtIndex: 16317 return 0; 16318 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 16319 return 1; 16320 } 16321 16322 return None; 16323 } 16324 16325 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 16326 if (!Message->isInstanceMessage()) { 16327 return; 16328 } 16329 16330 Optional<int> ArgOpt; 16331 16332 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 16333 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 16334 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 16335 return; 16336 } 16337 16338 int ArgIndex = *ArgOpt; 16339 16340 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 16341 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 16342 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 16343 } 16344 16345 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 16346 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 16347 if (ArgRE->isObjCSelfExpr()) { 16348 Diag(Message->getSourceRange().getBegin(), 16349 diag::warn_objc_circular_container) 16350 << ArgRE->getDecl() << StringRef("'super'"); 16351 } 16352 } 16353 } else { 16354 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 16355 16356 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 16357 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 16358 } 16359 16360 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 16361 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 16362 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 16363 ValueDecl *Decl = ReceiverRE->getDecl(); 16364 Diag(Message->getSourceRange().getBegin(), 16365 diag::warn_objc_circular_container) 16366 << Decl << Decl; 16367 if (!ArgRE->isObjCSelfExpr()) { 16368 Diag(Decl->getLocation(), 16369 diag::note_objc_circular_container_declared_here) 16370 << Decl; 16371 } 16372 } 16373 } 16374 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 16375 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 16376 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 16377 ObjCIvarDecl *Decl = IvarRE->getDecl(); 16378 Diag(Message->getSourceRange().getBegin(), 16379 diag::warn_objc_circular_container) 16380 << Decl << Decl; 16381 Diag(Decl->getLocation(), 16382 diag::note_objc_circular_container_declared_here) 16383 << Decl; 16384 } 16385 } 16386 } 16387 } 16388 } 16389 16390 /// Check a message send to see if it's likely to cause a retain cycle. 16391 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 16392 // Only check instance methods whose selector looks like a setter. 16393 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 16394 return; 16395 16396 // Try to find a variable that the receiver is strongly owned by. 
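  // A classic case this aims at (illustrative):
  //   [self setCompletionHandler:^{ [self doSomething]; }];
  // where the receiver is (transitively) owned by 'self' and the block
  // argument captures 'self' strongly.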
16397 RetainCycleOwner owner; 16398 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 16399 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 16400 return; 16401 } else { 16402 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 16403 owner.Variable = getCurMethodDecl()->getSelfDecl(); 16404 owner.Loc = msg->getSuperLoc(); 16405 owner.Range = msg->getSuperLoc(); 16406 } 16407 16408 // Check whether the receiver is captured by any of the arguments. 16409 const ObjCMethodDecl *MD = msg->getMethodDecl(); 16410 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 16411 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 16412 // noescape blocks should not be retained by the method. 16413 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 16414 continue; 16415 return diagnoseRetainCycle(*this, capturer, owner); 16416 } 16417 } 16418 } 16419 16420 /// Check a property assign to see if it's likely to cause a retain cycle. 16421 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 16422 RetainCycleOwner owner; 16423 if (!findRetainCycleOwner(*this, receiver, owner)) 16424 return; 16425 16426 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 16427 diagnoseRetainCycle(*this, capturer, owner); 16428 } 16429 16430 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 16431 RetainCycleOwner Owner; 16432 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 16433 return; 16434 16435 // Because we don't have an expression for the variable, we have to set the 16436 // location explicitly here. 16437 Owner.Loc = Var->getLocation(); 16438 Owner.Range = Var->getSourceRange(); 16439 16440 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16441 diagnoseRetainCycle(*this, Capturer, Owner); 16442 } 16443 16444 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16445 Expr *RHS, bool isProperty) { 16446 // Check if RHS is an Objective-C object literal, which also can get 16447 // immediately zapped in a weak reference. Note that we explicitly 16448 // allow ObjCStringLiterals, since those are designed to never really die. 16449 RHS = RHS->IgnoreParenImpCasts(); 16450 16451 // This enum needs to match with the 'select' in 16452 // warn_objc_arc_literal_assign (off-by-1). 16453 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16454 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16455 return false; 16456 16457 S.Diag(Loc, diag::warn_arc_literal_assign) 16458 << (unsigned) Kind 16459 << (isProperty ? 0 : 1) 16460 << RHS->getSourceRange(); 16461 16462 return true; 16463 } 16464 16465 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16466 Qualifiers::ObjCLifetime LT, 16467 Expr *RHS, bool isProperty) { 16468 // Strip off any implicit cast added to get to the one ARC-specific. 16469 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16470 if (cast->getCastKind() == CK_ARCConsumeObject) { 16471 S.Diag(Loc, diag::warn_arc_retained_assign) 16472 << (LT == Qualifiers::OCL_ExplicitNone) 16473 << (isProperty ? 
0 : 1) 16474 << RHS->getSourceRange(); 16475 return true; 16476 } 16477 RHS = cast->getSubExpr(); 16478 } 16479 16480 if (LT == Qualifiers::OCL_Weak && 16481 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16482 return true; 16483 16484 return false; 16485 } 16486 16487 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16488 QualType LHS, Expr *RHS) { 16489 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16490 16491 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16492 return false; 16493 16494 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16495 return true; 16496 16497 return false; 16498 } 16499 16500 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16501 Expr *LHS, Expr *RHS) { 16502 QualType LHSType; 16503 // PropertyRef on LHS type need be directly obtained from 16504 // its declaration as it has a PseudoType. 16505 ObjCPropertyRefExpr *PRE 16506 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16507 if (PRE && !PRE->isImplicitProperty()) { 16508 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16509 if (PD) 16510 LHSType = PD->getType(); 16511 } 16512 16513 if (LHSType.isNull()) 16514 LHSType = LHS->getType(); 16515 16516 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16517 16518 if (LT == Qualifiers::OCL_Weak) { 16519 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16520 getCurFunction()->markSafeWeakUse(LHS); 16521 } 16522 16523 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16524 return; 16525 16526 // FIXME. Check for other life times. 16527 if (LT != Qualifiers::OCL_None) 16528 return; 16529 16530 if (PRE) { 16531 if (PRE->isImplicitProperty()) 16532 return; 16533 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16534 if (!PD) 16535 return; 16536 16537 unsigned Attributes = PD->getPropertyAttributes(); 16538 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16539 // when 'assign' attribute was not explicitly specified 16540 // by user, ignore it and rely on property type itself 16541 // for lifetime info. 16542 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16543 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16544 LHSType->isObjCRetainableType()) 16545 return; 16546 16547 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16548 if (cast->getCastKind() == CK_ARCConsumeObject) { 16549 Diag(Loc, diag::warn_arc_retained_property_assign) 16550 << RHS->getSourceRange(); 16551 return; 16552 } 16553 RHS = cast->getSubExpr(); 16554 } 16555 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16556 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16557 return; 16558 } 16559 } 16560 } 16561 16562 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16563 16564 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16565 SourceLocation StmtLoc, 16566 const NullStmt *Body) { 16567 // Do not warn if the body is a macro that expands to nothing, e.g: 16568 // 16569 // #define CALL(x) 16570 // if (condition) 16571 // CALL(0); 16572 if (Body->hasLeadingEmptyMacro()) 16573 return false; 16574 16575 // Get line numbers of statement and body. 
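  // The check below only considers a null statement suspicious when it sits
  // on the same line as the 'if'/'for'/'while' itself, e.g. "if (x);"; a ';'
  // deliberately placed on its own following line is not diagnosed.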
16576 bool StmtLineInvalid; 16577 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 16578 &StmtLineInvalid); 16579 if (StmtLineInvalid) 16580 return false; 16581 16582 bool BodyLineInvalid; 16583 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 16584 &BodyLineInvalid); 16585 if (BodyLineInvalid) 16586 return false; 16587 16588 // Warn if null statement and body are on the same line. 16589 if (StmtLine != BodyLine) 16590 return false; 16591 16592 return true; 16593 } 16594 16595 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 16596 const Stmt *Body, 16597 unsigned DiagID) { 16598 // Since this is a syntactic check, don't emit diagnostic for template 16599 // instantiations, this just adds noise. 16600 if (CurrentInstantiationScope) 16601 return; 16602 16603 // The body should be a null statement. 16604 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16605 if (!NBody) 16606 return; 16607 16608 // Do the usual checks. 16609 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16610 return; 16611 16612 Diag(NBody->getSemiLoc(), DiagID); 16613 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16614 } 16615 16616 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 16617 const Stmt *PossibleBody) { 16618 assert(!CurrentInstantiationScope); // Ensured by caller 16619 16620 SourceLocation StmtLoc; 16621 const Stmt *Body; 16622 unsigned DiagID; 16623 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 16624 StmtLoc = FS->getRParenLoc(); 16625 Body = FS->getBody(); 16626 DiagID = diag::warn_empty_for_body; 16627 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 16628 StmtLoc = WS->getRParenLoc(); 16629 Body = WS->getBody(); 16630 DiagID = diag::warn_empty_while_body; 16631 } else 16632 return; // Neither `for' nor `while'. 16633 16634 // The body should be a null statement. 16635 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16636 if (!NBody) 16637 return; 16638 16639 // Skip expensive checks if diagnostic is disabled. 16640 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 16641 return; 16642 16643 // Do the usual checks. 16644 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16645 return; 16646 16647 // `for(...);' and `while(...);' are popular idioms, so in order to keep 16648 // noise level low, emit diagnostics only if for/while is followed by a 16649 // CompoundStmt, e.g.: 16650 // for (int i = 0; i < n; i++); 16651 // { 16652 // a(i); 16653 // } 16654 // or if for/while is followed by a statement with more indentation 16655 // than for/while itself: 16656 // for (int i = 0; i < n; i++); 16657 // a(i); 16658 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 16659 if (!ProbableTypo) { 16660 bool BodyColInvalid; 16661 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 16662 PossibleBody->getBeginLoc(), &BodyColInvalid); 16663 if (BodyColInvalid) 16664 return; 16665 16666 bool StmtColInvalid; 16667 unsigned StmtCol = 16668 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 16669 if (StmtColInvalid) 16670 return; 16671 16672 if (BodyCol > StmtCol) 16673 ProbableTypo = true; 16674 } 16675 16676 if (ProbableTypo) { 16677 Diag(NBody->getSemiLoc(), DiagID); 16678 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16679 } 16680 } 16681 16682 //===--- CHECK: Warn on self move with std::move. -------------------------===// 16683 16684 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
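/// Typical cases (illustrative): "x = std::move(x);" or, inside a member
/// function, "field = std::move(this->field);" when both sides resolve to the
/// same declaration.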
16685 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
16686                             SourceLocation OpLoc) {
16687   if (Diags.isIgnored(diag::warn_self_move, OpLoc))
16688     return;
16689
16690   if (inTemplateInstantiation())
16691     return;
16692
16693   // Strip parens and casts away.
16694   LHSExpr = LHSExpr->IgnoreParenImpCasts();
16695   RHSExpr = RHSExpr->IgnoreParenImpCasts();
16696
16697   // Check for a call expression.
16698   const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
16699   if (!CE || CE->getNumArgs() != 1)
16700     return;
16701
16702   // Check for a call to std::move.
16703   if (!CE->isCallToStdMove())
16704     return;
16705
16706   // Get the argument from std::move.
16707   RHSExpr = CE->getArg(0);
16708
16709   const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
16710   const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
16711
16712   // Two DeclRefExprs: check that the decls are the same.
16713   if (LHSDeclRef && RHSDeclRef) {
16714     if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
16715       return;
16716     if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
16717         RHSDeclRef->getDecl()->getCanonicalDecl())
16718       return;
16719
16720     Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
16721                                       << LHSExpr->getSourceRange()
16722                                       << RHSExpr->getSourceRange();
16723     return;
16724   }
16725
16726   // Member variables require a different approach to check for self moves.
16727   // MemberExprs are the same if every nested MemberExpr refers to the same
16728   // Decl and the base Exprs are DeclRefExprs with the same Decl, or the
16729   // base Exprs are CXXThisExprs.
16730   const Expr *LHSBase = LHSExpr;
16731   const Expr *RHSBase = RHSExpr;
16732   const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
16733   const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
16734   if (!LHSME || !RHSME)
16735     return;
16736
16737   while (LHSME && RHSME) {
16738     if (LHSME->getMemberDecl()->getCanonicalDecl() !=
16739         RHSME->getMemberDecl()->getCanonicalDecl())
16740       return;
16741
16742     LHSBase = LHSME->getBase();
16743     RHSBase = RHSME->getBase();
16744     LHSME = dyn_cast<MemberExpr>(LHSBase);
16745     RHSME = dyn_cast<MemberExpr>(RHSBase);
16746   }
16747
16748   LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
16749   RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
16750   if (LHSDeclRef && RHSDeclRef) {
16751     if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
16752       return;
16753     if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
16754         RHSDeclRef->getDecl()->getCanonicalDecl())
16755       return;
16756
16757     Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
16758                                       << LHSExpr->getSourceRange()
16759                                       << RHSExpr->getSourceRange();
16760     return;
16761   }
16762
16763   if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
16764     Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
16765                                       << LHSExpr->getSourceRange()
16766                                       << RHSExpr->getSourceRange();
16767 }
16768
16769 //===--- Layout compatibility ----------------------------------------------//
16770
16771 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
16772
16773 /// Check if two enumeration types are layout-compatible.
16774 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
16775   // C++11 [dcl.enum] p8:
16776   //   Two enumeration types are layout-compatible if they have the same
16777   //   underlying type.
16778   return ED1->isComplete() && ED2->isComplete() &&
16779          C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
16780 }
16781
16782 /// Check if two fields are layout-compatible.
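/// Two fields match only if their types are layout-compatible and, when they
/// are bit-fields, their declared widths agree. For instance:
/// \code
///   struct A { int x : 3; };
///   struct B { int y : 3; };   // field is layout-compatible with A::x
///   struct C { int z : 4; };   // not compatible: different bit-field width
/// \endcode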
16783 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16784 FieldDecl *Field2) { 16785 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16786 return false; 16787 16788 if (Field1->isBitField() != Field2->isBitField()) 16789 return false; 16790 16791 if (Field1->isBitField()) { 16792 // Make sure that the bit-fields are the same length. 16793 unsigned Bits1 = Field1->getBitWidthValue(C); 16794 unsigned Bits2 = Field2->getBitWidthValue(C); 16795 16796 if (Bits1 != Bits2) 16797 return false; 16798 } 16799 16800 return true; 16801 } 16802 16803 /// Check if two standard-layout structs are layout-compatible. 16804 /// (C++11 [class.mem] p17) 16805 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16806 RecordDecl *RD2) { 16807 // If both records are C++ classes, check that base classes match. 16808 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16809 // If one of records is a CXXRecordDecl we are in C++ mode, 16810 // thus the other one is a CXXRecordDecl, too. 16811 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16812 // Check number of base classes. 16813 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16814 return false; 16815 16816 // Check the base classes. 16817 for (CXXRecordDecl::base_class_const_iterator 16818 Base1 = D1CXX->bases_begin(), 16819 BaseEnd1 = D1CXX->bases_end(), 16820 Base2 = D2CXX->bases_begin(); 16821 Base1 != BaseEnd1; 16822 ++Base1, ++Base2) { 16823 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 16824 return false; 16825 } 16826 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 16827 // If only RD2 is a C++ class, it should have zero base classes. 16828 if (D2CXX->getNumBases() > 0) 16829 return false; 16830 } 16831 16832 // Check the fields. 16833 RecordDecl::field_iterator Field2 = RD2->field_begin(), 16834 Field2End = RD2->field_end(), 16835 Field1 = RD1->field_begin(), 16836 Field1End = RD1->field_end(); 16837 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 16838 if (!isLayoutCompatible(C, *Field1, *Field2)) 16839 return false; 16840 } 16841 if (Field1 != Field1End || Field2 != Field2End) 16842 return false; 16843 16844 return true; 16845 } 16846 16847 /// Check if two standard-layout unions are layout-compatible. 16848 /// (C++11 [class.mem] p18) 16849 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 16850 RecordDecl *RD2) { 16851 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 16852 for (auto *Field2 : RD2->fields()) 16853 UnmatchedFields.insert(Field2); 16854 16855 for (auto *Field1 : RD1->fields()) { 16856 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 16857 I = UnmatchedFields.begin(), 16858 E = UnmatchedFields.end(); 16859 16860 for ( ; I != E; ++I) { 16861 if (isLayoutCompatible(C, Field1, *I)) { 16862 bool Result = UnmatchedFields.erase(*I); 16863 (void) Result; 16864 assert(Result); 16865 break; 16866 } 16867 } 16868 if (I == E) 16869 return false; 16870 } 16871 16872 return UnmatchedFields.empty(); 16873 } 16874 16875 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 16876 RecordDecl *RD2) { 16877 if (RD1->isUnion() != RD2->isUnion()) 16878 return false; 16879 16880 if (RD1->isUnion()) 16881 return isLayoutCompatibleUnion(C, RD1, RD2); 16882 else 16883 return isLayoutCompatibleStruct(C, RD1, RD2); 16884 } 16885 16886 /// Check if two types are layout-compatible in C++11 sense. 
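/// For example, two distinct enumerations with the same underlying type, or
/// two standard-layout structs with layout-compatible bases and fields, are
/// accepted; anything else (other than identical types) is rejected:
/// \code
///   enum E1 : int { };
///   enum E2 : int { };     // layout-compatible with E1
///   struct S { int a; };
///   struct T { int b; };   // layout-compatible with S
/// \endcode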
16887 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
16888   if (T1.isNull() || T2.isNull())
16889     return false;
16890
16891   // C++11 [basic.types] p11:
16892   //   If two types T1 and T2 are the same type, then T1 and T2 are
16893   //   layout-compatible types.
16894   if (C.hasSameType(T1, T2))
16895     return true;
16896
16897   T1 = T1.getCanonicalType().getUnqualifiedType();
16898   T2 = T2.getCanonicalType().getUnqualifiedType();
16899
16900   const Type::TypeClass TC1 = T1->getTypeClass();
16901   const Type::TypeClass TC2 = T2->getTypeClass();
16902
16903   if (TC1 != TC2)
16904     return false;
16905
16906   if (TC1 == Type::Enum) {
16907     return isLayoutCompatible(C,
16908                               cast<EnumType>(T1)->getDecl(),
16909                               cast<EnumType>(T2)->getDecl());
16910   } else if (TC1 == Type::Record) {
16911     if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
16912       return false;
16913
16914     return isLayoutCompatible(C,
16915                               cast<RecordType>(T1)->getDecl(),
16916                               cast<RecordType>(T2)->getDecl());
16917   }
16918
16919   return false;
16920 }
16921
16922 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
16923
16924 /// Given a type tag expression, find the type tag itself.
16925 ///
16926 /// \param TypeExpr Type tag expression, as it appears in the user's code.
16927 ///
16928 /// \param VD Declaration of an identifier that appears in a type tag.
16929 ///
16930 /// \param MagicValue Type tag magic value.
16931 ///
16932 /// \param isConstantEvaluated whether the evaluation should be performed in
16933 /// constant context.
16935 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
16936                             const ValueDecl **VD, uint64_t *MagicValue,
16937                             bool isConstantEvaluated) {
16938   while (true) {
16939     if (!TypeExpr)
16940       return false;
16941
16942     TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
16943
16944     switch (TypeExpr->getStmtClass()) {
16945     case Stmt::UnaryOperatorClass: {
16946       const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
16947       if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
16948         TypeExpr = UO->getSubExpr();
16949         continue;
16950       }
16951       return false;
16952     }
16953
16954     case Stmt::DeclRefExprClass: {
16955       const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
16956       *VD = DRE->getDecl();
16957       return true;
16958     }
16959
16960     case Stmt::IntegerLiteralClass: {
16961       const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
16962       llvm::APInt MagicValueAPInt = IL->getValue();
16963       if (MagicValueAPInt.getActiveBits() <= 64) {
16964         *MagicValue = MagicValueAPInt.getZExtValue();
16965         return true;
16966       } else
16967         return false;
16968     }
16969
16970     case Stmt::BinaryConditionalOperatorClass:
16971     case Stmt::ConditionalOperatorClass: {
16972       const AbstractConditionalOperator *ACO =
16973           cast<AbstractConditionalOperator>(TypeExpr);
16974       bool Result;
16975       if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
16976                                                      isConstantEvaluated)) {
16977         if (Result)
16978           TypeExpr = ACO->getTrueExpr();
16979         else
16980           TypeExpr = ACO->getFalseExpr();
16981         continue;
16982       }
16983       return false;
16984     }
16985
16986     case Stmt::BinaryOperatorClass: {
16987       const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
16988       if (BO->getOpcode() == BO_Comma) {
16989         TypeExpr = BO->getRHS();
16990         continue;
16991       }
16992       return false;
16993     }
16994
16995     default:
16996       return false;
16997     }
16998   }
16999 }
17000
17001 /// Retrieve the C type corresponding to type tag TypeExpr.
17002 /// 17003 /// \param TypeExpr Expression that specifies a type tag. 17004 /// 17005 /// \param MagicValues Registered magic values. 17006 /// 17007 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 17008 /// kind. 17009 /// 17010 /// \param TypeInfo Information about the corresponding C type. 17011 /// 17012 /// \param isConstantEvaluated whether the evalaution should be performed in 17013 /// constant context. 17014 /// 17015 /// \returns true if the corresponding C type was found. 17016 static bool GetMatchingCType( 17017 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 17018 const ASTContext &Ctx, 17019 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 17020 *MagicValues, 17021 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 17022 bool isConstantEvaluated) { 17023 FoundWrongKind = false; 17024 17025 // Variable declaration that has type_tag_for_datatype attribute. 17026 const ValueDecl *VD = nullptr; 17027 17028 uint64_t MagicValue; 17029 17030 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 17031 return false; 17032 17033 if (VD) { 17034 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 17035 if (I->getArgumentKind() != ArgumentKind) { 17036 FoundWrongKind = true; 17037 return false; 17038 } 17039 TypeInfo.Type = I->getMatchingCType(); 17040 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 17041 TypeInfo.MustBeNull = I->getMustBeNull(); 17042 return true; 17043 } 17044 return false; 17045 } 17046 17047 if (!MagicValues) 17048 return false; 17049 17050 llvm::DenseMap<Sema::TypeTagMagicValue, 17051 Sema::TypeTagData>::const_iterator I = 17052 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 17053 if (I == MagicValues->end()) 17054 return false; 17055 17056 TypeInfo = I->second; 17057 return true; 17058 } 17059 17060 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 17061 uint64_t MagicValue, QualType Type, 17062 bool LayoutCompatible, 17063 bool MustBeNull) { 17064 if (!TypeTagForDatatypeMagicValues) 17065 TypeTagForDatatypeMagicValues.reset( 17066 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 17067 17068 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 17069 (*TypeTagForDatatypeMagicValues)[Magic] = 17070 TypeTagData(Type, LayoutCompatible, MustBeNull); 17071 } 17072 17073 static bool IsSameCharType(QualType T1, QualType T2) { 17074 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 17075 if (!BT1) 17076 return false; 17077 17078 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 17079 if (!BT2) 17080 return false; 17081 17082 BuiltinType::Kind T1Kind = BT1->getKind(); 17083 BuiltinType::Kind T2Kind = BT2->getKind(); 17084 17085 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 17086 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 17087 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 17088 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 17089 } 17090 17091 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 17092 const ArrayRef<const Expr *> ExprArgs, 17093 SourceLocation CallSiteLoc) { 17094 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 17095 bool IsPointerAttr = Attr->getIsPointer(); 17096 17097 // Retrieve the argument representing the 'type_tag'. 
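  // In a typical use (for instance the MPI-style example from the attribute
  // documentation), the checked call site looks roughly like:
  //   static const int mpi_int_tag
  //       __attribute__((type_tag_for_datatype(mpi, int))) = 1;
  //   MPI_Send(buf, 1, mpi_int_tag);   // 'buf' should point to int
  // where the attribute on the callee names the argument index carrying the
  // data and the index carrying the tag.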
17098 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 17099 if (TypeTagIdxAST >= ExprArgs.size()) { 17100 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17101 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 17102 return; 17103 } 17104 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 17105 bool FoundWrongKind; 17106 TypeTagData TypeInfo; 17107 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 17108 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 17109 TypeInfo, isConstantEvaluated())) { 17110 if (FoundWrongKind) 17111 Diag(TypeTagExpr->getExprLoc(), 17112 diag::warn_type_tag_for_datatype_wrong_kind) 17113 << TypeTagExpr->getSourceRange(); 17114 return; 17115 } 17116 17117 // Retrieve the argument representing the 'arg_idx'. 17118 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 17119 if (ArgumentIdxAST >= ExprArgs.size()) { 17120 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17121 << 1 << Attr->getArgumentIdx().getSourceIndex(); 17122 return; 17123 } 17124 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 17125 if (IsPointerAttr) { 17126 // Skip implicit cast of pointer to `void *' (as a function argument). 17127 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 17128 if (ICE->getType()->isVoidPointerType() && 17129 ICE->getCastKind() == CK_BitCast) 17130 ArgumentExpr = ICE->getSubExpr(); 17131 } 17132 QualType ArgumentType = ArgumentExpr->getType(); 17133 17134 // Passing a `void*' pointer shouldn't trigger a warning. 17135 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 17136 return; 17137 17138 if (TypeInfo.MustBeNull) { 17139 // Type tag with matching void type requires a null pointer. 17140 if (!ArgumentExpr->isNullPointerConstant(Context, 17141 Expr::NPC_ValueDependentIsNotNull)) { 17142 Diag(ArgumentExpr->getExprLoc(), 17143 diag::warn_type_safety_null_pointer_required) 17144 << ArgumentKind->getName() 17145 << ArgumentExpr->getSourceRange() 17146 << TypeTagExpr->getSourceRange(); 17147 } 17148 return; 17149 } 17150 17151 QualType RequiredType = TypeInfo.Type; 17152 if (IsPointerAttr) 17153 RequiredType = Context.getPointerType(RequiredType); 17154 17155 bool mismatch = false; 17156 if (!TypeInfo.LayoutCompatible) { 17157 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 17158 17159 // C++11 [basic.fundamental] p1: 17160 // Plain char, signed char, and unsigned char are three distinct types. 17161 // 17162 // But we treat plain `char' as equivalent to `signed char' or `unsigned 17163 // char' depending on the current char signedness mode. 
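    // For instance, with a tag registered for 'char *', a 'signed char *'
    // argument is still accepted on targets where plain 'char' is signed,
    // because Char_S and SChar are treated as the same kind here.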
17164 if (mismatch) 17165 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 17166 RequiredType->getPointeeType())) || 17167 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 17168 mismatch = false; 17169 } else 17170 if (IsPointerAttr) 17171 mismatch = !isLayoutCompatible(Context, 17172 ArgumentType->getPointeeType(), 17173 RequiredType->getPointeeType()); 17174 else 17175 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 17176 17177 if (mismatch) 17178 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 17179 << ArgumentType << ArgumentKind 17180 << TypeInfo.LayoutCompatible << RequiredType 17181 << ArgumentExpr->getSourceRange() 17182 << TypeTagExpr->getSourceRange(); 17183 } 17184 17185 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 17186 CharUnits Alignment) { 17187 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 17188 } 17189 17190 void Sema::DiagnoseMisalignedMembers() { 17191 for (MisalignedMember &m : MisalignedMembers) { 17192 const NamedDecl *ND = m.RD; 17193 if (ND->getName().empty()) { 17194 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 17195 ND = TD; 17196 } 17197 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 17198 << m.MD << ND << m.E->getSourceRange(); 17199 } 17200 MisalignedMembers.clear(); 17201 } 17202 17203 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 17204 E = E->IgnoreParens(); 17205 if (!T->isPointerType() && !T->isIntegerType()) 17206 return; 17207 if (isa<UnaryOperator>(E) && 17208 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 17209 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 17210 if (isa<MemberExpr>(Op)) { 17211 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 17212 if (MA != MisalignedMembers.end() && 17213 (T->isIntegerType() || 17214 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 17215 Context.getTypeAlignInChars( 17216 T->getPointeeType()) <= MA->Alignment)))) 17217 MisalignedMembers.erase(MA); 17218 } 17219 } 17220 } 17221 17222 void Sema::RefersToMemberWithReducedAlignment( 17223 Expr *E, 17224 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 17225 Action) { 17226 const auto *ME = dyn_cast<MemberExpr>(E); 17227 if (!ME) 17228 return; 17229 17230 // No need to check expressions with an __unaligned-qualified type. 17231 if (E->getType().getQualifiers().hasUnaligned()) 17232 return; 17233 17234 // For a chain of MemberExpr like "a.b.c.d" this list 17235 // will keep FieldDecl's like [d, c, b]. 17236 SmallVector<FieldDecl *, 4> ReverseMemberChain; 17237 const MemberExpr *TopME = nullptr; 17238 bool AnyIsPacked = false; 17239 do { 17240 QualType BaseType = ME->getBase()->getType(); 17241 if (BaseType->isDependentType()) 17242 return; 17243 if (ME->isArrow()) 17244 BaseType = BaseType->getPointeeType(); 17245 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 17246 if (RD->isInvalidDecl()) 17247 return; 17248 17249 ValueDecl *MD = ME->getMemberDecl(); 17250 auto *FD = dyn_cast<FieldDecl>(MD); 17251 // We do not care about non-data members. 
17252 if (!FD || FD->isInvalidDecl()) 17253 return; 17254 17255 AnyIsPacked = 17256 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 17257 ReverseMemberChain.push_back(FD); 17258 17259 TopME = ME; 17260 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 17261 } while (ME); 17262 assert(TopME && "We did not compute a topmost MemberExpr!"); 17263 17264 // Not the scope of this diagnostic. 17265 if (!AnyIsPacked) 17266 return; 17267 17268 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 17269 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 17270 // TODO: The innermost base of the member expression may be too complicated. 17271 // For now, just disregard these cases. This is left for future 17272 // improvement. 17273 if (!DRE && !isa<CXXThisExpr>(TopBase)) 17274 return; 17275 17276 // Alignment expected by the whole expression. 17277 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 17278 17279 // No need to do anything else with this case. 17280 if (ExpectedAlignment.isOne()) 17281 return; 17282 17283 // Synthesize offset of the whole access. 17284 CharUnits Offset; 17285 for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain)) 17286 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD)); 17287 17288 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 17289 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 17290 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 17291 17292 // The base expression of the innermost MemberExpr may give 17293 // stronger guarantees than the class containing the member. 17294 if (DRE && !TopME->isArrow()) { 17295 const ValueDecl *VD = DRE->getDecl(); 17296 if (!VD->getType()->isReferenceType()) 17297 CompleteObjectAlignment = 17298 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 17299 } 17300 17301 // Check if the synthesized offset fulfills the alignment. 17302 if (Offset % ExpectedAlignment != 0 || 17303 // It may fulfill the offset it but the effective alignment may still be 17304 // lower than the expected expression alignment. 17305 CompleteObjectAlignment < ExpectedAlignment) { 17306 // If this happens, we want to determine a sensible culprit of this. 17307 // Intuitively, watching the chain of member expressions from right to 17308 // left, we start with the required alignment (as required by the field 17309 // type) but some packed attribute in that chain has reduced the alignment. 17310 // It may happen that another packed structure increases it again. But if 17311 // we are here such increase has not been enough. So pointing the first 17312 // FieldDecl that either is packed or else its RecordDecl is, 17313 // seems reasonable. 
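    // For instance, given (illustrative):
    //   struct __attribute__((packed)) P { char c; int i; };
    //   P p;
    //   int *q = &p.i;
    // the chain is just [i]; 'P' is packed, so 'i' is reported as the culprit
    // with the reduced alignment of one byte.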
17314 FieldDecl *FD = nullptr; 17315 CharUnits Alignment; 17316 for (FieldDecl *FDI : ReverseMemberChain) { 17317 if (FDI->hasAttr<PackedAttr>() || 17318 FDI->getParent()->hasAttr<PackedAttr>()) { 17319 FD = FDI; 17320 Alignment = std::min( 17321 Context.getTypeAlignInChars(FD->getType()), 17322 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 17323 break; 17324 } 17325 } 17326 assert(FD && "We did not find a packed FieldDecl!"); 17327 Action(E, FD->getParent(), FD, Alignment); 17328 } 17329 } 17330 17331 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 17332 using namespace std::placeholders; 17333 17334 RefersToMemberWithReducedAlignment( 17335 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 17336 _2, _3, _4)); 17337 } 17338 17339 // Check if \p Ty is a valid type for the elementwise math builtins. If it is 17340 // not a valid type, emit an error message and return true. Otherwise return 17341 // false. 17342 static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc, 17343 QualType Ty) { 17344 if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) { 17345 S.Diag(Loc, diag::err_builtin_invalid_arg_type) 17346 << 1 << /* vector, integer or float ty*/ 0 << Ty; 17347 return true; 17348 } 17349 return false; 17350 } 17351 17352 bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) { 17353 if (checkArgCount(*this, TheCall, 1)) 17354 return true; 17355 17356 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17357 if (A.isInvalid()) 17358 return true; 17359 17360 TheCall->setArg(0, A.get()); 17361 QualType TyA = A.get()->getType(); 17362 17363 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17364 return true; 17365 17366 TheCall->setType(TyA); 17367 return false; 17368 } 17369 17370 bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) { 17371 if (checkArgCount(*this, TheCall, 2)) 17372 return true; 17373 17374 ExprResult A = TheCall->getArg(0); 17375 ExprResult B = TheCall->getArg(1); 17376 // Do standard promotions between the two arguments, returning their common 17377 // type. 
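  // For instance, __builtin_elementwise_max(IntVal, LongVal) promotes both
  // scalar operands to 'long' here; if the promoted types still differ, the
  // call is rejected below as having different argument types.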
17378 QualType Res = 17379 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison); 17380 if (A.isInvalid() || B.isInvalid()) 17381 return true; 17382 17383 QualType TyA = A.get()->getType(); 17384 QualType TyB = B.get()->getType(); 17385 17386 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType()) 17387 return Diag(A.get()->getBeginLoc(), 17388 diag::err_typecheck_call_different_arg_types) 17389 << TyA << TyB; 17390 17391 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17392 return true; 17393 17394 TheCall->setArg(0, A.get()); 17395 TheCall->setArg(1, B.get()); 17396 TheCall->setType(Res); 17397 return false; 17398 } 17399 17400 bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) { 17401 if (checkArgCount(*this, TheCall, 1)) 17402 return true; 17403 17404 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17405 if (A.isInvalid()) 17406 return true; 17407 17408 TheCall->setArg(0, A.get()); 17409 return false; 17410 } 17411 17412 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 17413 ExprResult CallResult) { 17414 if (checkArgCount(*this, TheCall, 1)) 17415 return ExprError(); 17416 17417 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 17418 if (MatrixArg.isInvalid()) 17419 return MatrixArg; 17420 Expr *Matrix = MatrixArg.get(); 17421 17422 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 17423 if (!MType) { 17424 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17425 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 17426 return ExprError(); 17427 } 17428 17429 // Create returned matrix type by swapping rows and columns of the argument 17430 // matrix type. 17431 QualType ResultType = Context.getConstantMatrixType( 17432 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 17433 17434 // Change the return type to the type of the returned matrix. 17435 TheCall->setType(ResultType); 17436 17437 // Update call argument to use the possibly converted matrix argument. 17438 TheCall->setArg(0, Matrix); 17439 return CallResult; 17440 } 17441 17442 // Get and verify the matrix dimensions. 17443 static llvm::Optional<unsigned> 17444 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 17445 SourceLocation ErrorPos; 17446 Optional<llvm::APSInt> Value = 17447 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 17448 if (!Value) { 17449 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 17450 << Name; 17451 return {}; 17452 } 17453 uint64_t Dim = Value->getZExtValue(); 17454 if (!ConstantMatrixType::isDimensionValid(Dim)) { 17455 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 17456 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 17457 return {}; 17458 } 17459 return Dim; 17460 } 17461 17462 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 17463 ExprResult CallResult) { 17464 if (!getLangOpts().MatrixTypes) { 17465 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 17466 return ExprError(); 17467 } 17468 17469 if (checkArgCount(*this, TheCall, 4)) 17470 return ExprError(); 17471 17472 unsigned PtrArgIdx = 0; 17473 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17474 Expr *RowsExpr = TheCall->getArg(1); 17475 Expr *ColumnsExpr = TheCall->getArg(2); 17476 Expr *StrideExpr = TheCall->getArg(3); 17477 17478 bool ArgError = false; 17479 17480 // Check pointer argument. 
17481   {
17482     ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
17483     if (PtrConv.isInvalid())
17484       return PtrConv;
17485     PtrExpr = PtrConv.get();
17486     TheCall->setArg(0, PtrExpr);
17487     if (PtrExpr->isTypeDependent()) {
17488       TheCall->setType(Context.DependentTy);
17489       return TheCall;
17490     }
17491   }
17492
17493   auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
17494   QualType ElementTy;
17495   if (!PtrTy) {
17496     Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
17497         << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
17498     ArgError = true;
17499   } else {
17500     ElementTy = PtrTy->getPointeeType().getUnqualifiedType();
17501
17502     if (!ConstantMatrixType::isValidElementType(ElementTy)) {
17503       Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
17504           << PtrArgIdx + 1 << /* pointer to element ty*/ 2
17505           << PtrExpr->getType();
17506       ArgError = true;
17507     }
17508   }
17509
17510   // Apply default Lvalue conversions and convert the expression to size_t.
17511   auto ApplyArgumentConversions = [this](Expr *E) {
17512     ExprResult Conv = DefaultLvalueConversion(E);
17513     if (Conv.isInvalid())
17514       return Conv;
17515
17516     return tryConvertExprToType(Conv.get(), Context.getSizeType());
17517   };
17518
17519   // Apply conversion to row and column expressions.
17520   ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
17521   if (!RowsConv.isInvalid()) {
17522     RowsExpr = RowsConv.get();
17523     TheCall->setArg(1, RowsExpr);
17524   } else
17525     RowsExpr = nullptr;
17526
17527   ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
17528   if (!ColumnsConv.isInvalid()) {
17529     ColumnsExpr = ColumnsConv.get();
17530     TheCall->setArg(2, ColumnsExpr);
17531   } else
17532     ColumnsExpr = nullptr;
17533
17534   // If any part of the result matrix type is still pending, just use
17535   // Context.DependentTy, until all parts are resolved.
17536   if ((RowsExpr && RowsExpr->isTypeDependent()) ||
17537       (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
17538     TheCall->setType(Context.DependentTy);
17539     return CallResult;
17540   }
17541
17542   // Check row and column dimensions.
17543   llvm::Optional<unsigned> MaybeRows;
17544   if (RowsExpr)
17545     MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);
17546
17547   llvm::Optional<unsigned> MaybeColumns;
17548   if (ColumnsExpr)
17549     MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);
17550
17551   // Check stride argument.
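  // The stride is the offset, in elements, between the starts of consecutive
  // columns in memory, so a constant stride smaller than the number of rows
  // cannot address a full column and is diagnosed below.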
17552 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 17553 if (StrideConv.isInvalid()) 17554 return ExprError(); 17555 StrideExpr = StrideConv.get(); 17556 TheCall->setArg(3, StrideExpr); 17557 17558 if (MaybeRows) { 17559 if (Optional<llvm::APSInt> Value = 17560 StrideExpr->getIntegerConstantExpr(Context)) { 17561 uint64_t Stride = Value->getZExtValue(); 17562 if (Stride < *MaybeRows) { 17563 Diag(StrideExpr->getBeginLoc(), 17564 diag::err_builtin_matrix_stride_too_small); 17565 ArgError = true; 17566 } 17567 } 17568 } 17569 17570 if (ArgError || !MaybeRows || !MaybeColumns) 17571 return ExprError(); 17572 17573 TheCall->setType( 17574 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 17575 return CallResult; 17576 } 17577 17578 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 17579 ExprResult CallResult) { 17580 if (checkArgCount(*this, TheCall, 3)) 17581 return ExprError(); 17582 17583 unsigned PtrArgIdx = 1; 17584 Expr *MatrixExpr = TheCall->getArg(0); 17585 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17586 Expr *StrideExpr = TheCall->getArg(2); 17587 17588 bool ArgError = false; 17589 17590 { 17591 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 17592 if (MatrixConv.isInvalid()) 17593 return MatrixConv; 17594 MatrixExpr = MatrixConv.get(); 17595 TheCall->setArg(0, MatrixExpr); 17596 } 17597 if (MatrixExpr->isTypeDependent()) { 17598 TheCall->setType(Context.DependentTy); 17599 return TheCall; 17600 } 17601 17602 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 17603 if (!MatrixTy) { 17604 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17605 << 1 << /*matrix ty */ 1 << MatrixExpr->getType(); 17606 ArgError = true; 17607 } 17608 17609 { 17610 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17611 if (PtrConv.isInvalid()) 17612 return PtrConv; 17613 PtrExpr = PtrConv.get(); 17614 TheCall->setArg(1, PtrExpr); 17615 if (PtrExpr->isTypeDependent()) { 17616 TheCall->setType(Context.DependentTy); 17617 return TheCall; 17618 } 17619 } 17620 17621 // Check pointer argument. 17622 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17623 if (!PtrTy) { 17624 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17625 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17626 ArgError = true; 17627 } else { 17628 QualType ElementTy = PtrTy->getPointeeType(); 17629 if (ElementTy.isConstQualified()) { 17630 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 17631 ArgError = true; 17632 } 17633 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 17634 if (MatrixTy && 17635 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 17636 Diag(PtrExpr->getBeginLoc(), 17637 diag::err_builtin_matrix_pointer_arg_mismatch) 17638 << ElementTy << MatrixTy->getElementType(); 17639 ArgError = true; 17640 } 17641 } 17642 17643 // Apply default Lvalue conversions and convert the stride expression to 17644 // size_t. 17645 { 17646 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 17647 if (StrideConv.isInvalid()) 17648 return StrideConv; 17649 17650 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 17651 if (StrideConv.isInvalid()) 17652 return StrideConv; 17653 StrideExpr = StrideConv.get(); 17654 TheCall->setArg(2, StrideExpr); 17655 } 17656 17657 // Check stride argument. 
17658 if (MatrixTy) { 17659 if (Optional<llvm::APSInt> Value = 17660 StrideExpr->getIntegerConstantExpr(Context)) { 17661 uint64_t Stride = Value->getZExtValue(); 17662 if (Stride < MatrixTy->getNumRows()) { 17663 Diag(StrideExpr->getBeginLoc(), 17664 diag::err_builtin_matrix_stride_too_small); 17665 ArgError = true; 17666 } 17667 } 17668 } 17669 17670 if (ArgError) 17671 return ExprError(); 17672 17673 return CallResult; 17674 } 17675 17676 /// \brief Enforce the bounds of a TCB 17677 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 17678 /// directly calls other functions in the same TCB as marked by the enforce_tcb 17679 /// and enforce_tcb_leaf attributes. 17680 void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc, 17681 const NamedDecl *Callee) { 17682 const NamedDecl *Caller = getCurFunctionOrMethodDecl(); 17683 17684 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>()) 17685 return; 17686 17687 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 17688 // all TCBs the callee is a part of. 17689 llvm::StringSet<> CalleeTCBs; 17690 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>()) 17691 CalleeTCBs.insert(A->getTCBName()); 17692 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>()) 17693 CalleeTCBs.insert(A->getTCBName()); 17694 17695 // Go through the TCBs the caller is a part of and emit warnings if Caller 17696 // is in a TCB that the Callee is not. 17697 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) { 17698 StringRef CallerTCB = A->getTCBName(); 17699 if (CalleeTCBs.count(CallerTCB) == 0) { 17700 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation) 17701 << Callee << CallerTCB; 17702 } 17703 } 17704 } 17705
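// A rough illustration of the enforcement above (names are made up):
//   __attribute__((enforce_tcb("net"))) void Caller();
//   __attribute__((enforce_tcb_leaf("net"))) void LeafCallee();
//   void Outside();
//   __attribute__((enforce_tcb("net"))) void Caller() {
//     LeafCallee(); // OK: the callee is in the "net" TCB (as a leaf).
//     Outside();    // warned: the callee is not part of the "net" TCB.
//   }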