//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is at least the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
                                 unsigned MinArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount >= MinArgCount)
    return false;

  return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
         << 0 /*function call*/ << MinArgCount << ArgCount
         << Call->getSourceRange();
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount == DesiredArgCount)
    return false;

  if (checkArgCountAtLeast(S, Call, DesiredArgCount))
    return true;
  assert(ArgCount > DesiredArgCount && "should have diagnosed this");

  // Highlight all the excess arguments.
  SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
                    Call->getArg(ArgCount - 1)->getEndLoc());

  return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << DesiredArgCount << ArgCount
         << Call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
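  // (Illustrative, not part of the original source: a well-formed call is
  //    long Tagged = __builtin_annotation(Value, "my.annotation");
  //  with an integer first argument and a narrow string literal second; the
  //  call then takes on the type of 'Value'.)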
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check that the argument to __builtin_function_start is a function.
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
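/// Illustrative uses (not part of the original source):
///   char *Aligned = __builtin_align_up(Ptr, 64);  // round pointer up
///   bool IsOk = __builtin_is_aligned(Offset, 16); // integer or pointer value
/// A constant alignment must be a power of two; a value of 1 is accepted but
/// diagnosed as useless.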
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
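  // (Illustrative, not part of the original source: a typical call is
  //    if (__builtin_add_overflow(A, B, &Result)) { /* handle overflow */ }
  //  where A and B are integers and Result is a non-const integer lvalue whose
  //  address is passed as the third argument.)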
332 for (unsigned I = 0; I < 2; ++I) { 333 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I)); 334 if (Arg.isInvalid()) return true; 335 TheCall->setArg(I, Arg.get()); 336 337 QualType Ty = Arg.get()->getType(); 338 if (!Ty->isIntegerType()) { 339 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int) 340 << Ty << Arg.get()->getSourceRange(); 341 return true; 342 } 343 } 344 345 // Third argument should be a pointer to a non-const integer. 346 // IRGen correctly handles volatile, restrict, and address spaces, and 347 // the other qualifiers aren't possible. 348 { 349 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2)); 350 if (Arg.isInvalid()) return true; 351 TheCall->setArg(2, Arg.get()); 352 353 QualType Ty = Arg.get()->getType(); 354 const auto *PtrTy = Ty->getAs<PointerType>(); 355 if (!PtrTy || 356 !PtrTy->getPointeeType()->isIntegerType() || 357 PtrTy->getPointeeType().isConstQualified()) { 358 S.Diag(Arg.get()->getBeginLoc(), 359 diag::err_overflow_builtin_must_be_ptr_int) 360 << Ty << Arg.get()->getSourceRange(); 361 return true; 362 } 363 } 364 365 // Disallow signed bit-precise integer args larger than 128 bits to mul 366 // function until we improve backend support. 367 if (BuiltinID == Builtin::BI__builtin_mul_overflow) { 368 for (unsigned I = 0; I < 3; ++I) { 369 const auto Arg = TheCall->getArg(I); 370 // Third argument will be a pointer. 371 auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType(); 372 if (Ty->isBitIntType() && Ty->isSignedIntegerType() && 373 S.getASTContext().getIntWidth(Ty) > 128) 374 return S.Diag(Arg->getBeginLoc(), 375 diag::err_overflow_builtin_bit_int_max_size) 376 << 128; 377 } 378 } 379 380 return false; 381 } 382 383 namespace { 384 struct BuiltinDumpStructGenerator { 385 Sema &S; 386 CallExpr *TheCall; 387 SourceLocation Loc = TheCall->getBeginLoc(); 388 SmallVector<Expr *, 32> Actions; 389 DiagnosticErrorTrap ErrorTracker; 390 PrintingPolicy Policy; 391 392 BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall) 393 : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()), 394 Policy(S.Context.getPrintingPolicy()) { 395 Policy.AnonymousTagLocations = false; 396 } 397 398 Expr *makeOpaqueValueExpr(Expr *Inner) { 399 auto *OVE = new (S.Context) 400 OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(), 401 Inner->getObjectKind(), Inner); 402 Actions.push_back(OVE); 403 return OVE; 404 } 405 406 Expr *getStringLiteral(llvm::StringRef Str) { 407 Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str); 408 // Wrap the literal in parentheses to attach a source location. 409 return new (S.Context) ParenExpr(Loc, Loc, Lit); 410 } 411 412 bool callPrintFunction(llvm::StringRef Format, 413 llvm::ArrayRef<Expr *> Exprs = {}) { 414 SmallVector<Expr *, 8> Args; 415 assert(TheCall->getNumArgs() >= 2); 416 Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size()); 417 Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end()); 418 Args.push_back(getStringLiteral(Format)); 419 Args.insert(Args.end(), Exprs.begin(), Exprs.end()); 420 421 // Register a note to explain why we're performing the call. 
422 Sema::CodeSynthesisContext Ctx; 423 Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall; 424 Ctx.PointOfInstantiation = Loc; 425 Ctx.CallArgs = Args.data(); 426 Ctx.NumCallArgs = Args.size(); 427 S.pushCodeSynthesisContext(Ctx); 428 429 ExprResult RealCall = 430 S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1), 431 TheCall->getBeginLoc(), Args, TheCall->getRParenLoc()); 432 433 S.popCodeSynthesisContext(); 434 if (!RealCall.isInvalid()) 435 Actions.push_back(RealCall.get()); 436 // Bail out if we've hit any errors, even if we managed to build the 437 // call. We don't want to produce more than one error. 438 return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred(); 439 } 440 441 Expr *getIndentString(unsigned Depth) { 442 if (!Depth) 443 return nullptr; 444 445 llvm::SmallString<32> Indent; 446 Indent.resize(Depth * Policy.Indentation, ' '); 447 return getStringLiteral(Indent); 448 } 449 450 Expr *getTypeString(QualType T) { 451 return getStringLiteral(T.getAsString(Policy)); 452 } 453 454 bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) { 455 llvm::raw_svector_ostream OS(Str); 456 457 // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather 458 // than trying to print a single character. 459 if (auto *BT = T->getAs<BuiltinType>()) { 460 switch (BT->getKind()) { 461 case BuiltinType::Bool: 462 OS << "%d"; 463 return true; 464 case BuiltinType::Char_U: 465 case BuiltinType::UChar: 466 OS << "%hhu"; 467 return true; 468 case BuiltinType::Char_S: 469 case BuiltinType::SChar: 470 OS << "%hhd"; 471 return true; 472 default: 473 break; 474 } 475 } 476 477 analyze_printf::PrintfSpecifier Specifier; 478 if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) { 479 // We were able to guess how to format this. 480 if (Specifier.getConversionSpecifier().getKind() == 481 analyze_printf::PrintfConversionSpecifier::sArg) { 482 // Wrap double-quotes around a '%s' specifier and limit its maximum 483 // length. Ideally we'd also somehow escape special characters in the 484 // contents but printf doesn't support that. 485 // FIXME: '%s' formatting is not safe in general. 486 OS << '"'; 487 Specifier.setPrecision(analyze_printf::OptionalAmount(32u)); 488 Specifier.toString(OS); 489 OS << '"'; 490 // FIXME: It would be nice to include a '...' if the string doesn't fit 491 // in the length limit. 492 } else { 493 Specifier.toString(OS); 494 } 495 return true; 496 } 497 498 if (T->isPointerType()) { 499 // Format all pointers with '%p'. 500 OS << "%p"; 501 return true; 502 } 503 504 return false; 505 } 506 507 bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) { 508 Expr *IndentLit = getIndentString(Depth); 509 Expr *TypeLit = getTypeString(S.Context.getRecordType(RD)); 510 if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit}) 511 : callPrintFunction("%s", {TypeLit})) 512 return true; 513 514 return dumpRecordValue(RD, E, IndentLit, Depth); 515 } 516 517 // Dump a record value. E should be a pointer or lvalue referring to an RD. 518 bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent, 519 unsigned Depth) { 520 // FIXME: Decide what to do if RD is a union. At least we should probably 521 // turn off printing `const char*` members with `%s`, because that is very 522 // likely to crash if that's not the active member. Whatever we decide, we 523 // should document it. 
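    // Illustrative (not part of the original source): for a field 'int n' at
    // depth 1, the code below ends up synthesizing roughly
    //   callback("%s%s %s = %d\n", "  ", "int", "n", (&obj)->n);
    // where 'callback' is the function passed as the second argument to
    // __builtin_dump_struct.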
524 525 // Build an OpaqueValueExpr so we can refer to E more than once without 526 // triggering re-evaluation. 527 Expr *RecordArg = makeOpaqueValueExpr(E); 528 bool RecordArgIsPtr = RecordArg->getType()->isPointerType(); 529 530 if (callPrintFunction(" {\n")) 531 return true; 532 533 // Dump each base class, regardless of whether they're aggregates. 534 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 535 for (const auto &Base : CXXRD->bases()) { 536 QualType BaseType = 537 RecordArgIsPtr ? S.Context.getPointerType(Base.getType()) 538 : S.Context.getLValueReferenceType(Base.getType()); 539 ExprResult BasePtr = S.BuildCStyleCastExpr( 540 Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc, 541 RecordArg); 542 if (BasePtr.isInvalid() || 543 dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(), 544 Depth + 1)) 545 return true; 546 } 547 } 548 549 Expr *FieldIndentArg = getIndentString(Depth + 1); 550 551 // Dump each field. 552 for (auto *D : RD->decls()) { 553 auto *IFD = dyn_cast<IndirectFieldDecl>(D); 554 auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D); 555 if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion()) 556 continue; 557 558 llvm::SmallString<20> Format = llvm::StringRef("%s%s %s "); 559 llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg, 560 getTypeString(FD->getType()), 561 getStringLiteral(FD->getName())}; 562 563 if (FD->isBitField()) { 564 Format += ": %zu "; 565 QualType SizeT = S.Context.getSizeType(); 566 llvm::APInt BitWidth(S.Context.getIntWidth(SizeT), 567 FD->getBitWidthValue(S.Context)); 568 Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc)); 569 } 570 571 Format += "="; 572 573 ExprResult Field = 574 IFD ? S.BuildAnonymousStructUnionMemberReference( 575 CXXScopeSpec(), Loc, IFD, 576 DeclAccessPair::make(IFD, AS_public), RecordArg, Loc) 577 : S.BuildFieldReferenceExpr( 578 RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD, 579 DeclAccessPair::make(FD, AS_public), 580 DeclarationNameInfo(FD->getDeclName(), Loc)); 581 if (Field.isInvalid()) 582 return true; 583 584 auto *InnerRD = FD->getType()->getAsRecordDecl(); 585 auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD); 586 if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) { 587 // Recursively print the values of members of aggregate record type. 588 if (callPrintFunction(Format, Args) || 589 dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1)) 590 return true; 591 } else { 592 Format += " "; 593 if (appendFormatSpecifier(FD->getType(), Format)) { 594 // We know how to print this field. 595 Args.push_back(Field.get()); 596 } else { 597 // We don't know how to print this field. Print out its address 598 // with a format specifier that a smart tool will be able to 599 // recognize and treat specially. 600 Format += "*%p"; 601 ExprResult FieldAddr = 602 S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get()); 603 if (FieldAddr.isInvalid()) 604 return true; 605 Args.push_back(FieldAddr.get()); 606 } 607 Format += "\n"; 608 if (callPrintFunction(Format, Args)) 609 return true; 610 } 611 } 612 613 return RecordIndent ? 
callPrintFunction("%s}\n", RecordIndent) 614 : callPrintFunction("}\n"); 615 } 616 617 Expr *buildWrapper() { 618 auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions, 619 PseudoObjectExpr::NoResult); 620 TheCall->setType(Wrapper->getType()); 621 TheCall->setValueKind(Wrapper->getValueKind()); 622 return Wrapper; 623 } 624 }; 625 } // namespace 626 627 static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) { 628 if (checkArgCountAtLeast(S, TheCall, 2)) 629 return ExprError(); 630 631 ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0)); 632 if (PtrArgResult.isInvalid()) 633 return ExprError(); 634 TheCall->setArg(0, PtrArgResult.get()); 635 636 // First argument should be a pointer to a struct. 637 QualType PtrArgType = PtrArgResult.get()->getType(); 638 if (!PtrArgType->isPointerType() || 639 !PtrArgType->getPointeeType()->isRecordType()) { 640 S.Diag(PtrArgResult.get()->getBeginLoc(), 641 diag::err_expected_struct_pointer_argument) 642 << 1 << TheCall->getDirectCallee() << PtrArgType; 643 return ExprError(); 644 } 645 const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl(); 646 647 // Second argument is a callable, but we can't fully validate it until we try 648 // calling it. 649 QualType FnArgType = TheCall->getArg(1)->getType(); 650 if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() && 651 !FnArgType->isBlockPointerType() && 652 !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) { 653 auto *BT = FnArgType->getAs<BuiltinType>(); 654 switch (BT ? BT->getKind() : BuiltinType::Void) { 655 case BuiltinType::Dependent: 656 case BuiltinType::Overload: 657 case BuiltinType::BoundMember: 658 case BuiltinType::PseudoObject: 659 case BuiltinType::UnknownAny: 660 case BuiltinType::BuiltinFn: 661 // This might be a callable. 662 break; 663 664 default: 665 S.Diag(TheCall->getArg(1)->getBeginLoc(), 666 diag::err_expected_callable_argument) 667 << 2 << TheCall->getDirectCallee() << FnArgType; 668 return ExprError(); 669 } 670 } 671 672 BuiltinDumpStructGenerator Generator(S, TheCall); 673 674 // Wrap parentheses around the given pointer. This is not necessary for 675 // correct code generation, but it means that when we pretty-print the call 676 // arguments in our diagnostics we will produce '(&s)->n' instead of the 677 // incorrect '&s->n'. 
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of
  // the argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
762 using DiagnoseFunction = 763 llvm::function_ref<void(unsigned, unsigned, unsigned)>; 764 765 ComputeSizeFunction ComputeSizeArgument; 766 DiagnoseFunction Diagnose; 767 768 public: 769 ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument, 770 DiagnoseFunction Diagnose) 771 : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {} 772 773 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 774 const char *StartSpecifier, 775 unsigned specifierLen) override { 776 if (!FS.consumesDataArgument()) 777 return true; 778 779 unsigned NulByte = 0; 780 switch ((FS.getConversionSpecifier().getKind())) { 781 default: 782 return true; 783 case analyze_format_string::ConversionSpecifier::sArg: 784 case analyze_format_string::ConversionSpecifier::ScanListArg: 785 NulByte = 1; 786 break; 787 case analyze_format_string::ConversionSpecifier::cArg: 788 break; 789 } 790 791 analyze_format_string::OptionalAmount FW = FS.getFieldWidth(); 792 if (FW.getHowSpecified() != 793 analyze_format_string::OptionalAmount::HowSpecified::Constant) 794 return true; 795 796 unsigned SourceSize = FW.getConstantAmount() + NulByte; 797 798 Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex()); 799 if (!DestSizeAPS) 800 return true; 801 802 unsigned DestSize = DestSizeAPS->getZExtValue(); 803 804 if (DestSize < SourceSize) 805 Diagnose(FS.getArgIndex(), DestSize, SourceSize); 806 807 return true; 808 } 809 }; 810 811 class EstimateSizeFormatHandler 812 : public analyze_format_string::FormatStringHandler { 813 size_t Size; 814 815 public: 816 EstimateSizeFormatHandler(StringRef Format) 817 : Size(std::min(Format.find(0), Format.size()) + 818 1 /* null byte always written by sprintf */) {} 819 820 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 821 const char *, unsigned SpecifierLen, 822 const TargetInfo &) override { 823 824 const size_t FieldWidth = computeFieldWidth(FS); 825 const size_t Precision = computePrecision(FS); 826 827 // The actual format. 828 switch (FS.getConversionSpecifier().getKind()) { 829 // Just a char. 830 case analyze_format_string::ConversionSpecifier::cArg: 831 case analyze_format_string::ConversionSpecifier::CArg: 832 Size += std::max(FieldWidth, (size_t)1); 833 break; 834 // Just an integer. 835 case analyze_format_string::ConversionSpecifier::dArg: 836 case analyze_format_string::ConversionSpecifier::DArg: 837 case analyze_format_string::ConversionSpecifier::iArg: 838 case analyze_format_string::ConversionSpecifier::oArg: 839 case analyze_format_string::ConversionSpecifier::OArg: 840 case analyze_format_string::ConversionSpecifier::uArg: 841 case analyze_format_string::ConversionSpecifier::UArg: 842 case analyze_format_string::ConversionSpecifier::xArg: 843 case analyze_format_string::ConversionSpecifier::XArg: 844 Size += std::max(FieldWidth, Precision); 845 break; 846 847 // %g style conversion switches between %f or %e style dynamically. 848 // %f always takes less space, so default to it. 849 case analyze_format_string::ConversionSpecifier::gArg: 850 case analyze_format_string::ConversionSpecifier::GArg: 851 852 // Floating point number in the form '[+]ddd.ddd'. 853 case analyze_format_string::ConversionSpecifier::fArg: 854 case analyze_format_string::ConversionSpecifier::FArg: 855 Size += std::max(FieldWidth, 1 /* integer part */ + 856 (Precision ? 1 + Precision 857 : 0) /* period + decimal */); 858 break; 859 860 // Floating point number in the form '[-]d.ddde[+-]dd'. 
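    // For example (not part of the original source), "%e" with the default
    // precision of 6 contributes at least 1 + 1 + 6 + 1 + 2 = 11 bytes: one
    // integer digit, the period, six decimals, the 'e', and a two-digit
    // exponent.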
861 case analyze_format_string::ConversionSpecifier::eArg: 862 case analyze_format_string::ConversionSpecifier::EArg: 863 Size += 864 std::max(FieldWidth, 865 1 /* integer part */ + 866 (Precision ? 1 + Precision : 0) /* period + decimal */ + 867 1 /* e or E letter */ + 2 /* exponent */); 868 break; 869 870 // Floating point number in the form '[-]0xh.hhhhp±dd'. 871 case analyze_format_string::ConversionSpecifier::aArg: 872 case analyze_format_string::ConversionSpecifier::AArg: 873 Size += 874 std::max(FieldWidth, 875 2 /* 0x */ + 1 /* integer part */ + 876 (Precision ? 1 + Precision : 0) /* period + decimal */ + 877 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */); 878 break; 879 880 // Just a string. 881 case analyze_format_string::ConversionSpecifier::sArg: 882 case analyze_format_string::ConversionSpecifier::SArg: 883 Size += FieldWidth; 884 break; 885 886 // Just a pointer in the form '0xddd'. 887 case analyze_format_string::ConversionSpecifier::pArg: 888 Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision); 889 break; 890 891 // A plain percent. 892 case analyze_format_string::ConversionSpecifier::PercentArg: 893 Size += 1; 894 break; 895 896 default: 897 break; 898 } 899 900 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix(); 901 902 if (FS.hasAlternativeForm()) { 903 switch (FS.getConversionSpecifier().getKind()) { 904 default: 905 break; 906 // Force a leading '0'. 907 case analyze_format_string::ConversionSpecifier::oArg: 908 Size += 1; 909 break; 910 // Force a leading '0x'. 911 case analyze_format_string::ConversionSpecifier::xArg: 912 case analyze_format_string::ConversionSpecifier::XArg: 913 Size += 2; 914 break; 915 // Force a period '.' before decimal, even if precision is 0. 916 case analyze_format_string::ConversionSpecifier::aArg: 917 case analyze_format_string::ConversionSpecifier::AArg: 918 case analyze_format_string::ConversionSpecifier::eArg: 919 case analyze_format_string::ConversionSpecifier::EArg: 920 case analyze_format_string::ConversionSpecifier::fArg: 921 case analyze_format_string::ConversionSpecifier::FArg: 922 case analyze_format_string::ConversionSpecifier::gArg: 923 case analyze_format_string::ConversionSpecifier::GArg: 924 Size += (Precision ? 0 : 1); 925 break; 926 } 927 } 928 assert(SpecifierLen <= Size && "no underflow"); 929 Size -= SpecifierLen; 930 return true; 931 } 932 933 size_t getSizeLowerBound() const { return Size; } 934 935 private: 936 static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) { 937 const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth(); 938 size_t FieldWidth = 0; 939 if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant) 940 FieldWidth = FW.getConstantAmount(); 941 return FieldWidth; 942 } 943 944 static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) { 945 const analyze_format_string::OptionalAmount &FW = FS.getPrecision(); 946 size_t Precision = 0; 947 948 // See man 3 printf for default precision value based on the specifier. 
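    // For instance (not part of the original source), a plain "%d" defaults to
    // precision 1 and a plain "%f" to precision 6 in the switch below.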
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ?
DABAttr->argIndices_begin()[Index] 1026 : Index - DABIndices + FD->getNumParams(); 1027 if (NewIndex >= TheCall->getNumArgs()) 1028 return llvm::None; 1029 return NewIndex; 1030 }; 1031 1032 auto ComputeExplicitObjectSizeArgument = 1033 [&](unsigned Index) -> Optional<llvm::APSInt> { 1034 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1035 if (!IndexOptional) 1036 return llvm::None; 1037 unsigned NewIndex = IndexOptional.getValue(); 1038 Expr::EvalResult Result; 1039 Expr *SizeArg = TheCall->getArg(NewIndex); 1040 if (!SizeArg->EvaluateAsInt(Result, getASTContext())) 1041 return llvm::None; 1042 llvm::APSInt Integer = Result.Val.getInt(); 1043 Integer.setIsUnsigned(true); 1044 return Integer; 1045 }; 1046 1047 auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 1048 // If the parameter has a pass_object_size attribute, then we should use its 1049 // (potentially) more strict checking mode. Otherwise, conservatively assume 1050 // type 0. 1051 int BOSType = 0; 1052 // This check can fail for variadic functions. 1053 if (Index < FD->getNumParams()) { 1054 if (const auto *POS = 1055 FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>()) 1056 BOSType = POS->getType(); 1057 } 1058 1059 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1060 if (!IndexOptional) 1061 return llvm::None; 1062 unsigned NewIndex = IndexOptional.getValue(); 1063 1064 const Expr *ObjArg = TheCall->getArg(NewIndex); 1065 uint64_t Result; 1066 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType)) 1067 return llvm::None; 1068 1069 // Get the object size in the target's size_t width. 1070 return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth); 1071 }; 1072 1073 auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 1074 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1075 if (!IndexOptional) 1076 return llvm::None; 1077 unsigned NewIndex = IndexOptional.getValue(); 1078 1079 const Expr *ObjArg = TheCall->getArg(NewIndex); 1080 uint64_t Result; 1081 if (!ObjArg->tryEvaluateStrLen(Result, getASTContext())) 1082 return llvm::None; 1083 // Add 1 for null byte. 1084 return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth); 1085 }; 1086 1087 Optional<llvm::APSInt> SourceSize; 1088 Optional<llvm::APSInt> DestinationSize; 1089 unsigned DiagID = 0; 1090 bool IsChkVariant = false; 1091 1092 auto GetFunctionName = [&]() { 1093 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID); 1094 // Skim off the details of whichever builtin was called to produce a better 1095 // diagnostic, as it's unlikely that the user wrote the __builtin 1096 // explicitly. 
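    // (Illustrative, not part of the original source: "__builtin___strcpy_chk"
    // is reported as "strcpy" and "__builtin_memcpy" as "memcpy".)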
1097 if (IsChkVariant) { 1098 FunctionName = FunctionName.drop_front(std::strlen("__builtin___")); 1099 FunctionName = FunctionName.drop_back(std::strlen("_chk")); 1100 } else if (FunctionName.startswith("__builtin_")) { 1101 FunctionName = FunctionName.drop_front(std::strlen("__builtin_")); 1102 } 1103 return FunctionName; 1104 }; 1105 1106 switch (BuiltinID) { 1107 default: 1108 return; 1109 case Builtin::BI__builtin_strcpy: 1110 case Builtin::BIstrcpy: { 1111 DiagID = diag::warn_fortify_strlen_overflow; 1112 SourceSize = ComputeStrLenArgument(1); 1113 DestinationSize = ComputeSizeArgument(0); 1114 break; 1115 } 1116 1117 case Builtin::BI__builtin___strcpy_chk: { 1118 DiagID = diag::warn_fortify_strlen_overflow; 1119 SourceSize = ComputeStrLenArgument(1); 1120 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1121 IsChkVariant = true; 1122 break; 1123 } 1124 1125 case Builtin::BIscanf: 1126 case Builtin::BIfscanf: 1127 case Builtin::BIsscanf: { 1128 unsigned FormatIndex = 1; 1129 unsigned DataIndex = 2; 1130 if (BuiltinID == Builtin::BIscanf) { 1131 FormatIndex = 0; 1132 DataIndex = 1; 1133 } 1134 1135 const auto *FormatExpr = 1136 TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1137 1138 const auto *Format = dyn_cast<StringLiteral>(FormatExpr); 1139 if (!Format) 1140 return; 1141 1142 if (!Format->isAscii() && !Format->isUTF8()) 1143 return; 1144 1145 auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize, 1146 unsigned SourceSize) { 1147 DiagID = diag::warn_fortify_scanf_overflow; 1148 unsigned Index = ArgIndex + DataIndex; 1149 StringRef FunctionName = GetFunctionName(); 1150 DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall, 1151 PDiag(DiagID) << FunctionName << (Index + 1) 1152 << DestSize << SourceSize); 1153 }; 1154 1155 StringRef FormatStrRef = Format->getString(); 1156 auto ShiftedComputeSizeArgument = [&](unsigned Index) { 1157 return ComputeSizeArgument(Index + DataIndex); 1158 }; 1159 ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose); 1160 const char *FormatBytes = FormatStrRef.data(); 1161 const ConstantArrayType *T = 1162 Context.getAsConstantArrayType(Format->getType()); 1163 assert(T && "String literal not of constant array type!"); 1164 size_t TypeSize = T->getSize().getZExtValue(); 1165 1166 // In case there's a null byte somewhere. 1167 size_t StrLen = 1168 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1169 1170 analyze_format_string::ParseScanfString(H, FormatBytes, 1171 FormatBytes + StrLen, getLangOpts(), 1172 Context.getTargetInfo()); 1173 1174 // Unlike the other cases, in this one we have already issued the diagnostic 1175 // here, so no need to continue (because unlike the other cases, here the 1176 // diagnostic refers to the argument number). 1177 return; 1178 } 1179 1180 case Builtin::BIsprintf: 1181 case Builtin::BI__builtin___sprintf_chk: { 1182 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 
1 : 3; 1183 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1184 1185 if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) { 1186 1187 if (!Format->isAscii() && !Format->isUTF8()) 1188 return; 1189 1190 StringRef FormatStrRef = Format->getString(); 1191 EstimateSizeFormatHandler H(FormatStrRef); 1192 const char *FormatBytes = FormatStrRef.data(); 1193 const ConstantArrayType *T = 1194 Context.getAsConstantArrayType(Format->getType()); 1195 assert(T && "String literal not of constant array type!"); 1196 size_t TypeSize = T->getSize().getZExtValue(); 1197 1198 // In case there's a null byte somewhere. 1199 size_t StrLen = 1200 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1201 if (!analyze_format_string::ParsePrintfString( 1202 H, FormatBytes, FormatBytes + StrLen, getLangOpts(), 1203 Context.getTargetInfo(), false)) { 1204 DiagID = diag::warn_fortify_source_format_overflow; 1205 SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound()) 1206 .extOrTrunc(SizeTypeWidth); 1207 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) { 1208 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1209 IsChkVariant = true; 1210 } else { 1211 DestinationSize = ComputeSizeArgument(0); 1212 } 1213 break; 1214 } 1215 } 1216 return; 1217 } 1218 case Builtin::BI__builtin___memcpy_chk: 1219 case Builtin::BI__builtin___memmove_chk: 1220 case Builtin::BI__builtin___memset_chk: 1221 case Builtin::BI__builtin___strlcat_chk: 1222 case Builtin::BI__builtin___strlcpy_chk: 1223 case Builtin::BI__builtin___strncat_chk: 1224 case Builtin::BI__builtin___strncpy_chk: 1225 case Builtin::BI__builtin___stpncpy_chk: 1226 case Builtin::BI__builtin___memccpy_chk: 1227 case Builtin::BI__builtin___mempcpy_chk: { 1228 DiagID = diag::warn_builtin_chk_overflow; 1229 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2); 1230 DestinationSize = 1231 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1232 IsChkVariant = true; 1233 break; 1234 } 1235 1236 case Builtin::BI__builtin___snprintf_chk: 1237 case Builtin::BI__builtin___vsnprintf_chk: { 1238 DiagID = diag::warn_builtin_chk_overflow; 1239 SourceSize = ComputeExplicitObjectSizeArgument(1); 1240 DestinationSize = ComputeExplicitObjectSizeArgument(3); 1241 IsChkVariant = true; 1242 break; 1243 } 1244 1245 case Builtin::BIstrncat: 1246 case Builtin::BI__builtin_strncat: 1247 case Builtin::BIstrncpy: 1248 case Builtin::BI__builtin_strncpy: 1249 case Builtin::BIstpncpy: 1250 case Builtin::BI__builtin_stpncpy: { 1251 // Whether these functions overflow depends on the runtime strlen of the 1252 // string, not just the buffer size, so emitting the "always overflow" 1253 // diagnostic isn't quite right. We should still diagnose passing a buffer 1254 // size larger than the destination buffer though; this is a runtime abort 1255 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise. 
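    // (Illustrative, not part of the original source: 'char Buf[4];
    // strncpy(Buf, Src, 8);' is flagged here because the bound 8 exceeds the
    // destination object size of 4.)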
1256 DiagID = diag::warn_fortify_source_size_mismatch; 1257 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1258 DestinationSize = ComputeSizeArgument(0); 1259 break; 1260 } 1261 1262 case Builtin::BImemcpy: 1263 case Builtin::BI__builtin_memcpy: 1264 case Builtin::BImemmove: 1265 case Builtin::BI__builtin_memmove: 1266 case Builtin::BImemset: 1267 case Builtin::BI__builtin_memset: 1268 case Builtin::BImempcpy: 1269 case Builtin::BI__builtin_mempcpy: { 1270 DiagID = diag::warn_fortify_source_overflow; 1271 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1272 DestinationSize = ComputeSizeArgument(0); 1273 break; 1274 } 1275 case Builtin::BIsnprintf: 1276 case Builtin::BI__builtin_snprintf: 1277 case Builtin::BIvsnprintf: 1278 case Builtin::BI__builtin_vsnprintf: { 1279 DiagID = diag::warn_fortify_source_size_mismatch; 1280 SourceSize = ComputeExplicitObjectSizeArgument(1); 1281 DestinationSize = ComputeSizeArgument(0); 1282 break; 1283 } 1284 } 1285 1286 if (!SourceSize || !DestinationSize || 1287 llvm::APSInt::compareValues(SourceSize.getValue(), 1288 DestinationSize.getValue()) <= 0) 1289 return; 1290 1291 StringRef FunctionName = GetFunctionName(); 1292 1293 SmallString<16> DestinationStr; 1294 SmallString<16> SourceStr; 1295 DestinationSize->toString(DestinationStr, /*Radix=*/10); 1296 SourceSize->toString(SourceStr, /*Radix=*/10); 1297 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 1298 PDiag(DiagID) 1299 << FunctionName << DestinationStr << SourceStr); 1300 } 1301 1302 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, 1303 Scope::ScopeFlags NeededScopeFlags, 1304 unsigned DiagID) { 1305 // Scopes aren't available during instantiation. Fortunately, builtin 1306 // functions cannot be template args so they cannot be formed through template 1307 // instantiation. Therefore checking once during the parse is sufficient. 1308 if (SemaRef.inTemplateInstantiation()) 1309 return false; 1310 1311 Scope *S = SemaRef.getCurScope(); 1312 while (S && !S->isSEHExceptScope()) 1313 S = S->getParent(); 1314 if (!S || !(S->getFlags() & NeededScopeFlags)) { 1315 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 1316 SemaRef.Diag(TheCall->getExprLoc(), DiagID) 1317 << DRE->getDecl()->getIdentifier(); 1318 return true; 1319 } 1320 1321 return false; 1322 } 1323 1324 static inline bool isBlockPointer(Expr *Arg) { 1325 return Arg->getType()->isBlockPointerType(); 1326 } 1327 1328 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local 1329 /// void*, which is a requirement of device side enqueue. 1330 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) { 1331 const BlockPointerType *BPT = 1332 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1333 ArrayRef<QualType> Params = 1334 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes(); 1335 unsigned ArgCounter = 0; 1336 bool IllegalParams = false; 1337 // Iterate through the block parameters until either one is found that is not 1338 // a local void*, or the block is valid. 1339 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end(); 1340 I != E; ++I, ++ArgCounter) { 1341 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() || 1342 (*I)->getPointeeType().getQualifiers().getAddressSpace() != 1343 LangAS::opencl_local) { 1344 // Get the location of the error. 
If a block literal has been passed 1345 // (BlockExpr) then we can point straight to the offending argument, 1346 // else we just point to the variable reference. 1347 SourceLocation ErrorLoc; 1348 if (isa<BlockExpr>(BlockArg)) { 1349 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl(); 1350 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc(); 1351 } else if (isa<DeclRefExpr>(BlockArg)) { 1352 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc(); 1353 } 1354 S.Diag(ErrorLoc, 1355 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args); 1356 IllegalParams = true; 1357 } 1358 } 1359 1360 return IllegalParams; 1361 } 1362 1363 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) { 1364 // OpenCL device can support extension but not the feature as extension 1365 // requires subgroup independent forward progress, but subgroup independent 1366 // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature. 1367 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) && 1368 !S.getOpenCLOptions().isSupported("__opencl_c_subgroups", 1369 S.getLangOpts())) { 1370 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension) 1371 << 1 << Call->getDirectCallee() 1372 << "cl_khr_subgroups or __opencl_c_subgroups"; 1373 return true; 1374 } 1375 return false; 1376 } 1377 1378 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) { 1379 if (checkArgCount(S, TheCall, 2)) 1380 return true; 1381 1382 if (checkOpenCLSubgroupExt(S, TheCall)) 1383 return true; 1384 1385 // First argument is an ndrange_t type. 1386 Expr *NDRangeArg = TheCall->getArg(0); 1387 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 1388 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1389 << TheCall->getDirectCallee() << "'ndrange_t'"; 1390 return true; 1391 } 1392 1393 Expr *BlockArg = TheCall->getArg(1); 1394 if (!isBlockPointer(BlockArg)) { 1395 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1396 << TheCall->getDirectCallee() << "block"; 1397 return true; 1398 } 1399 return checkOpenCLBlockArgs(S, BlockArg); 1400 } 1401 1402 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the 1403 /// get_kernel_work_group_size 1404 /// and get_kernel_preferred_work_group_size_multiple builtin functions. 1405 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) { 1406 if (checkArgCount(S, TheCall, 1)) 1407 return true; 1408 1409 Expr *BlockArg = TheCall->getArg(0); 1410 if (!isBlockPointer(BlockArg)) { 1411 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1412 << TheCall->getDirectCallee() << "block"; 1413 return true; 1414 } 1415 return checkOpenCLBlockArgs(S, BlockArg); 1416 } 1417 1418 /// Diagnose integer type and any valid implicit conversion to it. 1419 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, 1420 const QualType &IntType); 1421 1422 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall, 1423 unsigned Start, unsigned End) { 1424 bool IllegalParams = false; 1425 for (unsigned I = Start; I <= End; ++I) 1426 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I), 1427 S.Context.getSizeType()); 1428 return IllegalParams; 1429 } 1430 1431 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all 1432 /// 'local void*' parameter of passed block. 
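/// Illustrative (not part of the original source): for a block taking two
/// 'local void *' parameters, the caller must pass two trailing size
/// arguments, e.g.
///   enqueue_kernel(Queue, Flags, NDRange,
///                  ^(local void *A, local void *B) { /* ... */ }, 64, 128);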
1433 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall, 1434 Expr *BlockArg, 1435 unsigned NumNonVarArgs) { 1436 const BlockPointerType *BPT = 1437 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1438 unsigned NumBlockParams = 1439 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams(); 1440 unsigned TotalNumArgs = TheCall->getNumArgs(); 1441 1442 // For each argument passed to the block, a corresponding uint needs to 1443 // be passed to describe the size of the local memory. 1444 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) { 1445 S.Diag(TheCall->getBeginLoc(), 1446 diag::err_opencl_enqueue_kernel_local_size_args); 1447 return true; 1448 } 1449 1450 // Check that the sizes of the local memory are specified by integers. 1451 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs, 1452 TotalNumArgs - 1); 1453 } 1454 1455 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different 1456 /// overload formats specified in Table 6.13.17.1. 1457 /// int enqueue_kernel(queue_t queue, 1458 /// kernel_enqueue_flags_t flags, 1459 /// const ndrange_t ndrange, 1460 /// void (^block)(void)) 1461 /// int enqueue_kernel(queue_t queue, 1462 /// kernel_enqueue_flags_t flags, 1463 /// const ndrange_t ndrange, 1464 /// uint num_events_in_wait_list, 1465 /// clk_event_t *event_wait_list, 1466 /// clk_event_t *event_ret, 1467 /// void (^block)(void)) 1468 /// int enqueue_kernel(queue_t queue, 1469 /// kernel_enqueue_flags_t flags, 1470 /// const ndrange_t ndrange, 1471 /// void (^block)(local void*, ...), 1472 /// uint size0, ...) 1473 /// int enqueue_kernel(queue_t queue, 1474 /// kernel_enqueue_flags_t flags, 1475 /// const ndrange_t ndrange, 1476 /// uint num_events_in_wait_list, 1477 /// clk_event_t *event_wait_list, 1478 /// clk_event_t *event_ret, 1479 /// void (^block)(local void*, ...), 1480 /// uint size0, ...) 1481 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) { 1482 unsigned NumArgs = TheCall->getNumArgs(); 1483 1484 if (NumArgs < 4) { 1485 S.Diag(TheCall->getBeginLoc(), 1486 diag::err_typecheck_call_too_few_args_at_least) 1487 << 0 << 4 << NumArgs; 1488 return true; 1489 } 1490 1491 Expr *Arg0 = TheCall->getArg(0); 1492 Expr *Arg1 = TheCall->getArg(1); 1493 Expr *Arg2 = TheCall->getArg(2); 1494 Expr *Arg3 = TheCall->getArg(3); 1495 1496 // First argument always needs to be a queue_t type. 1497 if (!Arg0->getType()->isQueueT()) { 1498 S.Diag(TheCall->getArg(0)->getBeginLoc(), 1499 diag::err_opencl_builtin_expected_type) 1500 << TheCall->getDirectCallee() << S.Context.OCLQueueTy; 1501 return true; 1502 } 1503 1504 // Second argument always needs to be a kernel_enqueue_flags_t enum value. 1505 if (!Arg1->getType()->isIntegerType()) { 1506 S.Diag(TheCall->getArg(1)->getBeginLoc(), 1507 diag::err_opencl_builtin_expected_type) 1508 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)"; 1509 return true; 1510 } 1511 1512 // Third argument is always an ndrange_t type. 1513 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 1514 S.Diag(TheCall->getArg(2)->getBeginLoc(), 1515 diag::err_opencl_builtin_expected_type) 1516 << TheCall->getDirectCallee() << "'ndrange_t'"; 1517 return true; 1518 } 1519 1520 // With four arguments, there is only one form that the function could be 1521 // called in: no events and no variable arguments. 1522 if (NumArgs == 4) { 1523 // check that the last argument is the right block type. 
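    // (Illustrative, not part of the original source: the four-argument form
    //  is
    //    enqueue_kernel(Queue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, NDRange,
    //                   ^(void) { /* ... */ });
    //  i.e. the block must take no parameters.)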
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // We have a block type, check the prototype.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // We can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // Last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // Check the common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // Check the remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected, give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier is incompatible with the builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
1618 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be 1619 // read_only and write_only, and assumed to be read_only if no qualifier is 1620 // specified. 1621 switch (Call->getDirectCallee()->getBuiltinID()) { 1622 case Builtin::BIread_pipe: 1623 case Builtin::BIreserve_read_pipe: 1624 case Builtin::BIcommit_read_pipe: 1625 case Builtin::BIwork_group_reserve_read_pipe: 1626 case Builtin::BIsub_group_reserve_read_pipe: 1627 case Builtin::BIwork_group_commit_read_pipe: 1628 case Builtin::BIsub_group_commit_read_pipe: 1629 if (!(!AccessQual || AccessQual->isReadOnly())) { 1630 S.Diag(Arg0->getBeginLoc(), 1631 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1632 << "read_only" << Arg0->getSourceRange(); 1633 return true; 1634 } 1635 break; 1636 case Builtin::BIwrite_pipe: 1637 case Builtin::BIreserve_write_pipe: 1638 case Builtin::BIcommit_write_pipe: 1639 case Builtin::BIwork_group_reserve_write_pipe: 1640 case Builtin::BIsub_group_reserve_write_pipe: 1641 case Builtin::BIwork_group_commit_write_pipe: 1642 case Builtin::BIsub_group_commit_write_pipe: 1643 if (!(AccessQual && AccessQual->isWriteOnly())) { 1644 S.Diag(Arg0->getBeginLoc(), 1645 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1646 << "write_only" << Arg0->getSourceRange(); 1647 return true; 1648 } 1649 break; 1650 default: 1651 break; 1652 } 1653 return false; 1654 } 1655 1656 /// Returns true if pipe element type is different from the pointer. 1657 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { 1658 const Expr *Arg0 = Call->getArg(0); 1659 const Expr *ArgIdx = Call->getArg(Idx); 1660 const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); 1661 const QualType EltTy = PipeTy->getElementType(); 1662 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); 1663 // The Idx argument should be a pointer and the type of the pointer and 1664 // the type of pipe element should also be the same. 1665 if (!ArgTy || 1666 !S.Context.hasSameType( 1667 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { 1668 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1669 << Call->getDirectCallee() << S.Context.getPointerType(EltTy) 1670 << ArgIdx->getType() << ArgIdx->getSourceRange(); 1671 return true; 1672 } 1673 return false; 1674 } 1675 1676 // Performs semantic analysis for the read/write_pipe call. 1677 // \param S Reference to the semantic analyzer. 1678 // \param Call A pointer to the builtin call. 1679 // \return True if a semantic error has been found, false otherwise. 1680 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { 1681 // OpenCL v2.0 s6.13.16.2 - The built-in read/write 1682 // functions have two forms. 1683 switch (Call->getNumArgs()) { 1684 case 2: 1685 if (checkOpenCLPipeArg(S, Call)) 1686 return true; 1687 // The call with 2 arguments should be 1688 // read/write_pipe(pipe T, T*). 1689 // Check packet type T. 1690 if (checkOpenCLPipePacketType(S, Call, 1)) 1691 return true; 1692 break; 1693 1694 case 4: { 1695 if (checkOpenCLPipeArg(S, Call)) 1696 return true; 1697 // The call with 4 arguments should be 1698 // read/write_pipe(pipe T, reserve_id_t, uint, T*). 1699 // Check reserve_id_t. 1700 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1701 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1702 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1703 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1704 return true; 1705 } 1706 1707 // Check the index. 
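    // Illustrative only (`p`, `rid` and `v` are placeholder names): the
    // four-argument form being validated here corresponds to OpenCL C such as
    //   reserve_id_t rid = reserve_read_pipe(p, 1);
    //   int v;
    //   read_pipe(p, rid, 0u, &v);
    // where `0u` is the index argument checked just below.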
1708     const Expr *Arg2 = Call->getArg(2);
1709     if (!Arg2->getType()->isIntegerType() &&
1710         !Arg2->getType()->isUnsignedIntegerType()) {
1711       S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1712           << Call->getDirectCallee() << S.Context.UnsignedIntTy
1713           << Arg2->getType() << Arg2->getSourceRange();
1714       return true;
1715     }
1716 
1717     // Check packet type T.
1718     if (checkOpenCLPipePacketType(S, Call, 3))
1719       return true;
1720   } break;
1721   default:
1722     S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1723         << Call->getDirectCallee() << Call->getSourceRange();
1724     return true;
1725   }
1726 
1727   return false;
1728 }
1729 
1730 // Performs semantic analysis on the {work_group_|sub_group_|}reserve_{read|write}_pipe
1731 // built-in calls.
1732 // \param S Reference to the semantic analyzer.
1733 // \param Call The call to the builtin function to be analyzed.
1734 // \return True if a semantic error was found, false otherwise.
1735 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1736   if (checkArgCount(S, Call, 2))
1737     return true;
1738 
1739   if (checkOpenCLPipeArg(S, Call))
1740     return true;
1741 
1742   // Check the reserve size.
1743   if (!Call->getArg(1)->getType()->isIntegerType() &&
1744       !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
1745     S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1746         << Call->getDirectCallee() << S.Context.UnsignedIntTy
1747         << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1748     return true;
1749   }
1750 
1751   // Since the return type of the reserve_read/write_pipe built-ins is
1752   // reserve_id_t, which is not defined in the builtin def file, we use int
1753   // as the return type there and need to override it for these calls.
1754   Call->setType(S.Context.OCLReserveIDTy);
1755 
1756   return false;
1757 }
1758 
1759 // Performs semantic analysis on the {work_group_|sub_group_|}commit_{read|write}_pipe
1760 // built-in calls.
1761 // \param S Reference to the semantic analyzer.
1762 // \param Call The call to the builtin function to be analyzed.
1763 // \return True if a semantic error was found, false otherwise.
1764 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1765   if (checkArgCount(S, Call, 2))
1766     return true;
1767 
1768   if (checkOpenCLPipeArg(S, Call))
1769     return true;
1770 
1771   // Check reserve_id_t.
1772   if (!Call->getArg(1)->getType()->isReserveIDT()) {
1773     S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1774         << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1775         << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1776     return true;
1777   }
1778 
1779   return false;
1780 }
1781 
1782 // Performs semantic analysis on calls to the built-in pipe
1783 // query functions.
1784 // \param S Reference to the semantic analyzer.
1785 // \param Call The call to the builtin function to be analyzed.
1786 // \return True if a semantic error was found, false otherwise.
1787 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1788   if (checkArgCount(S, Call, 1))
1789     return true;
1790 
1791   if (!Call->getArg(0)->getType()->isPipeType()) {
1792     S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1793         << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1794     return true;
1795   }
1796 
1797   return false;
1798 }
1799 
1800 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1801 // Performs semantic analysis for the to_global/local/private call.
1802 // \param S Reference to the semantic analyzer.
1803 // \param BuiltinID ID of the builtin function. 1804 // \param Call A pointer to the builtin call. 1805 // \return True if a semantic error has been found, false otherwise. 1806 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 1807 CallExpr *Call) { 1808 if (checkArgCount(S, Call, 1)) 1809 return true; 1810 1811 auto RT = Call->getArg(0)->getType(); 1812 if (!RT->isPointerType() || RT->getPointeeType() 1813 .getAddressSpace() == LangAS::opencl_constant) { 1814 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 1815 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 1816 return true; 1817 } 1818 1819 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 1820 S.Diag(Call->getArg(0)->getBeginLoc(), 1821 diag::warn_opencl_generic_address_space_arg) 1822 << Call->getDirectCallee()->getNameInfo().getAsString() 1823 << Call->getArg(0)->getSourceRange(); 1824 } 1825 1826 RT = RT->getPointeeType(); 1827 auto Qual = RT.getQualifiers(); 1828 switch (BuiltinID) { 1829 case Builtin::BIto_global: 1830 Qual.setAddressSpace(LangAS::opencl_global); 1831 break; 1832 case Builtin::BIto_local: 1833 Qual.setAddressSpace(LangAS::opencl_local); 1834 break; 1835 case Builtin::BIto_private: 1836 Qual.setAddressSpace(LangAS::opencl_private); 1837 break; 1838 default: 1839 llvm_unreachable("Invalid builtin function"); 1840 } 1841 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 1842 RT.getUnqualifiedType(), Qual))); 1843 1844 return false; 1845 } 1846 1847 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { 1848 if (checkArgCount(S, TheCall, 1)) 1849 return ExprError(); 1850 1851 // Compute __builtin_launder's parameter type from the argument. 1852 // The parameter type is: 1853 // * The type of the argument if it's not an array or function type, 1854 // Otherwise, 1855 // * The decayed argument type. 1856 QualType ParamTy = [&]() { 1857 QualType ArgTy = TheCall->getArg(0)->getType(); 1858 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe()) 1859 return S.Context.getPointerType(Ty->getElementType()); 1860 if (ArgTy->isFunctionType()) { 1861 return S.Context.getPointerType(ArgTy); 1862 } 1863 return ArgTy; 1864 }(); 1865 1866 TheCall->setType(ParamTy); 1867 1868 auto DiagSelect = [&]() -> llvm::Optional<unsigned> { 1869 if (!ParamTy->isPointerType()) 1870 return 0; 1871 if (ParamTy->isFunctionPointerType()) 1872 return 1; 1873 if (ParamTy->isVoidPointerType()) 1874 return 2; 1875 return llvm::Optional<unsigned>{}; 1876 }(); 1877 if (DiagSelect.hasValue()) { 1878 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) 1879 << DiagSelect.getValue() << TheCall->getSourceRange(); 1880 return ExprError(); 1881 } 1882 1883 // We either have an incomplete class type, or we have a class template 1884 // whose instantiation has not been forced. 
Example: 1885 // 1886 // template <class T> struct Foo { T value; }; 1887 // Foo<int> *p = nullptr; 1888 // auto *d = __builtin_launder(p); 1889 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 1890 diag::err_incomplete_type)) 1891 return ExprError(); 1892 1893 assert(ParamTy->getPointeeType()->isObjectType() && 1894 "Unhandled non-object pointer case"); 1895 1896 InitializedEntity Entity = 1897 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 1898 ExprResult Arg = 1899 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 1900 if (Arg.isInvalid()) 1901 return ExprError(); 1902 TheCall->setArg(0, Arg.get()); 1903 1904 return TheCall; 1905 } 1906 1907 // Emit an error and return true if the current object format type is in the 1908 // list of unsupported types. 1909 static bool CheckBuiltinTargetNotInUnsupported( 1910 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1911 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 1912 llvm::Triple::ObjectFormatType CurObjFormat = 1913 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 1914 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 1915 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1916 << TheCall->getSourceRange(); 1917 return true; 1918 } 1919 return false; 1920 } 1921 1922 // Emit an error and return true if the current architecture is not in the list 1923 // of supported architectures. 1924 static bool 1925 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1926 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1927 llvm::Triple::ArchType CurArch = 1928 S.getASTContext().getTargetInfo().getTriple().getArch(); 1929 if (llvm::is_contained(SupportedArchs, CurArch)) 1930 return false; 1931 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1932 << TheCall->getSourceRange(); 1933 return true; 1934 } 1935 1936 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1937 SourceLocation CallSiteLoc); 1938 1939 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1940 CallExpr *TheCall) { 1941 switch (TI.getTriple().getArch()) { 1942 default: 1943 // Some builtins don't require additional checking, so just consider these 1944 // acceptable. 
1945 return false; 1946 case llvm::Triple::arm: 1947 case llvm::Triple::armeb: 1948 case llvm::Triple::thumb: 1949 case llvm::Triple::thumbeb: 1950 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1951 case llvm::Triple::aarch64: 1952 case llvm::Triple::aarch64_32: 1953 case llvm::Triple::aarch64_be: 1954 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 1955 case llvm::Triple::bpfeb: 1956 case llvm::Triple::bpfel: 1957 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 1958 case llvm::Triple::hexagon: 1959 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 1960 case llvm::Triple::mips: 1961 case llvm::Triple::mipsel: 1962 case llvm::Triple::mips64: 1963 case llvm::Triple::mips64el: 1964 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 1965 case llvm::Triple::systemz: 1966 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 1967 case llvm::Triple::x86: 1968 case llvm::Triple::x86_64: 1969 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 1970 case llvm::Triple::ppc: 1971 case llvm::Triple::ppcle: 1972 case llvm::Triple::ppc64: 1973 case llvm::Triple::ppc64le: 1974 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 1975 case llvm::Triple::amdgcn: 1976 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 1977 case llvm::Triple::riscv32: 1978 case llvm::Triple::riscv64: 1979 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 1980 } 1981 } 1982 1983 ExprResult 1984 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, 1985 CallExpr *TheCall) { 1986 ExprResult TheCallResult(TheCall); 1987 1988 // Find out if any arguments are required to be integer constant expressions. 1989 unsigned ICEArguments = 0; 1990 ASTContext::GetBuiltinTypeError Error; 1991 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); 1992 if (Error != ASTContext::GE_None) 1993 ICEArguments = 0; // Don't diagnose previously diagnosed errors. 1994 1995 // If any arguments are required to be ICE's, check and diagnose. 1996 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 1997 // Skip arguments not required to be ICE's. 1998 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 1999 2000 llvm::APSInt Result; 2001 // If we don't have enough arguments, continue so we can issue better 2002 // diagnostic in checkArgCount(...) 2003 if (ArgNo < TheCall->getNumArgs() && 2004 SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 2005 return true; 2006 ICEArguments &= ~(1 << ArgNo); 2007 } 2008 2009 switch (BuiltinID) { 2010 case Builtin::BI__builtin___CFStringMakeConstantString: 2011 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 2012 // on z/OS) and for XCOFF (i.e., on AIX). 
Emit unsupported 2013 if (CheckBuiltinTargetNotInUnsupported( 2014 *this, BuiltinID, TheCall, 2015 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 2016 return ExprError(); 2017 assert(TheCall->getNumArgs() == 1 && 2018 "Wrong # arguments to builtin CFStringMakeConstantString"); 2019 if (CheckObjCString(TheCall->getArg(0))) 2020 return ExprError(); 2021 break; 2022 case Builtin::BI__builtin_ms_va_start: 2023 case Builtin::BI__builtin_stdarg_start: 2024 case Builtin::BI__builtin_va_start: 2025 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2026 return ExprError(); 2027 break; 2028 case Builtin::BI__va_start: { 2029 switch (Context.getTargetInfo().getTriple().getArch()) { 2030 case llvm::Triple::aarch64: 2031 case llvm::Triple::arm: 2032 case llvm::Triple::thumb: 2033 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 2034 return ExprError(); 2035 break; 2036 default: 2037 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2038 return ExprError(); 2039 break; 2040 } 2041 break; 2042 } 2043 2044 // The acquire, release, and no fence variants are ARM and AArch64 only. 2045 case Builtin::BI_interlockedbittestandset_acq: 2046 case Builtin::BI_interlockedbittestandset_rel: 2047 case Builtin::BI_interlockedbittestandset_nf: 2048 case Builtin::BI_interlockedbittestandreset_acq: 2049 case Builtin::BI_interlockedbittestandreset_rel: 2050 case Builtin::BI_interlockedbittestandreset_nf: 2051 if (CheckBuiltinTargetInSupported( 2052 *this, BuiltinID, TheCall, 2053 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 2054 return ExprError(); 2055 break; 2056 2057 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 2058 case Builtin::BI_bittest64: 2059 case Builtin::BI_bittestandcomplement64: 2060 case Builtin::BI_bittestandreset64: 2061 case Builtin::BI_bittestandset64: 2062 case Builtin::BI_interlockedbittestandreset64: 2063 case Builtin::BI_interlockedbittestandset64: 2064 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2065 {llvm::Triple::x86_64, llvm::Triple::arm, 2066 llvm::Triple::thumb, 2067 llvm::Triple::aarch64})) 2068 return ExprError(); 2069 break; 2070 2071 case Builtin::BI__builtin_isgreater: 2072 case Builtin::BI__builtin_isgreaterequal: 2073 case Builtin::BI__builtin_isless: 2074 case Builtin::BI__builtin_islessequal: 2075 case Builtin::BI__builtin_islessgreater: 2076 case Builtin::BI__builtin_isunordered: 2077 if (SemaBuiltinUnorderedCompare(TheCall)) 2078 return ExprError(); 2079 break; 2080 case Builtin::BI__builtin_fpclassify: 2081 if (SemaBuiltinFPClassification(TheCall, 6)) 2082 return ExprError(); 2083 break; 2084 case Builtin::BI__builtin_isfinite: 2085 case Builtin::BI__builtin_isinf: 2086 case Builtin::BI__builtin_isinf_sign: 2087 case Builtin::BI__builtin_isnan: 2088 case Builtin::BI__builtin_isnormal: 2089 case Builtin::BI__builtin_signbit: 2090 case Builtin::BI__builtin_signbitf: 2091 case Builtin::BI__builtin_signbitl: 2092 if (SemaBuiltinFPClassification(TheCall, 1)) 2093 return ExprError(); 2094 break; 2095 case Builtin::BI__builtin_shufflevector: 2096 return SemaBuiltinShuffleVector(TheCall); 2097 // TheCall will be freed by the smart pointer here, but that's fine, since 2098 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 
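  // For reference, typical uses of the prefetch builtin handled next look
  // like the following (a sketch, not taken from this file):
  //   __builtin_prefetch(p);        // rw defaults to 0 (read), locality to 3
  //   __builtin_prefetch(p, 1, 0);  // write access, no temporal locality
  // SemaBuiltinPrefetch range-checks those trailing constants.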
2099 case Builtin::BI__builtin_prefetch: 2100 if (SemaBuiltinPrefetch(TheCall)) 2101 return ExprError(); 2102 break; 2103 case Builtin::BI__builtin_alloca_with_align: 2104 case Builtin::BI__builtin_alloca_with_align_uninitialized: 2105 if (SemaBuiltinAllocaWithAlign(TheCall)) 2106 return ExprError(); 2107 LLVM_FALLTHROUGH; 2108 case Builtin::BI__builtin_alloca: 2109 case Builtin::BI__builtin_alloca_uninitialized: 2110 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 2111 << TheCall->getDirectCallee(); 2112 break; 2113 case Builtin::BI__arithmetic_fence: 2114 if (SemaBuiltinArithmeticFence(TheCall)) 2115 return ExprError(); 2116 break; 2117 case Builtin::BI__assume: 2118 case Builtin::BI__builtin_assume: 2119 if (SemaBuiltinAssume(TheCall)) 2120 return ExprError(); 2121 break; 2122 case Builtin::BI__builtin_assume_aligned: 2123 if (SemaBuiltinAssumeAligned(TheCall)) 2124 return ExprError(); 2125 break; 2126 case Builtin::BI__builtin_dynamic_object_size: 2127 case Builtin::BI__builtin_object_size: 2128 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 2129 return ExprError(); 2130 break; 2131 case Builtin::BI__builtin_longjmp: 2132 if (SemaBuiltinLongjmp(TheCall)) 2133 return ExprError(); 2134 break; 2135 case Builtin::BI__builtin_setjmp: 2136 if (SemaBuiltinSetjmp(TheCall)) 2137 return ExprError(); 2138 break; 2139 case Builtin::BI__builtin_classify_type: 2140 if (checkArgCount(*this, TheCall, 1)) return true; 2141 TheCall->setType(Context.IntTy); 2142 break; 2143 case Builtin::BI__builtin_complex: 2144 if (SemaBuiltinComplex(TheCall)) 2145 return ExprError(); 2146 break; 2147 case Builtin::BI__builtin_constant_p: { 2148 if (checkArgCount(*this, TheCall, 1)) return true; 2149 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 2150 if (Arg.isInvalid()) return true; 2151 TheCall->setArg(0, Arg.get()); 2152 TheCall->setType(Context.IntTy); 2153 break; 2154 } 2155 case Builtin::BI__builtin_launder: 2156 return SemaBuiltinLaunder(*this, TheCall); 2157 case Builtin::BI__sync_fetch_and_add: 2158 case Builtin::BI__sync_fetch_and_add_1: 2159 case Builtin::BI__sync_fetch_and_add_2: 2160 case Builtin::BI__sync_fetch_and_add_4: 2161 case Builtin::BI__sync_fetch_and_add_8: 2162 case Builtin::BI__sync_fetch_and_add_16: 2163 case Builtin::BI__sync_fetch_and_sub: 2164 case Builtin::BI__sync_fetch_and_sub_1: 2165 case Builtin::BI__sync_fetch_and_sub_2: 2166 case Builtin::BI__sync_fetch_and_sub_4: 2167 case Builtin::BI__sync_fetch_and_sub_8: 2168 case Builtin::BI__sync_fetch_and_sub_16: 2169 case Builtin::BI__sync_fetch_and_or: 2170 case Builtin::BI__sync_fetch_and_or_1: 2171 case Builtin::BI__sync_fetch_and_or_2: 2172 case Builtin::BI__sync_fetch_and_or_4: 2173 case Builtin::BI__sync_fetch_and_or_8: 2174 case Builtin::BI__sync_fetch_and_or_16: 2175 case Builtin::BI__sync_fetch_and_and: 2176 case Builtin::BI__sync_fetch_and_and_1: 2177 case Builtin::BI__sync_fetch_and_and_2: 2178 case Builtin::BI__sync_fetch_and_and_4: 2179 case Builtin::BI__sync_fetch_and_and_8: 2180 case Builtin::BI__sync_fetch_and_and_16: 2181 case Builtin::BI__sync_fetch_and_xor: 2182 case Builtin::BI__sync_fetch_and_xor_1: 2183 case Builtin::BI__sync_fetch_and_xor_2: 2184 case Builtin::BI__sync_fetch_and_xor_4: 2185 case Builtin::BI__sync_fetch_and_xor_8: 2186 case Builtin::BI__sync_fetch_and_xor_16: 2187 case Builtin::BI__sync_fetch_and_nand: 2188 case Builtin::BI__sync_fetch_and_nand_1: 2189 case Builtin::BI__sync_fetch_and_nand_2: 2190 case Builtin::BI__sync_fetch_and_nand_4: 2191 case 
Builtin::BI__sync_fetch_and_nand_8: 2192 case Builtin::BI__sync_fetch_and_nand_16: 2193 case Builtin::BI__sync_add_and_fetch: 2194 case Builtin::BI__sync_add_and_fetch_1: 2195 case Builtin::BI__sync_add_and_fetch_2: 2196 case Builtin::BI__sync_add_and_fetch_4: 2197 case Builtin::BI__sync_add_and_fetch_8: 2198 case Builtin::BI__sync_add_and_fetch_16: 2199 case Builtin::BI__sync_sub_and_fetch: 2200 case Builtin::BI__sync_sub_and_fetch_1: 2201 case Builtin::BI__sync_sub_and_fetch_2: 2202 case Builtin::BI__sync_sub_and_fetch_4: 2203 case Builtin::BI__sync_sub_and_fetch_8: 2204 case Builtin::BI__sync_sub_and_fetch_16: 2205 case Builtin::BI__sync_and_and_fetch: 2206 case Builtin::BI__sync_and_and_fetch_1: 2207 case Builtin::BI__sync_and_and_fetch_2: 2208 case Builtin::BI__sync_and_and_fetch_4: 2209 case Builtin::BI__sync_and_and_fetch_8: 2210 case Builtin::BI__sync_and_and_fetch_16: 2211 case Builtin::BI__sync_or_and_fetch: 2212 case Builtin::BI__sync_or_and_fetch_1: 2213 case Builtin::BI__sync_or_and_fetch_2: 2214 case Builtin::BI__sync_or_and_fetch_4: 2215 case Builtin::BI__sync_or_and_fetch_8: 2216 case Builtin::BI__sync_or_and_fetch_16: 2217 case Builtin::BI__sync_xor_and_fetch: 2218 case Builtin::BI__sync_xor_and_fetch_1: 2219 case Builtin::BI__sync_xor_and_fetch_2: 2220 case Builtin::BI__sync_xor_and_fetch_4: 2221 case Builtin::BI__sync_xor_and_fetch_8: 2222 case Builtin::BI__sync_xor_and_fetch_16: 2223 case Builtin::BI__sync_nand_and_fetch: 2224 case Builtin::BI__sync_nand_and_fetch_1: 2225 case Builtin::BI__sync_nand_and_fetch_2: 2226 case Builtin::BI__sync_nand_and_fetch_4: 2227 case Builtin::BI__sync_nand_and_fetch_8: 2228 case Builtin::BI__sync_nand_and_fetch_16: 2229 case Builtin::BI__sync_val_compare_and_swap: 2230 case Builtin::BI__sync_val_compare_and_swap_1: 2231 case Builtin::BI__sync_val_compare_and_swap_2: 2232 case Builtin::BI__sync_val_compare_and_swap_4: 2233 case Builtin::BI__sync_val_compare_and_swap_8: 2234 case Builtin::BI__sync_val_compare_and_swap_16: 2235 case Builtin::BI__sync_bool_compare_and_swap: 2236 case Builtin::BI__sync_bool_compare_and_swap_1: 2237 case Builtin::BI__sync_bool_compare_and_swap_2: 2238 case Builtin::BI__sync_bool_compare_and_swap_4: 2239 case Builtin::BI__sync_bool_compare_and_swap_8: 2240 case Builtin::BI__sync_bool_compare_and_swap_16: 2241 case Builtin::BI__sync_lock_test_and_set: 2242 case Builtin::BI__sync_lock_test_and_set_1: 2243 case Builtin::BI__sync_lock_test_and_set_2: 2244 case Builtin::BI__sync_lock_test_and_set_4: 2245 case Builtin::BI__sync_lock_test_and_set_8: 2246 case Builtin::BI__sync_lock_test_and_set_16: 2247 case Builtin::BI__sync_lock_release: 2248 case Builtin::BI__sync_lock_release_1: 2249 case Builtin::BI__sync_lock_release_2: 2250 case Builtin::BI__sync_lock_release_4: 2251 case Builtin::BI__sync_lock_release_8: 2252 case Builtin::BI__sync_lock_release_16: 2253 case Builtin::BI__sync_swap: 2254 case Builtin::BI__sync_swap_1: 2255 case Builtin::BI__sync_swap_2: 2256 case Builtin::BI__sync_swap_4: 2257 case Builtin::BI__sync_swap_8: 2258 case Builtin::BI__sync_swap_16: 2259 return SemaBuiltinAtomicOverloaded(TheCallResult); 2260 case Builtin::BI__sync_synchronize: 2261 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 2262 << TheCall->getCallee()->getSourceRange(); 2263 break; 2264 case Builtin::BI__builtin_nontemporal_load: 2265 case Builtin::BI__builtin_nontemporal_store: 2266 return SemaBuiltinNontemporalOverloaded(TheCallResult); 2267 case Builtin::BI__builtin_memcpy_inline: { 2268 clang::Expr 
*SizeOp = TheCall->getArg(2); 2269 // We warn about copying to or from `nullptr` pointers when `size` is 2270 // greater than 0. When `size` is value dependent we cannot evaluate its 2271 // value so we bail out. 2272 if (SizeOp->isValueDependent()) 2273 break; 2274 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 2275 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2276 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 2277 } 2278 break; 2279 } 2280 case Builtin::BI__builtin_memset_inline: { 2281 clang::Expr *SizeOp = TheCall->getArg(2); 2282 // We warn about filling to `nullptr` pointers when `size` is greater than 2283 // 0. When `size` is value dependent we cannot evaluate its value so we bail 2284 // out. 2285 if (SizeOp->isValueDependent()) 2286 break; 2287 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) 2288 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2289 break; 2290 } 2291 #define BUILTIN(ID, TYPE, ATTRS) 2292 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 2293 case Builtin::BI##ID: \ 2294 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 2295 #include "clang/Basic/Builtins.def" 2296 case Builtin::BI__annotation: 2297 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 2298 return ExprError(); 2299 break; 2300 case Builtin::BI__builtin_annotation: 2301 if (SemaBuiltinAnnotation(*this, TheCall)) 2302 return ExprError(); 2303 break; 2304 case Builtin::BI__builtin_addressof: 2305 if (SemaBuiltinAddressof(*this, TheCall)) 2306 return ExprError(); 2307 break; 2308 case Builtin::BI__builtin_function_start: 2309 if (SemaBuiltinFunctionStart(*this, TheCall)) 2310 return ExprError(); 2311 break; 2312 case Builtin::BI__builtin_is_aligned: 2313 case Builtin::BI__builtin_align_up: 2314 case Builtin::BI__builtin_align_down: 2315 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 2316 return ExprError(); 2317 break; 2318 case Builtin::BI__builtin_add_overflow: 2319 case Builtin::BI__builtin_sub_overflow: 2320 case Builtin::BI__builtin_mul_overflow: 2321 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 2322 return ExprError(); 2323 break; 2324 case Builtin::BI__builtin_operator_new: 2325 case Builtin::BI__builtin_operator_delete: { 2326 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 2327 ExprResult Res = 2328 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 2329 if (Res.isInvalid()) 2330 CorrectDelayedTyposInExpr(TheCallResult.get()); 2331 return Res; 2332 } 2333 case Builtin::BI__builtin_dump_struct: 2334 return SemaBuiltinDumpStruct(*this, TheCall); 2335 case Builtin::BI__builtin_expect_with_probability: { 2336 // We first want to ensure we are called with 3 arguments 2337 if (checkArgCount(*this, TheCall, 3)) 2338 return ExprError(); 2339 // then check probability is constant float in range [0.0, 1.0] 2340 const Expr *ProbArg = TheCall->getArg(2); 2341 SmallVector<PartialDiagnosticAt, 8> Notes; 2342 Expr::EvalResult Eval; 2343 Eval.Diag = &Notes; 2344 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2345 !Eval.Val.isFloat()) { 2346 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2347 << ProbArg->getSourceRange(); 2348 for (const PartialDiagnosticAt &PDiag : Notes) 2349 Diag(PDiag.first, PDiag.second); 2350 return ExprError(); 2351 } 2352 llvm::APFloat Probability = Eval.Val.getFloat(); 2353 bool LoseInfo = false; 2354 Probability.convert(llvm::APFloat::IEEEdouble(), 2355 llvm::RoundingMode::Dynamic, &LoseInfo); 2356 if 
(!(Probability >= llvm::APFloat(0.0) && 2357 Probability <= llvm::APFloat(1.0))) { 2358 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2359 << ProbArg->getSourceRange(); 2360 return ExprError(); 2361 } 2362 break; 2363 } 2364 case Builtin::BI__builtin_preserve_access_index: 2365 if (SemaBuiltinPreserveAI(*this, TheCall)) 2366 return ExprError(); 2367 break; 2368 case Builtin::BI__builtin_call_with_static_chain: 2369 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2370 return ExprError(); 2371 break; 2372 case Builtin::BI__exception_code: 2373 case Builtin::BI_exception_code: 2374 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2375 diag::err_seh___except_block)) 2376 return ExprError(); 2377 break; 2378 case Builtin::BI__exception_info: 2379 case Builtin::BI_exception_info: 2380 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2381 diag::err_seh___except_filter)) 2382 return ExprError(); 2383 break; 2384 case Builtin::BI__GetExceptionInfo: 2385 if (checkArgCount(*this, TheCall, 1)) 2386 return ExprError(); 2387 2388 if (CheckCXXThrowOperand( 2389 TheCall->getBeginLoc(), 2390 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2391 TheCall)) 2392 return ExprError(); 2393 2394 TheCall->setType(Context.VoidPtrTy); 2395 break; 2396 case Builtin::BIaddressof: 2397 case Builtin::BI__addressof: 2398 case Builtin::BIforward: 2399 case Builtin::BImove: 2400 case Builtin::BImove_if_noexcept: 2401 case Builtin::BIas_const: { 2402 // These are all expected to be of the form 2403 // T &/&&/* f(U &/&&) 2404 // where T and U only differ in qualification. 2405 if (checkArgCount(*this, TheCall, 1)) 2406 return ExprError(); 2407 QualType Param = FDecl->getParamDecl(0)->getType(); 2408 QualType Result = FDecl->getReturnType(); 2409 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof || 2410 BuiltinID == Builtin::BI__addressof; 2411 if (!(Param->isReferenceType() && 2412 (ReturnsPointer ? Result->isPointerType() 2413 : Result->isReferenceType()) && 2414 Context.hasSameUnqualifiedType(Param->getPointeeType(), 2415 Result->getPointeeType()))) { 2416 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported) 2417 << FDecl; 2418 return ExprError(); 2419 } 2420 break; 2421 } 2422 // OpenCL v2.0, s6.13.16 - Pipe functions 2423 case Builtin::BIread_pipe: 2424 case Builtin::BIwrite_pipe: 2425 // Since those two functions are declared with var args, we need a semantic 2426 // check for the argument. 
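  // (Because the declarations are variadic, nothing up to this point has
  // type-checked the packet argument; e.g. an ill-typed call such as
  // `read_pipe(p, 42)` -- an illustrative snippet, not from this file -- is
  // only rejected by the check below.)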
2427 if (SemaBuiltinRWPipe(*this, TheCall)) 2428 return ExprError(); 2429 break; 2430 case Builtin::BIreserve_read_pipe: 2431 case Builtin::BIreserve_write_pipe: 2432 case Builtin::BIwork_group_reserve_read_pipe: 2433 case Builtin::BIwork_group_reserve_write_pipe: 2434 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2435 return ExprError(); 2436 break; 2437 case Builtin::BIsub_group_reserve_read_pipe: 2438 case Builtin::BIsub_group_reserve_write_pipe: 2439 if (checkOpenCLSubgroupExt(*this, TheCall) || 2440 SemaBuiltinReserveRWPipe(*this, TheCall)) 2441 return ExprError(); 2442 break; 2443 case Builtin::BIcommit_read_pipe: 2444 case Builtin::BIcommit_write_pipe: 2445 case Builtin::BIwork_group_commit_read_pipe: 2446 case Builtin::BIwork_group_commit_write_pipe: 2447 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2448 return ExprError(); 2449 break; 2450 case Builtin::BIsub_group_commit_read_pipe: 2451 case Builtin::BIsub_group_commit_write_pipe: 2452 if (checkOpenCLSubgroupExt(*this, TheCall) || 2453 SemaBuiltinCommitRWPipe(*this, TheCall)) 2454 return ExprError(); 2455 break; 2456 case Builtin::BIget_pipe_num_packets: 2457 case Builtin::BIget_pipe_max_packets: 2458 if (SemaBuiltinPipePackets(*this, TheCall)) 2459 return ExprError(); 2460 break; 2461 case Builtin::BIto_global: 2462 case Builtin::BIto_local: 2463 case Builtin::BIto_private: 2464 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2465 return ExprError(); 2466 break; 2467 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 2468 case Builtin::BIenqueue_kernel: 2469 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2470 return ExprError(); 2471 break; 2472 case Builtin::BIget_kernel_work_group_size: 2473 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2474 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2475 return ExprError(); 2476 break; 2477 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2478 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2479 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2480 return ExprError(); 2481 break; 2482 case Builtin::BI__builtin_os_log_format: 2483 Cleanup.setExprNeedsCleanups(true); 2484 LLVM_FALLTHROUGH; 2485 case Builtin::BI__builtin_os_log_format_buffer_size: 2486 if (SemaBuiltinOSLogFormat(TheCall)) 2487 return ExprError(); 2488 break; 2489 case Builtin::BI__builtin_frame_address: 2490 case Builtin::BI__builtin_return_address: { 2491 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2492 return ExprError(); 2493 2494 // -Wframe-address warning if non-zero passed to builtin 2495 // return/frame address. 2496 Expr::EvalResult Result; 2497 if (!TheCall->getArg(0)->isValueDependent() && 2498 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2499 Result.Val.getInt() != 0) 2500 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2501 << ((BuiltinID == Builtin::BI__builtin_return_address) 2502 ? "__builtin_return_address" 2503 : "__builtin_frame_address") 2504 << TheCall->getSourceRange(); 2505 break; 2506 } 2507 2508 // __builtin_elementwise_abs restricts the element type to signed integers or 2509 // floating point types only. 
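  // Illustrative usage (the vector typedef is an assumption for the example):
  //   typedef int int4 __attribute__((ext_vector_type(4)));
  //   int4 r = __builtin_elementwise_abs(v);   // OK: signed integer elements
  //   __builtin_elementwise_abs(1u);           // rejected by the check below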
2510 case Builtin::BI__builtin_elementwise_abs: { 2511 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2512 return ExprError(); 2513 2514 QualType ArgTy = TheCall->getArg(0)->getType(); 2515 QualType EltTy = ArgTy; 2516 2517 if (auto *VecTy = EltTy->getAs<VectorType>()) 2518 EltTy = VecTy->getElementType(); 2519 if (EltTy->isUnsignedIntegerType()) { 2520 Diag(TheCall->getArg(0)->getBeginLoc(), 2521 diag::err_builtin_invalid_arg_type) 2522 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2523 return ExprError(); 2524 } 2525 break; 2526 } 2527 2528 // These builtins restrict the element type to floating point 2529 // types only. 2530 case Builtin::BI__builtin_elementwise_ceil: 2531 case Builtin::BI__builtin_elementwise_floor: 2532 case Builtin::BI__builtin_elementwise_roundeven: 2533 case Builtin::BI__builtin_elementwise_trunc: { 2534 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2535 return ExprError(); 2536 2537 QualType ArgTy = TheCall->getArg(0)->getType(); 2538 QualType EltTy = ArgTy; 2539 2540 if (auto *VecTy = EltTy->getAs<VectorType>()) 2541 EltTy = VecTy->getElementType(); 2542 if (!EltTy->isFloatingType()) { 2543 Diag(TheCall->getArg(0)->getBeginLoc(), 2544 diag::err_builtin_invalid_arg_type) 2545 << 1 << /* float ty*/ 5 << ArgTy; 2546 2547 return ExprError(); 2548 } 2549 break; 2550 } 2551 2552 // These builtins restrict the element type to integer 2553 // types only. 2554 case Builtin::BI__builtin_elementwise_add_sat: 2555 case Builtin::BI__builtin_elementwise_sub_sat: { 2556 if (SemaBuiltinElementwiseMath(TheCall)) 2557 return ExprError(); 2558 2559 const Expr *Arg = TheCall->getArg(0); 2560 QualType ArgTy = Arg->getType(); 2561 QualType EltTy = ArgTy; 2562 2563 if (auto *VecTy = EltTy->getAs<VectorType>()) 2564 EltTy = VecTy->getElementType(); 2565 2566 if (!EltTy->isIntegerType()) { 2567 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2568 << 1 << /* integer ty */ 6 << ArgTy; 2569 return ExprError(); 2570 } 2571 break; 2572 } 2573 2574 case Builtin::BI__builtin_elementwise_min: 2575 case Builtin::BI__builtin_elementwise_max: 2576 if (SemaBuiltinElementwiseMath(TheCall)) 2577 return ExprError(); 2578 break; 2579 case Builtin::BI__builtin_reduce_max: 2580 case Builtin::BI__builtin_reduce_min: { 2581 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2582 return ExprError(); 2583 2584 const Expr *Arg = TheCall->getArg(0); 2585 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2586 if (!TyA) { 2587 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2588 << 1 << /* vector ty*/ 4 << Arg->getType(); 2589 return ExprError(); 2590 } 2591 2592 TheCall->setType(TyA->getElementType()); 2593 break; 2594 } 2595 2596 // These builtins support vectors of integers only. 2597 // TODO: ADD/MUL should support floating-point types. 
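  // Sketch of the expected usage (the vector typedef is illustrative):
  //   typedef int int4 __attribute__((ext_vector_type(4)));
  //   int sum = __builtin_reduce_add(v);   // scalar result of element type
  // The operand must be a vector of integers, as the check below enforces.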
2598 case Builtin::BI__builtin_reduce_add: 2599 case Builtin::BI__builtin_reduce_mul: 2600 case Builtin::BI__builtin_reduce_xor: 2601 case Builtin::BI__builtin_reduce_or: 2602 case Builtin::BI__builtin_reduce_and: { 2603 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2604 return ExprError(); 2605 2606 const Expr *Arg = TheCall->getArg(0); 2607 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2608 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2609 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2610 << 1 << /* vector of integers */ 6 << Arg->getType(); 2611 return ExprError(); 2612 } 2613 TheCall->setType(TyA->getElementType()); 2614 break; 2615 } 2616 2617 case Builtin::BI__builtin_matrix_transpose: 2618 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2619 2620 case Builtin::BI__builtin_matrix_column_major_load: 2621 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2622 2623 case Builtin::BI__builtin_matrix_column_major_store: 2624 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2625 2626 case Builtin::BI__builtin_get_device_side_mangled_name: { 2627 auto Check = [](CallExpr *TheCall) { 2628 if (TheCall->getNumArgs() != 1) 2629 return false; 2630 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2631 if (!DRE) 2632 return false; 2633 auto *D = DRE->getDecl(); 2634 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2635 return false; 2636 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2637 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2638 }; 2639 if (!Check(TheCall)) { 2640 Diag(TheCall->getBeginLoc(), 2641 diag::err_hip_invalid_args_builtin_mangled_name); 2642 return ExprError(); 2643 } 2644 } 2645 } 2646 2647 // Since the target specific builtins for each arch overlap, only check those 2648 // of the arch we are compiling for. 2649 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2650 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2651 assert(Context.getAuxTargetInfo() && 2652 "Aux Target Builtin, but not an aux target?"); 2653 2654 if (CheckTSBuiltinFunctionCall( 2655 *Context.getAuxTargetInfo(), 2656 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2657 return ExprError(); 2658 } else { 2659 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2660 TheCall)) 2661 return ExprError(); 2662 } 2663 } 2664 2665 return TheCallResult; 2666 } 2667 2668 // Get the valid immediate range for the specified NEON type code. 2669 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2670 NeonTypeFlags Type(t); 2671 int IsQuad = ForceQuad ? true : Type.isQuad(); 2672 switch (Type.getEltType()) { 2673 case NeonTypeFlags::Int8: 2674 case NeonTypeFlags::Poly8: 2675 return shift ? 7 : (8 << IsQuad) - 1; 2676 case NeonTypeFlags::Int16: 2677 case NeonTypeFlags::Poly16: 2678 return shift ? 15 : (4 << IsQuad) - 1; 2679 case NeonTypeFlags::Int32: 2680 return shift ? 31 : (2 << IsQuad) - 1; 2681 case NeonTypeFlags::Int64: 2682 case NeonTypeFlags::Poly64: 2683 return shift ? 63 : (1 << IsQuad) - 1; 2684 case NeonTypeFlags::Poly128: 2685 return shift ? 
127 : (1 << IsQuad) - 1; 2686 case NeonTypeFlags::Float16: 2687 assert(!shift && "cannot shift float types!"); 2688 return (4 << IsQuad) - 1; 2689 case NeonTypeFlags::Float32: 2690 assert(!shift && "cannot shift float types!"); 2691 return (2 << IsQuad) - 1; 2692 case NeonTypeFlags::Float64: 2693 assert(!shift && "cannot shift float types!"); 2694 return (1 << IsQuad) - 1; 2695 case NeonTypeFlags::BFloat16: 2696 assert(!shift && "cannot shift float types!"); 2697 return (4 << IsQuad) - 1; 2698 } 2699 llvm_unreachable("Invalid NeonTypeFlag!"); 2700 } 2701 2702 /// getNeonEltType - Return the QualType corresponding to the elements of 2703 /// the vector type specified by the NeonTypeFlags. This is used to check 2704 /// the pointer arguments for Neon load/store intrinsics. 2705 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2706 bool IsPolyUnsigned, bool IsInt64Long) { 2707 switch (Flags.getEltType()) { 2708 case NeonTypeFlags::Int8: 2709 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2710 case NeonTypeFlags::Int16: 2711 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2712 case NeonTypeFlags::Int32: 2713 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2714 case NeonTypeFlags::Int64: 2715 if (IsInt64Long) 2716 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2717 else 2718 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2719 : Context.LongLongTy; 2720 case NeonTypeFlags::Poly8: 2721 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2722 case NeonTypeFlags::Poly16: 2723 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2724 case NeonTypeFlags::Poly64: 2725 if (IsInt64Long) 2726 return Context.UnsignedLongTy; 2727 else 2728 return Context.UnsignedLongLongTy; 2729 case NeonTypeFlags::Poly128: 2730 break; 2731 case NeonTypeFlags::Float16: 2732 return Context.HalfTy; 2733 case NeonTypeFlags::Float32: 2734 return Context.FloatTy; 2735 case NeonTypeFlags::Float64: 2736 return Context.DoubleTy; 2737 case NeonTypeFlags::BFloat16: 2738 return Context.BFloat16Ty; 2739 } 2740 llvm_unreachable("Invalid NeonTypeFlag!"); 2741 } 2742 2743 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2744 // Range check SVE intrinsics that take immediate values. 2745 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2746 2747 switch (BuiltinID) { 2748 default: 2749 return false; 2750 #define GET_SVE_IMMEDIATE_CHECK 2751 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2752 #undef GET_SVE_IMMEDIATE_CHECK 2753 } 2754 2755 // Perform all the immediate checks for this builtin call. 2756 bool HasError = false; 2757 for (auto &I : ImmChecks) { 2758 int ArgNum, CheckTy, ElementSizeInBits; 2759 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2760 2761 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2762 2763 // Function that checks whether the operand (ArgNum) is an immediate 2764 // that is one of the predefined values. 2765 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2766 int ErrDiag) -> bool { 2767 // We can't check the value of a dependent argument. 2768 Expr *Arg = TheCall->getArg(ArgNum); 2769 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2770 return false; 2771 2772 // Check constant-ness first. 
2773 llvm::APSInt Imm; 2774 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2775 return true; 2776 2777 if (!CheckImm(Imm.getSExtValue())) 2778 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2779 return false; 2780 }; 2781 2782 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2783 case SVETypeFlags::ImmCheck0_31: 2784 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2785 HasError = true; 2786 break; 2787 case SVETypeFlags::ImmCheck0_13: 2788 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2789 HasError = true; 2790 break; 2791 case SVETypeFlags::ImmCheck1_16: 2792 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2793 HasError = true; 2794 break; 2795 case SVETypeFlags::ImmCheck0_7: 2796 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2797 HasError = true; 2798 break; 2799 case SVETypeFlags::ImmCheckExtract: 2800 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2801 (2048 / ElementSizeInBits) - 1)) 2802 HasError = true; 2803 break; 2804 case SVETypeFlags::ImmCheckShiftRight: 2805 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2806 HasError = true; 2807 break; 2808 case SVETypeFlags::ImmCheckShiftRightNarrow: 2809 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2810 ElementSizeInBits / 2)) 2811 HasError = true; 2812 break; 2813 case SVETypeFlags::ImmCheckShiftLeft: 2814 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2815 ElementSizeInBits - 1)) 2816 HasError = true; 2817 break; 2818 case SVETypeFlags::ImmCheckLaneIndex: 2819 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2820 (128 / (1 * ElementSizeInBits)) - 1)) 2821 HasError = true; 2822 break; 2823 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2824 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2825 (128 / (2 * ElementSizeInBits)) - 1)) 2826 HasError = true; 2827 break; 2828 case SVETypeFlags::ImmCheckLaneIndexDot: 2829 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2830 (128 / (4 * ElementSizeInBits)) - 1)) 2831 HasError = true; 2832 break; 2833 case SVETypeFlags::ImmCheckComplexRot90_270: 2834 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2835 diag::err_rotation_argument_to_cadd)) 2836 HasError = true; 2837 break; 2838 case SVETypeFlags::ImmCheckComplexRotAll90: 2839 if (CheckImmediateInSet( 2840 [](int64_t V) { 2841 return V == 0 || V == 90 || V == 180 || V == 270; 2842 }, 2843 diag::err_rotation_argument_to_cmla)) 2844 HasError = true; 2845 break; 2846 case SVETypeFlags::ImmCheck0_1: 2847 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2848 HasError = true; 2849 break; 2850 case SVETypeFlags::ImmCheck0_2: 2851 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2852 HasError = true; 2853 break; 2854 case SVETypeFlags::ImmCheck0_3: 2855 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2856 HasError = true; 2857 break; 2858 } 2859 } 2860 2861 return HasError; 2862 } 2863 2864 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2865 unsigned BuiltinID, CallExpr *TheCall) { 2866 llvm::APSInt Result; 2867 uint64_t mask = 0; 2868 unsigned TV = 0; 2869 int PtrArgNum = -1; 2870 bool HasConstPtr = false; 2871 switch (BuiltinID) { 2872 #define GET_NEON_OVERLOAD_CHECK 2873 #include "clang/Basic/arm_neon.inc" 2874 #include "clang/Basic/arm_fp16.inc" 2875 #undef GET_NEON_OVERLOAD_CHECK 2876 } 2877 2878 // For NEON intrinsics which are overloaded on vector element type, validate 2879 // the immediate which specifies which variant to emit. 
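  // For orientation: these overloaded NEON builtins carry a trailing type-code
  // immediate appended by arm_neon.h, e.g. vld1_s8(p) expands to roughly
  //   (int8x8_t)__builtin_neon_vld1_v(p, /* type code from the header */);
  // so the immediate validated here normally comes from the header rather
  // than from user source.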
2880 unsigned ImmArg = TheCall->getNumArgs()-1; 2881 if (mask) { 2882 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2883 return true; 2884 2885 TV = Result.getLimitedValue(64); 2886 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2887 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2888 << TheCall->getArg(ImmArg)->getSourceRange(); 2889 } 2890 2891 if (PtrArgNum >= 0) { 2892 // Check that pointer arguments have the specified type. 2893 Expr *Arg = TheCall->getArg(PtrArgNum); 2894 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2895 Arg = ICE->getSubExpr(); 2896 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2897 QualType RHSTy = RHS.get()->getType(); 2898 2899 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2900 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2901 Arch == llvm::Triple::aarch64_32 || 2902 Arch == llvm::Triple::aarch64_be; 2903 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2904 QualType EltTy = 2905 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2906 if (HasConstPtr) 2907 EltTy = EltTy.withConst(); 2908 QualType LHSTy = Context.getPointerType(EltTy); 2909 AssignConvertType ConvTy; 2910 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2911 if (RHS.isInvalid()) 2912 return true; 2913 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2914 RHS.get(), AA_Assigning)) 2915 return true; 2916 } 2917 2918 // For NEON intrinsics which take an immediate value as part of the 2919 // instruction, range check them here. 2920 unsigned i = 0, l = 0, u = 0; 2921 switch (BuiltinID) { 2922 default: 2923 return false; 2924 #define GET_NEON_IMMEDIATE_CHECK 2925 #include "clang/Basic/arm_neon.inc" 2926 #include "clang/Basic/arm_fp16.inc" 2927 #undef GET_NEON_IMMEDIATE_CHECK 2928 } 2929 2930 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2931 } 2932 2933 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2934 switch (BuiltinID) { 2935 default: 2936 return false; 2937 #include "clang/Basic/arm_mve_builtin_sema.inc" 2938 } 2939 } 2940 2941 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2942 CallExpr *TheCall) { 2943 bool Err = false; 2944 switch (BuiltinID) { 2945 default: 2946 return false; 2947 #include "clang/Basic/arm_cde_builtin_sema.inc" 2948 } 2949 2950 if (Err) 2951 return true; 2952 2953 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2954 } 2955 2956 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2957 const Expr *CoprocArg, bool WantCDE) { 2958 if (isConstantEvaluated()) 2959 return false; 2960 2961 // We can't check the value of a dependent argument. 
2962 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2963 return false; 2964 2965 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2966 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2967 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2968 2969 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2970 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2971 2972 if (IsCDECoproc != WantCDE) 2973 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2974 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2975 2976 return false; 2977 } 2978 2979 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2980 unsigned MaxWidth) { 2981 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2982 BuiltinID == ARM::BI__builtin_arm_ldaex || 2983 BuiltinID == ARM::BI__builtin_arm_strex || 2984 BuiltinID == ARM::BI__builtin_arm_stlex || 2985 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2986 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2987 BuiltinID == AArch64::BI__builtin_arm_strex || 2988 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2989 "unexpected ARM builtin"); 2990 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2991 BuiltinID == ARM::BI__builtin_arm_ldaex || 2992 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2993 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2994 2995 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2996 2997 // Ensure that we have the proper number of arguments. 2998 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2999 return true; 3000 3001 // Inspect the pointer argument of the atomic builtin. This should always be 3002 // a pointer type, whose element is an integral scalar or pointer type. 3003 // Because it is a pointer type, we don't have to worry about any implicit 3004 // casts here. 3005 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 3006 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 3007 if (PointerArgRes.isInvalid()) 3008 return true; 3009 PointerArg = PointerArgRes.get(); 3010 3011 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 3012 if (!pointerType) { 3013 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 3014 << PointerArg->getType() << PointerArg->getSourceRange(); 3015 return true; 3016 } 3017 3018 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 3019 // task is to insert the appropriate casts into the AST. First work out just 3020 // what the appropriate type is. 3021 QualType ValType = pointerType->getPointeeType(); 3022 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 3023 if (IsLdrex) 3024 AddrType.addConst(); 3025 3026 // Issue a warning if the cast is dodgy. 3027 CastKind CastNeeded = CK_NoOp; 3028 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 3029 CastNeeded = CK_BitCast; 3030 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 3031 << PointerArg->getType() << Context.getPointerType(AddrType) 3032 << AA_Passing << PointerArg->getSourceRange(); 3033 } 3034 3035 // Finally, do the cast and replace the argument with the corrected version. 3036 AddrType = Context.getPointerType(AddrType); 3037 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 3038 if (PointerArgRes.isInvalid()) 3039 return true; 3040 PointerArg = PointerArgRes.get(); 3041 3042 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 3043 3044 // In general, we allow ints, floats and pointers to be loaded and stored. 3045 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 3046 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 3047 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 3048 << PointerArg->getType() << PointerArg->getSourceRange(); 3049 return true; 3050 } 3051 3052 // But ARM doesn't have instructions to deal with 128-bit versions. 3053 if (Context.getTypeSize(ValType) > MaxWidth) { 3054 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 3055 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 3056 << PointerArg->getType() << PointerArg->getSourceRange(); 3057 return true; 3058 } 3059 3060 switch (ValType.getObjCLifetime()) { 3061 case Qualifiers::OCL_None: 3062 case Qualifiers::OCL_ExplicitNone: 3063 // okay 3064 break; 3065 3066 case Qualifiers::OCL_Weak: 3067 case Qualifiers::OCL_Strong: 3068 case Qualifiers::OCL_Autoreleasing: 3069 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 3070 << ValType << PointerArg->getSourceRange(); 3071 return true; 3072 } 3073 3074 if (IsLdrex) { 3075 TheCall->setType(ValType); 3076 return false; 3077 } 3078 3079 // Initialize the argument to be stored. 3080 ExprResult ValArg = TheCall->getArg(0); 3081 InitializedEntity Entity = InitializedEntity::InitializeParameter( 3082 Context, ValType, /*consume*/ false); 3083 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 3084 if (ValArg.isInvalid()) 3085 return true; 3086 TheCall->setArg(0, ValArg.get()); 3087 3088 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 3089 // but the custom checker bypasses all default analysis. 3090 TheCall->setType(Context.IntTy); 3091 return false; 3092 } 3093 3094 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3095 CallExpr *TheCall) { 3096 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 3097 BuiltinID == ARM::BI__builtin_arm_ldaex || 3098 BuiltinID == ARM::BI__builtin_arm_strex || 3099 BuiltinID == ARM::BI__builtin_arm_stlex) { 3100 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 3101 } 3102 3103 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 3104 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3105 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 3106 } 3107 3108 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 3109 BuiltinID == ARM::BI__builtin_arm_wsr64) 3110 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 3111 3112 if (BuiltinID == ARM::BI__builtin_arm_rsr || 3113 BuiltinID == ARM::BI__builtin_arm_rsrp || 3114 BuiltinID == ARM::BI__builtin_arm_wsr || 3115 BuiltinID == ARM::BI__builtin_arm_wsrp) 3116 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3117 3118 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3119 return true; 3120 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 3121 return true; 3122 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3123 return true; 3124 3125 // For intrinsics which take an immediate value as part of the instruction, 3126 // range check them here. 3127 // FIXME: VFP Intrinsics should error if VFP not present. 
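  // For example, __builtin_arm_ssat(x, 1) through __builtin_arm_ssat(x, 32)
  // are accepted, while __builtin_arm_ssat(x, 0) trips the [1, 32] range
  // check in the ssat case below (x being any integer expression).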
3128 switch (BuiltinID) { 3129 default: return false; 3130 case ARM::BI__builtin_arm_ssat: 3131 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 3132 case ARM::BI__builtin_arm_usat: 3133 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 3134 case ARM::BI__builtin_arm_ssat16: 3135 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3136 case ARM::BI__builtin_arm_usat16: 3137 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3138 case ARM::BI__builtin_arm_vcvtr_f: 3139 case ARM::BI__builtin_arm_vcvtr_d: 3140 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3141 case ARM::BI__builtin_arm_dmb: 3142 case ARM::BI__builtin_arm_dsb: 3143 case ARM::BI__builtin_arm_isb: 3144 case ARM::BI__builtin_arm_dbg: 3145 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 3146 case ARM::BI__builtin_arm_cdp: 3147 case ARM::BI__builtin_arm_cdp2: 3148 case ARM::BI__builtin_arm_mcr: 3149 case ARM::BI__builtin_arm_mcr2: 3150 case ARM::BI__builtin_arm_mrc: 3151 case ARM::BI__builtin_arm_mrc2: 3152 case ARM::BI__builtin_arm_mcrr: 3153 case ARM::BI__builtin_arm_mcrr2: 3154 case ARM::BI__builtin_arm_mrrc: 3155 case ARM::BI__builtin_arm_mrrc2: 3156 case ARM::BI__builtin_arm_ldc: 3157 case ARM::BI__builtin_arm_ldcl: 3158 case ARM::BI__builtin_arm_ldc2: 3159 case ARM::BI__builtin_arm_ldc2l: 3160 case ARM::BI__builtin_arm_stc: 3161 case ARM::BI__builtin_arm_stcl: 3162 case ARM::BI__builtin_arm_stc2: 3163 case ARM::BI__builtin_arm_stc2l: 3164 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 3165 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 3166 /*WantCDE*/ false); 3167 } 3168 } 3169 3170 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 3171 unsigned BuiltinID, 3172 CallExpr *TheCall) { 3173 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 3174 BuiltinID == AArch64::BI__builtin_arm_ldaex || 3175 BuiltinID == AArch64::BI__builtin_arm_strex || 3176 BuiltinID == AArch64::BI__builtin_arm_stlex) { 3177 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 3178 } 3179 3180 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 3181 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3182 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 3183 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 3184 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 3185 } 3186 3187 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 3188 BuiltinID == AArch64::BI__builtin_arm_wsr64) 3189 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3190 3191 // Memory Tagging Extensions (MTE) Intrinsics 3192 if (BuiltinID == AArch64::BI__builtin_arm_irg || 3193 BuiltinID == AArch64::BI__builtin_arm_addg || 3194 BuiltinID == AArch64::BI__builtin_arm_gmi || 3195 BuiltinID == AArch64::BI__builtin_arm_ldg || 3196 BuiltinID == AArch64::BI__builtin_arm_stg || 3197 BuiltinID == AArch64::BI__builtin_arm_subp) { 3198 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 3199 } 3200 3201 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 3202 BuiltinID == AArch64::BI__builtin_arm_rsrp || 3203 BuiltinID == AArch64::BI__builtin_arm_wsr || 3204 BuiltinID == AArch64::BI__builtin_arm_wsrp) 3205 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3206 3207 // Only check the valid encoding range. Any constant in this range would be 3208 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 3209 // an exception for incorrect registers. This matches MSVC behavior. 
3210 if (BuiltinID == AArch64::BI_ReadStatusReg || 3211 BuiltinID == AArch64::BI_WriteStatusReg) 3212 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 3213 3214 if (BuiltinID == AArch64::BI__getReg) 3215 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3216 3217 if (BuiltinID == AArch64::BI__break) 3218 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff); 3219 3220 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3221 return true; 3222 3223 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 3224 return true; 3225 3226 // For intrinsics which take an immediate value as part of the instruction, 3227 // range check them here. 3228 unsigned i = 0, l = 0, u = 0; 3229 switch (BuiltinID) { 3230 default: return false; 3231 case AArch64::BI__builtin_arm_dmb: 3232 case AArch64::BI__builtin_arm_dsb: 3233 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 3234 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 3235 } 3236 3237 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 3238 } 3239 3240 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 3241 if (Arg->getType()->getAsPlaceholderType()) 3242 return false; 3243 3244 // The first argument needs to be a record field access. 3245 // If it is an array element access, we delay decision 3246 // to BPF backend to check whether the access is a 3247 // field access or not. 3248 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 3249 isa<MemberExpr>(Arg->IgnoreParens()) || 3250 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 3251 } 3252 3253 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 3254 QualType VectorTy, QualType EltTy) { 3255 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 3256 if (!Context.hasSameType(VectorEltTy, EltTy)) { 3257 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 3258 << Call->getSourceRange() << VectorEltTy << EltTy; 3259 return false; 3260 } 3261 return true; 3262 } 3263 3264 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 3265 QualType ArgType = Arg->getType(); 3266 if (ArgType->getAsPlaceholderType()) 3267 return false; 3268 3269 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 3270 // format: 3271 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 3272 // 2. <type> var; 3273 // __builtin_preserve_type_info(var, flag); 3274 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 3275 !isa<UnaryOperator>(Arg->IgnoreParens())) 3276 return false; 3277 3278 // Typedef type. 3279 if (ArgType->getAs<TypedefType>()) 3280 return true; 3281 3282 // Record type or Enum type. 
3283 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3284 if (const auto *RT = Ty->getAs<RecordType>()) { 3285 if (!RT->getDecl()->getDeclName().isEmpty()) 3286 return true; 3287 } else if (const auto *ET = Ty->getAs<EnumType>()) { 3288 if (!ET->getDecl()->getDeclName().isEmpty()) 3289 return true; 3290 } 3291 3292 return false; 3293 } 3294 3295 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 3296 QualType ArgType = Arg->getType(); 3297 if (ArgType->getAsPlaceholderType()) 3298 return false; 3299 3300 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 3301 // format: 3302 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 3303 // flag); 3304 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 3305 if (!UO) 3306 return false; 3307 3308 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 3309 if (!CE) 3310 return false; 3311 if (CE->getCastKind() != CK_IntegralToPointer && 3312 CE->getCastKind() != CK_NullToPointer) 3313 return false; 3314 3315 // The integer must be from an EnumConstantDecl. 3316 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 3317 if (!DR) 3318 return false; 3319 3320 const EnumConstantDecl *Enumerator = 3321 dyn_cast<EnumConstantDecl>(DR->getDecl()); 3322 if (!Enumerator) 3323 return false; 3324 3325 // The type must be EnumType. 3326 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3327 const auto *ET = Ty->getAs<EnumType>(); 3328 if (!ET) 3329 return false; 3330 3331 // The enum value must be supported. 3332 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 3333 } 3334 3335 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 3336 CallExpr *TheCall) { 3337 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 3338 BuiltinID == BPF::BI__builtin_btf_type_id || 3339 BuiltinID == BPF::BI__builtin_preserve_type_info || 3340 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3341 "unexpected BPF builtin"); 3342 3343 if (checkArgCount(*this, TheCall, 2)) 3344 return true; 3345 3346 // The second argument needs to be a constant int 3347 Expr *Arg = TheCall->getArg(1); 3348 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3349 diag::kind kind; 3350 if (!Value) { 3351 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3352 kind = diag::err_preserve_field_info_not_const; 3353 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3354 kind = diag::err_btf_type_id_not_const; 3355 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3356 kind = diag::err_preserve_type_info_not_const; 3357 else 3358 kind = diag::err_preserve_enum_value_not_const; 3359 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3360 return true; 3361 } 3362 3363 // The first argument 3364 Arg = TheCall->getArg(0); 3365 bool InvalidArg = false; 3366 bool ReturnUnsignedInt = true; 3367 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3368 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3369 InvalidArg = true; 3370 kind = diag::err_preserve_field_info_not_field; 3371 } 3372 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3373 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3374 InvalidArg = true; 3375 kind = diag::err_preserve_type_info_invalid; 3376 } 3377 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3378 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3379 InvalidArg = true; 3380 kind = diag::err_preserve_enum_value_invalid; 3381 } 3382 ReturnUnsignedInt = false; 3383 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 3384 ReturnUnsignedInt 
= false; 3385 } 3386 3387 if (InvalidArg) { 3388 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3389 return true; 3390 } 3391 3392 if (ReturnUnsignedInt) 3393 TheCall->setType(Context.UnsignedIntTy); 3394 else 3395 TheCall->setType(Context.UnsignedLongTy); 3396 return false; 3397 } 3398 3399 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3400 struct ArgInfo { 3401 uint8_t OpNum; 3402 bool IsSigned; 3403 uint8_t BitWidth; 3404 uint8_t Align; 3405 }; 3406 struct BuiltinInfo { 3407 unsigned BuiltinID; 3408 ArgInfo Infos[2]; 3409 }; 3410 3411 static BuiltinInfo Infos[] = { 3412 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3413 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3414 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3415 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3416 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3417 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3418 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3419 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3420 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3421 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3422 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3423 3424 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3425 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3426 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3427 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3428 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3429 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3430 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3431 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3432 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3433 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3434 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3435 3436 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3437 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3438 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3439 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3440 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3441 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3442 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3443 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3444 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3445 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3446 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3447 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3448 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3449 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3450 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3451 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3452 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3453 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3454 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3455 { 
Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3456 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3457 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3458 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3459 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3460 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3461 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3462 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3463 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3464 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3465 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3466 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3467 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3468 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3469 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3470 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3471 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3472 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3473 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3474 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3475 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3476 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3477 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3478 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3479 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3480 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3481 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3482 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3483 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3484 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3485 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3486 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 3487 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3488 {{ 1, false, 6, 0 }} }, 3489 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3490 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3491 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3492 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3493 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3494 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3495 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3496 {{ 1, false, 5, 0 }} }, 3497 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3498 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3499 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3500 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3501 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3502 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3503 { 2, false, 5, 0 }} }, 3504 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 
3505 { 2, false, 6, 0 }} }, 3506 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3507 { 3, false, 5, 0 }} }, 3508 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3509 { 3, false, 6, 0 }} }, 3510 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3511 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3512 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3513 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3514 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3515 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3516 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3517 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 3518 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3519 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3520 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3521 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3522 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3523 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3524 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 3525 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3526 {{ 2, false, 4, 0 }, 3527 { 3, false, 5, 0 }} }, 3528 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3529 {{ 2, false, 4, 0 }, 3530 { 3, false, 5, 0 }} }, 3531 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3532 {{ 2, false, 4, 0 }, 3533 { 3, false, 5, 0 }} }, 3534 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3535 {{ 2, false, 4, 0 }, 3536 { 3, false, 5, 0 }} }, 3537 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3538 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3539 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3540 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3541 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3542 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3543 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3544 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3545 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3546 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3547 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3548 { 2, false, 5, 0 }} }, 3549 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3550 { 2, false, 6, 0 }} }, 3551 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 3552 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3553 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3554 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3555 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3556 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3557 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3558 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3559 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3560 {{ 1, false, 4, 0 }} }, 3561 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3562 { 
Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3563 {{ 1, false, 4, 0 }} }, 3564 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3565 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3566 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3567 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3568 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3569 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3570 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3571 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3572 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3573 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3574 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3575 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3576 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3577 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3578 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3579 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 3580 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3581 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3582 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3583 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3584 {{ 3, false, 1, 0 }} }, 3585 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3586 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3587 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3588 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3589 {{ 3, false, 1, 0 }} }, 3590 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3591 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3592 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3593 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3594 {{ 3, false, 1, 0 }} }, 3595 }; 3596 3597 // Use a dynamically initialized static to sort the table exactly once on 3598 // first run. 3599 static const bool SortOnce = 3600 (llvm::sort(Infos, 3601 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3602 return LHS.BuiltinID < RHS.BuiltinID; 3603 }), 3604 true); 3605 (void)SortOnce; 3606 3607 const BuiltinInfo *F = llvm::partition_point( 3608 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3609 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3610 return false; 3611 3612 bool Error = false; 3613 3614 for (const ArgInfo &A : F->Infos) { 3615 // Ignore empty ArgInfo elements. 3616 if (A.BitWidth == 0) 3617 continue; 3618 3619 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3620 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 3621 if (!A.Align) { 3622 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3623 } else { 3624 unsigned M = 1 << A.Align; 3625 Min *= M; 3626 Max *= M; 3627 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3628 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3629 } 3630 } 3631 return Error; 3632 } 3633 3634 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3635 CallExpr *TheCall) { 3636 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3637 } 3638 3639 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3640 unsigned BuiltinID, CallExpr *TheCall) { 3641 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3642 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3643 } 3644 3645 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3646 CallExpr *TheCall) { 3647 3648 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3649 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3650 if (!TI.hasFeature("dsp")) 3651 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3652 } 3653 3654 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3655 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3656 if (!TI.hasFeature("dspr2")) 3657 return Diag(TheCall->getBeginLoc(), 3658 diag::err_mips_builtin_requires_dspr2); 3659 } 3660 3661 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3662 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3663 if (!TI.hasFeature("msa")) 3664 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3665 } 3666 3667 return false; 3668 } 3669 3670 // CheckMipsBuiltinArgument - Checks the constant value passed to the 3671 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3672 // ordering for DSP is unspecified. MSA is ordered by the data format used 3673 // by the underlying instruction i.e., df/m, df/n and then by size. 3674 // 3675 // FIXME: The size tests here should instead be tablegen'd along with the 3676 // definitions from include/clang/Basic/BuiltinsMips.def. 3677 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3678 // be too. 3679 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3680 unsigned i = 0, l = 0, u = 0, m = 0; 3681 switch (BuiltinID) { 3682 default: return false; 3683 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 3684 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3685 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3686 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3687 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3688 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3689 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3690 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 3691 // df/m field. 3692 // These intrinsics take an unsigned 3 bit immediate. 
3693 case Mips::BI__builtin_msa_bclri_b: 3694 case Mips::BI__builtin_msa_bnegi_b: 3695 case Mips::BI__builtin_msa_bseti_b: 3696 case Mips::BI__builtin_msa_sat_s_b: 3697 case Mips::BI__builtin_msa_sat_u_b: 3698 case Mips::BI__builtin_msa_slli_b: 3699 case Mips::BI__builtin_msa_srai_b: 3700 case Mips::BI__builtin_msa_srari_b: 3701 case Mips::BI__builtin_msa_srli_b: 3702 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3703 case Mips::BI__builtin_msa_binsli_b: 3704 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3705 // These intrinsics take an unsigned 4 bit immediate. 3706 case Mips::BI__builtin_msa_bclri_h: 3707 case Mips::BI__builtin_msa_bnegi_h: 3708 case Mips::BI__builtin_msa_bseti_h: 3709 case Mips::BI__builtin_msa_sat_s_h: 3710 case Mips::BI__builtin_msa_sat_u_h: 3711 case Mips::BI__builtin_msa_slli_h: 3712 case Mips::BI__builtin_msa_srai_h: 3713 case Mips::BI__builtin_msa_srari_h: 3714 case Mips::BI__builtin_msa_srli_h: 3715 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3716 case Mips::BI__builtin_msa_binsli_h: 3717 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3718 // These intrinsics take an unsigned 5 bit immediate. 3719 // The first block of intrinsics actually have an unsigned 5 bit field, 3720 // not a df/n field. 3721 case Mips::BI__builtin_msa_cfcmsa: 3722 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3723 case Mips::BI__builtin_msa_clei_u_b: 3724 case Mips::BI__builtin_msa_clei_u_h: 3725 case Mips::BI__builtin_msa_clei_u_w: 3726 case Mips::BI__builtin_msa_clei_u_d: 3727 case Mips::BI__builtin_msa_clti_u_b: 3728 case Mips::BI__builtin_msa_clti_u_h: 3729 case Mips::BI__builtin_msa_clti_u_w: 3730 case Mips::BI__builtin_msa_clti_u_d: 3731 case Mips::BI__builtin_msa_maxi_u_b: 3732 case Mips::BI__builtin_msa_maxi_u_h: 3733 case Mips::BI__builtin_msa_maxi_u_w: 3734 case Mips::BI__builtin_msa_maxi_u_d: 3735 case Mips::BI__builtin_msa_mini_u_b: 3736 case Mips::BI__builtin_msa_mini_u_h: 3737 case Mips::BI__builtin_msa_mini_u_w: 3738 case Mips::BI__builtin_msa_mini_u_d: 3739 case Mips::BI__builtin_msa_addvi_b: 3740 case Mips::BI__builtin_msa_addvi_h: 3741 case Mips::BI__builtin_msa_addvi_w: 3742 case Mips::BI__builtin_msa_addvi_d: 3743 case Mips::BI__builtin_msa_bclri_w: 3744 case Mips::BI__builtin_msa_bnegi_w: 3745 case Mips::BI__builtin_msa_bseti_w: 3746 case Mips::BI__builtin_msa_sat_s_w: 3747 case Mips::BI__builtin_msa_sat_u_w: 3748 case Mips::BI__builtin_msa_slli_w: 3749 case Mips::BI__builtin_msa_srai_w: 3750 case Mips::BI__builtin_msa_srari_w: 3751 case Mips::BI__builtin_msa_srli_w: 3752 case Mips::BI__builtin_msa_srlri_w: 3753 case Mips::BI__builtin_msa_subvi_b: 3754 case Mips::BI__builtin_msa_subvi_h: 3755 case Mips::BI__builtin_msa_subvi_w: 3756 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3757 case Mips::BI__builtin_msa_binsli_w: 3758 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3759 // These intrinsics take an unsigned 6 bit immediate. 
3760 case Mips::BI__builtin_msa_bclri_d: 3761 case Mips::BI__builtin_msa_bnegi_d: 3762 case Mips::BI__builtin_msa_bseti_d: 3763 case Mips::BI__builtin_msa_sat_s_d: 3764 case Mips::BI__builtin_msa_sat_u_d: 3765 case Mips::BI__builtin_msa_slli_d: 3766 case Mips::BI__builtin_msa_srai_d: 3767 case Mips::BI__builtin_msa_srari_d: 3768 case Mips::BI__builtin_msa_srli_d: 3769 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3770 case Mips::BI__builtin_msa_binsli_d: 3771 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3772 // These intrinsics take a signed 5 bit immediate. 3773 case Mips::BI__builtin_msa_ceqi_b: 3774 case Mips::BI__builtin_msa_ceqi_h: 3775 case Mips::BI__builtin_msa_ceqi_w: 3776 case Mips::BI__builtin_msa_ceqi_d: 3777 case Mips::BI__builtin_msa_clti_s_b: 3778 case Mips::BI__builtin_msa_clti_s_h: 3779 case Mips::BI__builtin_msa_clti_s_w: 3780 case Mips::BI__builtin_msa_clti_s_d: 3781 case Mips::BI__builtin_msa_clei_s_b: 3782 case Mips::BI__builtin_msa_clei_s_h: 3783 case Mips::BI__builtin_msa_clei_s_w: 3784 case Mips::BI__builtin_msa_clei_s_d: 3785 case Mips::BI__builtin_msa_maxi_s_b: 3786 case Mips::BI__builtin_msa_maxi_s_h: 3787 case Mips::BI__builtin_msa_maxi_s_w: 3788 case Mips::BI__builtin_msa_maxi_s_d: 3789 case Mips::BI__builtin_msa_mini_s_b: 3790 case Mips::BI__builtin_msa_mini_s_h: 3791 case Mips::BI__builtin_msa_mini_s_w: 3792 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3793 // These intrinsics take an unsigned 8 bit immediate. 3794 case Mips::BI__builtin_msa_andi_b: 3795 case Mips::BI__builtin_msa_nori_b: 3796 case Mips::BI__builtin_msa_ori_b: 3797 case Mips::BI__builtin_msa_shf_b: 3798 case Mips::BI__builtin_msa_shf_h: 3799 case Mips::BI__builtin_msa_shf_w: 3800 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3801 case Mips::BI__builtin_msa_bseli_b: 3802 case Mips::BI__builtin_msa_bmnzi_b: 3803 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3804 // df/n format 3805 // These intrinsics take an unsigned 4 bit immediate. 3806 case Mips::BI__builtin_msa_copy_s_b: 3807 case Mips::BI__builtin_msa_copy_u_b: 3808 case Mips::BI__builtin_msa_insve_b: 3809 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3810 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3811 // These intrinsics take an unsigned 3 bit immediate. 3812 case Mips::BI__builtin_msa_copy_s_h: 3813 case Mips::BI__builtin_msa_copy_u_h: 3814 case Mips::BI__builtin_msa_insve_h: 3815 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3816 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3817 // These intrinsics take an unsigned 2 bit immediate. 3818 case Mips::BI__builtin_msa_copy_s_w: 3819 case Mips::BI__builtin_msa_copy_u_w: 3820 case Mips::BI__builtin_msa_insve_w: 3821 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3822 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3823 // These intrinsics take an unsigned 1 bit immediate. 3824 case Mips::BI__builtin_msa_copy_s_d: 3825 case Mips::BI__builtin_msa_copy_u_d: 3826 case Mips::BI__builtin_msa_insve_d: 3827 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3828 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3829 // Memory offsets and immediate loads. 3830 // These intrinsics take a signed 10 bit immediate. 
3831 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3832 case Mips::BI__builtin_msa_ldi_h: 3833 case Mips::BI__builtin_msa_ldi_w: 3834 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3835 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3836 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3837 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3838 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3839 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3840 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3841 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3842 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3843 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3844 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3845 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3846 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3847 } 3848 3849 if (!m) 3850 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3851 3852 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3853 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3854 } 3855 3856 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3857 /// advancing the pointer over the consumed characters. The decoded type is 3858 /// returned. If the decoded type represents a constant integer with a 3859 /// constraint on its value then Mask is set to that value. The type descriptors 3860 /// used in Str are specific to PPC MMA builtins and are documented in the file 3861 /// defining the PPC builtins. 3862 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3863 unsigned &Mask) { 3864 bool RequireICE = false; 3865 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3866 switch (*Str++) { 3867 case 'V': 3868 return Context.getVectorType(Context.UnsignedCharTy, 16, 3869 VectorType::VectorKind::AltiVecVector); 3870 case 'i': { 3871 char *End; 3872 unsigned size = strtoul(Str, &End, 10); 3873 assert(End != Str && "Missing constant parameter constraint"); 3874 Str = End; 3875 Mask = size; 3876 return Context.IntTy; 3877 } 3878 case 'W': { 3879 char *End; 3880 unsigned size = strtoul(Str, &End, 10); 3881 assert(End != Str && "Missing PowerPC MMA type size"); 3882 Str = End; 3883 QualType Type; 3884 switch (size) { 3885 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3886 case size: Type = Context.Id##Ty; break; 3887 #include "clang/Basic/PPCTypes.def" 3888 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3889 } 3890 bool CheckVectorArgs = false; 3891 while (!CheckVectorArgs) { 3892 switch (*Str++) { 3893 case '*': 3894 Type = Context.getPointerType(Type); 3895 break; 3896 case 'C': 3897 Type = Type.withConst(); 3898 break; 3899 default: 3900 CheckVectorArgs = true; 3901 --Str; 3902 break; 3903 } 3904 } 3905 return Type; 3906 } 3907 default: 3908 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3909 } 3910 } 3911 3912 static bool isPPC_64Builtin(unsigned BuiltinID) { 3913 // These builtins only work on PPC 64bit targets. 
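  // For example (illustrative): __builtin_divde appears here, so calling it
  // while targeting 32-bit PowerPC is diagnosed with
  // err_64_bit_builtin_32_bit_tgt in CheckPPCBuiltinFunctionCall.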
3914 switch (BuiltinID) { 3915 case PPC::BI__builtin_divde: 3916 case PPC::BI__builtin_divdeu: 3917 case PPC::BI__builtin_bpermd: 3918 case PPC::BI__builtin_pdepd: 3919 case PPC::BI__builtin_pextd: 3920 case PPC::BI__builtin_ppc_ldarx: 3921 case PPC::BI__builtin_ppc_stdcx: 3922 case PPC::BI__builtin_ppc_tdw: 3923 case PPC::BI__builtin_ppc_trapd: 3924 case PPC::BI__builtin_ppc_cmpeqb: 3925 case PPC::BI__builtin_ppc_setb: 3926 case PPC::BI__builtin_ppc_mulhd: 3927 case PPC::BI__builtin_ppc_mulhdu: 3928 case PPC::BI__builtin_ppc_maddhd: 3929 case PPC::BI__builtin_ppc_maddhdu: 3930 case PPC::BI__builtin_ppc_maddld: 3931 case PPC::BI__builtin_ppc_load8r: 3932 case PPC::BI__builtin_ppc_store8r: 3933 case PPC::BI__builtin_ppc_insert_exp: 3934 case PPC::BI__builtin_ppc_extract_sig: 3935 case PPC::BI__builtin_ppc_addex: 3936 case PPC::BI__builtin_darn: 3937 case PPC::BI__builtin_darn_raw: 3938 case PPC::BI__builtin_ppc_compare_and_swaplp: 3939 case PPC::BI__builtin_ppc_fetch_and_addlp: 3940 case PPC::BI__builtin_ppc_fetch_and_andlp: 3941 case PPC::BI__builtin_ppc_fetch_and_orlp: 3942 case PPC::BI__builtin_ppc_fetch_and_swaplp: 3943 return true; 3944 } 3945 return false; 3946 } 3947 3948 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3949 StringRef FeatureToCheck, unsigned DiagID, 3950 StringRef DiagArg = "") { 3951 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3952 return false; 3953 3954 if (DiagArg.empty()) 3955 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3956 else 3957 S.Diag(TheCall->getBeginLoc(), DiagID) 3958 << DiagArg << TheCall->getSourceRange(); 3959 3960 return true; 3961 } 3962 3963 /// Returns true if the argument consists of one contiguous run of 1s with any 3964 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so 3965 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3966 /// since all 1s are not contiguous. 3967 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 3968 llvm::APSInt Result; 3969 // We can't check the value of a dependent argument. 3970 Expr *Arg = TheCall->getArg(ArgNum); 3971 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3972 return false; 3973 3974 // Check constant-ness first. 3975 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3976 return true; 3977 3978 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 
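  // A wrapped run such as 0xFF0000FF is not itself a shifted mask, but its
  // complement (for a 32-bit value, 0x00FFFF00) is, so testing ~Result covers
  // that case.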
3979 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 3980 return false; 3981 3982 return Diag(TheCall->getBeginLoc(), 3983 diag::err_argument_not_contiguous_bit_field) 3984 << ArgNum << Arg->getSourceRange(); 3985 } 3986 3987 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3988 CallExpr *TheCall) { 3989 unsigned i = 0, l = 0, u = 0; 3990 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3991 llvm::APSInt Result; 3992 3993 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 3994 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3995 << TheCall->getSourceRange(); 3996 3997 switch (BuiltinID) { 3998 default: return false; 3999 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 4000 case PPC::BI__builtin_altivec_crypto_vshasigmad: 4001 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 4002 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4003 case PPC::BI__builtin_altivec_dss: 4004 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 4005 case PPC::BI__builtin_tbegin: 4006 case PPC::BI__builtin_tend: 4007 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 4008 SemaFeatureCheck(*this, TheCall, "htm", 4009 diag::err_ppc_builtin_requires_htm); 4010 case PPC::BI__builtin_tsr: 4011 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4012 SemaFeatureCheck(*this, TheCall, "htm", 4013 diag::err_ppc_builtin_requires_htm); 4014 case PPC::BI__builtin_tabortwc: 4015 case PPC::BI__builtin_tabortdc: 4016 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4017 SemaFeatureCheck(*this, TheCall, "htm", 4018 diag::err_ppc_builtin_requires_htm); 4019 case PPC::BI__builtin_tabortwci: 4020 case PPC::BI__builtin_tabortdci: 4021 return SemaFeatureCheck(*this, TheCall, "htm", 4022 diag::err_ppc_builtin_requires_htm) || 4023 (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4024 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31)); 4025 case PPC::BI__builtin_tabort: 4026 case PPC::BI__builtin_tcheck: 4027 case PPC::BI__builtin_treclaim: 4028 case PPC::BI__builtin_trechkpt: 4029 case PPC::BI__builtin_tendall: 4030 case PPC::BI__builtin_tresume: 4031 case PPC::BI__builtin_tsuspend: 4032 case PPC::BI__builtin_get_texasr: 4033 case PPC::BI__builtin_get_texasru: 4034 case PPC::BI__builtin_get_tfhar: 4035 case PPC::BI__builtin_get_tfiar: 4036 case PPC::BI__builtin_set_texasr: 4037 case PPC::BI__builtin_set_texasru: 4038 case PPC::BI__builtin_set_tfhar: 4039 case PPC::BI__builtin_set_tfiar: 4040 case PPC::BI__builtin_ttest: 4041 return SemaFeatureCheck(*this, TheCall, "htm", 4042 diag::err_ppc_builtin_requires_htm); 4043 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 4044 // __builtin_(un)pack_longdouble are available only if long double uses IBM 4045 // extended double representation. 
4046 case PPC::BI__builtin_unpack_longdouble: 4047 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 4048 return true; 4049 LLVM_FALLTHROUGH; 4050 case PPC::BI__builtin_pack_longdouble: 4051 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 4052 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 4053 << "ibmlongdouble"; 4054 return false; 4055 case PPC::BI__builtin_altivec_dst: 4056 case PPC::BI__builtin_altivec_dstt: 4057 case PPC::BI__builtin_altivec_dstst: 4058 case PPC::BI__builtin_altivec_dststt: 4059 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4060 case PPC::BI__builtin_vsx_xxpermdi: 4061 case PPC::BI__builtin_vsx_xxsldwi: 4062 return SemaBuiltinVSX(TheCall); 4063 case PPC::BI__builtin_divwe: 4064 case PPC::BI__builtin_divweu: 4065 case PPC::BI__builtin_divde: 4066 case PPC::BI__builtin_divdeu: 4067 return SemaFeatureCheck(*this, TheCall, "extdiv", 4068 diag::err_ppc_builtin_only_on_arch, "7"); 4069 case PPC::BI__builtin_bpermd: 4070 return SemaFeatureCheck(*this, TheCall, "bpermd", 4071 diag::err_ppc_builtin_only_on_arch, "7"); 4072 case PPC::BI__builtin_unpack_vector_int128: 4073 return SemaFeatureCheck(*this, TheCall, "vsx", 4074 diag::err_ppc_builtin_only_on_arch, "7") || 4075 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4076 case PPC::BI__builtin_pack_vector_int128: 4077 return SemaFeatureCheck(*this, TheCall, "vsx", 4078 diag::err_ppc_builtin_only_on_arch, "7"); 4079 case PPC::BI__builtin_pdepd: 4080 case PPC::BI__builtin_pextd: 4081 return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions", 4082 diag::err_ppc_builtin_only_on_arch, "10"); 4083 case PPC::BI__builtin_altivec_vgnb: 4084 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 4085 case PPC::BI__builtin_altivec_vec_replace_elt: 4086 case PPC::BI__builtin_altivec_vec_replace_unaligned: { 4087 QualType VecTy = TheCall->getArg(0)->getType(); 4088 QualType EltTy = TheCall->getArg(1)->getType(); 4089 unsigned Width = Context.getIntWidth(EltTy); 4090 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) || 4091 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy); 4092 } 4093 case PPC::BI__builtin_vsx_xxeval: 4094 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 4095 case PPC::BI__builtin_altivec_vsldbi: 4096 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4097 case PPC::BI__builtin_altivec_vsrdbi: 4098 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4099 case PPC::BI__builtin_vsx_xxpermx: 4100 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 4101 case PPC::BI__builtin_ppc_tw: 4102 case PPC::BI__builtin_ppc_tdw: 4103 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 4104 case PPC::BI__builtin_ppc_cmpeqb: 4105 case PPC::BI__builtin_ppc_setb: 4106 case PPC::BI__builtin_ppc_maddhd: 4107 case PPC::BI__builtin_ppc_maddhdu: 4108 case PPC::BI__builtin_ppc_maddld: 4109 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4110 diag::err_ppc_builtin_only_on_arch, "9"); 4111 case PPC::BI__builtin_ppc_cmprb: 4112 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4113 diag::err_ppc_builtin_only_on_arch, "9") || 4114 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4115 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 4116 // be a constant that represents a contiguous bit field. 
4117 case PPC::BI__builtin_ppc_rlwnm: 4118 return SemaValueIsRunOfOnes(TheCall, 2); 4119 case PPC::BI__builtin_ppc_rlwimi: 4120 case PPC::BI__builtin_ppc_rldimi: 4121 return SemaBuiltinConstantArg(TheCall, 2, Result) || 4122 SemaValueIsRunOfOnes(TheCall, 3); 4123 case PPC::BI__builtin_ppc_extract_exp: 4124 case PPC::BI__builtin_ppc_extract_sig: 4125 case PPC::BI__builtin_ppc_insert_exp: 4126 return SemaFeatureCheck(*this, TheCall, "power9-vector", 4127 diag::err_ppc_builtin_only_on_arch, "9"); 4128 case PPC::BI__builtin_ppc_addex: { 4129 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4130 diag::err_ppc_builtin_only_on_arch, "9") || 4131 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 4132 return true; 4133 // Output warning for reserved values 1 to 3. 4134 int ArgValue = 4135 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 4136 if (ArgValue != 0) 4137 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 4138 << ArgValue; 4139 return false; 4140 } 4141 case PPC::BI__builtin_ppc_mtfsb0: 4142 case PPC::BI__builtin_ppc_mtfsb1: 4143 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 4144 case PPC::BI__builtin_ppc_mtfsf: 4145 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 4146 case PPC::BI__builtin_ppc_mtfsfi: 4147 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4148 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4149 case PPC::BI__builtin_ppc_alignx: 4150 return SemaBuiltinConstantArgPower2(TheCall, 0); 4151 case PPC::BI__builtin_ppc_rdlam: 4152 return SemaValueIsRunOfOnes(TheCall, 2); 4153 case PPC::BI__builtin_ppc_icbt: 4154 case PPC::BI__builtin_ppc_sthcx: 4155 case PPC::BI__builtin_ppc_stbcx: 4156 case PPC::BI__builtin_ppc_lharx: 4157 case PPC::BI__builtin_ppc_lbarx: 4158 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4159 diag::err_ppc_builtin_only_on_arch, "8"); 4160 case PPC::BI__builtin_vsx_ldrmb: 4161 case PPC::BI__builtin_vsx_strmb: 4162 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4163 diag::err_ppc_builtin_only_on_arch, "8") || 4164 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 4165 case PPC::BI__builtin_altivec_vcntmbb: 4166 case PPC::BI__builtin_altivec_vcntmbh: 4167 case PPC::BI__builtin_altivec_vcntmbw: 4168 case PPC::BI__builtin_altivec_vcntmbd: 4169 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4170 case PPC::BI__builtin_darn: 4171 case PPC::BI__builtin_darn_raw: 4172 case PPC::BI__builtin_darn_32: 4173 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4174 diag::err_ppc_builtin_only_on_arch, "9"); 4175 case PPC::BI__builtin_vsx_xxgenpcvbm: 4176 case PPC::BI__builtin_vsx_xxgenpcvhm: 4177 case PPC::BI__builtin_vsx_xxgenpcvwm: 4178 case PPC::BI__builtin_vsx_xxgenpcvdm: 4179 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 4180 case PPC::BI__builtin_ppc_compare_exp_uo: 4181 case PPC::BI__builtin_ppc_compare_exp_lt: 4182 case PPC::BI__builtin_ppc_compare_exp_gt: 4183 case PPC::BI__builtin_ppc_compare_exp_eq: 4184 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4185 diag::err_ppc_builtin_only_on_arch, "9") || 4186 SemaFeatureCheck(*this, TheCall, "vsx", 4187 diag::err_ppc_builtin_requires_vsx); 4188 case PPC::BI__builtin_ppc_test_data_class: { 4189 // Check if the first argument of the __builtin_ppc_test_data_class call is 4190 // valid. The argument must be either a 'float' or a 'double'. 
4191 QualType ArgType = TheCall->getArg(0)->getType(); 4192 if (ArgType != QualType(Context.FloatTy) && 4193 ArgType != QualType(Context.DoubleTy)) 4194 return Diag(TheCall->getBeginLoc(), 4195 diag::err_ppc_invalid_test_data_class_type); 4196 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4197 diag::err_ppc_builtin_only_on_arch, "9") || 4198 SemaFeatureCheck(*this, TheCall, "vsx", 4199 diag::err_ppc_builtin_requires_vsx) || 4200 SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 4201 } 4202 case PPC::BI__builtin_ppc_maxfe: 4203 case PPC::BI__builtin_ppc_minfe: 4204 case PPC::BI__builtin_ppc_maxfl: 4205 case PPC::BI__builtin_ppc_minfl: 4206 case PPC::BI__builtin_ppc_maxfs: 4207 case PPC::BI__builtin_ppc_minfs: { 4208 if (Context.getTargetInfo().getTriple().isOSAIX() && 4209 (BuiltinID == PPC::BI__builtin_ppc_maxfe || 4210 BuiltinID == PPC::BI__builtin_ppc_minfe)) 4211 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type) 4212 << "builtin" << true << 128 << QualType(Context.LongDoubleTy) 4213 << false << Context.getTargetInfo().getTriple().str(); 4214 // Argument type should be exact. 4215 QualType ArgType = QualType(Context.LongDoubleTy); 4216 if (BuiltinID == PPC::BI__builtin_ppc_maxfl || 4217 BuiltinID == PPC::BI__builtin_ppc_minfl) 4218 ArgType = QualType(Context.DoubleTy); 4219 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs || 4220 BuiltinID == PPC::BI__builtin_ppc_minfs) 4221 ArgType = QualType(Context.FloatTy); 4222 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I) 4223 if (TheCall->getArg(I)->getType() != ArgType) 4224 return Diag(TheCall->getBeginLoc(), 4225 diag::err_typecheck_convert_incompatible) 4226 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0; 4227 return false; 4228 } 4229 case PPC::BI__builtin_ppc_load8r: 4230 case PPC::BI__builtin_ppc_store8r: 4231 return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions", 4232 diag::err_ppc_builtin_only_on_arch, "7"); 4233 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ 4234 case PPC::BI__builtin_##Name: \ 4235 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 4236 #include "clang/Basic/BuiltinsPPC.def" 4237 } 4238 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4239 } 4240 4241 // Check if the given type is a non-pointer PPC MMA type. This function is used 4242 // in Sema to prevent invalid uses of restricted PPC MMA types. 
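// For example (illustrative): the MMA types listed in PPCTypes.def, such as
// __vector_quad and __vector_pair, are diagnosed here with
// err_ppc_invalid_use_mma_type, while pointers to and arrays of those types
// are allowed.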
4243 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
4244 if (Type->isPointerType() || Type->isArrayType())
4245 return false;
4246
4247 QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
4248 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
4249 if (false
4250 #include "clang/Basic/PPCTypes.def"
4251 ) {
4252 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
4253 return true;
4254 }
4255 return false;
4256 }
4257
4258 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
4259 CallExpr *TheCall) {
4260 // Position of the memory order and scope arguments in the builtin.
4261 unsigned OrderIndex, ScopeIndex;
4262 switch (BuiltinID) {
4263 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
4264 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
4265 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
4266 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
4267 OrderIndex = 2;
4268 ScopeIndex = 3;
4269 break;
4270 case AMDGPU::BI__builtin_amdgcn_fence:
4271 OrderIndex = 0;
4272 ScopeIndex = 1;
4273 break;
4274 default:
4275 return false;
4276 }
4277
4278 ExprResult Arg = TheCall->getArg(OrderIndex);
4279 auto ArgExpr = Arg.get();
4280 Expr::EvalResult ArgResult;
4281
4282 if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
4283 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
4284 << ArgExpr->getType();
4285 auto Ord = ArgResult.Val.getInt().getZExtValue();
4286
4287 // Check validity of memory ordering as per C11 / C++11's memory model.
4288 // Only fence needs this check. Atomic dec/inc allow all memory orders.
4289 if (!llvm::isValidAtomicOrderingCABI(Ord))
4290 return Diag(ArgExpr->getBeginLoc(),
4291 diag::warn_atomic_op_has_invalid_memory_order)
4292 << ArgExpr->getSourceRange();
4293 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
4294 case llvm::AtomicOrderingCABI::relaxed:
4295 case llvm::AtomicOrderingCABI::consume:
4296 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
4297 return Diag(ArgExpr->getBeginLoc(),
4298 diag::warn_atomic_op_has_invalid_memory_order)
4299 << ArgExpr->getSourceRange();
4300 break;
4301 case llvm::AtomicOrderingCABI::acquire:
4302 case llvm::AtomicOrderingCABI::release:
4303 case llvm::AtomicOrderingCABI::acq_rel:
4304 case llvm::AtomicOrderingCABI::seq_cst:
4305 break;
4306 }
4307
4308 Arg = TheCall->getArg(ScopeIndex);
4309 ArgExpr = Arg.get();
4310 Expr::EvalResult ArgResult1;
4311 // Check that the sync scope is a constant literal.
4312 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
4313 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
4314 << ArgExpr->getType();
4315
4316 return false;
4317 }
4318
4319 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
4320 llvm::APSInt Result;
4321
4322 // We can't check the value of a dependent argument.
4323 Expr *Arg = TheCall->getArg(ArgNum);
4324 if (Arg->isTypeDependent() || Arg->isValueDependent())
4325 return false;
4326
4327 // Check constant-ness first.
4328 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4329 return true;
4330
4331 int64_t Val = Result.getSExtValue();
4332 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
4333 return false;
4334
4335 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
4336 << Arg->getSourceRange();
4337 }
4338
4339 static bool isRISCV32Builtin(unsigned BuiltinID) {
4340 // These builtins only work on riscv32 targets.
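  // For example (illustrative): __builtin_riscv_zip_32 used on a riscv64
  // target is rejected with err_32_bit_builtin_64_bit_tgt in
  // CheckRISCVBuiltinFunctionCall.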
4341 switch (BuiltinID) { 4342 case RISCV::BI__builtin_riscv_zip_32: 4343 case RISCV::BI__builtin_riscv_unzip_32: 4344 case RISCV::BI__builtin_riscv_aes32dsi_32: 4345 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4346 case RISCV::BI__builtin_riscv_aes32esi_32: 4347 case RISCV::BI__builtin_riscv_aes32esmi_32: 4348 case RISCV::BI__builtin_riscv_sha512sig0h_32: 4349 case RISCV::BI__builtin_riscv_sha512sig0l_32: 4350 case RISCV::BI__builtin_riscv_sha512sig1h_32: 4351 case RISCV::BI__builtin_riscv_sha512sig1l_32: 4352 case RISCV::BI__builtin_riscv_sha512sum0r_32: 4353 case RISCV::BI__builtin_riscv_sha512sum1r_32: 4354 return true; 4355 } 4356 4357 return false; 4358 } 4359 4360 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 4361 unsigned BuiltinID, 4362 CallExpr *TheCall) { 4363 // CodeGenFunction can also detect this, but this gives a better error 4364 // message. 4365 bool FeatureMissing = false; 4366 SmallVector<StringRef> ReqFeatures; 4367 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 4368 Features.split(ReqFeatures, ','); 4369 4370 // Check for 32-bit only builtins on a 64-bit target. 4371 const llvm::Triple &TT = TI.getTriple(); 4372 if (TT.getArch() != llvm::Triple::riscv32 && isRISCV32Builtin(BuiltinID)) 4373 return Diag(TheCall->getCallee()->getBeginLoc(), 4374 diag::err_32_bit_builtin_64_bit_tgt); 4375 4376 // Check if each required feature is included 4377 for (StringRef F : ReqFeatures) { 4378 SmallVector<StringRef> ReqOpFeatures; 4379 F.split(ReqOpFeatures, '|'); 4380 bool HasFeature = false; 4381 for (StringRef OF : ReqOpFeatures) { 4382 if (TI.hasFeature(OF)) { 4383 HasFeature = true; 4384 continue; 4385 } 4386 } 4387 4388 if (!HasFeature) { 4389 std::string FeatureStrs; 4390 for (StringRef OF : ReqOpFeatures) { 4391 // If the feature is 64bit, alter the string so it will print better in 4392 // the diagnostic. 4393 if (OF == "64bit") 4394 OF = "RV64"; 4395 4396 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 4397 OF.consume_front("experimental-"); 4398 std::string FeatureStr = OF.str(); 4399 FeatureStr[0] = std::toupper(FeatureStr[0]); 4400 // Combine strings. 4401 FeatureStrs += FeatureStrs == "" ? 
"" : ", "; 4402 FeatureStrs += "'"; 4403 FeatureStrs += FeatureStr; 4404 FeatureStrs += "'"; 4405 } 4406 // Error message 4407 FeatureMissing = true; 4408 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4409 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4410 } 4411 } 4412 4413 if (FeatureMissing) 4414 return true; 4415 4416 switch (BuiltinID) { 4417 case RISCVVector::BI__builtin_rvv_vsetvli: 4418 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4419 CheckRISCVLMUL(TheCall, 2); 4420 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4421 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4422 CheckRISCVLMUL(TheCall, 1); 4423 case RISCVVector::BI__builtin_rvv_vget_v: { 4424 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4425 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4426 TheCall->getType().getCanonicalType().getTypePtr())); 4427 ASTContext::BuiltinVectorTypeInfo VecInfo = 4428 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4429 TheCall->getArg(0)->getType().getCanonicalType().getTypePtr())); 4430 unsigned MaxIndex = 4431 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) / 4432 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors); 4433 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4434 } 4435 case RISCVVector::BI__builtin_rvv_vset_v: { 4436 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4437 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4438 TheCall->getType().getCanonicalType().getTypePtr())); 4439 ASTContext::BuiltinVectorTypeInfo VecInfo = 4440 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4441 TheCall->getArg(2)->getType().getCanonicalType().getTypePtr())); 4442 unsigned MaxIndex = 4443 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) / 4444 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors); 4445 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4446 } 4447 // Check if byteselect is in [0, 3] 4448 case RISCV::BI__builtin_riscv_aes32dsi_32: 4449 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4450 case RISCV::BI__builtin_riscv_aes32esi_32: 4451 case RISCV::BI__builtin_riscv_aes32esmi_32: 4452 case RISCV::BI__builtin_riscv_sm4ks: 4453 case RISCV::BI__builtin_riscv_sm4ed: 4454 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4455 // Check if rnum is in [0, 10] 4456 case RISCV::BI__builtin_riscv_aes64ks1i_64: 4457 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10); 4458 } 4459 4460 return false; 4461 } 4462 4463 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 4464 CallExpr *TheCall) { 4465 if (BuiltinID == SystemZ::BI__builtin_tabort) { 4466 Expr *Arg = TheCall->getArg(0); 4467 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 4468 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 4469 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 4470 << Arg->getSourceRange(); 4471 } 4472 4473 // For intrinsics which take an immediate value as part of the instruction, 4474 // range check them here. 
4475 unsigned i = 0, l = 0, u = 0; 4476 switch (BuiltinID) { 4477 default: return false; 4478 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 4479 case SystemZ::BI__builtin_s390_verimb: 4480 case SystemZ::BI__builtin_s390_verimh: 4481 case SystemZ::BI__builtin_s390_verimf: 4482 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 4483 case SystemZ::BI__builtin_s390_vfaeb: 4484 case SystemZ::BI__builtin_s390_vfaeh: 4485 case SystemZ::BI__builtin_s390_vfaef: 4486 case SystemZ::BI__builtin_s390_vfaebs: 4487 case SystemZ::BI__builtin_s390_vfaehs: 4488 case SystemZ::BI__builtin_s390_vfaefs: 4489 case SystemZ::BI__builtin_s390_vfaezb: 4490 case SystemZ::BI__builtin_s390_vfaezh: 4491 case SystemZ::BI__builtin_s390_vfaezf: 4492 case SystemZ::BI__builtin_s390_vfaezbs: 4493 case SystemZ::BI__builtin_s390_vfaezhs: 4494 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 4495 case SystemZ::BI__builtin_s390_vfisb: 4496 case SystemZ::BI__builtin_s390_vfidb: 4497 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 4498 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4499 case SystemZ::BI__builtin_s390_vftcisb: 4500 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 4501 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 4502 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 4503 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 4504 case SystemZ::BI__builtin_s390_vstrcb: 4505 case SystemZ::BI__builtin_s390_vstrch: 4506 case SystemZ::BI__builtin_s390_vstrcf: 4507 case SystemZ::BI__builtin_s390_vstrczb: 4508 case SystemZ::BI__builtin_s390_vstrczh: 4509 case SystemZ::BI__builtin_s390_vstrczf: 4510 case SystemZ::BI__builtin_s390_vstrcbs: 4511 case SystemZ::BI__builtin_s390_vstrchs: 4512 case SystemZ::BI__builtin_s390_vstrcfs: 4513 case SystemZ::BI__builtin_s390_vstrczbs: 4514 case SystemZ::BI__builtin_s390_vstrczhs: 4515 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 4516 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 4517 case SystemZ::BI__builtin_s390_vfminsb: 4518 case SystemZ::BI__builtin_s390_vfmaxsb: 4519 case SystemZ::BI__builtin_s390_vfmindb: 4520 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 4521 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 4522 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 4523 case SystemZ::BI__builtin_s390_vclfnhs: 4524 case SystemZ::BI__builtin_s390_vclfnls: 4525 case SystemZ::BI__builtin_s390_vcfn: 4526 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 4527 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 4528 } 4529 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4530 } 4531 4532 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 4533 /// This checks that the target supports __builtin_cpu_supports and 4534 /// that the string argument is constant and valid. 4535 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 4536 CallExpr *TheCall) { 4537 Expr *Arg = TheCall->getArg(0); 4538 4539 // Check if the argument is a string literal. 4540 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4541 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4542 << Arg->getSourceRange(); 4543 4544 // Check the contents of the string. 
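  // For example (illustrative usage only), __builtin_cpu_supports("sse4.2")
  // is accepted on x86, while a misspelled or unknown feature name fails the
  // validateCpuSupports() check below.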
4545 StringRef Feature = 4546 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4547 if (!TI.validateCpuSupports(Feature)) 4548 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 4549 << Arg->getSourceRange(); 4550 return false; 4551 } 4552 4553 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 4554 /// This checks that the target supports __builtin_cpu_is and 4555 /// that the string argument is constant and valid. 4556 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4557 Expr *Arg = TheCall->getArg(0); 4558 4559 // Check if the argument is a string literal. 4560 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4561 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4562 << Arg->getSourceRange(); 4563 4564 // Check the contents of the string. 4565 StringRef Feature = 4566 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4567 if (!TI.validateCpuIs(Feature)) 4568 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4569 << Arg->getSourceRange(); 4570 return false; 4571 } 4572 4573 // Check if the rounding mode is legal. 4574 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4575 // Indicates if this instruction has rounding control or just SAE. 4576 bool HasRC = false; 4577 4578 unsigned ArgNum = 0; 4579 switch (BuiltinID) { 4580 default: 4581 return false; 4582 case X86::BI__builtin_ia32_vcvttsd2si32: 4583 case X86::BI__builtin_ia32_vcvttsd2si64: 4584 case X86::BI__builtin_ia32_vcvttsd2usi32: 4585 case X86::BI__builtin_ia32_vcvttsd2usi64: 4586 case X86::BI__builtin_ia32_vcvttss2si32: 4587 case X86::BI__builtin_ia32_vcvttss2si64: 4588 case X86::BI__builtin_ia32_vcvttss2usi32: 4589 case X86::BI__builtin_ia32_vcvttss2usi64: 4590 case X86::BI__builtin_ia32_vcvttsh2si32: 4591 case X86::BI__builtin_ia32_vcvttsh2si64: 4592 case X86::BI__builtin_ia32_vcvttsh2usi32: 4593 case X86::BI__builtin_ia32_vcvttsh2usi64: 4594 ArgNum = 1; 4595 break; 4596 case X86::BI__builtin_ia32_maxpd512: 4597 case X86::BI__builtin_ia32_maxps512: 4598 case X86::BI__builtin_ia32_minpd512: 4599 case X86::BI__builtin_ia32_minps512: 4600 case X86::BI__builtin_ia32_maxph512: 4601 case X86::BI__builtin_ia32_minph512: 4602 ArgNum = 2; 4603 break; 4604 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4605 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4606 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4607 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 4608 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 4609 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 4610 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 4611 case X86::BI__builtin_ia32_cvttps2dq512_mask: 4612 case X86::BI__builtin_ia32_cvttps2qq512_mask: 4613 case X86::BI__builtin_ia32_cvttps2udq512_mask: 4614 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 4615 case X86::BI__builtin_ia32_vcvttph2w512_mask: 4616 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 4617 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 4618 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 4619 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 4620 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 4621 case X86::BI__builtin_ia32_exp2pd_mask: 4622 case X86::BI__builtin_ia32_exp2ps_mask: 4623 case X86::BI__builtin_ia32_getexppd512_mask: 4624 case X86::BI__builtin_ia32_getexpps512_mask: 4625 case X86::BI__builtin_ia32_getexpph512_mask: 4626 case X86::BI__builtin_ia32_rcp28pd_mask: 4627 case X86::BI__builtin_ia32_rcp28ps_mask: 4628 case X86::BI__builtin_ia32_rsqrt28pd_mask: 
4629 case X86::BI__builtin_ia32_rsqrt28ps_mask: 4630 case X86::BI__builtin_ia32_vcomisd: 4631 case X86::BI__builtin_ia32_vcomiss: 4632 case X86::BI__builtin_ia32_vcomish: 4633 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 4634 ArgNum = 3; 4635 break; 4636 case X86::BI__builtin_ia32_cmppd512_mask: 4637 case X86::BI__builtin_ia32_cmpps512_mask: 4638 case X86::BI__builtin_ia32_cmpsd_mask: 4639 case X86::BI__builtin_ia32_cmpss_mask: 4640 case X86::BI__builtin_ia32_cmpsh_mask: 4641 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 4642 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 4643 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 4644 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4645 case X86::BI__builtin_ia32_getexpss128_round_mask: 4646 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4647 case X86::BI__builtin_ia32_getmantpd512_mask: 4648 case X86::BI__builtin_ia32_getmantps512_mask: 4649 case X86::BI__builtin_ia32_getmantph512_mask: 4650 case X86::BI__builtin_ia32_maxsd_round_mask: 4651 case X86::BI__builtin_ia32_maxss_round_mask: 4652 case X86::BI__builtin_ia32_maxsh_round_mask: 4653 case X86::BI__builtin_ia32_minsd_round_mask: 4654 case X86::BI__builtin_ia32_minss_round_mask: 4655 case X86::BI__builtin_ia32_minsh_round_mask: 4656 case X86::BI__builtin_ia32_rcp28sd_round_mask: 4657 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4658 case X86::BI__builtin_ia32_reducepd512_mask: 4659 case X86::BI__builtin_ia32_reduceps512_mask: 4660 case X86::BI__builtin_ia32_reduceph512_mask: 4661 case X86::BI__builtin_ia32_rndscalepd_mask: 4662 case X86::BI__builtin_ia32_rndscaleps_mask: 4663 case X86::BI__builtin_ia32_rndscaleph_mask: 4664 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4665 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4666 ArgNum = 4; 4667 break; 4668 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4669 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4670 case X86::BI__builtin_ia32_fixupimmps512_mask: 4671 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4672 case X86::BI__builtin_ia32_fixupimmsd_mask: 4673 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4674 case X86::BI__builtin_ia32_fixupimmss_mask: 4675 case X86::BI__builtin_ia32_fixupimmss_maskz: 4676 case X86::BI__builtin_ia32_getmantsd_round_mask: 4677 case X86::BI__builtin_ia32_getmantss_round_mask: 4678 case X86::BI__builtin_ia32_getmantsh_round_mask: 4679 case X86::BI__builtin_ia32_rangepd512_mask: 4680 case X86::BI__builtin_ia32_rangeps512_mask: 4681 case X86::BI__builtin_ia32_rangesd128_round_mask: 4682 case X86::BI__builtin_ia32_rangess128_round_mask: 4683 case X86::BI__builtin_ia32_reducesd_mask: 4684 case X86::BI__builtin_ia32_reducess_mask: 4685 case X86::BI__builtin_ia32_reducesh_mask: 4686 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4687 case X86::BI__builtin_ia32_rndscaless_round_mask: 4688 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4689 ArgNum = 5; 4690 break; 4691 case X86::BI__builtin_ia32_vcvtsd2si64: 4692 case X86::BI__builtin_ia32_vcvtsd2si32: 4693 case X86::BI__builtin_ia32_vcvtsd2usi32: 4694 case X86::BI__builtin_ia32_vcvtsd2usi64: 4695 case X86::BI__builtin_ia32_vcvtss2si32: 4696 case X86::BI__builtin_ia32_vcvtss2si64: 4697 case X86::BI__builtin_ia32_vcvtss2usi32: 4698 case X86::BI__builtin_ia32_vcvtss2usi64: 4699 case X86::BI__builtin_ia32_vcvtsh2si32: 4700 case X86::BI__builtin_ia32_vcvtsh2si64: 4701 case X86::BI__builtin_ia32_vcvtsh2usi32: 4702 case X86::BI__builtin_ia32_vcvtsh2usi64: 4703 case X86::BI__builtin_ia32_sqrtpd512: 4704 case X86::BI__builtin_ia32_sqrtps512: 4705 case 
X86::BI__builtin_ia32_sqrtph512: 4706 ArgNum = 1; 4707 HasRC = true; 4708 break; 4709 case X86::BI__builtin_ia32_addph512: 4710 case X86::BI__builtin_ia32_divph512: 4711 case X86::BI__builtin_ia32_mulph512: 4712 case X86::BI__builtin_ia32_subph512: 4713 case X86::BI__builtin_ia32_addpd512: 4714 case X86::BI__builtin_ia32_addps512: 4715 case X86::BI__builtin_ia32_divpd512: 4716 case X86::BI__builtin_ia32_divps512: 4717 case X86::BI__builtin_ia32_mulpd512: 4718 case X86::BI__builtin_ia32_mulps512: 4719 case X86::BI__builtin_ia32_subpd512: 4720 case X86::BI__builtin_ia32_subps512: 4721 case X86::BI__builtin_ia32_cvtsi2sd64: 4722 case X86::BI__builtin_ia32_cvtsi2ss32: 4723 case X86::BI__builtin_ia32_cvtsi2ss64: 4724 case X86::BI__builtin_ia32_cvtusi2sd64: 4725 case X86::BI__builtin_ia32_cvtusi2ss32: 4726 case X86::BI__builtin_ia32_cvtusi2ss64: 4727 case X86::BI__builtin_ia32_vcvtusi2sh: 4728 case X86::BI__builtin_ia32_vcvtusi642sh: 4729 case X86::BI__builtin_ia32_vcvtsi2sh: 4730 case X86::BI__builtin_ia32_vcvtsi642sh: 4731 ArgNum = 2; 4732 HasRC = true; 4733 break; 4734 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4735 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4736 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4737 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 4738 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 4739 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4740 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4741 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4742 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4743 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4744 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4745 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4746 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4747 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4748 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4749 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4750 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4751 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4752 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4753 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4754 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4755 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4756 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4757 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4758 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4759 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4760 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4761 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4762 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4763 ArgNum = 3; 4764 HasRC = true; 4765 break; 4766 case X86::BI__builtin_ia32_addsh_round_mask: 4767 case X86::BI__builtin_ia32_addss_round_mask: 4768 case X86::BI__builtin_ia32_addsd_round_mask: 4769 case X86::BI__builtin_ia32_divsh_round_mask: 4770 case X86::BI__builtin_ia32_divss_round_mask: 4771 case X86::BI__builtin_ia32_divsd_round_mask: 4772 case X86::BI__builtin_ia32_mulsh_round_mask: 4773 case X86::BI__builtin_ia32_mulss_round_mask: 4774 case X86::BI__builtin_ia32_mulsd_round_mask: 4775 case X86::BI__builtin_ia32_subsh_round_mask: 4776 case X86::BI__builtin_ia32_subss_round_mask: 4777 case X86::BI__builtin_ia32_subsd_round_mask: 4778 case X86::BI__builtin_ia32_scalefph512_mask: 4779 case X86::BI__builtin_ia32_scalefpd512_mask: 4780 case X86::BI__builtin_ia32_scalefps512_mask: 4781 case X86::BI__builtin_ia32_scalefsd_round_mask: 4782 case X86::BI__builtin_ia32_scalefss_round_mask: 4783 case X86::BI__builtin_ia32_scalefsh_round_mask: 4784 case 
X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4785 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4786 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4787 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4788 case X86::BI__builtin_ia32_sqrtss_round_mask: 4789 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4790 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4791 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4792 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4793 case X86::BI__builtin_ia32_vfmaddss3_mask: 4794 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4795 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4796 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4797 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4798 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4799 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4800 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4801 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4802 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4803 case X86::BI__builtin_ia32_vfmaddps512_mask: 4804 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4805 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4806 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4807 case X86::BI__builtin_ia32_vfmaddph512_mask: 4808 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4809 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4810 case X86::BI__builtin_ia32_vfmsubph512_mask3: 4811 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 4812 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 4813 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 4814 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 4815 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 4816 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 4817 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 4818 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 4819 case X86::BI__builtin_ia32_vfmaddsubph512_mask: 4820 case X86::BI__builtin_ia32_vfmaddsubph512_maskz: 4821 case X86::BI__builtin_ia32_vfmaddsubph512_mask3: 4822 case X86::BI__builtin_ia32_vfmsubaddph512_mask3: 4823 case X86::BI__builtin_ia32_vfmaddcsh_mask: 4824 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: 4825 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: 4826 case X86::BI__builtin_ia32_vfmaddcph512_mask: 4827 case X86::BI__builtin_ia32_vfmaddcph512_maskz: 4828 case X86::BI__builtin_ia32_vfmaddcph512_mask3: 4829 case X86::BI__builtin_ia32_vfcmaddcsh_mask: 4830 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: 4831 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: 4832 case X86::BI__builtin_ia32_vfcmaddcph512_mask: 4833 case X86::BI__builtin_ia32_vfcmaddcph512_maskz: 4834 case X86::BI__builtin_ia32_vfcmaddcph512_mask3: 4835 case X86::BI__builtin_ia32_vfmulcsh_mask: 4836 case X86::BI__builtin_ia32_vfmulcph512_mask: 4837 case X86::BI__builtin_ia32_vfcmulcsh_mask: 4838 case X86::BI__builtin_ia32_vfcmulcph512_mask: 4839 ArgNum = 4; 4840 HasRC = true; 4841 break; 4842 } 4843 4844 llvm::APSInt Result; 4845 4846 // We can't check the value of a dependent argument. 4847 Expr *Arg = TheCall->getArg(ArgNum); 4848 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4849 return false; 4850 4851 // Check constant-ness first. 4852 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4853 return true; 4854 4855 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 4856 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 4857 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 4858 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 
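  // In terms of the <immintrin.h> macros, _MM_FROUND_CUR_DIRECTION is 4,
  // _MM_FROUND_NO_EXC is 8, and the explicit rounding modes occupy bits 1:0.
  // For example (illustrative usage), an AVX-512 call such as
  //   _mm512_add_round_pd(A, B, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)
  // reaches this check with the immediate 8 and is accepted, whereas passing
  // _MM_FROUND_TO_NEAREST_INT on its own is diagnosed below.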
4859 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 4860 Result == 8/*ROUND_NO_EXC*/ || 4861 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 4862 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 4863 return false; 4864 4865 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 4866 << Arg->getSourceRange(); 4867 } 4868 4869 // Check if the gather/scatter scale is legal. 4870 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 4871 CallExpr *TheCall) { 4872 unsigned ArgNum = 0; 4873 switch (BuiltinID) { 4874 default: 4875 return false; 4876 case X86::BI__builtin_ia32_gatherpfdpd: 4877 case X86::BI__builtin_ia32_gatherpfdps: 4878 case X86::BI__builtin_ia32_gatherpfqpd: 4879 case X86::BI__builtin_ia32_gatherpfqps: 4880 case X86::BI__builtin_ia32_scatterpfdpd: 4881 case X86::BI__builtin_ia32_scatterpfdps: 4882 case X86::BI__builtin_ia32_scatterpfqpd: 4883 case X86::BI__builtin_ia32_scatterpfqps: 4884 ArgNum = 3; 4885 break; 4886 case X86::BI__builtin_ia32_gatherd_pd: 4887 case X86::BI__builtin_ia32_gatherd_pd256: 4888 case X86::BI__builtin_ia32_gatherq_pd: 4889 case X86::BI__builtin_ia32_gatherq_pd256: 4890 case X86::BI__builtin_ia32_gatherd_ps: 4891 case X86::BI__builtin_ia32_gatherd_ps256: 4892 case X86::BI__builtin_ia32_gatherq_ps: 4893 case X86::BI__builtin_ia32_gatherq_ps256: 4894 case X86::BI__builtin_ia32_gatherd_q: 4895 case X86::BI__builtin_ia32_gatherd_q256: 4896 case X86::BI__builtin_ia32_gatherq_q: 4897 case X86::BI__builtin_ia32_gatherq_q256: 4898 case X86::BI__builtin_ia32_gatherd_d: 4899 case X86::BI__builtin_ia32_gatherd_d256: 4900 case X86::BI__builtin_ia32_gatherq_d: 4901 case X86::BI__builtin_ia32_gatherq_d256: 4902 case X86::BI__builtin_ia32_gather3div2df: 4903 case X86::BI__builtin_ia32_gather3div2di: 4904 case X86::BI__builtin_ia32_gather3div4df: 4905 case X86::BI__builtin_ia32_gather3div4di: 4906 case X86::BI__builtin_ia32_gather3div4sf: 4907 case X86::BI__builtin_ia32_gather3div4si: 4908 case X86::BI__builtin_ia32_gather3div8sf: 4909 case X86::BI__builtin_ia32_gather3div8si: 4910 case X86::BI__builtin_ia32_gather3siv2df: 4911 case X86::BI__builtin_ia32_gather3siv2di: 4912 case X86::BI__builtin_ia32_gather3siv4df: 4913 case X86::BI__builtin_ia32_gather3siv4di: 4914 case X86::BI__builtin_ia32_gather3siv4sf: 4915 case X86::BI__builtin_ia32_gather3siv4si: 4916 case X86::BI__builtin_ia32_gather3siv8sf: 4917 case X86::BI__builtin_ia32_gather3siv8si: 4918 case X86::BI__builtin_ia32_gathersiv8df: 4919 case X86::BI__builtin_ia32_gathersiv16sf: 4920 case X86::BI__builtin_ia32_gatherdiv8df: 4921 case X86::BI__builtin_ia32_gatherdiv16sf: 4922 case X86::BI__builtin_ia32_gathersiv8di: 4923 case X86::BI__builtin_ia32_gathersiv16si: 4924 case X86::BI__builtin_ia32_gatherdiv8di: 4925 case X86::BI__builtin_ia32_gatherdiv16si: 4926 case X86::BI__builtin_ia32_scatterdiv2df: 4927 case X86::BI__builtin_ia32_scatterdiv2di: 4928 case X86::BI__builtin_ia32_scatterdiv4df: 4929 case X86::BI__builtin_ia32_scatterdiv4di: 4930 case X86::BI__builtin_ia32_scatterdiv4sf: 4931 case X86::BI__builtin_ia32_scatterdiv4si: 4932 case X86::BI__builtin_ia32_scatterdiv8sf: 4933 case X86::BI__builtin_ia32_scatterdiv8si: 4934 case X86::BI__builtin_ia32_scattersiv2df: 4935 case X86::BI__builtin_ia32_scattersiv2di: 4936 case X86::BI__builtin_ia32_scattersiv4df: 4937 case X86::BI__builtin_ia32_scattersiv4di: 4938 case X86::BI__builtin_ia32_scattersiv4sf: 4939 case X86::BI__builtin_ia32_scattersiv4si: 4940 case X86::BI__builtin_ia32_scattersiv8sf: 4941 
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

enum { TileRegLow = 0, TileRegHigh = 7 };

bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // The maximum number of tile registers is TileRegHigh + 1, so use one bit
  // per register in the bitset to track which registers have already been
  // used.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
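  // For example, __builtin_ia32_readeflags_u32() is only available when
  // targeting i386; CheckX86BuiltinFunctionCall below emits
  // err_32_bit_builtin_64_bit_tgt if it is used on x86-64, where the
  // __builtin_ia32_readeflags_u64 form is the one to use.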
5033 switch (BuiltinID) { 5034 case X86::BI__builtin_ia32_readeflags_u32: 5035 case X86::BI__builtin_ia32_writeeflags_u32: 5036 return true; 5037 } 5038 5039 return false; 5040 } 5041 5042 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 5043 CallExpr *TheCall) { 5044 if (BuiltinID == X86::BI__builtin_cpu_supports) 5045 return SemaBuiltinCpuSupports(*this, TI, TheCall); 5046 5047 if (BuiltinID == X86::BI__builtin_cpu_is) 5048 return SemaBuiltinCpuIs(*this, TI, TheCall); 5049 5050 // Check for 32-bit only builtins on a 64-bit target. 5051 const llvm::Triple &TT = TI.getTriple(); 5052 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 5053 return Diag(TheCall->getCallee()->getBeginLoc(), 5054 diag::err_32_bit_builtin_64_bit_tgt); 5055 5056 // If the intrinsic has rounding or SAE make sure its valid. 5057 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 5058 return true; 5059 5060 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 5061 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 5062 return true; 5063 5064 // If the intrinsic has a tile arguments, make sure they are valid. 5065 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 5066 return true; 5067 5068 // For intrinsics which take an immediate value as part of the instruction, 5069 // range check them here. 5070 int i = 0, l = 0, u = 0; 5071 switch (BuiltinID) { 5072 default: 5073 return false; 5074 case X86::BI__builtin_ia32_vec_ext_v2si: 5075 case X86::BI__builtin_ia32_vec_ext_v2di: 5076 case X86::BI__builtin_ia32_vextractf128_pd256: 5077 case X86::BI__builtin_ia32_vextractf128_ps256: 5078 case X86::BI__builtin_ia32_vextractf128_si256: 5079 case X86::BI__builtin_ia32_extract128i256: 5080 case X86::BI__builtin_ia32_extractf64x4_mask: 5081 case X86::BI__builtin_ia32_extracti64x4_mask: 5082 case X86::BI__builtin_ia32_extractf32x8_mask: 5083 case X86::BI__builtin_ia32_extracti32x8_mask: 5084 case X86::BI__builtin_ia32_extractf64x2_256_mask: 5085 case X86::BI__builtin_ia32_extracti64x2_256_mask: 5086 case X86::BI__builtin_ia32_extractf32x4_256_mask: 5087 case X86::BI__builtin_ia32_extracti32x4_256_mask: 5088 i = 1; l = 0; u = 1; 5089 break; 5090 case X86::BI__builtin_ia32_vec_set_v2di: 5091 case X86::BI__builtin_ia32_vinsertf128_pd256: 5092 case X86::BI__builtin_ia32_vinsertf128_ps256: 5093 case X86::BI__builtin_ia32_vinsertf128_si256: 5094 case X86::BI__builtin_ia32_insert128i256: 5095 case X86::BI__builtin_ia32_insertf32x8: 5096 case X86::BI__builtin_ia32_inserti32x8: 5097 case X86::BI__builtin_ia32_insertf64x4: 5098 case X86::BI__builtin_ia32_inserti64x4: 5099 case X86::BI__builtin_ia32_insertf64x2_256: 5100 case X86::BI__builtin_ia32_inserti64x2_256: 5101 case X86::BI__builtin_ia32_insertf32x4_256: 5102 case X86::BI__builtin_ia32_inserti32x4_256: 5103 i = 2; l = 0; u = 1; 5104 break; 5105 case X86::BI__builtin_ia32_vpermilpd: 5106 case X86::BI__builtin_ia32_vec_ext_v4hi: 5107 case X86::BI__builtin_ia32_vec_ext_v4si: 5108 case X86::BI__builtin_ia32_vec_ext_v4sf: 5109 case X86::BI__builtin_ia32_vec_ext_v4di: 5110 case X86::BI__builtin_ia32_extractf32x4_mask: 5111 case X86::BI__builtin_ia32_extracti32x4_mask: 5112 case X86::BI__builtin_ia32_extractf64x2_512_mask: 5113 case X86::BI__builtin_ia32_extracti64x2_512_mask: 5114 i = 1; l = 0; u = 3; 5115 break; 5116 case X86::BI_mm_prefetch: 5117 case X86::BI__builtin_ia32_vec_ext_v8hi: 5118 case X86::BI__builtin_ia32_vec_ext_v8si: 5119 i = 1; l = 0; u = 7; 5120 break; 5121 case 
X86::BI__builtin_ia32_sha1rnds4: 5122 case X86::BI__builtin_ia32_blendpd: 5123 case X86::BI__builtin_ia32_shufpd: 5124 case X86::BI__builtin_ia32_vec_set_v4hi: 5125 case X86::BI__builtin_ia32_vec_set_v4si: 5126 case X86::BI__builtin_ia32_vec_set_v4di: 5127 case X86::BI__builtin_ia32_shuf_f32x4_256: 5128 case X86::BI__builtin_ia32_shuf_f64x2_256: 5129 case X86::BI__builtin_ia32_shuf_i32x4_256: 5130 case X86::BI__builtin_ia32_shuf_i64x2_256: 5131 case X86::BI__builtin_ia32_insertf64x2_512: 5132 case X86::BI__builtin_ia32_inserti64x2_512: 5133 case X86::BI__builtin_ia32_insertf32x4: 5134 case X86::BI__builtin_ia32_inserti32x4: 5135 i = 2; l = 0; u = 3; 5136 break; 5137 case X86::BI__builtin_ia32_vpermil2pd: 5138 case X86::BI__builtin_ia32_vpermil2pd256: 5139 case X86::BI__builtin_ia32_vpermil2ps: 5140 case X86::BI__builtin_ia32_vpermil2ps256: 5141 i = 3; l = 0; u = 3; 5142 break; 5143 case X86::BI__builtin_ia32_cmpb128_mask: 5144 case X86::BI__builtin_ia32_cmpw128_mask: 5145 case X86::BI__builtin_ia32_cmpd128_mask: 5146 case X86::BI__builtin_ia32_cmpq128_mask: 5147 case X86::BI__builtin_ia32_cmpb256_mask: 5148 case X86::BI__builtin_ia32_cmpw256_mask: 5149 case X86::BI__builtin_ia32_cmpd256_mask: 5150 case X86::BI__builtin_ia32_cmpq256_mask: 5151 case X86::BI__builtin_ia32_cmpb512_mask: 5152 case X86::BI__builtin_ia32_cmpw512_mask: 5153 case X86::BI__builtin_ia32_cmpd512_mask: 5154 case X86::BI__builtin_ia32_cmpq512_mask: 5155 case X86::BI__builtin_ia32_ucmpb128_mask: 5156 case X86::BI__builtin_ia32_ucmpw128_mask: 5157 case X86::BI__builtin_ia32_ucmpd128_mask: 5158 case X86::BI__builtin_ia32_ucmpq128_mask: 5159 case X86::BI__builtin_ia32_ucmpb256_mask: 5160 case X86::BI__builtin_ia32_ucmpw256_mask: 5161 case X86::BI__builtin_ia32_ucmpd256_mask: 5162 case X86::BI__builtin_ia32_ucmpq256_mask: 5163 case X86::BI__builtin_ia32_ucmpb512_mask: 5164 case X86::BI__builtin_ia32_ucmpw512_mask: 5165 case X86::BI__builtin_ia32_ucmpd512_mask: 5166 case X86::BI__builtin_ia32_ucmpq512_mask: 5167 case X86::BI__builtin_ia32_vpcomub: 5168 case X86::BI__builtin_ia32_vpcomuw: 5169 case X86::BI__builtin_ia32_vpcomud: 5170 case X86::BI__builtin_ia32_vpcomuq: 5171 case X86::BI__builtin_ia32_vpcomb: 5172 case X86::BI__builtin_ia32_vpcomw: 5173 case X86::BI__builtin_ia32_vpcomd: 5174 case X86::BI__builtin_ia32_vpcomq: 5175 case X86::BI__builtin_ia32_vec_set_v8hi: 5176 case X86::BI__builtin_ia32_vec_set_v8si: 5177 i = 2; l = 0; u = 7; 5178 break; 5179 case X86::BI__builtin_ia32_vpermilpd256: 5180 case X86::BI__builtin_ia32_roundps: 5181 case X86::BI__builtin_ia32_roundpd: 5182 case X86::BI__builtin_ia32_roundps256: 5183 case X86::BI__builtin_ia32_roundpd256: 5184 case X86::BI__builtin_ia32_getmantpd128_mask: 5185 case X86::BI__builtin_ia32_getmantpd256_mask: 5186 case X86::BI__builtin_ia32_getmantps128_mask: 5187 case X86::BI__builtin_ia32_getmantps256_mask: 5188 case X86::BI__builtin_ia32_getmantpd512_mask: 5189 case X86::BI__builtin_ia32_getmantps512_mask: 5190 case X86::BI__builtin_ia32_getmantph128_mask: 5191 case X86::BI__builtin_ia32_getmantph256_mask: 5192 case X86::BI__builtin_ia32_getmantph512_mask: 5193 case X86::BI__builtin_ia32_vec_ext_v16qi: 5194 case X86::BI__builtin_ia32_vec_ext_v16hi: 5195 i = 1; l = 0; u = 15; 5196 break; 5197 case X86::BI__builtin_ia32_pblendd128: 5198 case X86::BI__builtin_ia32_blendps: 5199 case X86::BI__builtin_ia32_blendpd256: 5200 case X86::BI__builtin_ia32_shufpd256: 5201 case X86::BI__builtin_ia32_roundss: 5202 case X86::BI__builtin_ia32_roundsd: 5203 case 
X86::BI__builtin_ia32_rangepd128_mask: 5204 case X86::BI__builtin_ia32_rangepd256_mask: 5205 case X86::BI__builtin_ia32_rangepd512_mask: 5206 case X86::BI__builtin_ia32_rangeps128_mask: 5207 case X86::BI__builtin_ia32_rangeps256_mask: 5208 case X86::BI__builtin_ia32_rangeps512_mask: 5209 case X86::BI__builtin_ia32_getmantsd_round_mask: 5210 case X86::BI__builtin_ia32_getmantss_round_mask: 5211 case X86::BI__builtin_ia32_getmantsh_round_mask: 5212 case X86::BI__builtin_ia32_vec_set_v16qi: 5213 case X86::BI__builtin_ia32_vec_set_v16hi: 5214 i = 2; l = 0; u = 15; 5215 break; 5216 case X86::BI__builtin_ia32_vec_ext_v32qi: 5217 i = 1; l = 0; u = 31; 5218 break; 5219 case X86::BI__builtin_ia32_cmpps: 5220 case X86::BI__builtin_ia32_cmpss: 5221 case X86::BI__builtin_ia32_cmppd: 5222 case X86::BI__builtin_ia32_cmpsd: 5223 case X86::BI__builtin_ia32_cmpps256: 5224 case X86::BI__builtin_ia32_cmppd256: 5225 case X86::BI__builtin_ia32_cmpps128_mask: 5226 case X86::BI__builtin_ia32_cmppd128_mask: 5227 case X86::BI__builtin_ia32_cmpps256_mask: 5228 case X86::BI__builtin_ia32_cmppd256_mask: 5229 case X86::BI__builtin_ia32_cmpps512_mask: 5230 case X86::BI__builtin_ia32_cmppd512_mask: 5231 case X86::BI__builtin_ia32_cmpsd_mask: 5232 case X86::BI__builtin_ia32_cmpss_mask: 5233 case X86::BI__builtin_ia32_vec_set_v32qi: 5234 i = 2; l = 0; u = 31; 5235 break; 5236 case X86::BI__builtin_ia32_permdf256: 5237 case X86::BI__builtin_ia32_permdi256: 5238 case X86::BI__builtin_ia32_permdf512: 5239 case X86::BI__builtin_ia32_permdi512: 5240 case X86::BI__builtin_ia32_vpermilps: 5241 case X86::BI__builtin_ia32_vpermilps256: 5242 case X86::BI__builtin_ia32_vpermilpd512: 5243 case X86::BI__builtin_ia32_vpermilps512: 5244 case X86::BI__builtin_ia32_pshufd: 5245 case X86::BI__builtin_ia32_pshufd256: 5246 case X86::BI__builtin_ia32_pshufd512: 5247 case X86::BI__builtin_ia32_pshufhw: 5248 case X86::BI__builtin_ia32_pshufhw256: 5249 case X86::BI__builtin_ia32_pshufhw512: 5250 case X86::BI__builtin_ia32_pshuflw: 5251 case X86::BI__builtin_ia32_pshuflw256: 5252 case X86::BI__builtin_ia32_pshuflw512: 5253 case X86::BI__builtin_ia32_vcvtps2ph: 5254 case X86::BI__builtin_ia32_vcvtps2ph_mask: 5255 case X86::BI__builtin_ia32_vcvtps2ph256: 5256 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 5257 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 5258 case X86::BI__builtin_ia32_rndscaleps_128_mask: 5259 case X86::BI__builtin_ia32_rndscalepd_128_mask: 5260 case X86::BI__builtin_ia32_rndscaleps_256_mask: 5261 case X86::BI__builtin_ia32_rndscalepd_256_mask: 5262 case X86::BI__builtin_ia32_rndscaleps_mask: 5263 case X86::BI__builtin_ia32_rndscalepd_mask: 5264 case X86::BI__builtin_ia32_rndscaleph_mask: 5265 case X86::BI__builtin_ia32_reducepd128_mask: 5266 case X86::BI__builtin_ia32_reducepd256_mask: 5267 case X86::BI__builtin_ia32_reducepd512_mask: 5268 case X86::BI__builtin_ia32_reduceps128_mask: 5269 case X86::BI__builtin_ia32_reduceps256_mask: 5270 case X86::BI__builtin_ia32_reduceps512_mask: 5271 case X86::BI__builtin_ia32_reduceph128_mask: 5272 case X86::BI__builtin_ia32_reduceph256_mask: 5273 case X86::BI__builtin_ia32_reduceph512_mask: 5274 case X86::BI__builtin_ia32_prold512: 5275 case X86::BI__builtin_ia32_prolq512: 5276 case X86::BI__builtin_ia32_prold128: 5277 case X86::BI__builtin_ia32_prold256: 5278 case X86::BI__builtin_ia32_prolq128: 5279 case X86::BI__builtin_ia32_prolq256: 5280 case X86::BI__builtin_ia32_prord512: 5281 case X86::BI__builtin_ia32_prorq512: 5282 case X86::BI__builtin_ia32_prord128: 5283 case 
X86::BI__builtin_ia32_prord256: 5284 case X86::BI__builtin_ia32_prorq128: 5285 case X86::BI__builtin_ia32_prorq256: 5286 case X86::BI__builtin_ia32_fpclasspd128_mask: 5287 case X86::BI__builtin_ia32_fpclasspd256_mask: 5288 case X86::BI__builtin_ia32_fpclassps128_mask: 5289 case X86::BI__builtin_ia32_fpclassps256_mask: 5290 case X86::BI__builtin_ia32_fpclassps512_mask: 5291 case X86::BI__builtin_ia32_fpclasspd512_mask: 5292 case X86::BI__builtin_ia32_fpclassph128_mask: 5293 case X86::BI__builtin_ia32_fpclassph256_mask: 5294 case X86::BI__builtin_ia32_fpclassph512_mask: 5295 case X86::BI__builtin_ia32_fpclasssd_mask: 5296 case X86::BI__builtin_ia32_fpclassss_mask: 5297 case X86::BI__builtin_ia32_fpclasssh_mask: 5298 case X86::BI__builtin_ia32_pslldqi128_byteshift: 5299 case X86::BI__builtin_ia32_pslldqi256_byteshift: 5300 case X86::BI__builtin_ia32_pslldqi512_byteshift: 5301 case X86::BI__builtin_ia32_psrldqi128_byteshift: 5302 case X86::BI__builtin_ia32_psrldqi256_byteshift: 5303 case X86::BI__builtin_ia32_psrldqi512_byteshift: 5304 case X86::BI__builtin_ia32_kshiftliqi: 5305 case X86::BI__builtin_ia32_kshiftlihi: 5306 case X86::BI__builtin_ia32_kshiftlisi: 5307 case X86::BI__builtin_ia32_kshiftlidi: 5308 case X86::BI__builtin_ia32_kshiftriqi: 5309 case X86::BI__builtin_ia32_kshiftrihi: 5310 case X86::BI__builtin_ia32_kshiftrisi: 5311 case X86::BI__builtin_ia32_kshiftridi: 5312 i = 1; l = 0; u = 255; 5313 break; 5314 case X86::BI__builtin_ia32_vperm2f128_pd256: 5315 case X86::BI__builtin_ia32_vperm2f128_ps256: 5316 case X86::BI__builtin_ia32_vperm2f128_si256: 5317 case X86::BI__builtin_ia32_permti256: 5318 case X86::BI__builtin_ia32_pblendw128: 5319 case X86::BI__builtin_ia32_pblendw256: 5320 case X86::BI__builtin_ia32_blendps256: 5321 case X86::BI__builtin_ia32_pblendd256: 5322 case X86::BI__builtin_ia32_palignr128: 5323 case X86::BI__builtin_ia32_palignr256: 5324 case X86::BI__builtin_ia32_palignr512: 5325 case X86::BI__builtin_ia32_alignq512: 5326 case X86::BI__builtin_ia32_alignd512: 5327 case X86::BI__builtin_ia32_alignd128: 5328 case X86::BI__builtin_ia32_alignd256: 5329 case X86::BI__builtin_ia32_alignq128: 5330 case X86::BI__builtin_ia32_alignq256: 5331 case X86::BI__builtin_ia32_vcomisd: 5332 case X86::BI__builtin_ia32_vcomiss: 5333 case X86::BI__builtin_ia32_shuf_f32x4: 5334 case X86::BI__builtin_ia32_shuf_f64x2: 5335 case X86::BI__builtin_ia32_shuf_i32x4: 5336 case X86::BI__builtin_ia32_shuf_i64x2: 5337 case X86::BI__builtin_ia32_shufpd512: 5338 case X86::BI__builtin_ia32_shufps: 5339 case X86::BI__builtin_ia32_shufps256: 5340 case X86::BI__builtin_ia32_shufps512: 5341 case X86::BI__builtin_ia32_dbpsadbw128: 5342 case X86::BI__builtin_ia32_dbpsadbw256: 5343 case X86::BI__builtin_ia32_dbpsadbw512: 5344 case X86::BI__builtin_ia32_vpshldd128: 5345 case X86::BI__builtin_ia32_vpshldd256: 5346 case X86::BI__builtin_ia32_vpshldd512: 5347 case X86::BI__builtin_ia32_vpshldq128: 5348 case X86::BI__builtin_ia32_vpshldq256: 5349 case X86::BI__builtin_ia32_vpshldq512: 5350 case X86::BI__builtin_ia32_vpshldw128: 5351 case X86::BI__builtin_ia32_vpshldw256: 5352 case X86::BI__builtin_ia32_vpshldw512: 5353 case X86::BI__builtin_ia32_vpshrdd128: 5354 case X86::BI__builtin_ia32_vpshrdd256: 5355 case X86::BI__builtin_ia32_vpshrdd512: 5356 case X86::BI__builtin_ia32_vpshrdq128: 5357 case X86::BI__builtin_ia32_vpshrdq256: 5358 case X86::BI__builtin_ia32_vpshrdq512: 5359 case X86::BI__builtin_ia32_vpshrdw128: 5360 case X86::BI__builtin_ia32_vpshrdw256: 5361 case X86::BI__builtin_ia32_vpshrdw512: 5362 i 
= 2; l = 0; u = 255; 5363 break; 5364 case X86::BI__builtin_ia32_fixupimmpd512_mask: 5365 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 5366 case X86::BI__builtin_ia32_fixupimmps512_mask: 5367 case X86::BI__builtin_ia32_fixupimmps512_maskz: 5368 case X86::BI__builtin_ia32_fixupimmsd_mask: 5369 case X86::BI__builtin_ia32_fixupimmsd_maskz: 5370 case X86::BI__builtin_ia32_fixupimmss_mask: 5371 case X86::BI__builtin_ia32_fixupimmss_maskz: 5372 case X86::BI__builtin_ia32_fixupimmpd128_mask: 5373 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 5374 case X86::BI__builtin_ia32_fixupimmpd256_mask: 5375 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 5376 case X86::BI__builtin_ia32_fixupimmps128_mask: 5377 case X86::BI__builtin_ia32_fixupimmps128_maskz: 5378 case X86::BI__builtin_ia32_fixupimmps256_mask: 5379 case X86::BI__builtin_ia32_fixupimmps256_maskz: 5380 case X86::BI__builtin_ia32_pternlogd512_mask: 5381 case X86::BI__builtin_ia32_pternlogd512_maskz: 5382 case X86::BI__builtin_ia32_pternlogq512_mask: 5383 case X86::BI__builtin_ia32_pternlogq512_maskz: 5384 case X86::BI__builtin_ia32_pternlogd128_mask: 5385 case X86::BI__builtin_ia32_pternlogd128_maskz: 5386 case X86::BI__builtin_ia32_pternlogd256_mask: 5387 case X86::BI__builtin_ia32_pternlogd256_maskz: 5388 case X86::BI__builtin_ia32_pternlogq128_mask: 5389 case X86::BI__builtin_ia32_pternlogq128_maskz: 5390 case X86::BI__builtin_ia32_pternlogq256_mask: 5391 case X86::BI__builtin_ia32_pternlogq256_maskz: 5392 i = 3; l = 0; u = 255; 5393 break; 5394 case X86::BI__builtin_ia32_gatherpfdpd: 5395 case X86::BI__builtin_ia32_gatherpfdps: 5396 case X86::BI__builtin_ia32_gatherpfqpd: 5397 case X86::BI__builtin_ia32_gatherpfqps: 5398 case X86::BI__builtin_ia32_scatterpfdpd: 5399 case X86::BI__builtin_ia32_scatterpfdps: 5400 case X86::BI__builtin_ia32_scatterpfqpd: 5401 case X86::BI__builtin_ia32_scatterpfqps: 5402 i = 4; l = 2; u = 3; 5403 break; 5404 case X86::BI__builtin_ia32_reducesd_mask: 5405 case X86::BI__builtin_ia32_reducess_mask: 5406 case X86::BI__builtin_ia32_rndscalesd_round_mask: 5407 case X86::BI__builtin_ia32_rndscaless_round_mask: 5408 case X86::BI__builtin_ia32_rndscalesh_round_mask: 5409 case X86::BI__builtin_ia32_reducesh_mask: 5410 i = 4; l = 0; u = 255; 5411 break; 5412 } 5413 5414 // Note that we don't force a hard error on the range check here, allowing 5415 // template-generated or macro-generated dead code to potentially have out-of- 5416 // range values. These need to code generate, but don't need to necessarily 5417 // make any sense. We use a warning that defaults to an error. 5418 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false); 5419 } 5420 5421 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo 5422 /// parameter with the FormatAttr's correct format_idx and firstDataArg. 5423 /// Returns true when the format fits the function and the FormatStringInfo has 5424 /// been populated. 5425 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, 5426 FormatStringInfo *FSI) { 5427 FSI->HasVAListArg = Format->getFirstArg() == 0; 5428 FSI->FormatIdx = Format->getFormatIdx() - 1; 5429 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; 5430 5431 // The way the format attribute works in GCC, the implicit this argument 5432 // of member functions is counted. However, it doesn't appear in our own 5433 // lists, so decrement format_idx in that case. 
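  // For instance, given the illustrative declaration
  //
  //   struct Logger {
  //     void log(const char *Fmt, ...) __attribute__((format(printf, 2, 3)));
  //   };
  //
  // GCC numbering counts 'this' as argument 1, so the attribute names the
  // format string as argument 2; after the adjustment below, FormatIdx ends
  // up referring to parameter 0 of the method's own parameter list.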
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability
        = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
              dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
                dyn_cast<InitListExpr>(CLE->getInitializer()))
          Expr = ILE->getInit(0);
  }

  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
}

bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of the %s directive in an NSString which is being passed
/// as a format string to a formatting method.
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  } else {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
          dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
        << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
        << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
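/// For example, a parameter of type 'int *_Nonnull' is reported as non-null,
/// while a plain 'int *' is not.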
static bool isNonNullType(ASTContext &ctx, QualType type) {
  if (auto nullability = type->getNullability(ctx))
    return *nullability == NullabilityKind::NonNull;

  return false;
}

static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by the constant evaluator.
  if (S.isConstantEvaluated())
    return;

  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() ||
          isNonNullType(S.Context, PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(S.Context, paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
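  // For example, given the illustrative declaration
  //   void use(int *P) __attribute__((nonnull(1)));
  // the call 'use(nullptr)' has bit 0 set in NonNullArgs and is diagnosed
  // with warn_null_arg by the loop below.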
5631 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 5632 ArgIndex != ArgIndexEnd; ++ArgIndex) { 5633 if (NonNullArgs[ArgIndex]) 5634 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 5635 } 5636 } 5637 5638 /// Warn if a pointer or reference argument passed to a function points to an 5639 /// object that is less aligned than the parameter. This can happen when 5640 /// creating a typedef with a lower alignment than the original type and then 5641 /// calling functions defined in terms of the original type. 5642 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 5643 StringRef ParamName, QualType ArgTy, 5644 QualType ParamTy) { 5645 5646 // If a function accepts a pointer or reference type 5647 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 5648 return; 5649 5650 // If the parameter is a pointer type, get the pointee type for the 5651 // argument too. If the parameter is a reference type, don't try to get 5652 // the pointee type for the argument. 5653 if (ParamTy->isPointerType()) 5654 ArgTy = ArgTy->getPointeeType(); 5655 5656 // Remove reference or pointer 5657 ParamTy = ParamTy->getPointeeType(); 5658 5659 // Find expected alignment, and the actual alignment of the passed object. 5660 // getTypeAlignInChars requires complete types 5661 if (ArgTy.isNull() || ParamTy->isIncompleteType() || 5662 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() || 5663 ArgTy->isUndeducedType()) 5664 return; 5665 5666 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 5667 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 5668 5669 // If the argument is less aligned than the parameter, there is a 5670 // potential alignment issue. 5671 if (ArgAlign < ParamAlign) 5672 Diag(Loc, diag::warn_param_mismatched_alignment) 5673 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 5674 << ParamName << (FDecl != nullptr) << FDecl; 5675 } 5676 5677 /// Handles the checks for format strings, non-POD arguments to vararg 5678 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 5679 /// attributes. 5680 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 5681 const Expr *ThisArg, ArrayRef<const Expr *> Args, 5682 bool IsMemberFunction, SourceLocation Loc, 5683 SourceRange Range, VariadicCallType CallType) { 5684 // FIXME: We should check as much as we can in the template definition. 5685 if (CurContext->isDependentContext()) 5686 return; 5687 5688 // Printf and scanf checking. 5689 llvm::SmallBitVector CheckedVarArgs; 5690 if (FDecl) { 5691 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5692 // Only create vector if there are format attributes. 5693 CheckedVarArgs.resize(Args.size()); 5694 5695 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 5696 CheckedVarArgs); 5697 } 5698 } 5699 5700 // Refuse POD arguments that weren't caught by the format string 5701 // checks above. 5702 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 5703 if (CallType != VariadicDoesNotApply && 5704 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 5705 unsigned NumParams = Proto ? Proto->getNumParams() 5706 : FDecl && isa<FunctionDecl>(FDecl) 5707 ? cast<FunctionDecl>(FDecl)->getNumParams() 5708 : FDecl && isa<ObjCMethodDecl>(FDecl) 5709 ? cast<ObjCMethodDecl>(FDecl)->param_size() 5710 : 0; 5711 5712 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 5713 // Args[ArgIdx] can be null in malformed code. 
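      // When the argument is present and was not already covered by the
      // format string checks, checkVariadicArgument below diagnoses, for
      // example, passing an object of non-trivial type (such as a
      // std::string) through the ellipsis of an illustrative
      // 'void vlog(int, ...)' function.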
5714 if (const Expr *Arg = Args[ArgIdx]) { 5715 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 5716 checkVariadicArgument(Arg, CallType); 5717 } 5718 } 5719 } 5720 5721 if (FDecl || Proto) { 5722 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 5723 5724 // Type safety checking. 5725 if (FDecl) { 5726 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 5727 CheckArgumentWithTypeTag(I, Args, Loc); 5728 } 5729 } 5730 5731 // Check that passed arguments match the alignment of original arguments. 5732 // Try to get the missing prototype from the declaration. 5733 if (!Proto && FDecl) { 5734 const auto *FT = FDecl->getFunctionType(); 5735 if (isa_and_nonnull<FunctionProtoType>(FT)) 5736 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 5737 } 5738 if (Proto) { 5739 // For variadic functions, we may have more args than parameters. 5740 // For some K&R functions, we may have less args than parameters. 5741 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 5742 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 5743 // Args[ArgIdx] can be null in malformed code. 5744 if (const Expr *Arg = Args[ArgIdx]) { 5745 if (Arg->containsErrors()) 5746 continue; 5747 5748 QualType ParamTy = Proto->getParamType(ArgIdx); 5749 QualType ArgTy = Arg->getType(); 5750 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 5751 ArgTy, ParamTy); 5752 } 5753 } 5754 } 5755 5756 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 5757 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 5758 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 5759 if (!Arg->isValueDependent()) { 5760 Expr::EvalResult Align; 5761 if (Arg->EvaluateAsInt(Align, Context)) { 5762 const llvm::APSInt &I = Align.Val.getInt(); 5763 if (!I.isPowerOf2()) 5764 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 5765 << Arg->getSourceRange(); 5766 5767 if (I > Sema::MaximumAlignment) 5768 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 5769 << Arg->getSourceRange() << Sema::MaximumAlignment; 5770 } 5771 } 5772 } 5773 5774 if (FD) 5775 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 5776 } 5777 5778 /// CheckConstructorCall - Check a constructor call for correctness and safety 5779 /// properties not enforced by the C type system. 5780 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 5781 ArrayRef<const Expr *> Args, 5782 const FunctionProtoType *Proto, 5783 SourceLocation Loc) { 5784 VariadicCallType CallType = 5785 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 5786 5787 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 5788 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 5789 Context.getPointerType(Ctor->getThisObjectType())); 5790 5791 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 5792 Loc, SourceRange(), CallType); 5793 } 5794 5795 /// CheckFunctionCall - Check a direct function call for various correctness 5796 /// and safety properties not strictly enforced by the C type system. 
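/// For example, a call such as 'strlcpy(Dst, Src, sizeof(Dst))' is routed to
/// CheckStrlcpycatArguments below, and a 'memset(P, 0, N)' call to
/// CheckMemaccessArguments, in addition to the generic checkCall checks.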
5797 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 5798 const FunctionProtoType *Proto) { 5799 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5800 isa<CXXMethodDecl>(FDecl); 5801 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5802 IsMemberOperatorCall; 5803 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5804 TheCall->getCallee()); 5805 Expr** Args = TheCall->getArgs(); 5806 unsigned NumArgs = TheCall->getNumArgs(); 5807 5808 Expr *ImplicitThis = nullptr; 5809 if (IsMemberOperatorCall) { 5810 // If this is a call to a member operator, hide the first argument 5811 // from checkCall. 5812 // FIXME: Our choice of AST representation here is less than ideal. 5813 ImplicitThis = Args[0]; 5814 ++Args; 5815 --NumArgs; 5816 } else if (IsMemberFunction) 5817 ImplicitThis = 5818 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5819 5820 if (ImplicitThis) { 5821 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5822 // used. 5823 QualType ThisType = ImplicitThis->getType(); 5824 if (!ThisType->isPointerType()) { 5825 assert(!ThisType->isReferenceType()); 5826 ThisType = Context.getPointerType(ThisType); 5827 } 5828 5829 QualType ThisTypeFromDecl = 5830 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5831 5832 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5833 ThisTypeFromDecl); 5834 } 5835 5836 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5837 IsMemberFunction, TheCall->getRParenLoc(), 5838 TheCall->getCallee()->getSourceRange(), CallType); 5839 5840 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5841 // None of the checks below are needed for functions that don't have 5842 // simple names (e.g., C++ conversion functions). 5843 if (!FnInfo) 5844 return false; 5845 5846 // Enforce TCB except for builtin calls, which are always allowed. 5847 if (FDecl->getBuiltinID() == 0) 5848 CheckTCBEnforcement(TheCall->getExprLoc(), FDecl); 5849 5850 CheckAbsoluteValueFunction(TheCall, FDecl); 5851 CheckMaxUnsignedZero(TheCall, FDecl); 5852 5853 if (getLangOpts().ObjC) 5854 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5855 5856 unsigned CMId = FDecl->getMemoryFunctionKind(); 5857 5858 // Handle memory setting and copying functions. 5859 switch (CMId) { 5860 case 0: 5861 return false; 5862 case Builtin::BIstrlcpy: // fallthrough 5863 case Builtin::BIstrlcat: 5864 CheckStrlcpycatArguments(TheCall, FnInfo); 5865 break; 5866 case Builtin::BIstrncat: 5867 CheckStrncatArguments(TheCall, FnInfo); 5868 break; 5869 case Builtin::BIfree: 5870 CheckFreeArguments(TheCall); 5871 break; 5872 default: 5873 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5874 } 5875 5876 return false; 5877 } 5878 5879 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5880 ArrayRef<const Expr *> Args) { 5881 VariadicCallType CallType = 5882 Method->isVariadic() ? 
VariadicMethod : VariadicDoesNotApply; 5883 5884 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5885 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5886 CallType); 5887 5888 CheckTCBEnforcement(lbrac, Method); 5889 5890 return false; 5891 } 5892 5893 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5894 const FunctionProtoType *Proto) { 5895 QualType Ty; 5896 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5897 Ty = V->getType().getNonReferenceType(); 5898 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5899 Ty = F->getType().getNonReferenceType(); 5900 else 5901 return false; 5902 5903 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5904 !Ty->isFunctionProtoType()) 5905 return false; 5906 5907 VariadicCallType CallType; 5908 if (!Proto || !Proto->isVariadic()) { 5909 CallType = VariadicDoesNotApply; 5910 } else if (Ty->isBlockPointerType()) { 5911 CallType = VariadicBlock; 5912 } else { // Ty->isFunctionPointerType() 5913 CallType = VariadicFunction; 5914 } 5915 5916 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5917 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5918 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5919 TheCall->getCallee()->getSourceRange(), CallType); 5920 5921 return false; 5922 } 5923 5924 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5925 /// such as function pointers returned from functions. 5926 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 5927 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 5928 TheCall->getCallee()); 5929 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 5930 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5931 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5932 TheCall->getCallee()->getSourceRange(), CallType); 5933 5934 return false; 5935 } 5936 5937 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 5938 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 5939 return false; 5940 5941 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 5942 switch (Op) { 5943 case AtomicExpr::AO__c11_atomic_init: 5944 case AtomicExpr::AO__opencl_atomic_init: 5945 llvm_unreachable("There is no ordering argument for an init"); 5946 5947 case AtomicExpr::AO__c11_atomic_load: 5948 case AtomicExpr::AO__opencl_atomic_load: 5949 case AtomicExpr::AO__hip_atomic_load: 5950 case AtomicExpr::AO__atomic_load_n: 5951 case AtomicExpr::AO__atomic_load: 5952 return OrderingCABI != llvm::AtomicOrderingCABI::release && 5953 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5954 5955 case AtomicExpr::AO__c11_atomic_store: 5956 case AtomicExpr::AO__opencl_atomic_store: 5957 case AtomicExpr::AO__hip_atomic_store: 5958 case AtomicExpr::AO__atomic_store: 5959 case AtomicExpr::AO__atomic_store_n: 5960 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 5961 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 5962 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5963 5964 default: 5965 return true; 5966 } 5967 } 5968 5969 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 5970 AtomicExpr::AtomicOp Op) { 5971 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 5972 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5973 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 5974 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 5975 DRE->getSourceRange(), 
TheCall->getRParenLoc(), Args, 5976 Op); 5977 } 5978 5979 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 5980 SourceLocation RParenLoc, MultiExprArg Args, 5981 AtomicExpr::AtomicOp Op, 5982 AtomicArgumentOrder ArgOrder) { 5983 // All the non-OpenCL operations take one of the following forms. 5984 // The OpenCL operations take the __c11 forms with one extra argument for 5985 // synchronization scope. 5986 enum { 5987 // C __c11_atomic_init(A *, C) 5988 Init, 5989 5990 // C __c11_atomic_load(A *, int) 5991 Load, 5992 5993 // void __atomic_load(A *, CP, int) 5994 LoadCopy, 5995 5996 // void __atomic_store(A *, CP, int) 5997 Copy, 5998 5999 // C __c11_atomic_add(A *, M, int) 6000 Arithmetic, 6001 6002 // C __atomic_exchange_n(A *, CP, int) 6003 Xchg, 6004 6005 // void __atomic_exchange(A *, C *, CP, int) 6006 GNUXchg, 6007 6008 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 6009 C11CmpXchg, 6010 6011 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 6012 GNUCmpXchg 6013 } Form = Init; 6014 6015 const unsigned NumForm = GNUCmpXchg + 1; 6016 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 6017 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 6018 // where: 6019 // C is an appropriate type, 6020 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 6021 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 6022 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 6023 // the int parameters are for orderings. 6024 6025 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 6026 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 6027 "need to update code for modified forms"); 6028 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 6029 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 6030 AtomicExpr::AO__atomic_load, 6031 "need to update code for modified C11 atomics"); 6032 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 6033 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 6034 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 6035 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 6036 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 6037 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 6038 IsOpenCL; 6039 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 6040 Op == AtomicExpr::AO__atomic_store_n || 6041 Op == AtomicExpr::AO__atomic_exchange_n || 6042 Op == AtomicExpr::AO__atomic_compare_exchange_n; 6043 bool IsAddSub = false; 6044 6045 switch (Op) { 6046 case AtomicExpr::AO__c11_atomic_init: 6047 case AtomicExpr::AO__opencl_atomic_init: 6048 Form = Init; 6049 break; 6050 6051 case AtomicExpr::AO__c11_atomic_load: 6052 case AtomicExpr::AO__opencl_atomic_load: 6053 case AtomicExpr::AO__hip_atomic_load: 6054 case AtomicExpr::AO__atomic_load_n: 6055 Form = Load; 6056 break; 6057 6058 case AtomicExpr::AO__atomic_load: 6059 Form = LoadCopy; 6060 break; 6061 6062 case AtomicExpr::AO__c11_atomic_store: 6063 case AtomicExpr::AO__opencl_atomic_store: 6064 case AtomicExpr::AO__hip_atomic_store: 6065 case AtomicExpr::AO__atomic_store: 6066 case AtomicExpr::AO__atomic_store_n: 6067 Form = Copy; 6068 break; 6069 case AtomicExpr::AO__hip_atomic_fetch_add: 6070 case AtomicExpr::AO__hip_atomic_fetch_min: 6071 case AtomicExpr::AO__hip_atomic_fetch_max: 6072 case AtomicExpr::AO__c11_atomic_fetch_add: 6073 case AtomicExpr::AO__c11_atomic_fetch_sub: 6074 case AtomicExpr::AO__opencl_atomic_fetch_add: 6075 case AtomicExpr::AO__opencl_atomic_fetch_sub: 6076 case 
AtomicExpr::AO__atomic_fetch_add: 6077 case AtomicExpr::AO__atomic_fetch_sub: 6078 case AtomicExpr::AO__atomic_add_fetch: 6079 case AtomicExpr::AO__atomic_sub_fetch: 6080 IsAddSub = true; 6081 Form = Arithmetic; 6082 break; 6083 case AtomicExpr::AO__c11_atomic_fetch_and: 6084 case AtomicExpr::AO__c11_atomic_fetch_or: 6085 case AtomicExpr::AO__c11_atomic_fetch_xor: 6086 case AtomicExpr::AO__hip_atomic_fetch_and: 6087 case AtomicExpr::AO__hip_atomic_fetch_or: 6088 case AtomicExpr::AO__hip_atomic_fetch_xor: 6089 case AtomicExpr::AO__c11_atomic_fetch_nand: 6090 case AtomicExpr::AO__opencl_atomic_fetch_and: 6091 case AtomicExpr::AO__opencl_atomic_fetch_or: 6092 case AtomicExpr::AO__opencl_atomic_fetch_xor: 6093 case AtomicExpr::AO__atomic_fetch_and: 6094 case AtomicExpr::AO__atomic_fetch_or: 6095 case AtomicExpr::AO__atomic_fetch_xor: 6096 case AtomicExpr::AO__atomic_fetch_nand: 6097 case AtomicExpr::AO__atomic_and_fetch: 6098 case AtomicExpr::AO__atomic_or_fetch: 6099 case AtomicExpr::AO__atomic_xor_fetch: 6100 case AtomicExpr::AO__atomic_nand_fetch: 6101 Form = Arithmetic; 6102 break; 6103 case AtomicExpr::AO__c11_atomic_fetch_min: 6104 case AtomicExpr::AO__c11_atomic_fetch_max: 6105 case AtomicExpr::AO__opencl_atomic_fetch_min: 6106 case AtomicExpr::AO__opencl_atomic_fetch_max: 6107 case AtomicExpr::AO__atomic_min_fetch: 6108 case AtomicExpr::AO__atomic_max_fetch: 6109 case AtomicExpr::AO__atomic_fetch_min: 6110 case AtomicExpr::AO__atomic_fetch_max: 6111 Form = Arithmetic; 6112 break; 6113 6114 case AtomicExpr::AO__c11_atomic_exchange: 6115 case AtomicExpr::AO__hip_atomic_exchange: 6116 case AtomicExpr::AO__opencl_atomic_exchange: 6117 case AtomicExpr::AO__atomic_exchange_n: 6118 Form = Xchg; 6119 break; 6120 6121 case AtomicExpr::AO__atomic_exchange: 6122 Form = GNUXchg; 6123 break; 6124 6125 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 6126 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 6127 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 6128 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 6129 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 6130 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 6131 Form = C11CmpXchg; 6132 break; 6133 6134 case AtomicExpr::AO__atomic_compare_exchange: 6135 case AtomicExpr::AO__atomic_compare_exchange_n: 6136 Form = GNUCmpXchg; 6137 break; 6138 } 6139 6140 unsigned AdjustedNumArgs = NumArgs[Form]; 6141 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 6142 ++AdjustedNumArgs; 6143 // Check we have the right number of arguments. 6144 if (Args.size() < AdjustedNumArgs) { 6145 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 6146 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 6147 << ExprRange; 6148 return ExprError(); 6149 } else if (Args.size() > AdjustedNumArgs) { 6150 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 6151 diag::err_typecheck_call_too_many_args) 6152 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 6153 << ExprRange; 6154 return ExprError(); 6155 } 6156 6157 // Inspect the first argument of the atomic operation. 
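  // For example, in __c11_atomic_load(&Obj, __ATOMIC_SEQ_CST) this is &Obj,
  // which must point to an _Atomic type; the GNU __atomic_* builtins instead
  // take a pointer to the plain (non-_Atomic) type.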
6158 Expr *Ptr = Args[0]; 6159 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 6160 if (ConvertedPtr.isInvalid()) 6161 return ExprError(); 6162 6163 Ptr = ConvertedPtr.get(); 6164 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 6165 if (!pointerType) { 6166 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 6167 << Ptr->getType() << Ptr->getSourceRange(); 6168 return ExprError(); 6169 } 6170 6171 // For a __c11 builtin, this should be a pointer to an _Atomic type. 6172 QualType AtomTy = pointerType->getPointeeType(); // 'A' 6173 QualType ValType = AtomTy; // 'C' 6174 if (IsC11) { 6175 if (!AtomTy->isAtomicType()) { 6176 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 6177 << Ptr->getType() << Ptr->getSourceRange(); 6178 return ExprError(); 6179 } 6180 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 6181 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 6182 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 6183 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 6184 << Ptr->getSourceRange(); 6185 return ExprError(); 6186 } 6187 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 6188 } else if (Form != Load && Form != LoadCopy) { 6189 if (ValType.isConstQualified()) { 6190 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 6191 << Ptr->getType() << Ptr->getSourceRange(); 6192 return ExprError(); 6193 } 6194 } 6195 6196 // For an arithmetic operation, the implied arithmetic must be well-formed. 6197 if (Form == Arithmetic) { 6198 // GCC does not enforce these rules for GNU atomics, but we do to help catch 6199 // trivial type errors. 6200 auto IsAllowedValueType = [&](QualType ValType) { 6201 if (ValType->isIntegerType()) 6202 return true; 6203 if (ValType->isPointerType()) 6204 return true; 6205 if (!ValType->isFloatingType()) 6206 return false; 6207 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 6208 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 6209 &Context.getTargetInfo().getLongDoubleFormat() == 6210 &llvm::APFloat::x87DoubleExtended()) 6211 return false; 6212 return true; 6213 }; 6214 if (IsAddSub && !IsAllowedValueType(ValType)) { 6215 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 6216 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6217 return ExprError(); 6218 } 6219 if (!IsAddSub && !ValType->isIntegerType()) { 6220 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 6221 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6222 return ExprError(); 6223 } 6224 if (IsC11 && ValType->isPointerType() && 6225 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 6226 diag::err_incomplete_type)) { 6227 return ExprError(); 6228 } 6229 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 6230 // For __atomic_*_n operations, the value type must be a scalar integral or 6231 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 6232 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 6233 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6234 return ExprError(); 6235 } 6236 6237 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 6238 !AtomTy->isScalarType()) { 6239 // For GNU atomics, require a trivially-copyable type. This is not part of 6240 // the GNU atomics specification but we enforce it for consistency with 6241 // other atomics which generally all require a trivially-copyable type. 
This 6242 // is because atomics just copy bits. 6243 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 6244 << Ptr->getType() << Ptr->getSourceRange(); 6245 return ExprError(); 6246 } 6247 6248 switch (ValType.getObjCLifetime()) { 6249 case Qualifiers::OCL_None: 6250 case Qualifiers::OCL_ExplicitNone: 6251 // okay 6252 break; 6253 6254 case Qualifiers::OCL_Weak: 6255 case Qualifiers::OCL_Strong: 6256 case Qualifiers::OCL_Autoreleasing: 6257 // FIXME: Can this happen? By this point, ValType should be known 6258 // to be trivially copyable. 6259 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 6260 << ValType << Ptr->getSourceRange(); 6261 return ExprError(); 6262 } 6263 6264 // All atomic operations have an overload which takes a pointer to a volatile 6265 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 6266 // into the result or the other operands. Similarly atomic_load takes a 6267 // pointer to a const 'A'. 6268 ValType.removeLocalVolatile(); 6269 ValType.removeLocalConst(); 6270 QualType ResultType = ValType; 6271 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 6272 Form == Init) 6273 ResultType = Context.VoidTy; 6274 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 6275 ResultType = Context.BoolTy; 6276 6277 // The type of a parameter passed 'by value'. In the GNU atomics, such 6278 // arguments are actually passed as pointers. 6279 QualType ByValType = ValType; // 'CP' 6280 bool IsPassedByAddress = false; 6281 if (!IsC11 && !IsHIP && !IsN) { 6282 ByValType = Ptr->getType(); 6283 IsPassedByAddress = true; 6284 } 6285 6286 SmallVector<Expr *, 5> APIOrderedArgs; 6287 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 6288 APIOrderedArgs.push_back(Args[0]); 6289 switch (Form) { 6290 case Init: 6291 case Load: 6292 APIOrderedArgs.push_back(Args[1]); // Val1/Order 6293 break; 6294 case LoadCopy: 6295 case Copy: 6296 case Arithmetic: 6297 case Xchg: 6298 APIOrderedArgs.push_back(Args[2]); // Val1 6299 APIOrderedArgs.push_back(Args[1]); // Order 6300 break; 6301 case GNUXchg: 6302 APIOrderedArgs.push_back(Args[2]); // Val1 6303 APIOrderedArgs.push_back(Args[3]); // Val2 6304 APIOrderedArgs.push_back(Args[1]); // Order 6305 break; 6306 case C11CmpXchg: 6307 APIOrderedArgs.push_back(Args[2]); // Val1 6308 APIOrderedArgs.push_back(Args[4]); // Val2 6309 APIOrderedArgs.push_back(Args[1]); // Order 6310 APIOrderedArgs.push_back(Args[3]); // OrderFail 6311 break; 6312 case GNUCmpXchg: 6313 APIOrderedArgs.push_back(Args[2]); // Val1 6314 APIOrderedArgs.push_back(Args[4]); // Val2 6315 APIOrderedArgs.push_back(Args[5]); // Weak 6316 APIOrderedArgs.push_back(Args[1]); // Order 6317 APIOrderedArgs.push_back(Args[3]); // OrderFail 6318 break; 6319 } 6320 } else 6321 APIOrderedArgs.append(Args.begin(), Args.end()); 6322 6323 // The first argument's non-CV pointer type is used to deduce the type of 6324 // subsequent arguments, except for: 6325 // - weak flag (always converted to bool) 6326 // - memory order (always converted to int) 6327 // - scope (always converted to int) 6328 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 6329 QualType Ty; 6330 if (i < NumVals[Form] + 1) { 6331 switch (i) { 6332 case 0: 6333 // The first argument is always a pointer. It has a fixed type. 6334 // It is always dereferenced, a nullptr is undefined. 6335 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6336 // Nothing else to do: we already know all we want about this pointer. 
6337 continue; 6338 case 1: 6339 // The second argument is the non-atomic operand. For arithmetic, this 6340 // is always passed by value, and for a compare_exchange it is always 6341 // passed by address. For the rest, GNU uses by-address and C11 uses 6342 // by-value. 6343 assert(Form != Load); 6344 if (Form == Arithmetic && ValType->isPointerType()) 6345 Ty = Context.getPointerDiffType(); 6346 else if (Form == Init || Form == Arithmetic) 6347 Ty = ValType; 6348 else if (Form == Copy || Form == Xchg) { 6349 if (IsPassedByAddress) { 6350 // The value pointer is always dereferenced, a nullptr is undefined. 6351 CheckNonNullArgument(*this, APIOrderedArgs[i], 6352 ExprRange.getBegin()); 6353 } 6354 Ty = ByValType; 6355 } else { 6356 Expr *ValArg = APIOrderedArgs[i]; 6357 // The value pointer is always dereferenced, a nullptr is undefined. 6358 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 6359 LangAS AS = LangAS::Default; 6360 // Keep address space of non-atomic pointer type. 6361 if (const PointerType *PtrTy = 6362 ValArg->getType()->getAs<PointerType>()) { 6363 AS = PtrTy->getPointeeType().getAddressSpace(); 6364 } 6365 Ty = Context.getPointerType( 6366 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 6367 } 6368 break; 6369 case 2: 6370 // The third argument to compare_exchange / GNU exchange is the desired 6371 // value, either by-value (for the C11 and *_n variant) or as a pointer. 6372 if (IsPassedByAddress) 6373 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6374 Ty = ByValType; 6375 break; 6376 case 3: 6377 // The fourth argument to GNU compare_exchange is a 'weak' flag. 6378 Ty = Context.BoolTy; 6379 break; 6380 } 6381 } else { 6382 // The order(s) and scope are always converted to int. 6383 Ty = Context.IntTy; 6384 } 6385 6386 InitializedEntity Entity = 6387 InitializedEntity::InitializeParameter(Context, Ty, false); 6388 ExprResult Arg = APIOrderedArgs[i]; 6389 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6390 if (Arg.isInvalid()) 6391 return true; 6392 APIOrderedArgs[i] = Arg.get(); 6393 } 6394 6395 // Permute the arguments into a 'consistent' order. 6396 SmallVector<Expr*, 5> SubExprs; 6397 SubExprs.push_back(Ptr); 6398 switch (Form) { 6399 case Init: 6400 // Note, AtomicExpr::getVal1() has a special case for this atomic. 6401 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6402 break; 6403 case Load: 6404 SubExprs.push_back(APIOrderedArgs[1]); // Order 6405 break; 6406 case LoadCopy: 6407 case Copy: 6408 case Arithmetic: 6409 case Xchg: 6410 SubExprs.push_back(APIOrderedArgs[2]); // Order 6411 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6412 break; 6413 case GNUXchg: 6414 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
6415 SubExprs.push_back(APIOrderedArgs[3]); // Order 6416 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6417 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6418 break; 6419 case C11CmpXchg: 6420 SubExprs.push_back(APIOrderedArgs[3]); // Order 6421 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6422 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 6423 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6424 break; 6425 case GNUCmpXchg: 6426 SubExprs.push_back(APIOrderedArgs[4]); // Order 6427 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6428 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 6429 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6430 SubExprs.push_back(APIOrderedArgs[3]); // Weak 6431 break; 6432 } 6433 6434 if (SubExprs.size() >= 2 && Form != Init) { 6435 if (Optional<llvm::APSInt> Result = 6436 SubExprs[1]->getIntegerConstantExpr(Context)) 6437 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 6438 Diag(SubExprs[1]->getBeginLoc(), 6439 diag::warn_atomic_op_has_invalid_memory_order) 6440 << SubExprs[1]->getSourceRange(); 6441 } 6442 6443 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 6444 auto *Scope = Args[Args.size() - 1]; 6445 if (Optional<llvm::APSInt> Result = 6446 Scope->getIntegerConstantExpr(Context)) { 6447 if (!ScopeModel->isValid(Result->getZExtValue())) 6448 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 6449 << Scope->getSourceRange(); 6450 } 6451 SubExprs.push_back(Scope); 6452 } 6453 6454 AtomicExpr *AE = new (Context) 6455 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 6456 6457 if ((Op == AtomicExpr::AO__c11_atomic_load || 6458 Op == AtomicExpr::AO__c11_atomic_store || 6459 Op == AtomicExpr::AO__opencl_atomic_load || 6460 Op == AtomicExpr::AO__hip_atomic_load || 6461 Op == AtomicExpr::AO__opencl_atomic_store || 6462 Op == AtomicExpr::AO__hip_atomic_store) && 6463 Context.AtomicUsesUnsupportedLibcall(AE)) 6464 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 6465 << ((Op == AtomicExpr::AO__c11_atomic_load || 6466 Op == AtomicExpr::AO__opencl_atomic_load || 6467 Op == AtomicExpr::AO__hip_atomic_load) 6468 ? 0 6469 : 1); 6470 6471 if (ValType->isBitIntType()) { 6472 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 6473 return ExprError(); 6474 } 6475 6476 return AE; 6477 } 6478 6479 /// checkBuiltinArgument - Given a call to a builtin function, perform 6480 /// normal type-checking on the given argument, updating the call in 6481 /// place. This is useful when a builtin function requires custom 6482 /// type-checking for some of its arguments but not necessarily all of 6483 /// them. 6484 /// 6485 /// Returns true on error. 6486 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6487 FunctionDecl *Fn = E->getDirectCallee(); 6488 assert(Fn && "builtin call without direct callee!"); 6489 6490 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6491 InitializedEntity Entity = 6492 InitializedEntity::InitializeParameter(S.Context, Param); 6493 6494 ExprResult Arg = E->getArg(ArgIndex); 6495 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6496 if (Arg.isInvalid()) 6497 return true; 6498 6499 E->setArg(ArgIndex, Arg.get()); 6500 return false; 6501 } 6502 6503 /// We have a call to a function like __sync_fetch_and_add, which is an 6504 /// overloaded function based on the pointer type of its first argument. 
6505 /// The main BuildCallExpr routines have already promoted the types of 6506 /// arguments because all of these calls are prototyped as void(...). 6507 /// 6508 /// This function goes through and does final semantic checking for these 6509 /// builtins, as well as generating any warnings. 6510 ExprResult 6511 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6512 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6513 Expr *Callee = TheCall->getCallee(); 6514 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6515 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6516 6517 // Ensure that we have at least one argument to do type inference from. 6518 if (TheCall->getNumArgs() < 1) { 6519 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6520 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6521 return ExprError(); 6522 } 6523 6524 // Inspect the first argument of the atomic builtin. This should always be 6525 // a pointer type, whose element is an integral scalar or pointer type. 6526 // Because it is a pointer type, we don't have to worry about any implicit 6527 // casts here. 6528 // FIXME: We don't allow floating point scalars as input. 6529 Expr *FirstArg = TheCall->getArg(0); 6530 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6531 if (FirstArgResult.isInvalid()) 6532 return ExprError(); 6533 FirstArg = FirstArgResult.get(); 6534 TheCall->setArg(0, FirstArg); 6535 6536 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6537 if (!pointerType) { 6538 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6539 << FirstArg->getType() << FirstArg->getSourceRange(); 6540 return ExprError(); 6541 } 6542 6543 QualType ValType = pointerType->getPointeeType(); 6544 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6545 !ValType->isBlockPointerType()) { 6546 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6547 << FirstArg->getType() << FirstArg->getSourceRange(); 6548 return ExprError(); 6549 } 6550 6551 if (ValType.isConstQualified()) { 6552 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6553 << FirstArg->getType() << FirstArg->getSourceRange(); 6554 return ExprError(); 6555 } 6556 6557 switch (ValType.getObjCLifetime()) { 6558 case Qualifiers::OCL_None: 6559 case Qualifiers::OCL_ExplicitNone: 6560 // okay 6561 break; 6562 6563 case Qualifiers::OCL_Weak: 6564 case Qualifiers::OCL_Strong: 6565 case Qualifiers::OCL_Autoreleasing: 6566 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6567 << ValType << FirstArg->getSourceRange(); 6568 return ExprError(); 6569 } 6570 6571 // Strip any qualifiers off ValType. 6572 ValType = ValType.getUnqualifiedType(); 6573 6574 // The majority of builtins return a value, but a few have special return 6575 // types, so allow them to override appropriately below. 6576 QualType ResultType = ValType; 6577 6578 // We need to figure out which concrete builtin this maps onto. For example, 6579 // __sync_fetch_and_add with a 2 byte object turns into 6580 // __sync_fetch_and_add_2. 
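  // Illustrative example (hypothetical caller):
  //   short Counter = 0;
  //   __sync_fetch_and_add(&Counter, 1); // 2-byte operand on typical targets,
  //                                      // so this resolves to the _2 variant.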
6581 #define BUILTIN_ROW(x) \ 6582 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6583 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6584 6585 static const unsigned BuiltinIndices[][5] = { 6586 BUILTIN_ROW(__sync_fetch_and_add), 6587 BUILTIN_ROW(__sync_fetch_and_sub), 6588 BUILTIN_ROW(__sync_fetch_and_or), 6589 BUILTIN_ROW(__sync_fetch_and_and), 6590 BUILTIN_ROW(__sync_fetch_and_xor), 6591 BUILTIN_ROW(__sync_fetch_and_nand), 6592 6593 BUILTIN_ROW(__sync_add_and_fetch), 6594 BUILTIN_ROW(__sync_sub_and_fetch), 6595 BUILTIN_ROW(__sync_and_and_fetch), 6596 BUILTIN_ROW(__sync_or_and_fetch), 6597 BUILTIN_ROW(__sync_xor_and_fetch), 6598 BUILTIN_ROW(__sync_nand_and_fetch), 6599 6600 BUILTIN_ROW(__sync_val_compare_and_swap), 6601 BUILTIN_ROW(__sync_bool_compare_and_swap), 6602 BUILTIN_ROW(__sync_lock_test_and_set), 6603 BUILTIN_ROW(__sync_lock_release), 6604 BUILTIN_ROW(__sync_swap) 6605 }; 6606 #undef BUILTIN_ROW 6607 6608 // Determine the index of the size. 6609 unsigned SizeIndex; 6610 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6611 case 1: SizeIndex = 0; break; 6612 case 2: SizeIndex = 1; break; 6613 case 4: SizeIndex = 2; break; 6614 case 8: SizeIndex = 3; break; 6615 case 16: SizeIndex = 4; break; 6616 default: 6617 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6618 << FirstArg->getType() << FirstArg->getSourceRange(); 6619 return ExprError(); 6620 } 6621 6622 // Each of these builtins has one pointer argument, followed by some number of 6623 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 6624 // that we ignore. Find out which row of BuiltinIndices to read from as well 6625 // as the number of fixed args. 6626 unsigned BuiltinID = FDecl->getBuiltinID(); 6627 unsigned BuiltinIndex, NumFixed = 1; 6628 bool WarnAboutSemanticsChange = false; 6629 switch (BuiltinID) { 6630 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6631 case Builtin::BI__sync_fetch_and_add: 6632 case Builtin::BI__sync_fetch_and_add_1: 6633 case Builtin::BI__sync_fetch_and_add_2: 6634 case Builtin::BI__sync_fetch_and_add_4: 6635 case Builtin::BI__sync_fetch_and_add_8: 6636 case Builtin::BI__sync_fetch_and_add_16: 6637 BuiltinIndex = 0; 6638 break; 6639 6640 case Builtin::BI__sync_fetch_and_sub: 6641 case Builtin::BI__sync_fetch_and_sub_1: 6642 case Builtin::BI__sync_fetch_and_sub_2: 6643 case Builtin::BI__sync_fetch_and_sub_4: 6644 case Builtin::BI__sync_fetch_and_sub_8: 6645 case Builtin::BI__sync_fetch_and_sub_16: 6646 BuiltinIndex = 1; 6647 break; 6648 6649 case Builtin::BI__sync_fetch_and_or: 6650 case Builtin::BI__sync_fetch_and_or_1: 6651 case Builtin::BI__sync_fetch_and_or_2: 6652 case Builtin::BI__sync_fetch_and_or_4: 6653 case Builtin::BI__sync_fetch_and_or_8: 6654 case Builtin::BI__sync_fetch_and_or_16: 6655 BuiltinIndex = 2; 6656 break; 6657 6658 case Builtin::BI__sync_fetch_and_and: 6659 case Builtin::BI__sync_fetch_and_and_1: 6660 case Builtin::BI__sync_fetch_and_and_2: 6661 case Builtin::BI__sync_fetch_and_and_4: 6662 case Builtin::BI__sync_fetch_and_and_8: 6663 case Builtin::BI__sync_fetch_and_and_16: 6664 BuiltinIndex = 3; 6665 break; 6666 6667 case Builtin::BI__sync_fetch_and_xor: 6668 case Builtin::BI__sync_fetch_and_xor_1: 6669 case Builtin::BI__sync_fetch_and_xor_2: 6670 case Builtin::BI__sync_fetch_and_xor_4: 6671 case Builtin::BI__sync_fetch_and_xor_8: 6672 case Builtin::BI__sync_fetch_and_xor_16: 6673 BuiltinIndex = 4; 6674 break; 6675 6676 case Builtin::BI__sync_fetch_and_nand: 6677 case 
Builtin::BI__sync_fetch_and_nand_1: 6678 case Builtin::BI__sync_fetch_and_nand_2: 6679 case Builtin::BI__sync_fetch_and_nand_4: 6680 case Builtin::BI__sync_fetch_and_nand_8: 6681 case Builtin::BI__sync_fetch_and_nand_16: 6682 BuiltinIndex = 5; 6683 WarnAboutSemanticsChange = true; 6684 break; 6685 6686 case Builtin::BI__sync_add_and_fetch: 6687 case Builtin::BI__sync_add_and_fetch_1: 6688 case Builtin::BI__sync_add_and_fetch_2: 6689 case Builtin::BI__sync_add_and_fetch_4: 6690 case Builtin::BI__sync_add_and_fetch_8: 6691 case Builtin::BI__sync_add_and_fetch_16: 6692 BuiltinIndex = 6; 6693 break; 6694 6695 case Builtin::BI__sync_sub_and_fetch: 6696 case Builtin::BI__sync_sub_and_fetch_1: 6697 case Builtin::BI__sync_sub_and_fetch_2: 6698 case Builtin::BI__sync_sub_and_fetch_4: 6699 case Builtin::BI__sync_sub_and_fetch_8: 6700 case Builtin::BI__sync_sub_and_fetch_16: 6701 BuiltinIndex = 7; 6702 break; 6703 6704 case Builtin::BI__sync_and_and_fetch: 6705 case Builtin::BI__sync_and_and_fetch_1: 6706 case Builtin::BI__sync_and_and_fetch_2: 6707 case Builtin::BI__sync_and_and_fetch_4: 6708 case Builtin::BI__sync_and_and_fetch_8: 6709 case Builtin::BI__sync_and_and_fetch_16: 6710 BuiltinIndex = 8; 6711 break; 6712 6713 case Builtin::BI__sync_or_and_fetch: 6714 case Builtin::BI__sync_or_and_fetch_1: 6715 case Builtin::BI__sync_or_and_fetch_2: 6716 case Builtin::BI__sync_or_and_fetch_4: 6717 case Builtin::BI__sync_or_and_fetch_8: 6718 case Builtin::BI__sync_or_and_fetch_16: 6719 BuiltinIndex = 9; 6720 break; 6721 6722 case Builtin::BI__sync_xor_and_fetch: 6723 case Builtin::BI__sync_xor_and_fetch_1: 6724 case Builtin::BI__sync_xor_and_fetch_2: 6725 case Builtin::BI__sync_xor_and_fetch_4: 6726 case Builtin::BI__sync_xor_and_fetch_8: 6727 case Builtin::BI__sync_xor_and_fetch_16: 6728 BuiltinIndex = 10; 6729 break; 6730 6731 case Builtin::BI__sync_nand_and_fetch: 6732 case Builtin::BI__sync_nand_and_fetch_1: 6733 case Builtin::BI__sync_nand_and_fetch_2: 6734 case Builtin::BI__sync_nand_and_fetch_4: 6735 case Builtin::BI__sync_nand_and_fetch_8: 6736 case Builtin::BI__sync_nand_and_fetch_16: 6737 BuiltinIndex = 11; 6738 WarnAboutSemanticsChange = true; 6739 break; 6740 6741 case Builtin::BI__sync_val_compare_and_swap: 6742 case Builtin::BI__sync_val_compare_and_swap_1: 6743 case Builtin::BI__sync_val_compare_and_swap_2: 6744 case Builtin::BI__sync_val_compare_and_swap_4: 6745 case Builtin::BI__sync_val_compare_and_swap_8: 6746 case Builtin::BI__sync_val_compare_and_swap_16: 6747 BuiltinIndex = 12; 6748 NumFixed = 2; 6749 break; 6750 6751 case Builtin::BI__sync_bool_compare_and_swap: 6752 case Builtin::BI__sync_bool_compare_and_swap_1: 6753 case Builtin::BI__sync_bool_compare_and_swap_2: 6754 case Builtin::BI__sync_bool_compare_and_swap_4: 6755 case Builtin::BI__sync_bool_compare_and_swap_8: 6756 case Builtin::BI__sync_bool_compare_and_swap_16: 6757 BuiltinIndex = 13; 6758 NumFixed = 2; 6759 ResultType = Context.BoolTy; 6760 break; 6761 6762 case Builtin::BI__sync_lock_test_and_set: 6763 case Builtin::BI__sync_lock_test_and_set_1: 6764 case Builtin::BI__sync_lock_test_and_set_2: 6765 case Builtin::BI__sync_lock_test_and_set_4: 6766 case Builtin::BI__sync_lock_test_and_set_8: 6767 case Builtin::BI__sync_lock_test_and_set_16: 6768 BuiltinIndex = 14; 6769 break; 6770 6771 case Builtin::BI__sync_lock_release: 6772 case Builtin::BI__sync_lock_release_1: 6773 case Builtin::BI__sync_lock_release_2: 6774 case Builtin::BI__sync_lock_release_4: 6775 case Builtin::BI__sync_lock_release_8: 6776 case 
Builtin::BI__sync_lock_release_16: 6777 BuiltinIndex = 15; 6778 NumFixed = 0; 6779 ResultType = Context.VoidTy; 6780 break; 6781 6782 case Builtin::BI__sync_swap: 6783 case Builtin::BI__sync_swap_1: 6784 case Builtin::BI__sync_swap_2: 6785 case Builtin::BI__sync_swap_4: 6786 case Builtin::BI__sync_swap_8: 6787 case Builtin::BI__sync_swap_16: 6788 BuiltinIndex = 16; 6789 break; 6790 } 6791 6792 // Now that we know how many fixed arguments we expect, first check that we 6793 // have at least that many. 6794 if (TheCall->getNumArgs() < 1+NumFixed) { 6795 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6796 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6797 << Callee->getSourceRange(); 6798 return ExprError(); 6799 } 6800 6801 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6802 << Callee->getSourceRange(); 6803 6804 if (WarnAboutSemanticsChange) { 6805 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6806 << Callee->getSourceRange(); 6807 } 6808 6809 // Get the decl for the concrete builtin from this, we can tell what the 6810 // concrete integer type we should convert to is. 6811 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6812 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6813 FunctionDecl *NewBuiltinDecl; 6814 if (NewBuiltinID == BuiltinID) 6815 NewBuiltinDecl = FDecl; 6816 else { 6817 // Perform builtin lookup to avoid redeclaring it. 6818 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6819 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6820 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6821 assert(Res.getFoundDecl()); 6822 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6823 if (!NewBuiltinDecl) 6824 return ExprError(); 6825 } 6826 6827 // The first argument --- the pointer --- has a fixed type; we 6828 // deduce the types of the rest of the arguments accordingly. Walk 6829 // the remaining arguments, converting them to the deduced value type. 6830 for (unsigned i = 0; i != NumFixed; ++i) { 6831 ExprResult Arg = TheCall->getArg(i+1); 6832 6833 // GCC does an implicit conversion to the pointer or integer ValType. This 6834 // can fail in some cases (1i -> int**), check for this error case now. 6835 // Initialize the argument. 6836 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6837 ValType, /*consume*/ false); 6838 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6839 if (Arg.isInvalid()) 6840 return ExprError(); 6841 6842 // Okay, we have something that *can* be converted to the right type. Check 6843 // to see if there is a potentially weird extension going on here. This can 6844 // happen when you do an atomic operation on something like an char* and 6845 // pass in 42. The 42 gets converted to char. This is even more strange 6846 // for things like 45.123 -> char, etc. 6847 // FIXME: Do this check. 6848 TheCall->setArg(i+1, Arg.get()); 6849 } 6850 6851 // Create a new DeclRefExpr to refer to the new decl. 6852 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6853 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6854 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6855 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6856 6857 // Set the callee in the CallExpr. 6858 // FIXME: This loses syntactic information. 
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit problematic uses of bit-precise integer types with atomic
  // builtins. The arguments would have already been converted to the first
  // argument's type, so we only need to check the first argument.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, numArgs))
    return ExprError();

  // Inspect the last argument of the nontemporal builtin. This should always
  // be a pointer type, from which we infer the type of the memory access.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(numArgs - 1);
  ExprResult PointerArgResult =
      DefaultFunctionArrayLvalueConversion(PointerArg);

  if (PointerArgResult.isInvalid())
    return ExprError();
  PointerArg = PointerArgResult.get();
  TheCall->setArg(numArgs - 1, PointerArg);

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
      !ValType->isVectorType()) {
    Diag(DRE->getBeginLoc(),
         diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  if (!isStore) {
    TheCall->setType(ValType);
    return TheCallResult;
  }

  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return ExprError();

  TheCall->setArg(0, ValArg.get());
  TheCall->setType(Context.VoidTy);
  return TheCallResult;
}

/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct.
/// Note: It might also make sense to do the UTF-16 conversion here (would
/// simplify the backend).
bool Sema::CheckObjCString(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);

  if (!Literal || !Literal->isAscii()) {
    Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
        << Arg->getSourceRange();
    return true;
  }

  if (Literal->containsNonAsciiOrNull()) {
    StringRef String = Literal->getString();
    unsigned NumBytes = String.size();
    SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
    const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
    llvm::UTF16 *ToPtr = &ToBuf[0];

    llvm::ConversionResult Result =
        llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
                                 ToPtr + NumBytes, llvm::strictConversion);
    // Check for conversion failure.
    if (Result != llvm::conversionOK)
      Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
          << Arg->getSourceRange();
  }
  return false;
}

/// CheckOSLogFormatStringArg - Checks that the format string argument to the
/// os_log() and os_trace() functions is correct, and converts it to
/// const char *.
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}

/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
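/// For example, __builtin_ms_va_start is rejected in functions using the
/// System V ABI, and __builtin_va_start is rejected in Win64-ABI functions on
/// Unix and in System V ABI functions on Windows (see the checks below).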
7012 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 7013 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 7014 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 7015 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 7016 TT.getArch() == llvm::Triple::aarch64_32); 7017 bool IsWindows = TT.isOSWindows(); 7018 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 7019 if (IsX64 || IsAArch64) { 7020 CallingConv CC = CC_C; 7021 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 7022 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 7023 if (IsMSVAStart) { 7024 // Don't allow this in System V ABI functions. 7025 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 7026 return S.Diag(Fn->getBeginLoc(), 7027 diag::err_ms_va_start_used_in_sysv_function); 7028 } else { 7029 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 7030 // On x64 Windows, don't allow this in System V ABI functions. 7031 // (Yes, that means there's no corresponding way to support variadic 7032 // System V ABI functions on Windows.) 7033 if ((IsWindows && CC == CC_X86_64SysV) || 7034 (!IsWindows && CC == CC_Win64)) 7035 return S.Diag(Fn->getBeginLoc(), 7036 diag::err_va_start_used_in_wrong_abi_function) 7037 << !IsWindows; 7038 } 7039 return false; 7040 } 7041 7042 if (IsMSVAStart) 7043 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 7044 return false; 7045 } 7046 7047 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 7048 ParmVarDecl **LastParam = nullptr) { 7049 // Determine whether the current function, block, or obj-c method is variadic 7050 // and get its parameter list. 7051 bool IsVariadic = false; 7052 ArrayRef<ParmVarDecl *> Params; 7053 DeclContext *Caller = S.CurContext; 7054 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 7055 IsVariadic = Block->isVariadic(); 7056 Params = Block->parameters(); 7057 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 7058 IsVariadic = FD->isVariadic(); 7059 Params = FD->parameters(); 7060 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 7061 IsVariadic = MD->isVariadic(); 7062 // FIXME: This isn't correct for methods (results in bogus warning). 7063 Params = MD->parameters(); 7064 } else if (isa<CapturedDecl>(Caller)) { 7065 // We don't support va_start in a CapturedDecl. 7066 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 7067 return true; 7068 } else { 7069 // This must be some other declcontext that parses exprs. 7070 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 7071 return true; 7072 } 7073 7074 if (!IsVariadic) { 7075 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 7076 return true; 7077 } 7078 7079 if (LastParam) 7080 *LastParam = Params.empty() ? nullptr : Params.back(); 7081 7082 return false; 7083 } 7084 7085 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 7086 /// for validity. Emit an error and return true on failure; return false 7087 /// on success. 7088 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 7089 Expr *Fn = TheCall->getCallee(); 7090 7091 if (checkVAStartABI(*this, BuiltinID, Fn)) 7092 return true; 7093 7094 if (checkArgCount(*this, TheCall, 2)) 7095 return true; 7096 7097 // Type-check the first argument normally. 7098 if (checkBuiltinArgument(*this, TheCall, 0)) 7099 return true; 7100 7101 // Check that the current function is variadic, and get its last parameter. 
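  // For example (illustrative):
  //   void log_all(const char *Fmt, ...) {
  //     va_list Ap;
  //     __builtin_va_start(Ap, Fmt); // OK: 'Fmt' is the last named parameter.
  //   }
  // Passing anything other than the last named parameter is diagnosed below.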
7102 ParmVarDecl *LastParam; 7103 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 7104 return true; 7105 7106 // Verify that the second argument to the builtin is the last argument of the 7107 // current function or method. 7108 bool SecondArgIsLastNamedArgument = false; 7109 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 7110 7111 // These are valid if SecondArgIsLastNamedArgument is false after the next 7112 // block. 7113 QualType Type; 7114 SourceLocation ParamLoc; 7115 bool IsCRegister = false; 7116 7117 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 7118 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 7119 SecondArgIsLastNamedArgument = PV == LastParam; 7120 7121 Type = PV->getType(); 7122 ParamLoc = PV->getLocation(); 7123 IsCRegister = 7124 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 7125 } 7126 } 7127 7128 if (!SecondArgIsLastNamedArgument) 7129 Diag(TheCall->getArg(1)->getBeginLoc(), 7130 diag::warn_second_arg_of_va_start_not_last_named_param); 7131 else if (IsCRegister || Type->isReferenceType() || 7132 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 7133 // Promotable integers are UB, but enumerations need a bit of 7134 // extra checking to see what their promotable type actually is. 7135 if (!Type->isPromotableIntegerType()) 7136 return false; 7137 if (!Type->isEnumeralType()) 7138 return true; 7139 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 7140 return !(ED && 7141 Context.typesAreCompatible(ED->getPromotionType(), Type)); 7142 }()) { 7143 unsigned Reason = 0; 7144 if (Type->isReferenceType()) Reason = 1; 7145 else if (IsCRegister) Reason = 2; 7146 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 7147 Diag(ParamLoc, diag::note_parameter_type) << Type; 7148 } 7149 7150 TheCall->setType(Context.VoidTy); 7151 return false; 7152 } 7153 7154 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 7155 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 7156 const LangOptions &LO = getLangOpts(); 7157 7158 if (LO.CPlusPlus) 7159 return Arg->getType() 7160 .getCanonicalType() 7161 .getTypePtr() 7162 ->getPointeeType() 7163 .withoutLocalFastQualifiers() == Context.CharTy; 7164 7165 // In C, allow aliasing through `char *`, this is required for AArch64 at 7166 // least. 7167 return true; 7168 }; 7169 7170 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 7171 // const char *named_addr); 7172 7173 Expr *Func = Call->getCallee(); 7174 7175 if (Call->getNumArgs() < 3) 7176 return Diag(Call->getEndLoc(), 7177 diag::err_typecheck_call_too_few_args_at_least) 7178 << 0 /*function call*/ << 3 << Call->getNumArgs(); 7179 7180 // Type-check the first argument normally. 7181 if (checkBuiltinArgument(*this, Call, 0)) 7182 return true; 7183 7184 // Check that the current function is variadic. 
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers.

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}

/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like __builtin_isnan and
/// friends. This is declared to take (...), so we have to check everything.
/// We expect the last argument to be a floating point value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
  // on all preceding parameters just being int. Try all of those.
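  // For example, __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
  // FP_SUBNORMAL, FP_ZERO, X) passes five int classification values followed
  // by the floating-point operand 'X', whereas __builtin_isnan(X) has
  // NumArgs == 1.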
7263 for (unsigned i = 0; i < NumArgs - 1; ++i) { 7264 Expr *Arg = TheCall->getArg(i); 7265 7266 if (Arg->isTypeDependent()) 7267 return false; 7268 7269 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 7270 7271 if (Res.isInvalid()) 7272 return true; 7273 TheCall->setArg(i, Res.get()); 7274 } 7275 7276 Expr *OrigArg = TheCall->getArg(NumArgs-1); 7277 7278 if (OrigArg->isTypeDependent()) 7279 return false; 7280 7281 // Usual Unary Conversions will convert half to float, which we want for 7282 // machines that use fp16 conversion intrinsics. Else, we wnat to leave the 7283 // type how it is, but do normal L->Rvalue conversions. 7284 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 7285 OrigArg = UsualUnaryConversions(OrigArg).get(); 7286 else 7287 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 7288 TheCall->setArg(NumArgs - 1, OrigArg); 7289 7290 // This operation requires a non-_Complex floating-point number. 7291 if (!OrigArg->getType()->isRealFloatingType()) 7292 return Diag(OrigArg->getBeginLoc(), 7293 diag::err_typecheck_call_invalid_unary_fp) 7294 << OrigArg->getType() << OrigArg->getSourceRange(); 7295 7296 return false; 7297 } 7298 7299 /// Perform semantic analysis for a call to __builtin_complex. 7300 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 7301 if (checkArgCount(*this, TheCall, 2)) 7302 return true; 7303 7304 bool Dependent = false; 7305 for (unsigned I = 0; I != 2; ++I) { 7306 Expr *Arg = TheCall->getArg(I); 7307 QualType T = Arg->getType(); 7308 if (T->isDependentType()) { 7309 Dependent = true; 7310 continue; 7311 } 7312 7313 // Despite supporting _Complex int, GCC requires a real floating point type 7314 // for the operands of __builtin_complex. 7315 if (!T->isRealFloatingType()) { 7316 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 7317 << Arg->getType() << Arg->getSourceRange(); 7318 } 7319 7320 ExprResult Converted = DefaultLvalueConversion(Arg); 7321 if (Converted.isInvalid()) 7322 return true; 7323 TheCall->setArg(I, Converted.get()); 7324 } 7325 7326 if (Dependent) { 7327 TheCall->setType(Context.DependentTy); 7328 return false; 7329 } 7330 7331 Expr *Real = TheCall->getArg(0); 7332 Expr *Imag = TheCall->getArg(1); 7333 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 7334 return Diag(Real->getBeginLoc(), 7335 diag::err_typecheck_call_different_arg_types) 7336 << Real->getType() << Imag->getType() 7337 << Real->getSourceRange() << Imag->getSourceRange(); 7338 } 7339 7340 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 7341 // don't allow this builtin to form those types either. 7342 // FIXME: Should we allow these types? 7343 if (Real->getType()->isFloat16Type()) 7344 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 7345 << "_Float16"; 7346 if (Real->getType()->isHalfType()) 7347 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 7348 << "half"; 7349 7350 TheCall->setType(Context.getComplexType(Real->getType())); 7351 return false; 7352 } 7353 7354 // Customized Sema Checking for VSX builtins that have the following signature: 7355 // vector [...] builtinName(vector [...], vector [...], const int); 7356 // Which takes the same type of vectors (any legal vector type) for the first 7357 // two arguments and takes compile time constant for the third argument. 
7358 // Example builtins are : 7359 // vector double vec_xxpermdi(vector double, vector double, int); 7360 // vector short vec_xxsldwi(vector short, vector short, int); 7361 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 7362 unsigned ExpectedNumArgs = 3; 7363 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 7364 return true; 7365 7366 // Check the third argument is a compile time constant 7367 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 7368 return Diag(TheCall->getBeginLoc(), 7369 diag::err_vsx_builtin_nonconstant_argument) 7370 << 3 /* argument index */ << TheCall->getDirectCallee() 7371 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 7372 TheCall->getArg(2)->getEndLoc()); 7373 7374 QualType Arg1Ty = TheCall->getArg(0)->getType(); 7375 QualType Arg2Ty = TheCall->getArg(1)->getType(); 7376 7377 // Check the type of argument 1 and argument 2 are vectors. 7378 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 7379 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 7380 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 7381 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 7382 << TheCall->getDirectCallee() 7383 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7384 TheCall->getArg(1)->getEndLoc()); 7385 } 7386 7387 // Check the first two arguments are the same type. 7388 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 7389 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 7390 << TheCall->getDirectCallee() 7391 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7392 TheCall->getArg(1)->getEndLoc()); 7393 } 7394 7395 // When default clang type checking is turned off and the customized type 7396 // checking is used, the returning type of the function must be explicitly 7397 // set. Otherwise it is _Bool by default. 7398 TheCall->setType(Arg1Ty); 7399 7400 return false; 7401 } 7402 7403 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 7404 // This is declared to take (...), so we have to check everything. 7405 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 7406 if (TheCall->getNumArgs() < 2) 7407 return ExprError(Diag(TheCall->getEndLoc(), 7408 diag::err_typecheck_call_too_few_args_at_least) 7409 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 7410 << TheCall->getSourceRange()); 7411 7412 // Determine which of the following types of shufflevector we're checking: 7413 // 1) unary, vector mask: (lhs, mask) 7414 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 7415 QualType resType = TheCall->getArg(0)->getType(); 7416 unsigned numElements = 0; 7417 7418 if (!TheCall->getArg(0)->isTypeDependent() && 7419 !TheCall->getArg(1)->isTypeDependent()) { 7420 QualType LHSType = TheCall->getArg(0)->getType(); 7421 QualType RHSType = TheCall->getArg(1)->getType(); 7422 7423 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 7424 return ExprError( 7425 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 7426 << TheCall->getDirectCallee() 7427 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7428 TheCall->getArg(1)->getEndLoc())); 7429 7430 numElements = LHSType->castAs<VectorType>()->getNumElements(); 7431 unsigned numResElements = TheCall->getNumArgs() - 2; 7432 7433 // Check to see if we have a call with 2 vector arguments, the unary shuffle 7434 // with mask. If so, verify that RHS is an integer vector type with the 7435 // same number of elts as lhs. 
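    // For example (illustrative):
    //   __builtin_shufflevector(V, Mask)          - unary, vector mask
    //   __builtin_shufflevector(A, B, 0, 4, 1, 5) - binary, constant indices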
7436 if (TheCall->getNumArgs() == 2) { 7437 if (!RHSType->hasIntegerRepresentation() || 7438 RHSType->castAs<VectorType>()->getNumElements() != numElements) 7439 return ExprError(Diag(TheCall->getBeginLoc(), 7440 diag::err_vec_builtin_incompatible_vector) 7441 << TheCall->getDirectCallee() 7442 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 7443 TheCall->getArg(1)->getEndLoc())); 7444 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 7445 return ExprError(Diag(TheCall->getBeginLoc(), 7446 diag::err_vec_builtin_incompatible_vector) 7447 << TheCall->getDirectCallee() 7448 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7449 TheCall->getArg(1)->getEndLoc())); 7450 } else if (numElements != numResElements) { 7451 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 7452 resType = Context.getVectorType(eltType, numResElements, 7453 VectorType::GenericVector); 7454 } 7455 } 7456 7457 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 7458 if (TheCall->getArg(i)->isTypeDependent() || 7459 TheCall->getArg(i)->isValueDependent()) 7460 continue; 7461 7462 Optional<llvm::APSInt> Result; 7463 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 7464 return ExprError(Diag(TheCall->getBeginLoc(), 7465 diag::err_shufflevector_nonconstant_argument) 7466 << TheCall->getArg(i)->getSourceRange()); 7467 7468 // Allow -1 which will be translated to undef in the IR. 7469 if (Result->isSigned() && Result->isAllOnes()) 7470 continue; 7471 7472 if (Result->getActiveBits() > 64 || 7473 Result->getZExtValue() >= numElements * 2) 7474 return ExprError(Diag(TheCall->getBeginLoc(), 7475 diag::err_shufflevector_argument_too_large) 7476 << TheCall->getArg(i)->getSourceRange()); 7477 } 7478 7479 SmallVector<Expr*, 32> exprs; 7480 7481 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 7482 exprs.push_back(TheCall->getArg(i)); 7483 TheCall->setArg(i, nullptr); 7484 } 7485 7486 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 7487 TheCall->getCallee()->getBeginLoc(), 7488 TheCall->getRParenLoc()); 7489 } 7490 7491 /// SemaConvertVectorExpr - Handle __builtin_convertvector 7492 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 7493 SourceLocation BuiltinLoc, 7494 SourceLocation RParenLoc) { 7495 ExprValueKind VK = VK_PRValue; 7496 ExprObjectKind OK = OK_Ordinary; 7497 QualType DstTy = TInfo->getType(); 7498 QualType SrcTy = E->getType(); 7499 7500 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 7501 return ExprError(Diag(BuiltinLoc, 7502 diag::err_convertvector_non_vector) 7503 << E->getSourceRange()); 7504 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 7505 return ExprError(Diag(BuiltinLoc, 7506 diag::err_convertvector_non_vector_type)); 7507 7508 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 7509 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 7510 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 7511 if (SrcElts != DstElts) 7512 return ExprError(Diag(BuiltinLoc, 7513 diag::err_convertvector_incompatible_vector) 7514 << E->getSourceRange()); 7515 } 7516 7517 return new (Context) 7518 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 7519 } 7520 7521 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 7522 // This is declared to take (const void*, ...) and can take two 7523 // optional constant int args. 
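// Illustrative usage, mirroring the constraints enforced below (a sketch, not
// taken from the original source):
//   __builtin_prefetch(p);        // address only
//   __builtin_prefetch(p, 1);     // second argument (read/write) must be 0 or 1
//   __builtin_prefetch(p, 0, 3);  // third argument (locality) must be 0..3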
7524 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7525 unsigned NumArgs = TheCall->getNumArgs(); 7526 7527 if (NumArgs > 3) 7528 return Diag(TheCall->getEndLoc(), 7529 diag::err_typecheck_call_too_many_args_at_most) 7530 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7531 7532 // Argument 0 is checked for us and the remaining arguments must be 7533 // constant integers. 7534 for (unsigned i = 1; i != NumArgs; ++i) 7535 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7536 return true; 7537 7538 return false; 7539 } 7540 7541 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7542 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7543 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7544 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7545 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7546 if (checkArgCount(*this, TheCall, 1)) 7547 return true; 7548 Expr *Arg = TheCall->getArg(0); 7549 if (Arg->isInstantiationDependent()) 7550 return false; 7551 7552 QualType ArgTy = Arg->getType(); 7553 if (!ArgTy->hasFloatingRepresentation()) 7554 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7555 << ArgTy; 7556 if (Arg->isLValue()) { 7557 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7558 TheCall->setArg(0, FirstArg.get()); 7559 } 7560 TheCall->setType(TheCall->getArg(0)->getType()); 7561 return false; 7562 } 7563 7564 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7565 // __assume does not evaluate its arguments, and should warn if its argument 7566 // has side effects. 7567 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7568 Expr *Arg = TheCall->getArg(0); 7569 if (Arg->isInstantiationDependent()) return false; 7570 7571 if (Arg->HasSideEffects(Context)) 7572 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7573 << Arg->getSourceRange() 7574 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7575 7576 return false; 7577 } 7578 7579 /// Handle __builtin_alloca_with_align. This is declared 7580 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7581 /// than 8. 7582 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7583 // The alignment must be a constant integer. 7584 Expr *Arg = TheCall->getArg(1); 7585 7586 // We can't check the value of a dependent argument. 7587 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7588 if (const auto *UE = 7589 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7590 if (UE->getKind() == UETT_AlignOf || 7591 UE->getKind() == UETT_PreferredAlignOf) 7592 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7593 << Arg->getSourceRange(); 7594 7595 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7596 7597 if (!Result.isPowerOf2()) 7598 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7599 << Arg->getSourceRange(); 7600 7601 if (Result < Context.getCharWidth()) 7602 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7603 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7604 7605 if (Result > std::numeric_limits<int32_t>::max()) 7606 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7607 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7608 } 7609 7610 return false; 7611 } 7612 7613 /// Handle __builtin_assume_aligned. This is declared 7614 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
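// Illustrative usage, matching the checks below (a sketch, not taken from the
// original source):
//   void *q = __builtin_assume_aligned(p, 64);     // alignment: a power of 2
//   void *r = __builtin_assume_aligned(p, 32, 4);  // optional offset (size_t)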
7615 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 7616 unsigned NumArgs = TheCall->getNumArgs(); 7617 7618 if (NumArgs > 3) 7619 return Diag(TheCall->getEndLoc(), 7620 diag::err_typecheck_call_too_many_args_at_most) 7621 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7622 7623 // The alignment must be a constant integer. 7624 Expr *Arg = TheCall->getArg(1); 7625 7626 // We can't check the value of a dependent argument. 7627 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7628 llvm::APSInt Result; 7629 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7630 return true; 7631 7632 if (!Result.isPowerOf2()) 7633 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7634 << Arg->getSourceRange(); 7635 7636 if (Result > Sema::MaximumAlignment) 7637 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 7638 << Arg->getSourceRange() << Sema::MaximumAlignment; 7639 } 7640 7641 if (NumArgs > 2) { 7642 ExprResult Arg(TheCall->getArg(2)); 7643 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 7644 Context.getSizeType(), false); 7645 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7646 if (Arg.isInvalid()) return true; 7647 TheCall->setArg(2, Arg.get()); 7648 } 7649 7650 return false; 7651 } 7652 7653 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 7654 unsigned BuiltinID = 7655 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 7656 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 7657 7658 unsigned NumArgs = TheCall->getNumArgs(); 7659 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 7660 if (NumArgs < NumRequiredArgs) { 7661 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 7662 << 0 /* function call */ << NumRequiredArgs << NumArgs 7663 << TheCall->getSourceRange(); 7664 } 7665 if (NumArgs >= NumRequiredArgs + 0x100) { 7666 return Diag(TheCall->getEndLoc(), 7667 diag::err_typecheck_call_too_many_args_at_most) 7668 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 7669 << TheCall->getSourceRange(); 7670 } 7671 unsigned i = 0; 7672 7673 // For formatting call, check buffer arg. 7674 if (!IsSizeCall) { 7675 ExprResult Arg(TheCall->getArg(i)); 7676 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7677 Context, Context.VoidPtrTy, false); 7678 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7679 if (Arg.isInvalid()) 7680 return true; 7681 TheCall->setArg(i, Arg.get()); 7682 i++; 7683 } 7684 7685 // Check string literal arg. 7686 unsigned FormatIdx = i; 7687 { 7688 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 7689 if (Arg.isInvalid()) 7690 return true; 7691 TheCall->setArg(i, Arg.get()); 7692 i++; 7693 } 7694 7695 // Make sure variadic args are scalar. 7696 unsigned FirstDataArg = i; 7697 while (i < NumArgs) { 7698 ExprResult Arg = DefaultVariadicArgumentPromotion( 7699 TheCall->getArg(i), VariadicFunction, nullptr); 7700 if (Arg.isInvalid()) 7701 return true; 7702 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 7703 if (ArgSize.getQuantity() >= 0x100) { 7704 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 7705 << i << (int)ArgSize.getQuantity() << 0xff 7706 << TheCall->getSourceRange(); 7707 } 7708 TheCall->setArg(i, Arg.get()); 7709 i++; 7710 } 7711 7712 // Check formatting specifiers. NOTE: We're only doing this for the non-size 7713 // call to avoid duplicate diagnostics. 
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}

/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression.
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                                  llvm::APSInt &Result) {
  Expr *Arg = TheCall->getArg(ArgNum);
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;

  Optional<llvm::APSInt> R;
  if (!(R = Arg->getIntegerConstantExpr(Context)))
    return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
           << FDecl->getDeclName() << Arg->getSourceRange();
  Result = *R;
  return false;
}

/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                       int Low, int High, bool RangeIsError) {
  if (isConstantEvaluated())
    return false;
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << toString(Result, 10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << toString(Result, 10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}

/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of
/// CallExpr TheCall is a constant expression that is a multiple of Num.
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}

/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
/// constant expression representing a power of 2.
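// Worked example of the bit test used below (illustrative): for 8, 8 & 7 is
// 0b1000 & 0b0111 == 0, so 8 is accepted as a power of 2; for 6, 6 & 5 is
// 0b0110 & 0b0101 == 0b0100 != 0, so 6 is rejected.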
7808 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 7809 llvm::APSInt Result; 7810 7811 // We can't check the value of a dependent argument. 7812 Expr *Arg = TheCall->getArg(ArgNum); 7813 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7814 return false; 7815 7816 // Check constant-ness first. 7817 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7818 return true; 7819 7820 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 7821 // and only if x is a power of 2. 7822 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 7823 return false; 7824 7825 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 7826 << Arg->getSourceRange(); 7827 } 7828 7829 static bool IsShiftedByte(llvm::APSInt Value) { 7830 if (Value.isNegative()) 7831 return false; 7832 7833 // Check if it's a shifted byte, by shifting it down 7834 while (true) { 7835 // If the value fits in the bottom byte, the check passes. 7836 if (Value < 0x100) 7837 return true; 7838 7839 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7840 // fails. 7841 if ((Value & 0xFF) != 0) 7842 return false; 7843 7844 // If the bottom 8 bits are all 0, but something above that is nonzero, 7845 // then shifting the value right by 8 bits won't affect whether it's a 7846 // shifted byte or not. So do that, and go round again. 7847 Value >>= 8; 7848 } 7849 } 7850 7851 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7852 /// a constant expression representing an arbitrary byte value shifted left by 7853 /// a multiple of 8 bits. 7854 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7855 unsigned ArgBits) { 7856 llvm::APSInt Result; 7857 7858 // We can't check the value of a dependent argument. 7859 Expr *Arg = TheCall->getArg(ArgNum); 7860 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7861 return false; 7862 7863 // Check constant-ness first. 7864 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7865 return true; 7866 7867 // Truncate to the given size. 7868 Result = Result.getLoBits(ArgBits); 7869 Result.setIsUnsigned(true); 7870 7871 if (IsShiftedByte(Result)) 7872 return false; 7873 7874 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7875 << Arg->getSourceRange(); 7876 } 7877 7878 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7879 /// TheCall is a constant expression representing either a shifted byte value, 7880 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7881 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7882 /// Arm MVE intrinsics. 7883 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7884 int ArgNum, 7885 unsigned ArgBits) { 7886 llvm::APSInt Result; 7887 7888 // We can't check the value of a dependent argument. 7889 Expr *Arg = TheCall->getArg(ArgNum); 7890 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7891 return false; 7892 7893 // Check constant-ness first. 7894 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7895 return true; 7896 7897 // Truncate to the given size. 7898 Result = Result.getLoBits(ArgBits); 7899 Result.setIsUnsigned(true); 7900 7901 // Check to see if it's in either of the required forms. 
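  // For illustration (values chosen here, not in the original source):
  // 0x3400 is the byte 0x34 shifted left by 8 and is accepted; 0x12FF matches
  // the 0x??FF form and is also accepted; 0x1234 fails both tests and is
  // diagnosed.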
7902 if (IsShiftedByte(Result) || 7903 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 7904 return false; 7905 7906 return Diag(TheCall->getBeginLoc(), 7907 diag::err_argument_not_shifted_byte_or_xxff) 7908 << Arg->getSourceRange(); 7909 } 7910 7911 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 7912 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 7913 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 7914 if (checkArgCount(*this, TheCall, 2)) 7915 return true; 7916 Expr *Arg0 = TheCall->getArg(0); 7917 Expr *Arg1 = TheCall->getArg(1); 7918 7919 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7920 if (FirstArg.isInvalid()) 7921 return true; 7922 QualType FirstArgType = FirstArg.get()->getType(); 7923 if (!FirstArgType->isAnyPointerType()) 7924 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7925 << "first" << FirstArgType << Arg0->getSourceRange(); 7926 TheCall->setArg(0, FirstArg.get()); 7927 7928 ExprResult SecArg = DefaultLvalueConversion(Arg1); 7929 if (SecArg.isInvalid()) 7930 return true; 7931 QualType SecArgType = SecArg.get()->getType(); 7932 if (!SecArgType->isIntegerType()) 7933 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7934 << "second" << SecArgType << Arg1->getSourceRange(); 7935 7936 // Derive the return type from the pointer argument. 7937 TheCall->setType(FirstArgType); 7938 return false; 7939 } 7940 7941 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 7942 if (checkArgCount(*this, TheCall, 2)) 7943 return true; 7944 7945 Expr *Arg0 = TheCall->getArg(0); 7946 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7947 if (FirstArg.isInvalid()) 7948 return true; 7949 QualType FirstArgType = FirstArg.get()->getType(); 7950 if (!FirstArgType->isAnyPointerType()) 7951 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7952 << "first" << FirstArgType << Arg0->getSourceRange(); 7953 TheCall->setArg(0, FirstArg.get()); 7954 7955 // Derive the return type from the pointer argument. 
7956 TheCall->setType(FirstArgType); 7957 7958 // Second arg must be an constant in range [0,15] 7959 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7960 } 7961 7962 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 7963 if (checkArgCount(*this, TheCall, 2)) 7964 return true; 7965 Expr *Arg0 = TheCall->getArg(0); 7966 Expr *Arg1 = TheCall->getArg(1); 7967 7968 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7969 if (FirstArg.isInvalid()) 7970 return true; 7971 QualType FirstArgType = FirstArg.get()->getType(); 7972 if (!FirstArgType->isAnyPointerType()) 7973 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7974 << "first" << FirstArgType << Arg0->getSourceRange(); 7975 7976 QualType SecArgType = Arg1->getType(); 7977 if (!SecArgType->isIntegerType()) 7978 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7979 << "second" << SecArgType << Arg1->getSourceRange(); 7980 TheCall->setType(Context.IntTy); 7981 return false; 7982 } 7983 7984 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 7985 BuiltinID == AArch64::BI__builtin_arm_stg) { 7986 if (checkArgCount(*this, TheCall, 1)) 7987 return true; 7988 Expr *Arg0 = TheCall->getArg(0); 7989 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7990 if (FirstArg.isInvalid()) 7991 return true; 7992 7993 QualType FirstArgType = FirstArg.get()->getType(); 7994 if (!FirstArgType->isAnyPointerType()) 7995 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7996 << "first" << FirstArgType << Arg0->getSourceRange(); 7997 TheCall->setArg(0, FirstArg.get()); 7998 7999 // Derive the return type from the pointer argument. 8000 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 8001 TheCall->setType(FirstArgType); 8002 return false; 8003 } 8004 8005 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 8006 Expr *ArgA = TheCall->getArg(0); 8007 Expr *ArgB = TheCall->getArg(1); 8008 8009 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 8010 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 8011 8012 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 8013 return true; 8014 8015 QualType ArgTypeA = ArgExprA.get()->getType(); 8016 QualType ArgTypeB = ArgExprB.get()->getType(); 8017 8018 auto isNull = [&] (Expr *E) -> bool { 8019 return E->isNullPointerConstant( 8020 Context, Expr::NPC_ValueDependentIsNotNull); }; 8021 8022 // argument should be either a pointer or null 8023 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 8024 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 8025 << "first" << ArgTypeA << ArgA->getSourceRange(); 8026 8027 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 8028 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 8029 << "second" << ArgTypeB << ArgB->getSourceRange(); 8030 8031 // Ensure Pointee types are compatible 8032 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 8033 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 8034 QualType pointeeA = ArgTypeA->getPointeeType(); 8035 QualType pointeeB = ArgTypeB->getPointeeType(); 8036 if (!Context.typesAreCompatible( 8037 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 8038 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 8039 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 8040 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 8041 << ArgB->getSourceRange(); 8042 } 8043 } 8044 8045 // at least one argument should be pointer type 8046 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 8047 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 8048 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 8049 8050 if (isNull(ArgA)) // adopt type of the other pointer 8051 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 8052 8053 if (isNull(ArgB)) 8054 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 8055 8056 TheCall->setArg(0, ArgExprA.get()); 8057 TheCall->setArg(1, ArgExprB.get()); 8058 TheCall->setType(Context.LongLongTy); 8059 return false; 8060 } 8061 assert(false && "Unhandled ARM MTE intrinsic"); 8062 return true; 8063 } 8064 8065 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 8066 /// TheCall is an ARM/AArch64 special register string literal. 8067 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 8068 int ArgNum, unsigned ExpectedFieldNum, 8069 bool AllowName) { 8070 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 8071 BuiltinID == ARM::BI__builtin_arm_wsr64 || 8072 BuiltinID == ARM::BI__builtin_arm_rsr || 8073 BuiltinID == ARM::BI__builtin_arm_rsrp || 8074 BuiltinID == ARM::BI__builtin_arm_wsr || 8075 BuiltinID == ARM::BI__builtin_arm_wsrp; 8076 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 8077 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 8078 BuiltinID == AArch64::BI__builtin_arm_rsr || 8079 BuiltinID == AArch64::BI__builtin_arm_rsrp || 8080 BuiltinID == AArch64::BI__builtin_arm_wsr || 8081 BuiltinID == AArch64::BI__builtin_arm_wsrp; 8082 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 8083 8084 // We can't check the value of a dependent argument. 8085 Expr *Arg = TheCall->getArg(ArgNum); 8086 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8087 return false; 8088 8089 // Check if the argument is a string literal. 8090 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 8091 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 8092 << Arg->getSourceRange(); 8093 8094 // Check the type of special register given. 8095 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 8096 SmallVector<StringRef, 6> Fields; 8097 Reg.split(Fields, ":"); 8098 8099 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 8100 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 8101 << Arg->getSourceRange(); 8102 8103 // If the string is the name of a register then we cannot check that it is 8104 // valid here but if the string is of one the forms described in ACLE then we 8105 // can check that the supplied fields are integers and within the valid 8106 // ranges. 8107 if (Fields.size() > 1) { 8108 bool FiveFields = Fields.size() == 5; 8109 8110 bool ValidString = true; 8111 if (IsARMBuiltin) { 8112 ValidString &= Fields[0].startswith_insensitive("cp") || 8113 Fields[0].startswith_insensitive("p"); 8114 if (ValidString) 8115 Fields[0] = Fields[0].drop_front( 8116 Fields[0].startswith_insensitive("cp") ? 2 : 1); 8117 8118 ValidString &= Fields[2].startswith_insensitive("c"); 8119 if (ValidString) 8120 Fields[2] = Fields[2].drop_front(1); 8121 8122 if (FiveFields) { 8123 ValidString &= Fields[3].startswith_insensitive("c"); 8124 if (ValidString) 8125 Fields[3] = Fields[3].drop_front(1); 8126 } 8127 } 8128 8129 SmallVector<int, 5> Ranges; 8130 if (FiveFields) 8131 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 8132 else 8133 Ranges.append({15, 7, 15}); 8134 8135 for (unsigned i=0; i<Fields.size(); ++i) { 8136 int IntField; 8137 ValidString &= !Fields[i].getAsInteger(10, IntField); 8138 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 8139 } 8140 8141 if (!ValidString) 8142 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 8143 << Arg->getSourceRange(); 8144 } else if (IsAArch64Builtin && Fields.size() == 1) { 8145 // If the register name is one of those that appear in the condition below 8146 // and the special register builtin being used is one of the write builtins, 8147 // then we require that the argument provided for writing to the register 8148 // is an integer constant expression. This is because it will be lowered to 8149 // an MSR (immediate) instruction, so we need to know the immediate at 8150 // compile time. 8151 if (TheCall->getNumArgs() != 2) 8152 return false; 8153 8154 std::string RegLower = Reg.lower(); 8155 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 8156 RegLower != "pan" && RegLower != "uao") 8157 return false; 8158 8159 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 8160 } 8161 8162 return false; 8163 } 8164 8165 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 8166 /// Emit an error and return true on failure; return false on success. 8167 /// TypeStr is a string containing the type descriptor of the value returned by 8168 /// the builtin and the descriptors of the expected type of the arguments. 8169 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, 8170 const char *TypeStr) { 8171 8172 assert((TypeStr[0] != '\0') && 8173 "Invalid types in PPC MMA builtin declaration"); 8174 8175 switch (BuiltinID) { 8176 default: 8177 // This function is called in CheckPPCBuiltinFunctionCall where the 8178 // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here 8179 // we are isolating the pair vector memop builtins that can be used with mma 8180 // off so the default case is every builtin that requires mma and paired 8181 // vector memops. 8182 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 8183 diag::err_ppc_builtin_only_on_arch, "10") || 8184 SemaFeatureCheck(*this, TheCall, "mma", 8185 diag::err_ppc_builtin_only_on_arch, "10")) 8186 return true; 8187 break; 8188 case PPC::BI__builtin_vsx_lxvp: 8189 case PPC::BI__builtin_vsx_stxvp: 8190 case PPC::BI__builtin_vsx_assemble_pair: 8191 case PPC::BI__builtin_vsx_disassemble_pair: 8192 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 8193 diag::err_ppc_builtin_only_on_arch, "10")) 8194 return true; 8195 break; 8196 } 8197 8198 unsigned Mask = 0; 8199 unsigned ArgNum = 0; 8200 8201 // The first type in TypeStr is the type of the value returned by the 8202 // builtin. So we first read that type and change the type of TheCall. 8203 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8204 TheCall->setType(type); 8205 8206 while (*TypeStr != '\0') { 8207 Mask = 0; 8208 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8209 if (ArgNum >= TheCall->getNumArgs()) { 8210 ArgNum++; 8211 break; 8212 } 8213 8214 Expr *Arg = TheCall->getArg(ArgNum); 8215 QualType PassedType = Arg->getType(); 8216 QualType StrippedRVType = PassedType.getCanonicalType(); 8217 8218 // Strip Restrict/Volatile qualifiers. 
8219 if (StrippedRVType.isRestrictQualified() || 8220 StrippedRVType.isVolatileQualified()) 8221 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType(); 8222 8223 // The only case where the argument type and expected type are allowed to 8224 // mismatch is if the argument type is a non-void pointer (or array) and 8225 // expected type is a void pointer. 8226 if (StrippedRVType != ExpectedType) 8227 if (!(ExpectedType->isVoidPointerType() && 8228 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType()))) 8229 return Diag(Arg->getBeginLoc(), 8230 diag::err_typecheck_convert_incompatible) 8231 << PassedType << ExpectedType << 1 << 0 << 0; 8232 8233 // If the value of the Mask is not 0, we have a constraint in the size of 8234 // the integer argument so here we ensure the argument is a constant that 8235 // is in the valid range. 8236 if (Mask != 0 && 8237 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 8238 return true; 8239 8240 ArgNum++; 8241 } 8242 8243 // In case we exited early from the previous loop, there are other types to 8244 // read from TypeStr. So we need to read them all to ensure we have the right 8245 // number of arguments in TheCall and if it is not the case, to display a 8246 // better error message. 8247 while (*TypeStr != '\0') { 8248 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8249 ArgNum++; 8250 } 8251 if (checkArgCount(*this, TheCall, ArgNum)) 8252 return true; 8253 8254 return false; 8255 } 8256 8257 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 8258 /// This checks that the target supports __builtin_longjmp and 8259 /// that val is a constant 1. 8260 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 8261 if (!Context.getTargetInfo().hasSjLjLowering()) 8262 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 8263 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8264 8265 Expr *Arg = TheCall->getArg(1); 8266 llvm::APSInt Result; 8267 8268 // TODO: This is less than ideal. Overload this to take a value. 8269 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 8270 return true; 8271 8272 if (Result != 1) 8273 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 8274 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 8275 8276 return false; 8277 } 8278 8279 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 8280 /// This checks that the target supports __builtin_setjmp. 8281 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 8282 if (!Context.getTargetInfo().hasSjLjLowering()) 8283 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 8284 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8285 return false; 8286 } 8287 8288 namespace { 8289 8290 class UncoveredArgHandler { 8291 enum { Unknown = -1, AllCovered = -2 }; 8292 8293 signed FirstUncoveredArg = Unknown; 8294 SmallVector<const Expr *, 4> DiagnosticExprs; 8295 8296 public: 8297 UncoveredArgHandler() = default; 8298 8299 bool hasUncoveredArg() const { 8300 return (FirstUncoveredArg >= 0); 8301 } 8302 8303 unsigned getUncoveredArg() const { 8304 assert(hasUncoveredArg() && "no uncovered argument"); 8305 return FirstUncoveredArg; 8306 } 8307 8308 void setAllCovered() { 8309 // A string has been found with all arguments covered, so clear out 8310 // the diagnostics. 
8311 DiagnosticExprs.clear(); 8312 FirstUncoveredArg = AllCovered; 8313 } 8314 8315 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 8316 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 8317 8318 // Don't update if a previous string covers all arguments. 8319 if (FirstUncoveredArg == AllCovered) 8320 return; 8321 8322 // UncoveredArgHandler tracks the highest uncovered argument index 8323 // and with it all the strings that match this index. 8324 if (NewFirstUncoveredArg == FirstUncoveredArg) 8325 DiagnosticExprs.push_back(StrExpr); 8326 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 8327 DiagnosticExprs.clear(); 8328 DiagnosticExprs.push_back(StrExpr); 8329 FirstUncoveredArg = NewFirstUncoveredArg; 8330 } 8331 } 8332 8333 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 8334 }; 8335 8336 enum StringLiteralCheckType { 8337 SLCT_NotALiteral, 8338 SLCT_UncheckedLiteral, 8339 SLCT_CheckedLiteral 8340 }; 8341 8342 } // namespace 8343 8344 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 8345 BinaryOperatorKind BinOpKind, 8346 bool AddendIsRight) { 8347 unsigned BitWidth = Offset.getBitWidth(); 8348 unsigned AddendBitWidth = Addend.getBitWidth(); 8349 // There might be negative interim results. 8350 if (Addend.isUnsigned()) { 8351 Addend = Addend.zext(++AddendBitWidth); 8352 Addend.setIsSigned(true); 8353 } 8354 // Adjust the bit width of the APSInts. 8355 if (AddendBitWidth > BitWidth) { 8356 Offset = Offset.sext(AddendBitWidth); 8357 BitWidth = AddendBitWidth; 8358 } else if (BitWidth > AddendBitWidth) { 8359 Addend = Addend.sext(BitWidth); 8360 } 8361 8362 bool Ov = false; 8363 llvm::APSInt ResOffset = Offset; 8364 if (BinOpKind == BO_Add) 8365 ResOffset = Offset.sadd_ov(Addend, Ov); 8366 else { 8367 assert(AddendIsRight && BinOpKind == BO_Sub && 8368 "operator must be add or sub with addend on the right"); 8369 ResOffset = Offset.ssub_ov(Addend, Ov); 8370 } 8371 8372 // We add an offset to a pointer here so we should support an offset as big as 8373 // possible. 8374 if (Ov) { 8375 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 8376 "index (intermediate) result too big"); 8377 Offset = Offset.sext(2 * BitWidth); 8378 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 8379 return; 8380 } 8381 8382 Offset = ResOffset; 8383 } 8384 8385 namespace { 8386 8387 // This is a wrapper class around StringLiteral to support offsetted string 8388 // literals as format strings. It takes the offset into account when returning 8389 // the string and its length or the source locations to display notes correctly. 
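// For example (an illustrative call, not taken from the original source):
// given
//   printf("%d: %s" + 4, s);
// the wrapper below presents the literal as if it were just "%s", so the
// checks and source locations line up with the part of the string that is
// actually used as the format.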
8390 class FormatStringLiteral { 8391 const StringLiteral *FExpr; 8392 int64_t Offset; 8393 8394 public: 8395 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 8396 : FExpr(fexpr), Offset(Offset) {} 8397 8398 StringRef getString() const { 8399 return FExpr->getString().drop_front(Offset); 8400 } 8401 8402 unsigned getByteLength() const { 8403 return FExpr->getByteLength() - getCharByteWidth() * Offset; 8404 } 8405 8406 unsigned getLength() const { return FExpr->getLength() - Offset; } 8407 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 8408 8409 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 8410 8411 QualType getType() const { return FExpr->getType(); } 8412 8413 bool isAscii() const { return FExpr->isAscii(); } 8414 bool isWide() const { return FExpr->isWide(); } 8415 bool isUTF8() const { return FExpr->isUTF8(); } 8416 bool isUTF16() const { return FExpr->isUTF16(); } 8417 bool isUTF32() const { return FExpr->isUTF32(); } 8418 bool isPascal() const { return FExpr->isPascal(); } 8419 8420 SourceLocation getLocationOfByte( 8421 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 8422 const TargetInfo &Target, unsigned *StartToken = nullptr, 8423 unsigned *StartTokenByteOffset = nullptr) const { 8424 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 8425 StartToken, StartTokenByteOffset); 8426 } 8427 8428 SourceLocation getBeginLoc() const LLVM_READONLY { 8429 return FExpr->getBeginLoc().getLocWithOffset(Offset); 8430 } 8431 8432 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 8433 }; 8434 8435 } // namespace 8436 8437 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8438 const Expr *OrigFormatExpr, 8439 ArrayRef<const Expr *> Args, 8440 bool HasVAListArg, unsigned format_idx, 8441 unsigned firstDataArg, 8442 Sema::FormatStringType Type, 8443 bool inFunctionCall, 8444 Sema::VariadicCallType CallType, 8445 llvm::SmallBitVector &CheckedVarArgs, 8446 UncoveredArgHandler &UncoveredArg, 8447 bool IgnoreStringsWithoutSpecifiers); 8448 8449 // Determine if an expression is a string literal or constant string. 8450 // If this function returns false on the arguments to a function expecting a 8451 // format string, we will usually need to emit a warning. 8452 // True string literals are then checked by CheckFormatString. 8453 static StringLiteralCheckType 8454 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 8455 bool HasVAListArg, unsigned format_idx, 8456 unsigned firstDataArg, Sema::FormatStringType Type, 8457 Sema::VariadicCallType CallType, bool InFunctionCall, 8458 llvm::SmallBitVector &CheckedVarArgs, 8459 UncoveredArgHandler &UncoveredArg, 8460 llvm::APSInt Offset, 8461 bool IgnoreStringsWithoutSpecifiers = false) { 8462 if (S.isConstantEvaluated()) 8463 return SLCT_NotALiteral; 8464 tryAgain: 8465 assert(Offset.isSigned() && "invalid offset"); 8466 8467 if (E->isTypeDependent() || E->isValueDependent()) 8468 return SLCT_NotALiteral; 8469 8470 E = E->IgnoreParenCasts(); 8471 8472 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 8473 // Technically -Wformat-nonliteral does not warn about this case. 8474 // The behavior of printf and friends in this case is implementation 8475 // dependent. Ideally if the format string cannot be null then 8476 // it should have a 'nonnull' attribute in the function prototype. 
8477 return SLCT_UncheckedLiteral; 8478 8479 switch (E->getStmtClass()) { 8480 case Stmt::BinaryConditionalOperatorClass: 8481 case Stmt::ConditionalOperatorClass: { 8482 // The expression is a literal if both sub-expressions were, and it was 8483 // completely checked only if both sub-expressions were checked. 8484 const AbstractConditionalOperator *C = 8485 cast<AbstractConditionalOperator>(E); 8486 8487 // Determine whether it is necessary to check both sub-expressions, for 8488 // example, because the condition expression is a constant that can be 8489 // evaluated at compile time. 8490 bool CheckLeft = true, CheckRight = true; 8491 8492 bool Cond; 8493 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 8494 S.isConstantEvaluated())) { 8495 if (Cond) 8496 CheckRight = false; 8497 else 8498 CheckLeft = false; 8499 } 8500 8501 // We need to maintain the offsets for the right and the left hand side 8502 // separately to check if every possible indexed expression is a valid 8503 // string literal. They might have different offsets for different string 8504 // literals in the end. 8505 StringLiteralCheckType Left; 8506 if (!CheckLeft) 8507 Left = SLCT_UncheckedLiteral; 8508 else { 8509 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 8510 HasVAListArg, format_idx, firstDataArg, 8511 Type, CallType, InFunctionCall, 8512 CheckedVarArgs, UncoveredArg, Offset, 8513 IgnoreStringsWithoutSpecifiers); 8514 if (Left == SLCT_NotALiteral || !CheckRight) { 8515 return Left; 8516 } 8517 } 8518 8519 StringLiteralCheckType Right = checkFormatStringExpr( 8520 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 8521 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8522 IgnoreStringsWithoutSpecifiers); 8523 8524 return (CheckLeft && Left < Right) ? Left : Right; 8525 } 8526 8527 case Stmt::ImplicitCastExprClass: 8528 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 8529 goto tryAgain; 8530 8531 case Stmt::OpaqueValueExprClass: 8532 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 8533 E = src; 8534 goto tryAgain; 8535 } 8536 return SLCT_NotALiteral; 8537 8538 case Stmt::PredefinedExprClass: 8539 // While __func__, etc., are technically not string literals, they 8540 // cannot contain format specifiers and thus are not a security 8541 // liability. 8542 return SLCT_UncheckedLiteral; 8543 8544 case Stmt::DeclRefExprClass: { 8545 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 8546 8547 // As an exception, do not flag errors for variables binding to 8548 // const string literals. 8549 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 8550 bool isConstant = false; 8551 QualType T = DR->getType(); 8552 8553 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 8554 isConstant = AT->getElementType().isConstant(S.Context); 8555 } else if (const PointerType *PT = T->getAs<PointerType>()) { 8556 isConstant = T.isConstant(S.Context) && 8557 PT->getPointeeType().isConstant(S.Context); 8558 } else if (T->isObjCObjectPointerType()) { 8559 // In ObjC, there is usually no "const ObjectPointer" type, 8560 // so don't check if the pointee type is constant. 
8561 isConstant = T.isConstant(S.Context); 8562 } 8563 8564 if (isConstant) { 8565 if (const Expr *Init = VD->getAnyInitializer()) { 8566 // Look through initializers like const char c[] = { "foo" } 8567 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 8568 if (InitList->isStringLiteralInit()) 8569 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 8570 } 8571 return checkFormatStringExpr(S, Init, Args, 8572 HasVAListArg, format_idx, 8573 firstDataArg, Type, CallType, 8574 /*InFunctionCall*/ false, CheckedVarArgs, 8575 UncoveredArg, Offset); 8576 } 8577 } 8578 8579 // For vprintf* functions (i.e., HasVAListArg==true), we add a 8580 // special check to see if the format string is a function parameter 8581 // of the function calling the printf function. If the function 8582 // has an attribute indicating it is a printf-like function, then we 8583 // should suppress warnings concerning non-literals being used in a call 8584 // to a vprintf function. For example: 8585 // 8586 // void 8587 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 8588 // va_list ap; 8589 // va_start(ap, fmt); 8590 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 8591 // ... 8592 // } 8593 if (HasVAListArg) { 8594 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 8595 if (const Decl *D = dyn_cast<Decl>(PV->getDeclContext())) { 8596 int PVIndex = PV->getFunctionScopeIndex() + 1; 8597 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 8598 // adjust for implicit parameter 8599 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) 8600 if (MD->isInstance()) 8601 ++PVIndex; 8602 // We also check if the formats are compatible. 8603 // We can't pass a 'scanf' string to a 'printf' function. 8604 if (PVIndex == PVFormat->getFormatIdx() && 8605 Type == S.GetFormatStringType(PVFormat)) 8606 return SLCT_UncheckedLiteral; 8607 } 8608 } 8609 } 8610 } 8611 } 8612 8613 return SLCT_NotALiteral; 8614 } 8615 8616 case Stmt::CallExprClass: 8617 case Stmt::CXXMemberCallExprClass: { 8618 const CallExpr *CE = cast<CallExpr>(E); 8619 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 8620 bool IsFirst = true; 8621 StringLiteralCheckType CommonResult; 8622 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 8623 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 8624 StringLiteralCheckType Result = checkFormatStringExpr( 8625 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8626 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8627 IgnoreStringsWithoutSpecifiers); 8628 if (IsFirst) { 8629 CommonResult = Result; 8630 IsFirst = false; 8631 } 8632 } 8633 if (!IsFirst) 8634 return CommonResult; 8635 8636 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 8637 unsigned BuiltinID = FD->getBuiltinID(); 8638 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 8639 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 8640 const Expr *Arg = CE->getArg(0); 8641 return checkFormatStringExpr(S, Arg, Args, 8642 HasVAListArg, format_idx, 8643 firstDataArg, Type, CallType, 8644 InFunctionCall, CheckedVarArgs, 8645 UncoveredArg, Offset, 8646 IgnoreStringsWithoutSpecifiers); 8647 } 8648 } 8649 } 8650 8651 return SLCT_NotALiteral; 8652 } 8653 case Stmt::ObjCMessageExprClass: { 8654 const auto *ME = cast<ObjCMessageExpr>(E); 8655 if (const auto *MD = ME->getMethodDecl()) { 8656 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 8657 // As a special case heuristic, if we're using 
        // the method -[NSBundle localizedStringForKey:value:table:], ignore
        // any key strings that lack format specifiers. The idea is that if
        // the key doesn't have any format specifiers then it's probably just
        // a key to map to the localized strings. If it does have format
        // specifiers though, then it's likely that the text of the key is the
        // format string in the programmer's language, and should be checked.
        const ObjCInterfaceDecl *IFace;
        if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
            IFace->getIdentifier()->isStr("NSBundle") &&
            MD->getSelector().isKeywordSelector(
                {"localizedStringForKey", "value", "table"})) {
          IgnoreStringsWithoutSpecifiers = true;
        }

        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
                        firstDataArg, Type, InFunctionCall, CallType,
                        CheckedVarArgs, UncoveredArg,
                        IgnoreStringsWithoutSpecifiers);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
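    // For example (illustrative): in printf("%s: %d" + 4, n) the format
    // string that actually reaches printf is "%d", so the constant offset is
    // folded into Offset before the literal is checked.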
8711 if (BinOp->isAdditiveOp()) { 8712 Expr::EvalResult LResult, RResult; 8713 8714 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 8715 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8716 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 8717 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8718 8719 if (LIsInt != RIsInt) { 8720 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 8721 8722 if (LIsInt) { 8723 if (BinOpKind == BO_Add) { 8724 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8725 E = BinOp->getRHS(); 8726 goto tryAgain; 8727 } 8728 } else { 8729 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8730 E = BinOp->getLHS(); 8731 goto tryAgain; 8732 } 8733 } 8734 } 8735 8736 return SLCT_NotALiteral; 8737 } 8738 case Stmt::UnaryOperatorClass: { 8739 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8740 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8741 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8742 Expr::EvalResult IndexResult; 8743 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8744 Expr::SE_NoSideEffects, 8745 S.isConstantEvaluated())) { 8746 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8747 /*RHS is int*/ true); 8748 E = ASE->getBase(); 8749 goto tryAgain; 8750 } 8751 } 8752 8753 return SLCT_NotALiteral; 8754 } 8755 8756 default: 8757 return SLCT_NotALiteral; 8758 } 8759 } 8760 8761 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8762 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8763 .Case("scanf", FST_Scanf) 8764 .Cases("printf", "printf0", FST_Printf) 8765 .Cases("NSString", "CFString", FST_NSString) 8766 .Case("strftime", FST_Strftime) 8767 .Case("strfmon", FST_Strfmon) 8768 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8769 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8770 .Case("os_trace", FST_OSLog) 8771 .Case("os_log", FST_OSLog) 8772 .Default(FST_Unknown); 8773 } 8774 8775 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8776 /// functions) for correct use of format strings. 8777 /// Returns true if a format string has been fully checked. 8778 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8779 ArrayRef<const Expr *> Args, 8780 bool IsCXXMember, 8781 VariadicCallType CallType, 8782 SourceLocation Loc, SourceRange Range, 8783 llvm::SmallBitVector &CheckedVarArgs) { 8784 FormatStringInfo FSI; 8785 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 8786 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 8787 FSI.FirstDataArg, GetFormatStringType(Format), 8788 CallType, Loc, Range, CheckedVarArgs); 8789 return false; 8790 } 8791 8792 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8793 bool HasVAListArg, unsigned format_idx, 8794 unsigned firstDataArg, FormatStringType Type, 8795 VariadicCallType CallType, 8796 SourceLocation Loc, SourceRange Range, 8797 llvm::SmallBitVector &CheckedVarArgs) { 8798 // CHECK: printf/scanf-like function is called with no format string. 8799 if (format_idx >= Args.size()) { 8800 Diag(Loc, diag::warn_missing_format_string) << Range; 8801 return false; 8802 } 8803 8804 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8805 8806 // CHECK: format string is not a string literal. 8807 // 8808 // Dynamically generated format strings are difficult to 8809 // automatically vet at compile time. 
Requiring that format strings 8810 // are string literals: (1) permits the checking of format strings by 8811 // the compiler and thereby (2) can practically remove the source of 8812 // many format string exploits. 8813 8814 // Format string can be either ObjC string (e.g. @"%d") or 8815 // C string (e.g. "%d") 8816 // ObjC string uses the same format specifiers as C string, so we can use 8817 // the same format string checking logic for both ObjC and C strings. 8818 UncoveredArgHandler UncoveredArg; 8819 StringLiteralCheckType CT = 8820 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 8821 format_idx, firstDataArg, Type, CallType, 8822 /*IsFunctionCall*/ true, CheckedVarArgs, 8823 UncoveredArg, 8824 /*no string offset*/ llvm::APSInt(64, false) = 0); 8825 8826 // Generate a diagnostic where an uncovered argument is detected. 8827 if (UncoveredArg.hasUncoveredArg()) { 8828 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8829 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8830 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8831 } 8832 8833 if (CT != SLCT_NotALiteral) 8834 // Literal format string found, check done! 8835 return CT == SLCT_CheckedLiteral; 8836 8837 // Strftime is particular as it always uses a single 'time' argument, 8838 // so it is safe to pass a non-literal string. 8839 if (Type == FST_Strftime) 8840 return false; 8841 8842 // Do not emit diag when the string param is a macro expansion and the 8843 // format is either NSString or CFString. This is a hack to prevent 8844 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8845 // which are usually used in place of NS and CF string literals. 8846 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8847 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8848 return false; 8849 8850 // If there are no arguments specified, warn with -Wformat-security, otherwise 8851 // warn only with -Wformat-nonliteral. 8852 if (Args.size() == firstDataArg) { 8853 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8854 << OrigFormatExpr->getSourceRange(); 8855 switch (Type) { 8856 default: 8857 break; 8858 case FST_Kprintf: 8859 case FST_FreeBSDKPrintf: 8860 case FST_Printf: 8861 Diag(FormatLoc, diag::note_format_security_fixit) 8862 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8863 break; 8864 case FST_NSString: 8865 Diag(FormatLoc, diag::note_format_security_fixit) 8866 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8867 break; 8868 } 8869 } else { 8870 Diag(FormatLoc, diag::warn_format_nonliteral) 8871 << OrigFormatExpr->getSourceRange(); 8872 } 8873 return false; 8874 } 8875 8876 namespace { 8877 8878 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8879 protected: 8880 Sema &S; 8881 const FormatStringLiteral *FExpr; 8882 const Expr *OrigFormatExpr; 8883 const Sema::FormatStringType FSType; 8884 const unsigned FirstDataArg; 8885 const unsigned NumDataArgs; 8886 const char *Beg; // Start of format string. 
8887 const bool HasVAListArg; 8888 ArrayRef<const Expr *> Args; 8889 unsigned FormatIdx; 8890 llvm::SmallBitVector CoveredArgs; 8891 bool usesPositionalArgs = false; 8892 bool atFirstArg = true; 8893 bool inFunctionCall; 8894 Sema::VariadicCallType CallType; 8895 llvm::SmallBitVector &CheckedVarArgs; 8896 UncoveredArgHandler &UncoveredArg; 8897 8898 public: 8899 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8900 const Expr *origFormatExpr, 8901 const Sema::FormatStringType type, unsigned firstDataArg, 8902 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8903 ArrayRef<const Expr *> Args, unsigned formatIdx, 8904 bool inFunctionCall, Sema::VariadicCallType callType, 8905 llvm::SmallBitVector &CheckedVarArgs, 8906 UncoveredArgHandler &UncoveredArg) 8907 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8908 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8909 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8910 inFunctionCall(inFunctionCall), CallType(callType), 8911 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8912 CoveredArgs.resize(numDataArgs); 8913 CoveredArgs.reset(); 8914 } 8915 8916 void DoneProcessing(); 8917 8918 void HandleIncompleteSpecifier(const char *startSpecifier, 8919 unsigned specifierLen) override; 8920 8921 void HandleInvalidLengthModifier( 8922 const analyze_format_string::FormatSpecifier &FS, 8923 const analyze_format_string::ConversionSpecifier &CS, 8924 const char *startSpecifier, unsigned specifierLen, 8925 unsigned DiagID); 8926 8927 void HandleNonStandardLengthModifier( 8928 const analyze_format_string::FormatSpecifier &FS, 8929 const char *startSpecifier, unsigned specifierLen); 8930 8931 void HandleNonStandardConversionSpecifier( 8932 const analyze_format_string::ConversionSpecifier &CS, 8933 const char *startSpecifier, unsigned specifierLen); 8934 8935 void HandlePosition(const char *startPos, unsigned posLen) override; 8936 8937 void HandleInvalidPosition(const char *startSpecifier, 8938 unsigned specifierLen, 8939 analyze_format_string::PositionContext p) override; 8940 8941 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8942 8943 void HandleNullChar(const char *nullCharacter) override; 8944 8945 template <typename Range> 8946 static void 8947 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8948 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8949 bool IsStringLocation, Range StringRange, 8950 ArrayRef<FixItHint> Fixit = None); 8951 8952 protected: 8953 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8954 const char *startSpec, 8955 unsigned specifierLen, 8956 const char *csStart, unsigned csLen); 8957 8958 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8959 const char *startSpec, 8960 unsigned specifierLen); 8961 8962 SourceRange getFormatStringRange(); 8963 CharSourceRange getSpecifierRange(const char *startSpecifier, 8964 unsigned specifierLen); 8965 SourceLocation getLocationOfByte(const char *x); 8966 8967 const Expr *getDataArg(unsigned i) const; 8968 8969 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8970 const analyze_format_string::ConversionSpecifier &CS, 8971 const char *startSpecifier, unsigned specifierLen, 8972 unsigned argIndex); 8973 8974 template <typename Range> 8975 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8976 bool IsStringLocation, Range StringRange, 8977 ArrayRef<FixItHint> Fixit = None); 
8978 }; 8979 8980 } // namespace 8981 8982 SourceRange CheckFormatHandler::getFormatStringRange() { 8983 return OrigFormatExpr->getSourceRange(); 8984 } 8985 8986 CharSourceRange CheckFormatHandler:: 8987 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8988 SourceLocation Start = getLocationOfByte(startSpecifier); 8989 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8990 8991 // Advance the end SourceLocation by one due to half-open ranges. 8992 End = End.getLocWithOffset(1); 8993 8994 return CharSourceRange::getCharRange(Start, End); 8995 } 8996 8997 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 8998 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 8999 S.getLangOpts(), S.Context.getTargetInfo()); 9000 } 9001 9002 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 9003 unsigned specifierLen){ 9004 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 9005 getLocationOfByte(startSpecifier), 9006 /*IsStringLocation*/true, 9007 getSpecifierRange(startSpecifier, specifierLen)); 9008 } 9009 9010 void CheckFormatHandler::HandleInvalidLengthModifier( 9011 const analyze_format_string::FormatSpecifier &FS, 9012 const analyze_format_string::ConversionSpecifier &CS, 9013 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 9014 using namespace analyze_format_string; 9015 9016 const LengthModifier &LM = FS.getLengthModifier(); 9017 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9018 9019 // See if we know how to fix this length modifier. 9020 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9021 if (FixedLM) { 9022 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9023 getLocationOfByte(LM.getStart()), 9024 /*IsStringLocation*/true, 9025 getSpecifierRange(startSpecifier, specifierLen)); 9026 9027 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9028 << FixedLM->toString() 9029 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9030 9031 } else { 9032 FixItHint Hint; 9033 if (DiagID == diag::warn_format_nonsensical_length) 9034 Hint = FixItHint::CreateRemoval(LMRange); 9035 9036 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9037 getLocationOfByte(LM.getStart()), 9038 /*IsStringLocation*/true, 9039 getSpecifierRange(startSpecifier, specifierLen), 9040 Hint); 9041 } 9042 } 9043 9044 void CheckFormatHandler::HandleNonStandardLengthModifier( 9045 const analyze_format_string::FormatSpecifier &FS, 9046 const char *startSpecifier, unsigned specifierLen) { 9047 using namespace analyze_format_string; 9048 9049 const LengthModifier &LM = FS.getLengthModifier(); 9050 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9051 9052 // See if we know how to fix this length modifier. 
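// Example (illustrative): for "%qd", the BSD-style 'q' length modifier is not
// part of ISO C; when a standard equivalent (such as 'll') is known, the note
// below attaches a fix-it rewriting the modifier.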
9053 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9054 if (FixedLM) { 9055 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9056 << LM.toString() << 0, 9057 getLocationOfByte(LM.getStart()), 9058 /*IsStringLocation*/true, 9059 getSpecifierRange(startSpecifier, specifierLen)); 9060 9061 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9062 << FixedLM->toString() 9063 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9064 9065 } else { 9066 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9067 << LM.toString() << 0, 9068 getLocationOfByte(LM.getStart()), 9069 /*IsStringLocation*/true, 9070 getSpecifierRange(startSpecifier, specifierLen)); 9071 } 9072 } 9073 9074 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 9075 const analyze_format_string::ConversionSpecifier &CS, 9076 const char *startSpecifier, unsigned specifierLen) { 9077 using namespace analyze_format_string; 9078 9079 // See if we know how to fix this conversion specifier. 9080 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 9081 if (FixedCS) { 9082 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9083 << CS.toString() << /*conversion specifier*/1, 9084 getLocationOfByte(CS.getStart()), 9085 /*IsStringLocation*/true, 9086 getSpecifierRange(startSpecifier, specifierLen)); 9087 9088 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 9089 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 9090 << FixedCS->toString() 9091 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 9092 } else { 9093 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9094 << CS.toString() << /*conversion specifier*/1, 9095 getLocationOfByte(CS.getStart()), 9096 /*IsStringLocation*/true, 9097 getSpecifierRange(startSpecifier, specifierLen)); 9098 } 9099 } 9100 9101 void CheckFormatHandler::HandlePosition(const char *startPos, 9102 unsigned posLen) { 9103 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 9104 getLocationOfByte(startPos), 9105 /*IsStringLocation*/true, 9106 getSpecifierRange(startPos, posLen)); 9107 } 9108 9109 void 9110 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 9111 analyze_format_string::PositionContext p) { 9112 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 9113 << (unsigned) p, 9114 getLocationOfByte(startPos), /*IsStringLocation*/true, 9115 getSpecifierRange(startPos, posLen)); 9116 } 9117 9118 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 9119 unsigned posLen) { 9120 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 9121 getLocationOfByte(startPos), 9122 /*IsStringLocation*/true, 9123 getSpecifierRange(startPos, posLen)); 9124 } 9125 9126 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 9127 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 9128 // The presence of a null character is likely an error. 9129 EmitFormatDiagnostic( 9130 S.PDiag(diag::warn_printf_format_string_contains_null_char), 9131 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 9132 getFormatStringRange()); 9133 } 9134 } 9135 9136 // Note that this may return NULL if there was an error parsing or building 9137 // one of the argument expressions. 
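// Callers (e.g. HandleAmount and HandlePrintfSpecifier below) therefore
// null-check the result before type-checking the corresponding argument.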
9138 const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
9139 return Args[FirstDataArg + i];
9140 }
9141
9142 void CheckFormatHandler::DoneProcessing() {
9143 // Does the number of data arguments exceed the number of
9144 // format conversions in the format string?
9145 if (!HasVAListArg) {
9146 // Find any arguments that weren't covered.
9147 CoveredArgs.flip();
9148 signed notCoveredArg = CoveredArgs.find_first();
9149 if (notCoveredArg >= 0) {
9150 assert((unsigned)notCoveredArg < NumDataArgs);
9151 UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
9152 } else {
9153 UncoveredArg.setAllCovered();
9154 }
9155 }
9156 }
9157
9158 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
9159 const Expr *ArgExpr) {
9160 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
9161 "Invalid state");
9162
9163 if (!ArgExpr)
9164 return;
9165
9166 SourceLocation Loc = ArgExpr->getBeginLoc();
9167
9168 if (S.getSourceManager().isInSystemMacro(Loc))
9169 return;
9170
9171 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
9172 for (auto E : DiagnosticExprs)
9173 PDiag << E->getSourceRange();
9174
9175 CheckFormatHandler::EmitFormatDiagnostic(
9176 S, IsFunctionCall, DiagnosticExprs[0],
9177 PDiag, Loc, /*IsStringLocation*/false,
9178 DiagnosticExprs[0]->getSourceRange());
9179 }
9180
9181 bool
9182 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
9183 SourceLocation Loc,
9184 const char *startSpec,
9185 unsigned specifierLen,
9186 const char *csStart,
9187 unsigned csLen) {
9188 bool keepGoing = true;
9189 if (argIndex < NumDataArgs) {
9190 // Consider the argument covered, even though the specifier doesn't
9191 // make sense.
9192 CoveredArgs.set(argIndex);
9193 }
9194 else {
9195 // If argIndex exceeds the number of data arguments we
9196 // don't issue a warning because that is just a cascade of warnings (and
9197 // they may have intended '%%' anyway). We don't want to continue processing
9198 // the format string after this point, however, as we will likely just get
9199 // gibberish when trying to match arguments.
9200 keepGoing = false;
9201 }
9202
9203 StringRef Specifier(csStart, csLen);
9204
9205 // If the specifier is non-printable, it could be the first byte of a UTF-8
9206 // sequence. In that case, print the UTF-8 code point. Otherwise, print the
9207 // byte's hex value.
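// Example (illustrative): a stray 0x01 byte following '%' is reported as
// "\x01", while a valid multi-byte UTF-8 sequence (e.g. the bytes of U+03B1)
// is reported as "\u03b1".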
9208 std::string CodePointStr;
9209 if (!llvm::sys::locale::isPrint(*csStart)) {
9210 llvm::UTF32 CodePoint;
9211 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
9212 const llvm::UTF8 *E =
9213 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
9214 llvm::ConversionResult Result =
9215 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);
9216
9217 if (Result != llvm::conversionOK) {
9218 unsigned char FirstChar = *csStart;
9219 CodePoint = (llvm::UTF32)FirstChar;
9220 }
9221
9222 llvm::raw_string_ostream OS(CodePointStr);
9223 if (CodePoint < 256)
9224 OS << "\\x" << llvm::format("%02x", CodePoint);
9225 else if (CodePoint <= 0xFFFF)
9226 OS << "\\u" << llvm::format("%04x", CodePoint);
9227 else
9228 OS << "\\U" << llvm::format("%08x", CodePoint);
9229 OS.flush();
9230 Specifier = CodePointStr;
9231 }
9232
9233 EmitFormatDiagnostic(
9234 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
9235 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));
9236
9237 return keepGoing;
9238 }
9239
9240 void
9241 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
9242 const char *startSpec,
9243 unsigned specifierLen) {
9244 EmitFormatDiagnostic(
9245 S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
9246 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
9247 }
9248
9249 bool
9250 CheckFormatHandler::CheckNumArgs(
9251 const analyze_format_string::FormatSpecifier &FS,
9252 const analyze_format_string::ConversionSpecifier &CS,
9253 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
9254
9255 if (argIndex >= NumDataArgs) {
9256 PartialDiagnostic PDiag = FS.usesPositionalArg()
9257 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
9258 << (argIndex+1) << NumDataArgs)
9259 : S.PDiag(diag::warn_printf_insufficient_data_args);
9260 EmitFormatDiagnostic(
9261 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
9262 getSpecifierRange(startSpecifier, specifierLen));
9263
9264 // Since the format string consumes more arguments than were supplied,
9265 // every data argument is, by extension, covered, so mark them all as covered.
9266 UncoveredArg.setAllCovered();
9267 return false;
9268 }
9269 return true;
9270 }
9271
9272 template<typename Range>
9273 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
9274 SourceLocation Loc,
9275 bool IsStringLocation,
9276 Range StringRange,
9277 ArrayRef<FixItHint> FixIt) {
9278 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
9279 Loc, IsStringLocation, StringRange, FixIt);
9280 }
9281
9282 /// If the format string is not within the function call, emit a note
9283 /// so that both the function call and the format string appear in the diagnostics.
9284 ///
9285 /// \param InFunctionCall if true, the format string is within the function
9286 /// call and only one diagnostic message will be produced. Otherwise, an
9287 /// extra note will be emitted pointing to the location of the format string.
9288 ///
9289 /// \param ArgumentExpr the expression that is passed as the format string
9290 /// argument in the function call. Used for getting locations when two
9291 /// diagnostics are emitted.
9292 ///
9293 /// \param PDiag the callee should already have provided any strings for the
9294 /// diagnostic message. This function only adds locations and fixits
9295 /// to diagnostics.
9296 ///
9297 /// \param Loc primary location for diagnostic. If two diagnostics are
9298 /// required, one will be at Loc and a new SourceLocation will be created for
9299 /// the other one.
9300 ///
9301 /// \param IsStringLocation if true, Loc points to the format string and should
9302 /// be used for the note. Otherwise, Loc points to the argument list and will
9303 /// be used with PDiag.
9304 ///
9305 /// \param StringRange some or all of the string to highlight. This is
9306 /// templated so it can accept either a CharSourceRange or a SourceRange.
9307 ///
9308 /// \param FixIt optional fix-it hint for the format string.
9309 template <typename Range>
9310 void CheckFormatHandler::EmitFormatDiagnostic(
9311 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
9312 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
9313 Range StringRange, ArrayRef<FixItHint> FixIt) {
9314 if (InFunctionCall) {
9315 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
9316 D << StringRange;
9317 D << FixIt;
9318 } else {
9319 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
9320 << ArgumentExpr->getSourceRange();
9321
9322 const Sema::SemaDiagnosticBuilder &Note =
9323 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
9324 diag::note_format_string_defined);
9325
9326 Note << StringRange;
9327 Note << FixIt;
9328 }
9329 }
9330
9331 //===--- CHECK: Printf format string checking ------------------------------===//
9332
9333 namespace {
9334
9335 class CheckPrintfHandler : public CheckFormatHandler {
9336 public:
9337 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
9338 const Expr *origFormatExpr,
9339 const Sema::FormatStringType type, unsigned firstDataArg,
9340 unsigned numDataArgs, bool isObjC, const char *beg,
9341 bool hasVAListArg, ArrayRef<const Expr *> Args,
9342 unsigned formatIdx, bool inFunctionCall,
9343 Sema::VariadicCallType CallType,
9344 llvm::SmallBitVector &CheckedVarArgs,
9345 UncoveredArgHandler &UncoveredArg)
9346 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
9347 numDataArgs, beg, hasVAListArg, Args, formatIdx,
9348 inFunctionCall, CallType, CheckedVarArgs,
9349 UncoveredArg) {}
9350
9351 bool isObjCContext() const { return FSType == Sema::FST_NSString; }
9352
9353 /// Returns true if '%@' specifiers are allowed in the format string.
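/// For example (illustrative), NSString-style formats and the os_log/os_trace
/// variants accept "%@", whereas a plain printf format string does not.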
9354 bool allowsObjCArg() const { 9355 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 9356 FSType == Sema::FST_OSTrace; 9357 } 9358 9359 bool HandleInvalidPrintfConversionSpecifier( 9360 const analyze_printf::PrintfSpecifier &FS, 9361 const char *startSpecifier, 9362 unsigned specifierLen) override; 9363 9364 void handleInvalidMaskType(StringRef MaskType) override; 9365 9366 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 9367 const char *startSpecifier, unsigned specifierLen, 9368 const TargetInfo &Target) override; 9369 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9370 const char *StartSpecifier, 9371 unsigned SpecifierLen, 9372 const Expr *E); 9373 9374 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 9375 const char *startSpecifier, unsigned specifierLen); 9376 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 9377 const analyze_printf::OptionalAmount &Amt, 9378 unsigned type, 9379 const char *startSpecifier, unsigned specifierLen); 9380 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9381 const analyze_printf::OptionalFlag &flag, 9382 const char *startSpecifier, unsigned specifierLen); 9383 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 9384 const analyze_printf::OptionalFlag &ignoredFlag, 9385 const analyze_printf::OptionalFlag &flag, 9386 const char *startSpecifier, unsigned specifierLen); 9387 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 9388 const Expr *E); 9389 9390 void HandleEmptyObjCModifierFlag(const char *startFlag, 9391 unsigned flagLen) override; 9392 9393 void HandleInvalidObjCModifierFlag(const char *startFlag, 9394 unsigned flagLen) override; 9395 9396 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 9397 const char *flagsEnd, 9398 const char *conversionPosition) 9399 override; 9400 }; 9401 9402 } // namespace 9403 9404 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 9405 const analyze_printf::PrintfSpecifier &FS, 9406 const char *startSpecifier, 9407 unsigned specifierLen) { 9408 const analyze_printf::PrintfConversionSpecifier &CS = 9409 FS.getConversionSpecifier(); 9410 9411 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 9412 getLocationOfByte(CS.getStart()), 9413 startSpecifier, specifierLen, 9414 CS.getStart(), CS.getLength()); 9415 } 9416 9417 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 9418 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 9419 } 9420 9421 bool CheckPrintfHandler::HandleAmount( 9422 const analyze_format_string::OptionalAmount &Amt, 9423 unsigned k, const char *startSpecifier, 9424 unsigned specifierLen) { 9425 if (Amt.hasDataArgument()) { 9426 if (!HasVAListArg) { 9427 unsigned argIndex = Amt.getArgIndex(); 9428 if (argIndex >= NumDataArgs) { 9429 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 9430 << k, 9431 getLocationOfByte(Amt.getStart()), 9432 /*IsStringLocation*/true, 9433 getSpecifierRange(startSpecifier, specifierLen)); 9434 // Don't do any more checking. We will just emit 9435 // spurious errors. 9436 return false; 9437 } 9438 9439 // Type check the data argument. It should be an 'int'. 9440 // Although not in conformance with C99, we also allow the argument to be 9441 // an 'unsigned int' as that is a reasonably safe case. GCC also 9442 // doesn't emit a warning for that case. 
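// Example (illustrative): in printf("%*d", width, n), 'width' is the data
// argument checked here; passing a 'long' for it triggers the mismatch
// warning below.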
9443 CoveredArgs.set(argIndex); 9444 const Expr *Arg = getDataArg(argIndex); 9445 if (!Arg) 9446 return false; 9447 9448 QualType T = Arg->getType(); 9449 9450 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 9451 assert(AT.isValid()); 9452 9453 if (!AT.matchesType(S.Context, T)) { 9454 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 9455 << k << AT.getRepresentativeTypeName(S.Context) 9456 << T << Arg->getSourceRange(), 9457 getLocationOfByte(Amt.getStart()), 9458 /*IsStringLocation*/true, 9459 getSpecifierRange(startSpecifier, specifierLen)); 9460 // Don't do any more checking. We will just emit 9461 // spurious errors. 9462 return false; 9463 } 9464 } 9465 } 9466 return true; 9467 } 9468 9469 void CheckPrintfHandler::HandleInvalidAmount( 9470 const analyze_printf::PrintfSpecifier &FS, 9471 const analyze_printf::OptionalAmount &Amt, 9472 unsigned type, 9473 const char *startSpecifier, 9474 unsigned specifierLen) { 9475 const analyze_printf::PrintfConversionSpecifier &CS = 9476 FS.getConversionSpecifier(); 9477 9478 FixItHint fixit = 9479 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 9480 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 9481 Amt.getConstantLength())) 9482 : FixItHint(); 9483 9484 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 9485 << type << CS.toString(), 9486 getLocationOfByte(Amt.getStart()), 9487 /*IsStringLocation*/true, 9488 getSpecifierRange(startSpecifier, specifierLen), 9489 fixit); 9490 } 9491 9492 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9493 const analyze_printf::OptionalFlag &flag, 9494 const char *startSpecifier, 9495 unsigned specifierLen) { 9496 // Warn about pointless flag with a fixit removal. 9497 const analyze_printf::PrintfConversionSpecifier &CS = 9498 FS.getConversionSpecifier(); 9499 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 9500 << flag.toString() << CS.toString(), 9501 getLocationOfByte(flag.getPosition()), 9502 /*IsStringLocation*/true, 9503 getSpecifierRange(startSpecifier, specifierLen), 9504 FixItHint::CreateRemoval( 9505 getSpecifierRange(flag.getPosition(), 1))); 9506 } 9507 9508 void CheckPrintfHandler::HandleIgnoredFlag( 9509 const analyze_printf::PrintfSpecifier &FS, 9510 const analyze_printf::OptionalFlag &ignoredFlag, 9511 const analyze_printf::OptionalFlag &flag, 9512 const char *startSpecifier, 9513 unsigned specifierLen) { 9514 // Warn about ignored flag with a fixit removal. 9515 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 9516 << ignoredFlag.toString() << flag.toString(), 9517 getLocationOfByte(ignoredFlag.getPosition()), 9518 /*IsStringLocation*/true, 9519 getSpecifierRange(startSpecifier, specifierLen), 9520 FixItHint::CreateRemoval( 9521 getSpecifierRange(ignoredFlag.getPosition(), 1))); 9522 } 9523 9524 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 9525 unsigned flagLen) { 9526 // Warn about an empty flag. 9527 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 9528 getLocationOfByte(startFlag), 9529 /*IsStringLocation*/true, 9530 getSpecifierRange(startFlag, flagLen)); 9531 } 9532 9533 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 9534 unsigned flagLen) { 9535 // Warn about an invalid flag. 
9536 auto Range = getSpecifierRange(startFlag, flagLen);
9537 StringRef flag(startFlag, flagLen);
9538 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
9539 getLocationOfByte(startFlag),
9540 /*IsStringLocation*/true,
9541 Range, FixItHint::CreateRemoval(Range));
9542 }
9543
9544 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
9545 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
9546 // Warn about using '[...]' without a '@' conversion.
9547 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
9548 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
9549 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
9550 getLocationOfByte(conversionPosition),
9551 /*IsStringLocation*/true,
9552 Range, FixItHint::CreateRemoval(Range));
9553 }
9554
9555 // Determines if the specified type is a C++ class or struct containing
9556 // a member with the specified name and kind (e.g. a CXXMethodDecl named
9557 // "c_str()").
9558 template<typename MemberKind>
9559 static llvm::SmallPtrSet<MemberKind*, 1>
9560 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
9561 const RecordType *RT = Ty->getAs<RecordType>();
9562 llvm::SmallPtrSet<MemberKind*, 1> Results;
9563
9564 if (!RT)
9565 return Results;
9566 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
9567 if (!RD || !RD->getDefinition())
9568 return Results;
9569
9570 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
9571 Sema::LookupMemberName);
9572 R.suppressDiagnostics();
9573
9574 // We just need to include all members of the right kind turned up by the
9575 // filter, at this point.
9576 if (S.LookupQualifiedName(R, RT->getDecl()))
9577 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
9578 NamedDecl *decl = (*I)->getUnderlyingDecl();
9579 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
9580 Results.insert(FK);
9581 }
9582 return Results;
9583 }
9584
9585 /// Check if we could call '.c_str()' on an object.
9586 ///
9587 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
9588 /// allow the call, or if it would be ambiguous).
9589 bool Sema::hasCStrMethod(const Expr *E) {
9590 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
9591
9592 MethodSet Results =
9593 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
9594 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
9595 MI != ME; ++MI)
9596 if ((*MI)->getMinRequiredArguments() == 0)
9597 return true;
9598 return false;
9599 }
9600
9601 // Check if a (w)string was passed when a (w)char* was needed, and offer a
9602 // better diagnostic if so. AT is assumed to be valid.
9603 // Returns true when a c_str() conversion method is found.
9604 bool CheckPrintfHandler::checkForCStrMembers(
9605 const analyze_printf::ArgType &AT, const Expr *E) {
9606 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
9607
9608 MethodSet Results =
9609 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
9610
9611 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
9612 MI != ME; ++MI) {
9613 const CXXMethodDecl *Method = *MI;
9614 if (Method->getMinRequiredArguments() == 0 &&
9615 AT.matchesType(S.Context, Method->getReturnType())) {
9616 // FIXME: Suggest parens if the expression needs them.
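// Example (illustrative): for printf("%s", s) where 's' is a class with a
// zero-argument c_str() returning 'const char *' (e.g. std::string), the
// note below suggests appending ".c_str()".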
9617 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 9618 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 9619 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 9620 return true; 9621 } 9622 } 9623 9624 return false; 9625 } 9626 9627 bool CheckPrintfHandler::HandlePrintfSpecifier( 9628 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, 9629 unsigned specifierLen, const TargetInfo &Target) { 9630 using namespace analyze_format_string; 9631 using namespace analyze_printf; 9632 9633 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 9634 9635 if (FS.consumesDataArgument()) { 9636 if (atFirstArg) { 9637 atFirstArg = false; 9638 usesPositionalArgs = FS.usesPositionalArg(); 9639 } 9640 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9641 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9642 startSpecifier, specifierLen); 9643 return false; 9644 } 9645 } 9646 9647 // First check if the field width, precision, and conversion specifier 9648 // have matching data arguments. 9649 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 9650 startSpecifier, specifierLen)) { 9651 return false; 9652 } 9653 9654 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 9655 startSpecifier, specifierLen)) { 9656 return false; 9657 } 9658 9659 if (!CS.consumesDataArgument()) { 9660 // FIXME: Technically specifying a precision or field width here 9661 // makes no sense. Worth issuing a warning at some point. 9662 return true; 9663 } 9664 9665 // Consume the argument. 9666 unsigned argIndex = FS.getArgIndex(); 9667 if (argIndex < NumDataArgs) { 9668 // The check to see if the argIndex is valid will come later. 9669 // We set the bit here because we may exit early from this 9670 // function if we encounter some other error. 9671 CoveredArgs.set(argIndex); 9672 } 9673 9674 // FreeBSD kernel extensions. 9675 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 9676 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 9677 // We need at least two arguments. 9678 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 9679 return false; 9680 9681 // Claim the second argument. 9682 CoveredArgs.set(argIndex + 1); 9683 9684 // Type check the first argument (int for %b, pointer for %D) 9685 const Expr *Ex = getDataArg(argIndex); 9686 const analyze_printf::ArgType &AT = 9687 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 9688 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 9689 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 9690 EmitFormatDiagnostic( 9691 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9692 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 9693 << false << Ex->getSourceRange(), 9694 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9695 getSpecifierRange(startSpecifier, specifierLen)); 9696 9697 // Type check the second argument (char * for both %b and %D) 9698 Ex = getDataArg(argIndex + 1); 9699 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 9700 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 9701 EmitFormatDiagnostic( 9702 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9703 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 9704 << false << Ex->getSourceRange(), 9705 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9706 getSpecifierRange(startSpecifier, specifierLen)); 9707 9708 return true; 9709 } 9710 9711 // Check for using an Objective-C specific conversion specifier 9712 // in a non-ObjC literal. 
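// Example (illustrative): "%@" in a plain printf format is rejected here as
// an invalid conversion, while NSString, os_log, and os_trace formats accept
// it.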
9713 if (!allowsObjCArg() && CS.isObjCArg()) { 9714 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9715 specifierLen); 9716 } 9717 9718 // %P can only be used with os_log. 9719 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 9720 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9721 specifierLen); 9722 } 9723 9724 // %n is not allowed with os_log. 9725 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 9726 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 9727 getLocationOfByte(CS.getStart()), 9728 /*IsStringLocation*/ false, 9729 getSpecifierRange(startSpecifier, specifierLen)); 9730 9731 return true; 9732 } 9733 9734 // Only scalars are allowed for os_trace. 9735 if (FSType == Sema::FST_OSTrace && 9736 (CS.getKind() == ConversionSpecifier::PArg || 9737 CS.getKind() == ConversionSpecifier::sArg || 9738 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 9739 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9740 specifierLen); 9741 } 9742 9743 // Check for use of public/private annotation outside of os_log(). 9744 if (FSType != Sema::FST_OSLog) { 9745 if (FS.isPublic().isSet()) { 9746 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9747 << "public", 9748 getLocationOfByte(FS.isPublic().getPosition()), 9749 /*IsStringLocation*/ false, 9750 getSpecifierRange(startSpecifier, specifierLen)); 9751 } 9752 if (FS.isPrivate().isSet()) { 9753 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9754 << "private", 9755 getLocationOfByte(FS.isPrivate().getPosition()), 9756 /*IsStringLocation*/ false, 9757 getSpecifierRange(startSpecifier, specifierLen)); 9758 } 9759 } 9760 9761 const llvm::Triple &Triple = Target.getTriple(); 9762 if (CS.getKind() == ConversionSpecifier::nArg && 9763 (Triple.isAndroid() || Triple.isOSFuchsia())) { 9764 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported), 9765 getLocationOfByte(CS.getStart()), 9766 /*IsStringLocation*/ false, 9767 getSpecifierRange(startSpecifier, specifierLen)); 9768 } 9769 9770 // Check for invalid use of field width 9771 if (!FS.hasValidFieldWidth()) { 9772 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 9773 startSpecifier, specifierLen); 9774 } 9775 9776 // Check for invalid use of precision 9777 if (!FS.hasValidPrecision()) { 9778 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 9779 startSpecifier, specifierLen); 9780 } 9781 9782 // Precision is mandatory for %P specifier. 9783 if (CS.getKind() == ConversionSpecifier::PArg && 9784 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 9785 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 9786 getLocationOfByte(startSpecifier), 9787 /*IsStringLocation*/ false, 9788 getSpecifierRange(startSpecifier, specifierLen)); 9789 } 9790 9791 // Check each flag does not conflict with any other component. 
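// Example (illustrative): "%+s" warns that '+' is not meaningful with the 's'
// conversion, and "%-0d" warns (further below) that '0' is ignored when '-'
// is present.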
9792 if (!FS.hasValidThousandsGroupingPrefix()) 9793 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 9794 if (!FS.hasValidLeadingZeros()) 9795 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 9796 if (!FS.hasValidPlusPrefix()) 9797 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 9798 if (!FS.hasValidSpacePrefix()) 9799 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 9800 if (!FS.hasValidAlternativeForm()) 9801 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 9802 if (!FS.hasValidLeftJustified()) 9803 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 9804 9805 // Check that flags are not ignored by another flag 9806 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 9807 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 9808 startSpecifier, specifierLen); 9809 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 9810 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 9811 startSpecifier, specifierLen); 9812 9813 // Check the length modifier is valid with the given conversion specifier. 9814 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9815 S.getLangOpts())) 9816 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9817 diag::warn_format_nonsensical_length); 9818 else if (!FS.hasStandardLengthModifier()) 9819 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9820 else if (!FS.hasStandardLengthConversionCombination()) 9821 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9822 diag::warn_format_non_standard_conversion_spec); 9823 9824 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9825 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9826 9827 // The remaining checks depend on the data arguments. 9828 if (HasVAListArg) 9829 return true; 9830 9831 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9832 return false; 9833 9834 const Expr *Arg = getDataArg(argIndex); 9835 if (!Arg) 9836 return true; 9837 9838 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 9839 } 9840 9841 static bool requiresParensToAddCast(const Expr *E) { 9842 // FIXME: We should have a general way to reason about operator 9843 // precedence and whether parens are actually needed here. 9844 // Take care of a few common cases where they aren't. 
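// Example (illustrative): a plain DeclRefExpr like 'x' or a call 'f()' can
// take a prefixed cast directly ("(long)x"), whereas 'a + b' must become
// "(long)(a + b)" and therefore hits the default case below.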
9845 const Expr *Inside = E->IgnoreImpCasts(); 9846 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 9847 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 9848 9849 switch (Inside->getStmtClass()) { 9850 case Stmt::ArraySubscriptExprClass: 9851 case Stmt::CallExprClass: 9852 case Stmt::CharacterLiteralClass: 9853 case Stmt::CXXBoolLiteralExprClass: 9854 case Stmt::DeclRefExprClass: 9855 case Stmt::FloatingLiteralClass: 9856 case Stmt::IntegerLiteralClass: 9857 case Stmt::MemberExprClass: 9858 case Stmt::ObjCArrayLiteralClass: 9859 case Stmt::ObjCBoolLiteralExprClass: 9860 case Stmt::ObjCBoxedExprClass: 9861 case Stmt::ObjCDictionaryLiteralClass: 9862 case Stmt::ObjCEncodeExprClass: 9863 case Stmt::ObjCIvarRefExprClass: 9864 case Stmt::ObjCMessageExprClass: 9865 case Stmt::ObjCPropertyRefExprClass: 9866 case Stmt::ObjCStringLiteralClass: 9867 case Stmt::ObjCSubscriptRefExprClass: 9868 case Stmt::ParenExprClass: 9869 case Stmt::StringLiteralClass: 9870 case Stmt::UnaryOperatorClass: 9871 return false; 9872 default: 9873 return true; 9874 } 9875 } 9876 9877 static std::pair<QualType, StringRef> 9878 shouldNotPrintDirectly(const ASTContext &Context, 9879 QualType IntendedTy, 9880 const Expr *E) { 9881 // Use a 'while' to peel off layers of typedefs. 9882 QualType TyTy = IntendedTy; 9883 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 9884 StringRef Name = UserTy->getDecl()->getName(); 9885 QualType CastTy = llvm::StringSwitch<QualType>(Name) 9886 .Case("CFIndex", Context.getNSIntegerType()) 9887 .Case("NSInteger", Context.getNSIntegerType()) 9888 .Case("NSUInteger", Context.getNSUIntegerType()) 9889 .Case("SInt32", Context.IntTy) 9890 .Case("UInt32", Context.UnsignedIntTy) 9891 .Default(QualType()); 9892 9893 if (!CastTy.isNull()) 9894 return std::make_pair(CastTy, Name); 9895 9896 TyTy = UserTy->desugar(); 9897 } 9898 9899 // Strip parens if necessary. 9900 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 9901 return shouldNotPrintDirectly(Context, 9902 PE->getSubExpr()->getType(), 9903 PE->getSubExpr()); 9904 9905 // If this is a conditional expression, then its result type is constructed 9906 // via usual arithmetic conversions and thus there might be no necessary 9907 // typedef sugar there. Recurse to operands to check for NSInteger & 9908 // Co. usage condition. 9909 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 9910 QualType TrueTy, FalseTy; 9911 StringRef TrueName, FalseName; 9912 9913 std::tie(TrueTy, TrueName) = 9914 shouldNotPrintDirectly(Context, 9915 CO->getTrueExpr()->getType(), 9916 CO->getTrueExpr()); 9917 std::tie(FalseTy, FalseName) = 9918 shouldNotPrintDirectly(Context, 9919 CO->getFalseExpr()->getType(), 9920 CO->getFalseExpr()); 9921 9922 if (TrueTy == FalseTy) 9923 return std::make_pair(TrueTy, TrueName); 9924 else if (TrueTy.isNull()) 9925 return std::make_pair(FalseTy, FalseName); 9926 else if (FalseTy.isNull()) 9927 return std::make_pair(TrueTy, TrueName); 9928 } 9929 9930 return std::make_pair(QualType(), StringRef()); 9931 } 9932 9933 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 9934 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 9935 /// type do not count. 9936 static bool 9937 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 9938 QualType From = ICE->getSubExpr()->getType(); 9939 QualType To = ICE->getType(); 9940 // It's an integer promotion if the destination type is the promoted 9941 // source type. 
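// Example (illustrative): a 'short' argument promoted to 'int' for the
// variadic call qualifies, as does a 'float' promoted to 'double' (handled
// at the end of this function).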
9942 if (ICE->getCastKind() == CK_IntegralCast && 9943 From->isPromotableIntegerType() && 9944 S.Context.getPromotedIntegerType(From) == To) 9945 return true; 9946 // Look through vector types, since we do default argument promotion for 9947 // those in OpenCL. 9948 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9949 From = VecTy->getElementType(); 9950 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9951 To = VecTy->getElementType(); 9952 // It's a floating promotion if the source type is a lower rank. 9953 return ICE->getCastKind() == CK_FloatingCast && 9954 S.Context.getFloatingTypeOrder(From, To) < 0; 9955 } 9956 9957 bool 9958 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9959 const char *StartSpecifier, 9960 unsigned SpecifierLen, 9961 const Expr *E) { 9962 using namespace analyze_format_string; 9963 using namespace analyze_printf; 9964 9965 // Now type check the data expression that matches the 9966 // format specifier. 9967 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9968 if (!AT.isValid()) 9969 return true; 9970 9971 QualType ExprTy = E->getType(); 9972 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9973 ExprTy = TET->getUnderlyingExpr()->getType(); 9974 } 9975 9976 // Diagnose attempts to print a boolean value as a character. Unlike other 9977 // -Wformat diagnostics, this is fine from a type perspective, but it still 9978 // doesn't make sense. 9979 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9980 E->isKnownToHaveBooleanValue()) { 9981 const CharSourceRange &CSR = 9982 getSpecifierRange(StartSpecifier, SpecifierLen); 9983 SmallString<4> FSString; 9984 llvm::raw_svector_ostream os(FSString); 9985 FS.toString(os); 9986 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9987 << FSString, 9988 E->getExprLoc(), false, CSR); 9989 return true; 9990 } 9991 9992 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9993 if (Match == analyze_printf::ArgType::Match) 9994 return true; 9995 9996 // Look through argument promotions for our error message's reported type. 9997 // This includes the integral and floating promotions, but excludes array 9998 // and function pointer decay (seeing that an argument intended to be a 9999 // string has type 'char [6]' is probably more confusing than 'char *') and 10000 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 10001 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10002 if (isArithmeticArgumentPromotion(S, ICE)) { 10003 E = ICE->getSubExpr(); 10004 ExprTy = E->getType(); 10005 10006 // Check if we didn't match because of an implicit cast from a 'char' 10007 // or 'short' to an 'int'. This is done because printf is a varargs 10008 // function. 10009 if (ICE->getType() == S.Context.IntTy || 10010 ICE->getType() == S.Context.UnsignedIntTy) { 10011 // All further checking is done on the subexpression 10012 const analyze_printf::ArgType::MatchKind ImplicitMatch = 10013 AT.matchesType(S.Context, ExprTy); 10014 if (ImplicitMatch == analyze_printf::ArgType::Match) 10015 return true; 10016 if (ImplicitMatch == ArgType::NoMatchPedantic || 10017 ImplicitMatch == ArgType::NoMatchTypeConfusion) 10018 Match = ImplicitMatch; 10019 } 10020 } 10021 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 10022 // Special case for 'a', which has type 'int' in C. 
10023 // Note, however, that we do /not/ want to treat multibyte constants like 10024 // 'MooV' as characters! This form is deprecated but still exists. In 10025 // addition, don't treat expressions as of type 'char' if one byte length 10026 // modifier is provided. 10027 if (ExprTy == S.Context.IntTy && 10028 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 10029 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 10030 ExprTy = S.Context.CharTy; 10031 } 10032 10033 // Look through enums to their underlying type. 10034 bool IsEnum = false; 10035 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 10036 ExprTy = EnumTy->getDecl()->getIntegerType(); 10037 IsEnum = true; 10038 } 10039 10040 // %C in an Objective-C context prints a unichar, not a wchar_t. 10041 // If the argument is an integer of some kind, believe the %C and suggest 10042 // a cast instead of changing the conversion specifier. 10043 QualType IntendedTy = ExprTy; 10044 if (isObjCContext() && 10045 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 10046 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 10047 !ExprTy->isCharType()) { 10048 // 'unichar' is defined as a typedef of unsigned short, but we should 10049 // prefer using the typedef if it is visible. 10050 IntendedTy = S.Context.UnsignedShortTy; 10051 10052 // While we are here, check if the value is an IntegerLiteral that happens 10053 // to be within the valid range. 10054 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 10055 const llvm::APInt &V = IL->getValue(); 10056 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 10057 return true; 10058 } 10059 10060 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 10061 Sema::LookupOrdinaryName); 10062 if (S.LookupName(Result, S.getCurScope())) { 10063 NamedDecl *ND = Result.getFoundDecl(); 10064 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 10065 if (TD->getUnderlyingType() == IntendedTy) 10066 IntendedTy = S.Context.getTypedefType(TD); 10067 } 10068 } 10069 } 10070 10071 // Special-case some of Darwin's platform-independence types by suggesting 10072 // casts to primitive types that are known to be large enough. 10073 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 10074 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 10075 QualType CastTy; 10076 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 10077 if (!CastTy.isNull()) { 10078 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 10079 // (long in ASTContext). Only complain to pedants. 10080 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 10081 (AT.isSizeT() || AT.isPtrdiffT()) && 10082 AT.matchesType(S.Context, CastTy)) 10083 Match = ArgType::NoMatchPedantic; 10084 IntendedTy = CastTy; 10085 ShouldNotPrintDirectly = true; 10086 } 10087 } 10088 10089 // We may be able to offer a FixItHint if it is a supported type. 
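// Example (illustrative): for printf("%d", someLong), fixType() typically
// rewrites the specifier to "%ld", which is then offered as a replacement
// fix-it.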
10090 PrintfSpecifier fixedFS = FS; 10091 bool Success = 10092 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 10093 10094 if (Success) { 10095 // Get the fix string from the fixed format specifier 10096 SmallString<16> buf; 10097 llvm::raw_svector_ostream os(buf); 10098 fixedFS.toString(os); 10099 10100 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 10101 10102 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 10103 unsigned Diag; 10104 switch (Match) { 10105 case ArgType::Match: llvm_unreachable("expected non-matching"); 10106 case ArgType::NoMatchPedantic: 10107 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10108 break; 10109 case ArgType::NoMatchTypeConfusion: 10110 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10111 break; 10112 case ArgType::NoMatch: 10113 Diag = diag::warn_format_conversion_argument_type_mismatch; 10114 break; 10115 } 10116 10117 // In this case, the specifier is wrong and should be changed to match 10118 // the argument. 10119 EmitFormatDiagnostic(S.PDiag(Diag) 10120 << AT.getRepresentativeTypeName(S.Context) 10121 << IntendedTy << IsEnum << E->getSourceRange(), 10122 E->getBeginLoc(), 10123 /*IsStringLocation*/ false, SpecRange, 10124 FixItHint::CreateReplacement(SpecRange, os.str())); 10125 } else { 10126 // The canonical type for formatting this value is different from the 10127 // actual type of the expression. (This occurs, for example, with Darwin's 10128 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 10129 // should be printed as 'long' for 64-bit compatibility.) 10130 // Rather than emitting a normal format/argument mismatch, we want to 10131 // add a cast to the recommended type (and correct the format string 10132 // if necessary). 10133 SmallString<16> CastBuf; 10134 llvm::raw_svector_ostream CastFix(CastBuf); 10135 CastFix << "("; 10136 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 10137 CastFix << ")"; 10138 10139 SmallVector<FixItHint,4> Hints; 10140 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 10141 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 10142 10143 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 10144 // If there's already a cast present, just replace it. 10145 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 10146 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 10147 10148 } else if (!requiresParensToAddCast(E)) { 10149 // If the expression has high enough precedence, 10150 // just write the C-style cast. 10151 Hints.push_back( 10152 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10153 } else { 10154 // Otherwise, add parens around the expression as well as the cast. 10155 CastFix << "("; 10156 Hints.push_back( 10157 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10158 10159 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 10160 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 10161 } 10162 10163 if (ShouldNotPrintDirectly) { 10164 // The expression has a type that should not be printed directly. 10165 // We extract the name from the typedef because we don't want to show 10166 // the underlying type in the diagnostic. 
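// Example (illustrative): an NSInteger argument is reported by its typedef
// name, with a fix-it adding a cast to the recommended primitive type (e.g.
// '(long)' on 64-bit Darwin) and correcting the specifier if needed.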
10167 StringRef Name; 10168 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 10169 Name = TypedefTy->getDecl()->getName(); 10170 else 10171 Name = CastTyName; 10172 unsigned Diag = Match == ArgType::NoMatchPedantic 10173 ? diag::warn_format_argument_needs_cast_pedantic 10174 : diag::warn_format_argument_needs_cast; 10175 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 10176 << E->getSourceRange(), 10177 E->getBeginLoc(), /*IsStringLocation=*/false, 10178 SpecRange, Hints); 10179 } else { 10180 // In this case, the expression could be printed using a different 10181 // specifier, but we've decided that the specifier is probably correct 10182 // and we should cast instead. Just use the normal warning message. 10183 EmitFormatDiagnostic( 10184 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10185 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 10186 << E->getSourceRange(), 10187 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 10188 } 10189 } 10190 } else { 10191 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 10192 SpecifierLen); 10193 // Since the warning for passing non-POD types to variadic functions 10194 // was deferred until now, we emit a warning for non-POD 10195 // arguments here. 10196 switch (S.isValidVarArgType(ExprTy)) { 10197 case Sema::VAK_Valid: 10198 case Sema::VAK_ValidInCXX11: { 10199 unsigned Diag; 10200 switch (Match) { 10201 case ArgType::Match: llvm_unreachable("expected non-matching"); 10202 case ArgType::NoMatchPedantic: 10203 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10204 break; 10205 case ArgType::NoMatchTypeConfusion: 10206 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10207 break; 10208 case ArgType::NoMatch: 10209 Diag = diag::warn_format_conversion_argument_type_mismatch; 10210 break; 10211 } 10212 10213 EmitFormatDiagnostic( 10214 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 10215 << IsEnum << CSR << E->getSourceRange(), 10216 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10217 break; 10218 } 10219 case Sema::VAK_Undefined: 10220 case Sema::VAK_MSVCUndefined: 10221 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 10222 << S.getLangOpts().CPlusPlus11 << ExprTy 10223 << CallType 10224 << AT.getRepresentativeTypeName(S.Context) << CSR 10225 << E->getSourceRange(), 10226 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10227 checkForCStrMembers(AT, E); 10228 break; 10229 10230 case Sema::VAK_Invalid: 10231 if (ExprTy->isObjCObjectType()) 10232 EmitFormatDiagnostic( 10233 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 10234 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 10235 << AT.getRepresentativeTypeName(S.Context) << CSR 10236 << E->getSourceRange(), 10237 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10238 else 10239 // FIXME: If this is an initializer list, suggest removing the braces 10240 // or inserting a cast to the target type. 
10241 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
10242 << isa<InitListExpr>(E) << ExprTy << CallType
10243 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
10244 break;
10245 }
10246
10247 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
10248 "format string specifier index out of range");
10249 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
10250 }
10251
10252 return true;
10253 }
10254
10255 //===--- CHECK: Scanf format string checking ------------------------------===//
10256
10257 namespace {
10258
10259 class CheckScanfHandler : public CheckFormatHandler {
10260 public:
10261 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
10262 const Expr *origFormatExpr, Sema::FormatStringType type,
10263 unsigned firstDataArg, unsigned numDataArgs,
10264 const char *beg, bool hasVAListArg,
10265 ArrayRef<const Expr *> Args, unsigned formatIdx,
10266 bool inFunctionCall, Sema::VariadicCallType CallType,
10267 llvm::SmallBitVector &CheckedVarArgs,
10268 UncoveredArgHandler &UncoveredArg)
10269 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
10270 numDataArgs, beg, hasVAListArg, Args, formatIdx,
10271 inFunctionCall, CallType, CheckedVarArgs,
10272 UncoveredArg) {}
10273
10274 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
10275 const char *startSpecifier,
10276 unsigned specifierLen) override;
10277
10278 bool HandleInvalidScanfConversionSpecifier(
10279 const analyze_scanf::ScanfSpecifier &FS,
10280 const char *startSpecifier,
10281 unsigned specifierLen) override;
10282
10283 void HandleIncompleteScanList(const char *start, const char *end) override;
10284 };
10285
10286 } // namespace
10287
10288 void CheckScanfHandler::HandleIncompleteScanList(const char *start,
10289 const char *end) {
10290 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
10291 getLocationOfByte(end), /*IsStringLocation*/true,
10292 getSpecifierRange(start, end - start));
10293 }
10294
10295 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
10296 const analyze_scanf::ScanfSpecifier &FS,
10297 const char *startSpecifier,
10298 unsigned specifierLen) {
10299 const analyze_scanf::ScanfConversionSpecifier &CS =
10300 FS.getConversionSpecifier();
10301
10302 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
10303 getLocationOfByte(CS.getStart()),
10304 startSpecifier, specifierLen,
10305 CS.getStart(), CS.getLength());
10306 }
10307
10308 bool CheckScanfHandler::HandleScanfSpecifier(
10309 const analyze_scanf::ScanfSpecifier &FS,
10310 const char *startSpecifier,
10311 unsigned specifierLen) {
10312 using namespace analyze_scanf;
10313 using namespace analyze_format_string;
10314
10315 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
10316
10317 // Handle case where '%' and '*' don't consume an argument. These shouldn't
10318 // be used to decide if we are using positional arguments consistently.
10319 if (FS.consumesDataArgument()) {
10320 if (atFirstArg) {
10321 atFirstArg = false;
10322 usesPositionalArgs = FS.usesPositionalArg();
10323 }
10324 else if (usesPositionalArgs != FS.usesPositionalArg()) {
10325 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
10326 startSpecifier, specifierLen);
10327 return false;
10328 }
10329 }
10330
10331 // Check that the field width, if given as a constant, is non-zero.
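// Example (illustrative): scanf("%0d", &n) warns about the zero field width
// and offers a fix-it that removes the '0'.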
10332 const OptionalAmount &Amt = FS.getFieldWidth(); 10333 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 10334 if (Amt.getConstantAmount() == 0) { 10335 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 10336 Amt.getConstantLength()); 10337 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 10338 getLocationOfByte(Amt.getStart()), 10339 /*IsStringLocation*/true, R, 10340 FixItHint::CreateRemoval(R)); 10341 } 10342 } 10343 10344 if (!FS.consumesDataArgument()) { 10345 // FIXME: Technically specifying a precision or field width here 10346 // makes no sense. Worth issuing a warning at some point. 10347 return true; 10348 } 10349 10350 // Consume the argument. 10351 unsigned argIndex = FS.getArgIndex(); 10352 if (argIndex < NumDataArgs) { 10353 // The check to see if the argIndex is valid will come later. 10354 // We set the bit here because we may exit early from this 10355 // function if we encounter some other error. 10356 CoveredArgs.set(argIndex); 10357 } 10358 10359 // Check the length modifier is valid with the given conversion specifier. 10360 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 10361 S.getLangOpts())) 10362 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10363 diag::warn_format_nonsensical_length); 10364 else if (!FS.hasStandardLengthModifier()) 10365 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 10366 else if (!FS.hasStandardLengthConversionCombination()) 10367 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10368 diag::warn_format_non_standard_conversion_spec); 10369 10370 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 10371 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 10372 10373 // The remaining checks depend on the data arguments. 10374 if (HasVAListArg) 10375 return true; 10376 10377 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 10378 return false; 10379 10380 // Check that the argument type matches the format specifier. 10381 const Expr *Ex = getDataArg(argIndex); 10382 if (!Ex) 10383 return true; 10384 10385 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 10386 10387 if (!AT.isValid()) { 10388 return true; 10389 } 10390 10391 analyze_format_string::ArgType::MatchKind Match = 10392 AT.matchesType(S.Context, Ex->getType()); 10393 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 10394 if (Match == analyze_format_string::ArgType::Match) 10395 return true; 10396 10397 ScanfSpecifier fixedFS = FS; 10398 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 10399 S.getLangOpts(), S.Context); 10400 10401 unsigned Diag = 10402 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 10403 : diag::warn_format_conversion_argument_type_mismatch; 10404 10405 if (Success) { 10406 // Get the fix string from the fixed format specifier. 
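// Example (illustrative): scanf("%d", &longVar) with a 'long *' argument is
// typically fixed to "%ld", and the replacement is attached to the warning
// emitted below.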
10407 SmallString<128> buf; 10408 llvm::raw_svector_ostream os(buf); 10409 fixedFS.toString(os); 10410 10411 EmitFormatDiagnostic( 10412 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 10413 << Ex->getType() << false << Ex->getSourceRange(), 10414 Ex->getBeginLoc(), 10415 /*IsStringLocation*/ false, 10416 getSpecifierRange(startSpecifier, specifierLen), 10417 FixItHint::CreateReplacement( 10418 getSpecifierRange(startSpecifier, specifierLen), os.str())); 10419 } else { 10420 EmitFormatDiagnostic(S.PDiag(Diag) 10421 << AT.getRepresentativeTypeName(S.Context) 10422 << Ex->getType() << false << Ex->getSourceRange(), 10423 Ex->getBeginLoc(), 10424 /*IsStringLocation*/ false, 10425 getSpecifierRange(startSpecifier, specifierLen)); 10426 } 10427 10428 return true; 10429 } 10430 10431 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 10432 const Expr *OrigFormatExpr, 10433 ArrayRef<const Expr *> Args, 10434 bool HasVAListArg, unsigned format_idx, 10435 unsigned firstDataArg, 10436 Sema::FormatStringType Type, 10437 bool inFunctionCall, 10438 Sema::VariadicCallType CallType, 10439 llvm::SmallBitVector &CheckedVarArgs, 10440 UncoveredArgHandler &UncoveredArg, 10441 bool IgnoreStringsWithoutSpecifiers) { 10442 // CHECK: is the format string a wide literal? 10443 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 10444 CheckFormatHandler::EmitFormatDiagnostic( 10445 S, inFunctionCall, Args[format_idx], 10446 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 10447 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10448 return; 10449 } 10450 10451 // Str - The format string. NOTE: this is NOT null-terminated! 10452 StringRef StrRef = FExpr->getString(); 10453 const char *Str = StrRef.data(); 10454 // Account for cases where the string literal is truncated in a declaration. 10455 const ConstantArrayType *T = 10456 S.Context.getAsConstantArrayType(FExpr->getType()); 10457 assert(T && "String literal not of constant array type!"); 10458 size_t TypeSize = T->getSize().getZExtValue(); 10459 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10460 const unsigned numDataArgs = Args.size() - firstDataArg; 10461 10462 if (IgnoreStringsWithoutSpecifiers && 10463 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 10464 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10465 return; 10466 10467 // Emit a warning if the string literal is truncated and does not contain an 10468 // embedded null character. 10469 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 10470 CheckFormatHandler::EmitFormatDiagnostic( 10471 S, inFunctionCall, Args[format_idx], 10472 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 10473 FExpr->getBeginLoc(), 10474 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 10475 return; 10476 } 10477 10478 // CHECK: empty format string? 
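// Example (illustrative): printf("", x) warns that the format string is
// empty even though data arguments were supplied.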
10479   if (StrLen == 0 && numDataArgs > 0) {
10480     CheckFormatHandler::EmitFormatDiagnostic(
10481         S, inFunctionCall, Args[format_idx],
10482         S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
10483         /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
10484     return;
10485   }
10486 
10487   if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
10488       Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
10489       Type == Sema::FST_OSTrace) {
10490     CheckPrintfHandler H(
10491         S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
10492         (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
10493         HasVAListArg, Args, format_idx, inFunctionCall, CallType,
10494         CheckedVarArgs, UncoveredArg);
10495 
10496     if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
10497                                                   S.getLangOpts(),
10498                                                   S.Context.getTargetInfo(),
10499                                             Type == Sema::FST_FreeBSDKPrintf))
10500       H.DoneProcessing();
10501   } else if (Type == Sema::FST_Scanf) {
10502     CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
10503                         numDataArgs, Str, HasVAListArg, Args, format_idx,
10504                         inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
10505 
10506     if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
10507                                                  S.getLangOpts(),
10508                                                  S.Context.getTargetInfo()))
10509       H.DoneProcessing();
10510   } // TODO: handle other formats
10511 }
10512 
10513 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
10514   // Str - The format string. NOTE: this is NOT null-terminated!
10515   StringRef StrRef = FExpr->getString();
10516   const char *Str = StrRef.data();
10517   // Account for cases where the string literal is truncated in a declaration.
10518   const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
10519   assert(T && "String literal not of constant array type!");
10520   size_t TypeSize = T->getSize().getZExtValue();
10521   size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
10522   return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
10523                                                          getLangOpts(),
10524                                                          Context.getTargetInfo());
10525 }
10526 
10527 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
10528 
10529 // Returns the related absolute value function that is larger, or 0 if one
10530 // does not exist.
10531 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 10532 switch (AbsFunction) { 10533 default: 10534 return 0; 10535 10536 case Builtin::BI__builtin_abs: 10537 return Builtin::BI__builtin_labs; 10538 case Builtin::BI__builtin_labs: 10539 return Builtin::BI__builtin_llabs; 10540 case Builtin::BI__builtin_llabs: 10541 return 0; 10542 10543 case Builtin::BI__builtin_fabsf: 10544 return Builtin::BI__builtin_fabs; 10545 case Builtin::BI__builtin_fabs: 10546 return Builtin::BI__builtin_fabsl; 10547 case Builtin::BI__builtin_fabsl: 10548 return 0; 10549 10550 case Builtin::BI__builtin_cabsf: 10551 return Builtin::BI__builtin_cabs; 10552 case Builtin::BI__builtin_cabs: 10553 return Builtin::BI__builtin_cabsl; 10554 case Builtin::BI__builtin_cabsl: 10555 return 0; 10556 10557 case Builtin::BIabs: 10558 return Builtin::BIlabs; 10559 case Builtin::BIlabs: 10560 return Builtin::BIllabs; 10561 case Builtin::BIllabs: 10562 return 0; 10563 10564 case Builtin::BIfabsf: 10565 return Builtin::BIfabs; 10566 case Builtin::BIfabs: 10567 return Builtin::BIfabsl; 10568 case Builtin::BIfabsl: 10569 return 0; 10570 10571 case Builtin::BIcabsf: 10572 return Builtin::BIcabs; 10573 case Builtin::BIcabs: 10574 return Builtin::BIcabsl; 10575 case Builtin::BIcabsl: 10576 return 0; 10577 } 10578 } 10579 10580 // Returns the argument type of the absolute value function. 10581 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 10582 unsigned AbsType) { 10583 if (AbsType == 0) 10584 return QualType(); 10585 10586 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 10587 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 10588 if (Error != ASTContext::GE_None) 10589 return QualType(); 10590 10591 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 10592 if (!FT) 10593 return QualType(); 10594 10595 if (FT->getNumParams() != 1) 10596 return QualType(); 10597 10598 return FT->getParamType(0); 10599 } 10600 10601 // Returns the best absolute value function, or zero, based on type and 10602 // current absolute value function. 10603 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 10604 unsigned AbsFunctionKind) { 10605 unsigned BestKind = 0; 10606 uint64_t ArgSize = Context.getTypeSize(ArgType); 10607 for (unsigned Kind = AbsFunctionKind; Kind != 0; 10608 Kind = getLargerAbsoluteValueFunction(Kind)) { 10609 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 10610 if (Context.getTypeSize(ParamType) >= ArgSize) { 10611 if (BestKind == 0) 10612 BestKind = Kind; 10613 else if (Context.hasSameType(ParamType, ArgType)) { 10614 BestKind = Kind; 10615 break; 10616 } 10617 } 10618 } 10619 return BestKind; 10620 } 10621 10622 enum AbsoluteValueKind { 10623 AVK_Integer, 10624 AVK_Floating, 10625 AVK_Complex 10626 }; 10627 10628 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 10629 if (T->isIntegralOrEnumerationType()) 10630 return AVK_Integer; 10631 if (T->isRealFloatingType()) 10632 return AVK_Floating; 10633 if (T->isAnyComplexType()) 10634 return AVK_Complex; 10635 10636 llvm_unreachable("Type not integer, floating, or complex"); 10637 } 10638 10639 // Changes the absolute value function to a different type. Preserves whether 10640 // the function is a builtin. 
10641 static unsigned changeAbsFunction(unsigned AbsKind, 10642 AbsoluteValueKind ValueKind) { 10643 switch (ValueKind) { 10644 case AVK_Integer: 10645 switch (AbsKind) { 10646 default: 10647 return 0; 10648 case Builtin::BI__builtin_fabsf: 10649 case Builtin::BI__builtin_fabs: 10650 case Builtin::BI__builtin_fabsl: 10651 case Builtin::BI__builtin_cabsf: 10652 case Builtin::BI__builtin_cabs: 10653 case Builtin::BI__builtin_cabsl: 10654 return Builtin::BI__builtin_abs; 10655 case Builtin::BIfabsf: 10656 case Builtin::BIfabs: 10657 case Builtin::BIfabsl: 10658 case Builtin::BIcabsf: 10659 case Builtin::BIcabs: 10660 case Builtin::BIcabsl: 10661 return Builtin::BIabs; 10662 } 10663 case AVK_Floating: 10664 switch (AbsKind) { 10665 default: 10666 return 0; 10667 case Builtin::BI__builtin_abs: 10668 case Builtin::BI__builtin_labs: 10669 case Builtin::BI__builtin_llabs: 10670 case Builtin::BI__builtin_cabsf: 10671 case Builtin::BI__builtin_cabs: 10672 case Builtin::BI__builtin_cabsl: 10673 return Builtin::BI__builtin_fabsf; 10674 case Builtin::BIabs: 10675 case Builtin::BIlabs: 10676 case Builtin::BIllabs: 10677 case Builtin::BIcabsf: 10678 case Builtin::BIcabs: 10679 case Builtin::BIcabsl: 10680 return Builtin::BIfabsf; 10681 } 10682 case AVK_Complex: 10683 switch (AbsKind) { 10684 default: 10685 return 0; 10686 case Builtin::BI__builtin_abs: 10687 case Builtin::BI__builtin_labs: 10688 case Builtin::BI__builtin_llabs: 10689 case Builtin::BI__builtin_fabsf: 10690 case Builtin::BI__builtin_fabs: 10691 case Builtin::BI__builtin_fabsl: 10692 return Builtin::BI__builtin_cabsf; 10693 case Builtin::BIabs: 10694 case Builtin::BIlabs: 10695 case Builtin::BIllabs: 10696 case Builtin::BIfabsf: 10697 case Builtin::BIfabs: 10698 case Builtin::BIfabsl: 10699 return Builtin::BIcabsf; 10700 } 10701 } 10702 llvm_unreachable("Unable to convert function"); 10703 } 10704 10705 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10706 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 10707 if (!FnInfo) 10708 return 0; 10709 10710 switch (FDecl->getBuiltinID()) { 10711 default: 10712 return 0; 10713 case Builtin::BI__builtin_abs: 10714 case Builtin::BI__builtin_fabs: 10715 case Builtin::BI__builtin_fabsf: 10716 case Builtin::BI__builtin_fabsl: 10717 case Builtin::BI__builtin_labs: 10718 case Builtin::BI__builtin_llabs: 10719 case Builtin::BI__builtin_cabs: 10720 case Builtin::BI__builtin_cabsf: 10721 case Builtin::BI__builtin_cabsl: 10722 case Builtin::BIabs: 10723 case Builtin::BIlabs: 10724 case Builtin::BIllabs: 10725 case Builtin::BIfabs: 10726 case Builtin::BIfabsf: 10727 case Builtin::BIfabsl: 10728 case Builtin::BIcabs: 10729 case Builtin::BIcabsf: 10730 case Builtin::BIcabsl: 10731 return FDecl->getBuiltinID(); 10732 } 10733 llvm_unreachable("Unknown Builtin type"); 10734 } 10735 10736 // If the replacement is valid, emit a note with replacement function. 10737 // Additionally, suggest including the proper header if not already included. 
10738 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 10739 unsigned AbsKind, QualType ArgType) { 10740 bool EmitHeaderHint = true; 10741 const char *HeaderName = nullptr; 10742 const char *FunctionName = nullptr; 10743 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 10744 FunctionName = "std::abs"; 10745 if (ArgType->isIntegralOrEnumerationType()) { 10746 HeaderName = "cstdlib"; 10747 } else if (ArgType->isRealFloatingType()) { 10748 HeaderName = "cmath"; 10749 } else { 10750 llvm_unreachable("Invalid Type"); 10751 } 10752 10753 // Lookup all std::abs 10754 if (NamespaceDecl *Std = S.getStdNamespace()) { 10755 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 10756 R.suppressDiagnostics(); 10757 S.LookupQualifiedName(R, Std); 10758 10759 for (const auto *I : R) { 10760 const FunctionDecl *FDecl = nullptr; 10761 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 10762 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 10763 } else { 10764 FDecl = dyn_cast<FunctionDecl>(I); 10765 } 10766 if (!FDecl) 10767 continue; 10768 10769 // Found std::abs(), check that they are the right ones. 10770 if (FDecl->getNumParams() != 1) 10771 continue; 10772 10773 // Check that the parameter type can handle the argument. 10774 QualType ParamType = FDecl->getParamDecl(0)->getType(); 10775 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 10776 S.Context.getTypeSize(ArgType) <= 10777 S.Context.getTypeSize(ParamType)) { 10778 // Found a function, don't need the header hint. 10779 EmitHeaderHint = false; 10780 break; 10781 } 10782 } 10783 } 10784 } else { 10785 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 10786 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 10787 10788 if (HeaderName) { 10789 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 10790 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 10791 R.suppressDiagnostics(); 10792 S.LookupName(R, S.getCurScope()); 10793 10794 if (R.isSingleResult()) { 10795 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 10796 if (FD && FD->getBuiltinID() == AbsKind) { 10797 EmitHeaderHint = false; 10798 } else { 10799 return; 10800 } 10801 } else if (!R.empty()) { 10802 return; 10803 } 10804 } 10805 } 10806 10807 S.Diag(Loc, diag::note_replace_abs_function) 10808 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 10809 10810 if (!HeaderName) 10811 return; 10812 10813 if (!EmitHeaderHint) 10814 return; 10815 10816 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 10817 << FunctionName; 10818 } 10819 10820 template <std::size_t StrLen> 10821 static bool IsStdFunction(const FunctionDecl *FDecl, 10822 const char (&Str)[StrLen]) { 10823 if (!FDecl) 10824 return false; 10825 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 10826 return false; 10827 if (!FDecl->isInStdNamespace()) 10828 return false; 10829 10830 return true; 10831 } 10832 10833 // Warn when using the wrong abs() function. 
10834 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
10835                                       const FunctionDecl *FDecl) {
10836   if (Call->getNumArgs() != 1)
10837     return;
10838 
10839   unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
10840   bool IsStdAbs = IsStdFunction(FDecl, "abs");
10841   if (AbsKind == 0 && !IsStdAbs)
10842     return;
10843 
10844   QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
10845   QualType ParamType = Call->getArg(0)->getType();
10846 
10847   // Unsigned types cannot be negative. Suggest removing the absolute value
10848   // function call.
10849   if (ArgType->isUnsignedIntegerType()) {
10850     const char *FunctionName =
10851         IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
10852     Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
10853     Diag(Call->getExprLoc(), diag::note_remove_abs)
10854         << FunctionName
10855         << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
10856     return;
10857   }
10858 
10859   // Taking the absolute value of a pointer is very suspicious; they probably
10860   // wanted to index into an array, dereference a pointer, call a function, etc.
10861   if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
10862     unsigned DiagType = 0;
10863     if (ArgType->isFunctionType())
10864       DiagType = 1;
10865     else if (ArgType->isArrayType())
10866       DiagType = 2;
10867 
10868     Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
10869     return;
10870   }
10871 
10872   // std::abs has overloads which prevent most of the absolute value problems
10873   // from occurring.
10874   if (IsStdAbs)
10875     return;
10876 
10877   AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
10878   AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
10879 
10880   // The argument and parameter are the same kind. Check if they are the right
10881   // size.
10882   if (ArgValueKind == ParamValueKind) {
10883     if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
10884       return;
10885 
10886     unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
10887     Diag(Call->getExprLoc(), diag::warn_abs_too_small)
10888         << FDecl << ArgType << ParamType;
10889 
10890     if (NewAbsKind == 0)
10891       return;
10892 
10893     emitReplacement(*this, Call->getExprLoc(),
10894                     Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
10895     return;
10896   }
10897 
10898   // ArgValueKind != ParamValueKind
10899   // The wrong type of absolute value function was used. Attempt to find the
10900   // proper one.
10901   unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
10902   NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
10903   if (NewAbsKind == 0)
10904     return;
10905 
10906   Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
10907       << FDecl << ParamValueKind << ArgValueKind;
10908 
10909   emitReplacement(*this, Call->getExprLoc(),
10910                   Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
10911 }
10912 
10913 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
10914 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
10915                                 const FunctionDecl *FDecl) {
10916   if (!Call || !FDecl) return;
10917 
10918   // Ignore template specializations and macros.
10919 if (inTemplateInstantiation()) return; 10920 if (Call->getExprLoc().isMacroID()) return; 10921 10922 // Only care about the one template argument, two function parameter std::max 10923 if (Call->getNumArgs() != 2) return; 10924 if (!IsStdFunction(FDecl, "max")) return; 10925 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 10926 if (!ArgList) return; 10927 if (ArgList->size() != 1) return; 10928 10929 // Check that template type argument is unsigned integer. 10930 const auto& TA = ArgList->get(0); 10931 if (TA.getKind() != TemplateArgument::Type) return; 10932 QualType ArgType = TA.getAsType(); 10933 if (!ArgType->isUnsignedIntegerType()) return; 10934 10935 // See if either argument is a literal zero. 10936 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 10937 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 10938 if (!MTE) return false; 10939 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 10940 if (!Num) return false; 10941 if (Num->getValue() != 0) return false; 10942 return true; 10943 }; 10944 10945 const Expr *FirstArg = Call->getArg(0); 10946 const Expr *SecondArg = Call->getArg(1); 10947 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 10948 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 10949 10950 // Only warn when exactly one argument is zero. 10951 if (IsFirstArgZero == IsSecondArgZero) return; 10952 10953 SourceRange FirstRange = FirstArg->getSourceRange(); 10954 SourceRange SecondRange = SecondArg->getSourceRange(); 10955 10956 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 10957 10958 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 10959 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 10960 10961 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 10962 SourceRange RemovalRange; 10963 if (IsFirstArgZero) { 10964 RemovalRange = SourceRange(FirstRange.getBegin(), 10965 SecondRange.getBegin().getLocWithOffset(-1)); 10966 } else { 10967 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 10968 SecondRange.getEnd()); 10969 } 10970 10971 Diag(Call->getExprLoc(), diag::note_remove_max_call) 10972 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 10973 << FixItHint::CreateRemoval(RemovalRange); 10974 } 10975 10976 //===--- CHECK: Standard memory functions ---------------------------------===// 10977 10978 /// Takes the expression passed to the size_t parameter of functions 10979 /// such as memcmp, strncat, etc and warns if it's a comparison. 10980 /// 10981 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 
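/// The accompanying note's fix-it closes the call before the comparison
/// operator, so the example above becomes 'memcmp(&a, &b, sizeof(a)) > 0'.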
10982 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 10983 IdentifierInfo *FnName, 10984 SourceLocation FnLoc, 10985 SourceLocation RParenLoc) { 10986 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 10987 if (!Size) 10988 return false; 10989 10990 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 10991 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 10992 return false; 10993 10994 SourceRange SizeRange = Size->getSourceRange(); 10995 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 10996 << SizeRange << FnName; 10997 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 10998 << FnName 10999 << FixItHint::CreateInsertion( 11000 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 11001 << FixItHint::CreateRemoval(RParenLoc); 11002 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 11003 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 11004 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 11005 ")"); 11006 11007 return true; 11008 } 11009 11010 /// Determine whether the given type is or contains a dynamic class type 11011 /// (e.g., whether it has a vtable). 11012 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 11013 bool &IsContained) { 11014 // Look through array types while ignoring qualifiers. 11015 const Type *Ty = T->getBaseElementTypeUnsafe(); 11016 IsContained = false; 11017 11018 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 11019 RD = RD ? RD->getDefinition() : nullptr; 11020 if (!RD || RD->isInvalidDecl()) 11021 return nullptr; 11022 11023 if (RD->isDynamicClass()) 11024 return RD; 11025 11026 // Check all the fields. If any bases were dynamic, the class is dynamic. 11027 // It's impossible for a class to transitively contain itself by value, so 11028 // infinite recursion is impossible. 11029 for (auto *FD : RD->fields()) { 11030 bool SubContained; 11031 if (const CXXRecordDecl *ContainedRD = 11032 getContainedDynamicClass(FD->getType(), SubContained)) { 11033 IsContained = true; 11034 return ContainedRD; 11035 } 11036 } 11037 11038 return nullptr; 11039 } 11040 11041 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 11042 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 11043 if (Unary->getKind() == UETT_SizeOf) 11044 return Unary; 11045 return nullptr; 11046 } 11047 11048 /// If E is a sizeof expression, returns its argument expression, 11049 /// otherwise returns NULL. 11050 static const Expr *getSizeOfExprArg(const Expr *E) { 11051 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 11052 if (!SizeOf->isArgumentType()) 11053 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 11054 return nullptr; 11055 } 11056 11057 /// If E is a sizeof expression, returns its argument type. 
11058 static QualType getSizeOfArgType(const Expr *E) { 11059 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 11060 return SizeOf->getTypeOfArgument(); 11061 return QualType(); 11062 } 11063 11064 namespace { 11065 11066 struct SearchNonTrivialToInitializeField 11067 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 11068 using Super = 11069 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 11070 11071 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 11072 11073 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 11074 SourceLocation SL) { 11075 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 11076 asDerived().visitArray(PDIK, AT, SL); 11077 return; 11078 } 11079 11080 Super::visitWithKind(PDIK, FT, SL); 11081 } 11082 11083 void visitARCStrong(QualType FT, SourceLocation SL) { 11084 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 11085 } 11086 void visitARCWeak(QualType FT, SourceLocation SL) { 11087 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 11088 } 11089 void visitStruct(QualType FT, SourceLocation SL) { 11090 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 11091 visit(FD->getType(), FD->getLocation()); 11092 } 11093 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 11094 const ArrayType *AT, SourceLocation SL) { 11095 visit(getContext().getBaseElementType(AT), SL); 11096 } 11097 void visitTrivial(QualType FT, SourceLocation SL) {} 11098 11099 static void diag(QualType RT, const Expr *E, Sema &S) { 11100 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 11101 } 11102 11103 ASTContext &getContext() { return S.getASTContext(); } 11104 11105 const Expr *E; 11106 Sema &S; 11107 }; 11108 11109 struct SearchNonTrivialToCopyField 11110 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 11111 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 11112 11113 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 11114 11115 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 11116 SourceLocation SL) { 11117 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 11118 asDerived().visitArray(PCK, AT, SL); 11119 return; 11120 } 11121 11122 Super::visitWithKind(PCK, FT, SL); 11123 } 11124 11125 void visitARCStrong(QualType FT, SourceLocation SL) { 11126 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 11127 } 11128 void visitARCWeak(QualType FT, SourceLocation SL) { 11129 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 11130 } 11131 void visitStruct(QualType FT, SourceLocation SL) { 11132 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 11133 visit(FD->getType(), FD->getLocation()); 11134 } 11135 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 11136 SourceLocation SL) { 11137 visit(getContext().getBaseElementType(AT), SL); 11138 } 11139 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 11140 SourceLocation SL) {} 11141 void visitTrivial(QualType FT, SourceLocation SL) {} 11142 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 11143 11144 static void diag(QualType RT, const Expr *E, Sema &S) { 11145 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 11146 } 11147 11148 ASTContext &getContext() { return S.getASTContext(); } 11149 11150 const Expr *E; 11151 Sema &S; 11152 
}; 11153 11154 } 11155 11156 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 11157 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 11158 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 11159 11160 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 11161 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 11162 return false; 11163 11164 return doesExprLikelyComputeSize(BO->getLHS()) || 11165 doesExprLikelyComputeSize(BO->getRHS()); 11166 } 11167 11168 return getAsSizeOfExpr(SizeofExpr) != nullptr; 11169 } 11170 11171 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 11172 /// 11173 /// \code 11174 /// #define MACRO 0 11175 /// foo(MACRO); 11176 /// foo(0); 11177 /// \endcode 11178 /// 11179 /// This should return true for the first call to foo, but not for the second 11180 /// (regardless of whether foo is a macro or function). 11181 static bool isArgumentExpandedFromMacro(SourceManager &SM, 11182 SourceLocation CallLoc, 11183 SourceLocation ArgLoc) { 11184 if (!CallLoc.isMacroID()) 11185 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 11186 11187 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 11188 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 11189 } 11190 11191 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 11192 /// last two arguments transposed. 11193 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 11194 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 11195 return; 11196 11197 const Expr *SizeArg = 11198 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 11199 11200 auto isLiteralZero = [](const Expr *E) { 11201 return (isa<IntegerLiteral>(E) && 11202 cast<IntegerLiteral>(E)->getValue() == 0) || 11203 (isa<CharacterLiteral>(E) && 11204 cast<CharacterLiteral>(E)->getValue() == 0); 11205 }; 11206 11207 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 11208 SourceLocation CallLoc = Call->getRParenLoc(); 11209 SourceManager &SM = S.getSourceManager(); 11210 if (isLiteralZero(SizeArg) && 11211 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 11212 11213 SourceLocation DiagLoc = SizeArg->getExprLoc(); 11214 11215 // Some platforms #define bzero to __builtin_memset. See if this is the 11216 // case, and if so, emit a better diagnostic. 11217 if (BId == Builtin::BIbzero || 11218 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 11219 CallLoc, SM, S.getLangOpts()) == "bzero")) { 11220 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 11221 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 11222 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 11223 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 11224 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 11225 } 11226 return; 11227 } 11228 11229 // If the second argument to a memset is a sizeof expression and the third 11230 // isn't, this is also likely an error. This should catch 11231 // 'memset(buf, sizeof(buf), 0xff)'. 
11232 if (BId == Builtin::BImemset && 11233 doesExprLikelyComputeSize(Call->getArg(1)) && 11234 !doesExprLikelyComputeSize(Call->getArg(2))) { 11235 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 11236 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 11237 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 11238 return; 11239 } 11240 } 11241 11242 /// Check for dangerous or invalid arguments to memset(). 11243 /// 11244 /// This issues warnings on known problematic, dangerous or unspecified 11245 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 11246 /// function calls. 11247 /// 11248 /// \param Call The call expression to diagnose. 11249 void Sema::CheckMemaccessArguments(const CallExpr *Call, 11250 unsigned BId, 11251 IdentifierInfo *FnName) { 11252 assert(BId != 0); 11253 11254 // It is possible to have a non-standard definition of memset. Validate 11255 // we have enough arguments, and if not, abort further checking. 11256 unsigned ExpectedNumArgs = 11257 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 11258 if (Call->getNumArgs() < ExpectedNumArgs) 11259 return; 11260 11261 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 11262 BId == Builtin::BIstrndup ? 1 : 2); 11263 unsigned LenArg = 11264 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 11265 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 11266 11267 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 11268 Call->getBeginLoc(), Call->getRParenLoc())) 11269 return; 11270 11271 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 11272 CheckMemaccessSize(*this, BId, Call); 11273 11274 // We have special checking when the length is a sizeof expression. 11275 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 11276 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 11277 llvm::FoldingSetNodeID SizeOfArgID; 11278 11279 // Although widely used, 'bzero' is not a standard function. Be more strict 11280 // with the argument types before allowing diagnostics and only allow the 11281 // form bzero(ptr, sizeof(...)). 11282 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 11283 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 11284 return; 11285 11286 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 11287 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 11288 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 11289 11290 QualType DestTy = Dest->getType(); 11291 QualType PointeeTy; 11292 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 11293 PointeeTy = DestPtrTy->getPointeeType(); 11294 11295 // Never warn about void type pointers. This can be used to suppress 11296 // false positives. 11297 if (PointeeTy->isVoidType()) 11298 continue; 11299 11300 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 11301 // actually comparing the expressions for equality. Because computing the 11302 // expression IDs can be expensive, we only do this if the diagnostic is 11303 // enabled. 11304 if (SizeOfArg && 11305 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 11306 SizeOfArg->getExprLoc())) { 11307 // We only compute IDs for expressions if the warning is enabled, and 11308 // cache the sizeof arg's ID. 
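        // Profiling an expression encodes its structure into a
        // FoldingSetNodeID, so equal IDs below mean the sizeof operand and the
        // destination are the same expression (e.g. both 'p' in
        // 'memset(p, 0, sizeof(p))').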
11309 if (SizeOfArgID == llvm::FoldingSetNodeID()) 11310 SizeOfArg->Profile(SizeOfArgID, Context, true); 11311 llvm::FoldingSetNodeID DestID; 11312 Dest->Profile(DestID, Context, true); 11313 if (DestID == SizeOfArgID) { 11314 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 11315 // over sizeof(src) as well. 11316 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 11317 StringRef ReadableName = FnName->getName(); 11318 11319 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 11320 if (UnaryOp->getOpcode() == UO_AddrOf) 11321 ActionIdx = 1; // If its an address-of operator, just remove it. 11322 if (!PointeeTy->isIncompleteType() && 11323 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 11324 ActionIdx = 2; // If the pointee's size is sizeof(char), 11325 // suggest an explicit length. 11326 11327 // If the function is defined as a builtin macro, do not show macro 11328 // expansion. 11329 SourceLocation SL = SizeOfArg->getExprLoc(); 11330 SourceRange DSR = Dest->getSourceRange(); 11331 SourceRange SSR = SizeOfArg->getSourceRange(); 11332 SourceManager &SM = getSourceManager(); 11333 11334 if (SM.isMacroArgExpansion(SL)) { 11335 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 11336 SL = SM.getSpellingLoc(SL); 11337 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 11338 SM.getSpellingLoc(DSR.getEnd())); 11339 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 11340 SM.getSpellingLoc(SSR.getEnd())); 11341 } 11342 11343 DiagRuntimeBehavior(SL, SizeOfArg, 11344 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 11345 << ReadableName 11346 << PointeeTy 11347 << DestTy 11348 << DSR 11349 << SSR); 11350 DiagRuntimeBehavior(SL, SizeOfArg, 11351 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 11352 << ActionIdx 11353 << SSR); 11354 11355 break; 11356 } 11357 } 11358 11359 // Also check for cases where the sizeof argument is the exact same 11360 // type as the memory argument, and where it points to a user-defined 11361 // record type. 11362 if (SizeOfArgTy != QualType()) { 11363 if (PointeeTy->isRecordType() && 11364 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 11365 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 11366 PDiag(diag::warn_sizeof_pointer_type_memaccess) 11367 << FnName << SizeOfArgTy << ArgIdx 11368 << PointeeTy << Dest->getSourceRange() 11369 << LenExpr->getSourceRange()); 11370 break; 11371 } 11372 } 11373 } else if (DestTy->isArrayType()) { 11374 PointeeTy = DestTy; 11375 } 11376 11377 if (PointeeTy == QualType()) 11378 continue; 11379 11380 // Always complain about dynamic classes. 11381 bool IsContained; 11382 if (const CXXRecordDecl *ContainedRD = 11383 getContainedDynamicClass(PointeeTy, IsContained)) { 11384 11385 unsigned OperationType = 0; 11386 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 11387 // "overwritten" if we're warning about the destination for any call 11388 // but memcmp; otherwise a verb appropriate to the call. 11389 if (ArgIdx != 0 || IsCmp) { 11390 if (BId == Builtin::BImemcpy) 11391 OperationType = 1; 11392 else if(BId == Builtin::BImemmove) 11393 OperationType = 2; 11394 else if (IsCmp) 11395 OperationType = 3; 11396 } 11397 11398 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11399 PDiag(diag::warn_dyn_class_memaccess) 11400 << (IsCmp ? 
ArgIdx + 2 : ArgIdx) << FnName 11401 << IsContained << ContainedRD << OperationType 11402 << Call->getCallee()->getSourceRange()); 11403 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 11404 BId != Builtin::BImemset) 11405 DiagRuntimeBehavior( 11406 Dest->getExprLoc(), Dest, 11407 PDiag(diag::warn_arc_object_memaccess) 11408 << ArgIdx << FnName << PointeeTy 11409 << Call->getCallee()->getSourceRange()); 11410 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 11411 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 11412 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 11413 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11414 PDiag(diag::warn_cstruct_memaccess) 11415 << ArgIdx << FnName << PointeeTy << 0); 11416 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 11417 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 11418 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 11419 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11420 PDiag(diag::warn_cstruct_memaccess) 11421 << ArgIdx << FnName << PointeeTy << 1); 11422 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 11423 } else { 11424 continue; 11425 } 11426 } else 11427 continue; 11428 11429 DiagRuntimeBehavior( 11430 Dest->getExprLoc(), Dest, 11431 PDiag(diag::note_bad_memaccess_silence) 11432 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 11433 break; 11434 } 11435 } 11436 11437 // A little helper routine: ignore addition and subtraction of integer literals. 11438 // This intentionally does not ignore all integer constant expressions because 11439 // we don't want to remove sizeof(). 11440 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 11441 Ex = Ex->IgnoreParenCasts(); 11442 11443 while (true) { 11444 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 11445 if (!BO || !BO->isAdditiveOp()) 11446 break; 11447 11448 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 11449 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 11450 11451 if (isa<IntegerLiteral>(RHS)) 11452 Ex = LHS; 11453 else if (isa<IntegerLiteral>(LHS)) 11454 Ex = RHS; 11455 else 11456 break; 11457 } 11458 11459 return Ex; 11460 } 11461 11462 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 11463 ASTContext &Context) { 11464 // Only handle constant-sized or VLAs, but not flexible members. 11465 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11466 // Only issue the FIXIT for arrays of size > 1. 11467 if (CAT->getSize().getSExtValue() <= 1) 11468 return false; 11469 } else if (!Ty->isVariableArrayType()) { 11470 return false; 11471 } 11472 return true; 11473 } 11474 11475 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11476 // be the size of the source, instead of the destination. 
11477 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11478 IdentifierInfo *FnName) { 11479 11480 // Don't crash if the user has the wrong number of arguments 11481 unsigned NumArgs = Call->getNumArgs(); 11482 if ((NumArgs != 3) && (NumArgs != 4)) 11483 return; 11484 11485 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11486 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11487 const Expr *CompareWithSrc = nullptr; 11488 11489 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11490 Call->getBeginLoc(), Call->getRParenLoc())) 11491 return; 11492 11493 // Look for 'strlcpy(dst, x, sizeof(x))' 11494 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11495 CompareWithSrc = Ex; 11496 else { 11497 // Look for 'strlcpy(dst, x, strlen(x))' 11498 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11499 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11500 SizeCall->getNumArgs() == 1) 11501 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11502 } 11503 } 11504 11505 if (!CompareWithSrc) 11506 return; 11507 11508 // Determine if the argument to sizeof/strlen is equal to the source 11509 // argument. In principle there's all kinds of things you could do 11510 // here, for instance creating an == expression and evaluating it with 11511 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11512 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11513 if (!SrcArgDRE) 11514 return; 11515 11516 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11517 if (!CompareWithSrcDRE || 11518 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11519 return; 11520 11521 const Expr *OriginalSizeArg = Call->getArg(2); 11522 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11523 << OriginalSizeArg->getSourceRange() << FnName; 11524 11525 // Output a FIXIT hint if the destination is an array (rather than a 11526 // pointer to an array). This could be enhanced to handle some 11527 // pointers if we know the actual size, like if DstArg is 'array+2' 11528 // we could say 'sizeof(array)-2'. 11529 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 11530 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 11531 return; 11532 11533 SmallString<128> sizeString; 11534 llvm::raw_svector_ostream OS(sizeString); 11535 OS << "sizeof("; 11536 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11537 OS << ")"; 11538 11539 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 11540 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 11541 OS.str()); 11542 } 11543 11544 /// Check if two expressions refer to the same declaration. 11545 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 11546 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 11547 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 11548 return D1->getDecl() == D2->getDecl(); 11549 return false; 11550 } 11551 11552 static const Expr *getStrlenExprArg(const Expr *E) { 11553 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 11554 const FunctionDecl *FD = CE->getDirectCallee(); 11555 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 11556 return nullptr; 11557 return CE->getArg(0)->IgnoreParenCasts(); 11558 } 11559 return nullptr; 11560 } 11561 11562 // Warn on anti-patterns as the 'size' argument to strncat. 
11563 // The correct size argument should look like the following:
11564 //   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
11565 void Sema::CheckStrncatArguments(const CallExpr *CE,
11566                                  IdentifierInfo *FnName) {
11567   // Don't crash if the user has the wrong number of arguments.
11568   if (CE->getNumArgs() < 3)
11569     return;
11570   const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
11571   const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
11572   const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
11573 
11574   if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
11575                                      CE->getRParenLoc()))
11576     return;
11577 
11578   // Identify common expressions that are wrongly used as the size argument
11579   // to strncat and may lead to buffer overflows.
11580   unsigned PatternType = 0;
11581   if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
11582     // - sizeof(dst)
11583     if (referToTheSameDecl(SizeOfArg, DstArg))
11584       PatternType = 1;
11585     // - sizeof(src)
11586     else if (referToTheSameDecl(SizeOfArg, SrcArg))
11587       PatternType = 2;
11588   } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
11589     if (BE->getOpcode() == BO_Sub) {
11590       const Expr *L = BE->getLHS()->IgnoreParenCasts();
11591       const Expr *R = BE->getRHS()->IgnoreParenCasts();
11592       // - sizeof(dst) - strlen(dst)
11593       if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
11594           referToTheSameDecl(DstArg, getStrlenExprArg(R)))
11595         PatternType = 1;
11596       // - sizeof(src) - (anything)
11597       else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
11598         PatternType = 2;
11599     }
11600   }
11601 
11602   if (PatternType == 0)
11603     return;
11604 
11605   // Generate the diagnostic.
11606   SourceLocation SL = LenArg->getBeginLoc();
11607   SourceRange SR = LenArg->getSourceRange();
11608   SourceManager &SM = getSourceManager();
11609 
11610   // If the function is defined as a builtin macro, do not show macro expansion.
11611   if (SM.isMacroArgExpansion(SL)) {
11612     SL = SM.getSpellingLoc(SL);
11613     SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
11614                      SM.getSpellingLoc(SR.getEnd()));
11615   }
11616 
11617   // Check if the destination is an array (rather than a pointer to an array).
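  // Only array destinations get the 'sizeof(dst) - strlen(dst) - 1' fix-it
  // note below; for pointer destinations we just emit the warning.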
11618 QualType DstTy = DstArg->getType(); 11619 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 11620 Context); 11621 if (!isKnownSizeArray) { 11622 if (PatternType == 1) 11623 Diag(SL, diag::warn_strncat_wrong_size) << SR; 11624 else 11625 Diag(SL, diag::warn_strncat_src_size) << SR; 11626 return; 11627 } 11628 11629 if (PatternType == 1) 11630 Diag(SL, diag::warn_strncat_large_size) << SR; 11631 else 11632 Diag(SL, diag::warn_strncat_src_size) << SR; 11633 11634 SmallString<128> sizeString; 11635 llvm::raw_svector_ostream OS(sizeString); 11636 OS << "sizeof("; 11637 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11638 OS << ") - "; 11639 OS << "strlen("; 11640 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11641 OS << ") - 1"; 11642 11643 Diag(SL, diag::note_strncat_wrong_size) 11644 << FixItHint::CreateReplacement(SR, OS.str()); 11645 } 11646 11647 namespace { 11648 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 11649 const UnaryOperator *UnaryExpr, const Decl *D) { 11650 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 11651 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 11652 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 11653 return; 11654 } 11655 } 11656 11657 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 11658 const UnaryOperator *UnaryExpr) { 11659 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 11660 const Decl *D = Lvalue->getDecl(); 11661 if (isa<DeclaratorDecl>(D)) 11662 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 11663 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 11664 } 11665 11666 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 11667 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 11668 Lvalue->getMemberDecl()); 11669 } 11670 11671 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 11672 const UnaryOperator *UnaryExpr) { 11673 const auto *Lambda = dyn_cast<LambdaExpr>( 11674 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 11675 if (!Lambda) 11676 return; 11677 11678 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 11679 << CalleeName << 2 /*object: lambda expression*/; 11680 } 11681 11682 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 11683 const DeclRefExpr *Lvalue) { 11684 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 11685 if (Var == nullptr) 11686 return; 11687 11688 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 11689 << CalleeName << 0 /*object: */ << Var; 11690 } 11691 11692 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 11693 const CastExpr *Cast) { 11694 SmallString<128> SizeString; 11695 llvm::raw_svector_ostream OS(SizeString); 11696 11697 clang::CastKind Kind = Cast->getCastKind(); 11698 if (Kind == clang::CK_BitCast && 11699 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 11700 return; 11701 if (Kind == clang::CK_IntegralToPointer && 11702 !isa<IntegerLiteral>( 11703 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 11704 return; 11705 11706 switch (Cast->getCastKind()) { 11707 case clang::CK_BitCast: 11708 case clang::CK_IntegralToPointer: 11709 case clang::CK_FunctionToPointerDecay: 11710 OS << '\''; 11711 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 11712 OS << '\''; 11713 break; 11714 default: 11715 return; 11716 } 11717 11718 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
11719       << CalleeName << 0 /*object: */ << OS.str();
11720 }
11721 } // namespace
11722 
11723 /// Alerts the user that they are attempting to free a non-malloc'd object.
11724 void Sema::CheckFreeArguments(const CallExpr *E) {
11725   const std::string CalleeName =
11726       cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
11727 
11728   { // Prefer something that doesn't involve a cast to make things simpler.
11729     const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
11730     if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
11731       switch (UnaryExpr->getOpcode()) {
11732       case UnaryOperator::Opcode::UO_AddrOf:
11733         return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
11734       case UnaryOperator::Opcode::UO_Plus:
11735         return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
11736       default:
11737         break;
11738       }
11739 
11740     if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
11741       if (Lvalue->getType()->isArrayType())
11742         return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);
11743 
11744     if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
11745       Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
11746           << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
11747       return;
11748     }
11749 
11750     if (isa<BlockExpr>(Arg)) {
11751       Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
11752           << CalleeName << 1 /*object: block*/;
11753       return;
11754     }
11755   }
11756   // Maybe the cast was important; check after the other cases.
11757   if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
11758     return CheckFreeArgumentsCast(*this, CalleeName, Cast);
11759 }
11760 
11761 void
11762 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
11763                          SourceLocation ReturnLoc,
11764                          bool isObjCMethod,
11765                          const AttrVec *Attrs,
11766                          const FunctionDecl *FD) {
11767   // Check if the return value is null but should not be.
11768   if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
11769        (!isObjCMethod && isNonNullType(Context, lhsType))) &&
11770       CheckNonNullExpr(*this, RetValExp))
11771     Diag(ReturnLoc, diag::warn_null_ret)
11772         << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
11773 
11774   // C++11 [basic.stc.dynamic.allocation]p4:
11775   //   If an allocation function declared with a non-throwing
11776   //   exception-specification fails to allocate storage, it shall return
11777   //   a null pointer. Any other allocation function that fails to allocate
11778   //   storage shall indicate failure only by throwing an exception [...]
11779   if (FD) {
11780     OverloadedOperatorKind Op = FD->getOverloadedOperator();
11781     if (Op == OO_New || Op == OO_Array_New) {
11782       const FunctionProtoType *Proto
11783         = FD->getType()->castAs<FunctionProtoType>();
11784       if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
11785           CheckNonNullExpr(*this, RetValExp))
11786         Diag(ReturnLoc, diag::warn_operator_new_returns_null)
11787             << FD << getLangOpts().CPlusPlus11;
11788     }
11789   }
11790 
11791   // PPC MMA non-pointer types are not allowed as a return type. Checking the
11792   // type here prevents the user from using a PPC MMA type as a trailing return
11793   // type.
11794   if (Context.getTargetInfo().getTriple().isPPC64())
11795     CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
11796 }
11797 
11798 /// Check for comparisons of floating-point values using == and !=. Issue a
/// warning if the comparison is not likely to do what the programmer intended.
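/// For example, 'someFloat == 0.1' is flagged because the double constant 0.1
/// has no exact float equivalent, so the equality can never hold.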
11799 void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS, 11800 BinaryOperatorKind Opcode) { 11801 // Match and capture subexpressions such as "(float) X == 0.1". 11802 FloatingLiteral *FPLiteral; 11803 CastExpr *FPCast; 11804 auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) { 11805 FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens()); 11806 FPCast = dyn_cast<CastExpr>(R->IgnoreParens()); 11807 return FPLiteral && FPCast; 11808 }; 11809 11810 if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) { 11811 auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>(); 11812 auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>(); 11813 if (SourceTy && TargetTy && SourceTy->isFloatingPoint() && 11814 TargetTy->isFloatingPoint()) { 11815 bool Lossy; 11816 llvm::APFloat TargetC = FPLiteral->getValue(); 11817 TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)), 11818 llvm::APFloat::rmNearestTiesToEven, &Lossy); 11819 if (Lossy) { 11820 // If the literal cannot be represented in the source type, then a 11821 // check for == is always false and check for != is always true. 11822 Diag(Loc, diag::warn_float_compare_literal) 11823 << (Opcode == BO_EQ) << QualType(SourceTy, 0) 11824 << LHS->getSourceRange() << RHS->getSourceRange(); 11825 return; 11826 } 11827 } 11828 } 11829 11830 // Match a more general floating-point equality comparison (-Wfloat-equal). 11831 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 11832 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 11833 11834 // Special case: check for x == x (which is OK). 11835 // Do not emit warnings for such cases. 11836 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 11837 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 11838 if (DRL->getDecl() == DRR->getDecl()) 11839 return; 11840 11841 // Special case: check for comparisons against literals that can be exactly 11842 // represented by APFloat. In such cases, do not emit a warning. This 11843 // is a heuristic: often comparison against such literals are used to 11844 // detect if a value in a variable has not changed. This clearly can 11845 // lead to false negatives. 11846 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 11847 if (FLL->isExact()) 11848 return; 11849 } else 11850 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 11851 if (FLR->isExact()) 11852 return; 11853 11854 // Check for comparisons with builtin types. 11855 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 11856 if (CL->getBuiltinCallee()) 11857 return; 11858 11859 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 11860 if (CR->getBuiltinCallee()) 11861 return; 11862 11863 // Emit the diagnostic. 11864 Diag(Loc, diag::warn_floatingpoint_eq) 11865 << LHS->getSourceRange() << RHS->getSourceRange(); 11866 } 11867 11868 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 11869 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 11870 11871 namespace { 11872 11873 /// Structure recording the 'active' range of an integer-valued 11874 /// expression. 11875 struct IntRange { 11876 /// The number of bits active in the int. Note that this includes exactly one 11877 /// sign bit if !NonNegative. 11878 unsigned Width; 11879 11880 /// True if the int is known not to have negative values. 
If so, all leading 11881 /// bits before Width are known zero, otherwise they are known to be the 11882 /// same as the MSB within Width. 11883 bool NonNegative; 11884 11885 IntRange(unsigned Width, bool NonNegative) 11886 : Width(Width), NonNegative(NonNegative) {} 11887 11888 /// Number of bits excluding the sign bit. 11889 unsigned valueBits() const { 11890 return NonNegative ? Width : Width - 1; 11891 } 11892 11893 /// Returns the range of the bool type. 11894 static IntRange forBoolType() { 11895 return IntRange(1, true); 11896 } 11897 11898 /// Returns the range of an opaque value of the given integral type. 11899 static IntRange forValueOfType(ASTContext &C, QualType T) { 11900 return forValueOfCanonicalType(C, 11901 T->getCanonicalTypeInternal().getTypePtr()); 11902 } 11903 11904 /// Returns the range of an opaque value of a canonical integral type. 11905 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 11906 assert(T->isCanonicalUnqualified()); 11907 11908 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11909 T = VT->getElementType().getTypePtr(); 11910 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11911 T = CT->getElementType().getTypePtr(); 11912 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11913 T = AT->getValueType().getTypePtr(); 11914 11915 if (!C.getLangOpts().CPlusPlus) { 11916 // For enum types in C code, use the underlying datatype. 11917 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11918 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 11919 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 11920 // For enum types in C++, use the known bit width of the enumerators. 11921 EnumDecl *Enum = ET->getDecl(); 11922 // In C++11, enums can have a fixed underlying type. Use this type to 11923 // compute the range. 11924 if (Enum->isFixed()) { 11925 return IntRange(C.getIntWidth(QualType(T, 0)), 11926 !ET->isSignedIntegerOrEnumerationType()); 11927 } 11928 11929 unsigned NumPositive = Enum->getNumPositiveBits(); 11930 unsigned NumNegative = Enum->getNumNegativeBits(); 11931 11932 if (NumNegative == 0) 11933 return IntRange(NumPositive, true/*NonNegative*/); 11934 else 11935 return IntRange(std::max(NumPositive + 1, NumNegative), 11936 false/*NonNegative*/); 11937 } 11938 11939 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11940 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11941 11942 const BuiltinType *BT = cast<BuiltinType>(T); 11943 assert(BT->isInteger()); 11944 11945 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11946 } 11947 11948 /// Returns the "target" range of a canonical integral type, i.e. 11949 /// the range of values expressible in the type. 11950 /// 11951 /// This matches forValueOfCanonicalType except that enums have the 11952 /// full range of their type, not the range of their enumerators. 
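  /// For example, given 'enum E { A = 0, B = 3 }' with no fixed underlying
  /// type, forValueOfCanonicalType in C++ yields a 2-bit non-negative range,
  /// while this returns the full width of the underlying integer type
  /// (typically 32 bits).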
11953 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 11954 assert(T->isCanonicalUnqualified()); 11955 11956 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11957 T = VT->getElementType().getTypePtr(); 11958 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11959 T = CT->getElementType().getTypePtr(); 11960 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11961 T = AT->getValueType().getTypePtr(); 11962 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11963 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 11964 11965 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11966 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11967 11968 const BuiltinType *BT = cast<BuiltinType>(T); 11969 assert(BT->isInteger()); 11970 11971 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11972 } 11973 11974 /// Returns the supremum of two ranges: i.e. their conservative merge. 11975 static IntRange join(IntRange L, IntRange R) { 11976 bool Unsigned = L.NonNegative && R.NonNegative; 11977 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 11978 L.NonNegative && R.NonNegative); 11979 } 11980 11981 /// Return the range of a bitwise-AND of the two ranges. 11982 static IntRange bit_and(IntRange L, IntRange R) { 11983 unsigned Bits = std::max(L.Width, R.Width); 11984 bool NonNegative = false; 11985 if (L.NonNegative) { 11986 Bits = std::min(Bits, L.Width); 11987 NonNegative = true; 11988 } 11989 if (R.NonNegative) { 11990 Bits = std::min(Bits, R.Width); 11991 NonNegative = true; 11992 } 11993 return IntRange(Bits, NonNegative); 11994 } 11995 11996 /// Return the range of a sum of the two ranges. 11997 static IntRange sum(IntRange L, IntRange R) { 11998 bool Unsigned = L.NonNegative && R.NonNegative; 11999 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 12000 Unsigned); 12001 } 12002 12003 /// Return the range of a difference of the two ranges. 12004 static IntRange difference(IntRange L, IntRange R) { 12005 // We need a 1-bit-wider range if: 12006 // 1) LHS can be negative: least value can be reduced. 12007 // 2) RHS can be negative: greatest value can be increased. 12008 bool CanWiden = !L.NonNegative || !R.NonNegative; 12009 bool Unsigned = L.NonNegative && R.Width == 0; 12010 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 12011 !Unsigned, 12012 Unsigned); 12013 } 12014 12015 /// Return the range of a product of the two ranges. 12016 static IntRange product(IntRange L, IntRange R) { 12017 // If both LHS and RHS can be negative, we can form 12018 // -2^L * -2^R = 2^(L + R) 12019 // which requires L + R + 1 value bits to represent. 12020 bool CanWiden = !L.NonNegative && !R.NonNegative; 12021 bool Unsigned = L.NonNegative && R.NonNegative; 12022 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 12023 Unsigned); 12024 } 12025 12026 /// Return the range of a remainder operation between the two ranges. 12027 static IntRange rem(IntRange L, IntRange R) { 12028 // The result of a remainder can't be larger than the result of 12029 // either side. The sign of the result is the sign of the LHS. 
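    // (Since C99/C++11, '%' truncates toward zero, so the remainder takes the
    // sign of the dividend; hence only L's non-negativity matters here.)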
12030 bool Unsigned = L.NonNegative; 12031 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 12032 Unsigned); 12033 } 12034 }; 12035 12036 } // namespace 12037 12038 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 12039 unsigned MaxWidth) { 12040 if (value.isSigned() && value.isNegative()) 12041 return IntRange(value.getMinSignedBits(), false); 12042 12043 if (value.getBitWidth() > MaxWidth) 12044 value = value.trunc(MaxWidth); 12045 12046 // isNonNegative() just checks the sign bit without considering 12047 // signedness. 12048 return IntRange(value.getActiveBits(), true); 12049 } 12050 12051 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 12052 unsigned MaxWidth) { 12053 if (result.isInt()) 12054 return GetValueRange(C, result.getInt(), MaxWidth); 12055 12056 if (result.isVector()) { 12057 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 12058 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 12059 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 12060 R = IntRange::join(R, El); 12061 } 12062 return R; 12063 } 12064 12065 if (result.isComplexInt()) { 12066 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 12067 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 12068 return IntRange::join(R, I); 12069 } 12070 12071 // This can happen with lossless casts to intptr_t of "based" lvalues. 12072 // Assume it might use arbitrary bits. 12073 // FIXME: The only reason we need to pass the type in here is to get 12074 // the sign right on this one case. It would be nice if APValue 12075 // preserved this. 12076 assert(result.isLValue() || result.isAddrLabelDiff()); 12077 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 12078 } 12079 12080 static QualType GetExprType(const Expr *E) { 12081 QualType Ty = E->getType(); 12082 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 12083 Ty = AtomicRHS->getValueType(); 12084 return Ty; 12085 } 12086 12087 /// Pseudo-evaluate the given integer expression, estimating the 12088 /// range of values it might take. 12089 /// 12090 /// \param MaxWidth The width to which the value will be truncated. 12091 /// \param Approximate If \c true, return a likely range for the result: in 12092 /// particular, assume that arithmetic on narrower types doesn't leave 12093 /// those types. If \c false, return a range including all possible 12094 /// result values. 12095 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 12096 bool InConstantContext, bool Approximate) { 12097 E = E->IgnoreParens(); 12098 12099 // Try a full evaluation first. 12100 Expr::EvalResult result; 12101 if (E->EvaluateAsRValue(result, C, InConstantContext)) 12102 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 12103 12104 // I think we only want to look through implicit casts here; if the 12105 // user has an explicit widening cast, we should treat the value as 12106 // being of the new, wider type. 
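  // For instance (illustrative): the implicit promotion of 'c' to 'int' in
  // 'c + 1' (for a 'char' variable 'c') can be looked through, so 'c' still
  // contributes roughly an 8-bit range, whereas an explicit '(int)c' is
  // treated as having the full range of 'int'.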
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
    if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);

    IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));

    bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
                         CE->getCastKind() == CK_BooleanToSignedIntegral;

    // Assume that non-integer casts can span the full range of the type.
    if (!isIntegerCast)
      return OutputTypeRange;

    IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
                                     std::min(MaxWidth, OutputTypeRange.Width),
                                     InConstantContext, Approximate);

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    bool CondResult;
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C,
                          CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
                          MaxWidth, InConstantContext, Approximate);

    // Otherwise, conservatively merge.
    // GetExprRange requires an integer expression, but a throw expression
    // results in a void type.
    Expr *E = CO->getTrueExpr();
    IntRange L = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext,
                                    Approximate);
    E = CO->getFalseExpr();
    IntRange R = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext,
                                    Approximate);
    return IntRange::join(L, R);
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    IntRange (*Combine)(IntRange, IntRange) = IntRange::join;

    switch (BO->getOpcode()) {
    case BO_Cmp:
      llvm_unreachable("builtin <=> should have class type");

    // Boolean-valued operations are single-bit and positive.
    case BO_LAnd:
    case BO_LOr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
      return IntRange::forBoolType();

    // The type of the assignments is the type of the LHS, so the RHS
    // is not necessarily the same type.
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // TODO: bitfields?
      return IntRange::forValueOfType(C, GetExprType(E));

    // Simple assignments just pass through the RHS, which will have
    // been coerced to the LHS type.
    case BO_Assign:
      // TODO: bitfields?
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    // Operations with opaque sources are black-listed.
    case BO_PtrMemD:
    case BO_PtrMemI:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Bitwise-and uses the *infimum* of the two source ranges.
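    // For example (illustrative): in 'x & 0xFF' with a signed 32-bit 'x', the
    // constant contributes an 8-bit non-negative range, so bit_and narrows
    // the result to 8 non-negative bits.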
12200 case BO_And: 12201 case BO_AndAssign: 12202 Combine = IntRange::bit_and; 12203 break; 12204 12205 // Left shift gets black-listed based on a judgement call. 12206 case BO_Shl: 12207 // ...except that we want to treat '1 << (blah)' as logically 12208 // positive. It's an important idiom. 12209 if (IntegerLiteral *I 12210 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 12211 if (I->getValue() == 1) { 12212 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 12213 return IntRange(R.Width, /*NonNegative*/ true); 12214 } 12215 } 12216 LLVM_FALLTHROUGH; 12217 12218 case BO_ShlAssign: 12219 return IntRange::forValueOfType(C, GetExprType(E)); 12220 12221 // Right shift by a constant can narrow its left argument. 12222 case BO_Shr: 12223 case BO_ShrAssign: { 12224 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 12225 Approximate); 12226 12227 // If the shift amount is a positive constant, drop the width by 12228 // that much. 12229 if (Optional<llvm::APSInt> shift = 12230 BO->getRHS()->getIntegerConstantExpr(C)) { 12231 if (shift->isNonNegative()) { 12232 unsigned zext = shift->getZExtValue(); 12233 if (zext >= L.Width) 12234 L.Width = (L.NonNegative ? 0 : 1); 12235 else 12236 L.Width -= zext; 12237 } 12238 } 12239 12240 return L; 12241 } 12242 12243 // Comma acts as its right operand. 12244 case BO_Comma: 12245 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 12246 Approximate); 12247 12248 case BO_Add: 12249 if (!Approximate) 12250 Combine = IntRange::sum; 12251 break; 12252 12253 case BO_Sub: 12254 if (BO->getLHS()->getType()->isPointerType()) 12255 return IntRange::forValueOfType(C, GetExprType(E)); 12256 if (!Approximate) 12257 Combine = IntRange::difference; 12258 break; 12259 12260 case BO_Mul: 12261 if (!Approximate) 12262 Combine = IntRange::product; 12263 break; 12264 12265 // The width of a division result is mostly determined by the size 12266 // of the LHS. 12267 case BO_Div: { 12268 // Don't 'pre-truncate' the operands. 12269 unsigned opWidth = C.getIntWidth(GetExprType(E)); 12270 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 12271 Approximate); 12272 12273 // If the divisor is constant, use that. 12274 if (Optional<llvm::APSInt> divisor = 12275 BO->getRHS()->getIntegerConstantExpr(C)) { 12276 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 12277 if (log2 >= L.Width) 12278 L.Width = (L.NonNegative ? 0 : 1); 12279 else 12280 L.Width = std::min(L.Width - log2, MaxWidth); 12281 return L; 12282 } 12283 12284 // Otherwise, just use the LHS's width. 12285 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 12286 // could be -1. 12287 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 12288 Approximate); 12289 return IntRange(L.Width, L.NonNegative && R.NonNegative); 12290 } 12291 12292 case BO_Rem: 12293 Combine = IntRange::rem; 12294 break; 12295 12296 // The default behavior is okay for these. 12297 case BO_Xor: 12298 case BO_Or: 12299 break; 12300 } 12301 12302 // Combine the two ranges, but limit the result to the type in which we 12303 // performed the computation. 
12304 QualType T = GetExprType(E); 12305 unsigned opWidth = C.getIntWidth(T); 12306 IntRange L = 12307 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 12308 IntRange R = 12309 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 12310 IntRange C = Combine(L, R); 12311 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 12312 C.Width = std::min(C.Width, MaxWidth); 12313 return C; 12314 } 12315 12316 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 12317 switch (UO->getOpcode()) { 12318 // Boolean-valued operations are white-listed. 12319 case UO_LNot: 12320 return IntRange::forBoolType(); 12321 12322 // Operations with opaque sources are black-listed. 12323 case UO_Deref: 12324 case UO_AddrOf: // should be impossible 12325 return IntRange::forValueOfType(C, GetExprType(E)); 12326 12327 default: 12328 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 12329 Approximate); 12330 } 12331 } 12332 12333 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12334 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 12335 Approximate); 12336 12337 if (const auto *BitField = E->getSourceBitField()) 12338 return IntRange(BitField->getBitWidthValue(C), 12339 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 12340 12341 return IntRange::forValueOfType(C, GetExprType(E)); 12342 } 12343 12344 static IntRange GetExprRange(ASTContext &C, const Expr *E, 12345 bool InConstantContext, bool Approximate) { 12346 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 12347 Approximate); 12348 } 12349 12350 /// Checks whether the given value, which currently has the given 12351 /// source semantics, has the same value when coerced through the 12352 /// target semantics. 12353 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 12354 const llvm::fltSemantics &Src, 12355 const llvm::fltSemantics &Tgt) { 12356 llvm::APFloat truncated = value; 12357 12358 bool ignored; 12359 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 12360 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 12361 12362 return truncated.bitwiseIsEqual(value); 12363 } 12364 12365 /// Checks whether the given value, which currently has the given 12366 /// source semantics, has the same value when coerced through the 12367 /// target semantics. 12368 /// 12369 /// The value might be a vector of floats (or a complex number). 12370 static bool IsSameFloatAfterCast(const APValue &value, 12371 const llvm::fltSemantics &Src, 12372 const llvm::fltSemantics &Tgt) { 12373 if (value.isFloat()) 12374 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 12375 12376 if (value.isVector()) { 12377 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 12378 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 12379 return false; 12380 return true; 12381 } 12382 12383 assert(value.isComplexFloat()); 12384 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 12385 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 12386 } 12387 12388 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 12389 bool IsListInit = false); 12390 12391 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 12392 // Suppress cases where we are comparing against an enum constant. 
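  // E.g. (illustrative): 'x == MyEnumValue' or 'x <= SOME_LIMIT_MACRO' should
  // not be reported as tautological merely because the constant happens to
  // sit at the boundary of x's range.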
12393 if (const DeclRefExpr *DR = 12394 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 12395 if (isa<EnumConstantDecl>(DR->getDecl())) 12396 return true; 12397 12398 // Suppress cases where the value is expanded from a macro, unless that macro 12399 // is how a language represents a boolean literal. This is the case in both C 12400 // and Objective-C. 12401 SourceLocation BeginLoc = E->getBeginLoc(); 12402 if (BeginLoc.isMacroID()) { 12403 StringRef MacroName = Lexer::getImmediateMacroName( 12404 BeginLoc, S.getSourceManager(), S.getLangOpts()); 12405 return MacroName != "YES" && MacroName != "NO" && 12406 MacroName != "true" && MacroName != "false"; 12407 } 12408 12409 return false; 12410 } 12411 12412 static bool isKnownToHaveUnsignedValue(Expr *E) { 12413 return E->getType()->isIntegerType() && 12414 (!E->getType()->isSignedIntegerType() || 12415 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 12416 } 12417 12418 namespace { 12419 /// The promoted range of values of a type. In general this has the 12420 /// following structure: 12421 /// 12422 /// |-----------| . . . |-----------| 12423 /// ^ ^ ^ ^ 12424 /// Min HoleMin HoleMax Max 12425 /// 12426 /// ... where there is only a hole if a signed type is promoted to unsigned 12427 /// (in which case Min and Max are the smallest and largest representable 12428 /// values). 12429 struct PromotedRange { 12430 // Min, or HoleMax if there is a hole. 12431 llvm::APSInt PromotedMin; 12432 // Max, or HoleMin if there is a hole. 12433 llvm::APSInt PromotedMax; 12434 12435 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 12436 if (R.Width == 0) 12437 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 12438 else if (R.Width >= BitWidth && !Unsigned) { 12439 // Promotion made the type *narrower*. This happens when promoting 12440 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 12441 // Treat all values of 'signed int' as being in range for now. 12442 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 12443 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 12444 } else { 12445 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 12446 .extOrTrunc(BitWidth); 12447 PromotedMin.setIsUnsigned(Unsigned); 12448 12449 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 12450 .extOrTrunc(BitWidth); 12451 PromotedMax.setIsUnsigned(Unsigned); 12452 } 12453 } 12454 12455 // Determine whether this range is contiguous (has no hole). 12456 bool isContiguous() const { return PromotedMin <= PromotedMax; } 12457 12458 // Where a constant value is within the range. 
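  // Illustrative example (not from the original source): a 'signed char'
  // converted to 'unsigned int' covers [0, 0x7F] plus [0xFFFFFF80,
  // 0xFFFFFFFF], so a constant such as 0x80 lies in the hole and compares
  // not-equal to every value the promoted operand can take.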
12459 enum ComparisonResult { 12460 LT = 0x1, 12461 LE = 0x2, 12462 GT = 0x4, 12463 GE = 0x8, 12464 EQ = 0x10, 12465 NE = 0x20, 12466 InRangeFlag = 0x40, 12467 12468 Less = LE | LT | NE, 12469 Min = LE | InRangeFlag, 12470 InRange = InRangeFlag, 12471 Max = GE | InRangeFlag, 12472 Greater = GE | GT | NE, 12473 12474 OnlyValue = LE | GE | EQ | InRangeFlag, 12475 InHole = NE 12476 }; 12477 12478 ComparisonResult compare(const llvm::APSInt &Value) const { 12479 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 12480 Value.isUnsigned() == PromotedMin.isUnsigned()); 12481 if (!isContiguous()) { 12482 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 12483 if (Value.isMinValue()) return Min; 12484 if (Value.isMaxValue()) return Max; 12485 if (Value >= PromotedMin) return InRange; 12486 if (Value <= PromotedMax) return InRange; 12487 return InHole; 12488 } 12489 12490 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 12491 case -1: return Less; 12492 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 12493 case 1: 12494 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 12495 case -1: return InRange; 12496 case 0: return Max; 12497 case 1: return Greater; 12498 } 12499 } 12500 12501 llvm_unreachable("impossible compare result"); 12502 } 12503 12504 static llvm::Optional<StringRef> 12505 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 12506 if (Op == BO_Cmp) { 12507 ComparisonResult LTFlag = LT, GTFlag = GT; 12508 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 12509 12510 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 12511 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 12512 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 12513 return llvm::None; 12514 } 12515 12516 ComparisonResult TrueFlag, FalseFlag; 12517 if (Op == BO_EQ) { 12518 TrueFlag = EQ; 12519 FalseFlag = NE; 12520 } else if (Op == BO_NE) { 12521 TrueFlag = NE; 12522 FalseFlag = EQ; 12523 } else { 12524 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 12525 TrueFlag = LT; 12526 FalseFlag = GE; 12527 } else { 12528 TrueFlag = GT; 12529 FalseFlag = LE; 12530 } 12531 if (Op == BO_GE || Op == BO_LE) 12532 std::swap(TrueFlag, FalseFlag); 12533 } 12534 if (R & TrueFlag) 12535 return StringRef("true"); 12536 if (R & FalseFlag) 12537 return StringRef("false"); 12538 return llvm::None; 12539 } 12540 }; 12541 } 12542 12543 static bool HasEnumType(Expr *E) { 12544 // Strip off implicit integral promotions. 12545 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 12546 if (ICE->getCastKind() != CK_IntegralCast && 12547 ICE->getCastKind() != CK_NoOp) 12548 break; 12549 E = ICE->getSubExpr(); 12550 } 12551 12552 return E->getType()->isEnumeralType(); 12553 } 12554 12555 static int classifyConstantValue(Expr *Constant) { 12556 // The values of this enumeration are used in the diagnostics 12557 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 12558 enum ConstantValueKind { 12559 Miscellaneous = 0, 12560 LiteralTrue, 12561 LiteralFalse 12562 }; 12563 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 12564 return BL->getValue() ? 
ConstantValueKind::LiteralTrue
                              : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}

static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //  - If the constant is outside the range of representable values of
  //    the enumeration. In such a case, we should warn about the cast
  //    to enumeration type, not about the comparison.
  //  - If the constant is the maximum / minimum in-range value. For an
  //    enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  IntRange OtherValueRange = GetExprRange(
      S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);

  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where it's a typedef for a signed
  // char (namely, macOS). FIXME: IntRange::forValueOfType should do this.
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherTypeRange = OtherValueRange = IntRange::forBoolType();

  // Check if all values in the range of possible values of this expression
  // lead to the same comparison outcome.
  PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
                                        Value.isUnsigned());
  auto Cmp = OtherPromotedValueRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Also consider the range determined by the type alone. This allows us to
  // classify the warning under the proper diagnostic group.
  bool TautologicalTypeCompare = false;
  {
    PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
                                         Value.isUnsigned());
    auto TypeCmp = OtherPromotedTypeRange.compare(Value);
    if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
                                                       RhsConstant)) {
      TautologicalTypeCompare = true;
      Cmp = TypeCmp;
      Result = TypeResult;
    }
  }

  // Don't warn if the non-constant operand actually always evaluates to the
  // same value.
  if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator.
We don't want to diagnose 12644 // 12645 // some_long_value <= INT_MAX 12646 // 12647 // when sizeof(int) == sizeof(long). 12648 bool InRange = Cmp & PromotedRange::InRangeFlag; 12649 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 12650 return false; 12651 12652 // A comparison of an unsigned bit-field against 0 is really a type problem, 12653 // even though at the type level the bit-field might promote to 'signed int'. 12654 if (Other->refersToBitField() && InRange && Value == 0 && 12655 Other->getType()->isUnsignedIntegerOrEnumerationType()) 12656 TautologicalTypeCompare = true; 12657 12658 // If this is a comparison to an enum constant, include that 12659 // constant in the diagnostic. 12660 const EnumConstantDecl *ED = nullptr; 12661 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 12662 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 12663 12664 // Should be enough for uint128 (39 decimal digits) 12665 SmallString<64> PrettySourceValue; 12666 llvm::raw_svector_ostream OS(PrettySourceValue); 12667 if (ED) { 12668 OS << '\'' << *ED << "' (" << Value << ")"; 12669 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 12670 Constant->IgnoreParenImpCasts())) { 12671 OS << (BL->getValue() ? "YES" : "NO"); 12672 } else { 12673 OS << Value; 12674 } 12675 12676 if (!TautologicalTypeCompare) { 12677 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 12678 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 12679 << E->getOpcodeStr() << OS.str() << *Result 12680 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12681 return true; 12682 } 12683 12684 if (IsObjCSignedCharBool) { 12685 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12686 S.PDiag(diag::warn_tautological_compare_objc_bool) 12687 << OS.str() << *Result); 12688 return true; 12689 } 12690 12691 // FIXME: We use a somewhat different formatting for the in-range cases and 12692 // cases involving boolean values for historical reasons. We should pick a 12693 // consistent way of presenting these diagnostics. 12694 if (!InRange || Other->isKnownToHaveBooleanValue()) { 12695 12696 S.DiagRuntimeBehavior( 12697 E->getOperatorLoc(), E, 12698 S.PDiag(!InRange ? diag::warn_out_of_range_compare 12699 : diag::warn_tautological_bool_compare) 12700 << OS.str() << classifyConstantValue(Constant) << OtherT 12701 << OtherIsBooleanDespiteType << *Result 12702 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 12703 } else { 12704 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 12705 unsigned Diag = 12706 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 12707 ? (HasEnumType(OriginalOther) 12708 ? diag::warn_unsigned_enum_always_true_comparison 12709 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 12710 : diag::warn_unsigned_always_true_comparison) 12711 : diag::warn_tautological_constant_compare; 12712 12713 S.Diag(E->getOperatorLoc(), Diag) 12714 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 12715 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12716 } 12717 12718 return true; 12719 } 12720 12721 /// Analyze the operands of the given comparison. Implements the 12722 /// fallback case from AnalyzeComparison. 12723 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 12724 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12725 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12726 } 12727 12728 /// Implements -Wsign-compare. 
12729 /// 12730 /// \param E the binary operator to check for warnings 12731 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 12732 // The type the comparison is being performed in. 12733 QualType T = E->getLHS()->getType(); 12734 12735 // Only analyze comparison operators where both sides have been converted to 12736 // the same type. 12737 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 12738 return AnalyzeImpConvsInComparison(S, E); 12739 12740 // Don't analyze value-dependent comparisons directly. 12741 if (E->isValueDependent()) 12742 return AnalyzeImpConvsInComparison(S, E); 12743 12744 Expr *LHS = E->getLHS(); 12745 Expr *RHS = E->getRHS(); 12746 12747 if (T->isIntegralType(S.Context)) { 12748 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 12749 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 12750 12751 // We don't care about expressions whose result is a constant. 12752 if (RHSValue && LHSValue) 12753 return AnalyzeImpConvsInComparison(S, E); 12754 12755 // We only care about expressions where just one side is literal 12756 if ((bool)RHSValue ^ (bool)LHSValue) { 12757 // Is the constant on the RHS or LHS? 12758 const bool RhsConstant = (bool)RHSValue; 12759 Expr *Const = RhsConstant ? RHS : LHS; 12760 Expr *Other = RhsConstant ? LHS : RHS; 12761 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12762 12763 // Check whether an integer constant comparison results in a value 12764 // of 'true' or 'false'. 12765 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12766 return AnalyzeImpConvsInComparison(S, E); 12767 } 12768 } 12769 12770 if (!T->hasUnsignedIntegerRepresentation()) { 12771 // We don't do anything special if this isn't an unsigned integral 12772 // comparison: we're only interested in integral comparisons, and 12773 // signed comparisons only happen in cases we don't care to warn about. 12774 return AnalyzeImpConvsInComparison(S, E); 12775 } 12776 12777 LHS = LHS->IgnoreParenImpCasts(); 12778 RHS = RHS->IgnoreParenImpCasts(); 12779 12780 if (!S.getLangOpts().CPlusPlus) { 12781 // Avoid warning about comparison of integers with different signs when 12782 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12783 // the type of `E`. 12784 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12785 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12786 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12787 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12788 } 12789 12790 // Check to see if one of the (unmodified) operands is of different 12791 // signedness. 12792 Expr *signedOperand, *unsignedOperand; 12793 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12794 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12795 "unsigned comparison between two signed integer expressions?"); 12796 signedOperand = LHS; 12797 unsignedOperand = RHS; 12798 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12799 signedOperand = RHS; 12800 unsignedOperand = LHS; 12801 } else { 12802 return AnalyzeImpConvsInComparison(S, E); 12803 } 12804 12805 // Otherwise, calculate the effective range of the signed operand. 12806 IntRange signedRange = GetExprRange( 12807 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12808 12809 // Go ahead and analyze implicit conversions in the operands. Note 12810 // that we skip the implicit conversions on both sides. 
  AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
  AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());

  // If the signed range is non-negative, -Wsign-compare won't fire.
  if (signedRange.NonNegative)
    return;

  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with an overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (E->isEqualityOp()) {
    unsigned comparisonWidth = S.Context.getIntWidth(T);
    IntRange unsignedRange =
        GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
                     /*Approximate*/ true);

    // We should never be unable to prove that the unsigned operand is
    // non-negative.
    assert(unsignedRange.NonNegative && "unsigned range includes negative?");

    if (unsignedRange.Width < comparisonWidth)
      return;
  }

  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                        S.PDiag(diag::warn_mixed_sign_comparison)
                            << LHS->getType() << RHS->getType()
                            << LHS->getSourceRange() << RHS->getSourceRange());
}

/// Analyzes an attempt to assign the given value to a bitfield.
///
/// Returns true if there was something fishy about the attempt.
static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
                                      SourceLocation InitLoc) {
  assert(Bitfield->isBitField());
  if (Bitfield->isInvalidDecl())
    return false;

  // White-list bool bitfields.
  QualType BitfieldType = Bitfield->getType();
  if (BitfieldType->isBooleanType())
    return false;

  if (BitfieldType->isEnumeralType()) {
    EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
    // If the underlying enum type was not explicitly specified as an unsigned
    // type and the enum contains only positive values, MSVC++ will cause an
    // inconsistency by storing this as a signed type.
    if (S.getLangOpts().CPlusPlus11 &&
        !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
        BitfieldEnumDecl->getNumPositiveBits() > 0 &&
        BitfieldEnumDecl->getNumNegativeBits() == 0) {
      S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
          << BitfieldEnumDecl;
    }
  }

  if (Bitfield->getType()->isBooleanType())
    return false;

  // Ignore value- or type-dependent expressions.
  if (Bitfield->getBitWidth()->isValueDependent() ||
      Bitfield->getBitWidth()->isTypeDependent() ||
      Init->isValueDependent() ||
      Init->isTypeDependent())
    return false;

  Expr *OriginalInit = Init->IgnoreParenImpCasts();
  unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);

  Expr::EvalResult Result;
  if (!OriginalInit->EvaluateAsInt(Result, S.Context,
                                   Expr::SE_AllowSideEffects)) {
    // The RHS is not constant. If the RHS has an enum type, make sure the
    // bitfield is wide enough to hold all the values of the enum without
    // truncation.
    if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
      EnumDecl *ED = EnumTy->getDecl();
      bool SignedBitfield = BitfieldType->isSignedIntegerType();

      // Enum types are implicitly signed on Windows, so check if there are any
      // negative enumerators to see if the enum was intended to be signed or
      // not.
12896 bool SignedEnum = ED->getNumNegativeBits() > 0; 12897 12898 // Check for surprising sign changes when assigning enum values to a 12899 // bitfield of different signedness. If the bitfield is signed and we 12900 // have exactly the right number of bits to store this unsigned enum, 12901 // suggest changing the enum to an unsigned type. This typically happens 12902 // on Windows where unfixed enums always use an underlying type of 'int'. 12903 unsigned DiagID = 0; 12904 if (SignedEnum && !SignedBitfield) { 12905 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 12906 } else if (SignedBitfield && !SignedEnum && 12907 ED->getNumPositiveBits() == FieldWidth) { 12908 DiagID = diag::warn_signed_bitfield_enum_conversion; 12909 } 12910 12911 if (DiagID) { 12912 S.Diag(InitLoc, DiagID) << Bitfield << ED; 12913 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 12914 SourceRange TypeRange = 12915 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 12916 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 12917 << SignedEnum << TypeRange; 12918 } 12919 12920 // Compute the required bitwidth. If the enum has negative values, we need 12921 // one more bit than the normal number of positive bits to represent the 12922 // sign bit. 12923 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 12924 ED->getNumNegativeBits()) 12925 : ED->getNumPositiveBits(); 12926 12927 // Check the bitwidth. 12928 if (BitsNeeded > FieldWidth) { 12929 Expr *WidthExpr = Bitfield->getBitWidth(); 12930 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 12931 << Bitfield << ED; 12932 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 12933 << BitsNeeded << ED << WidthExpr->getSourceRange(); 12934 } 12935 } 12936 12937 return false; 12938 } 12939 12940 llvm::APSInt Value = Result.Val.getInt(); 12941 12942 unsigned OriginalWidth = Value.getBitWidth(); 12943 12944 if (!Value.isSigned() || Value.isNegative()) 12945 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 12946 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 12947 OriginalWidth = Value.getMinSignedBits(); 12948 12949 if (OriginalWidth <= FieldWidth) 12950 return false; 12951 12952 // Compute the value which the bitfield will contain. 12953 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 12954 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 12955 12956 // Check whether the stored value is equal to the original value. 12957 TruncatedValue = TruncatedValue.extend(OriginalWidth); 12958 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 12959 return false; 12960 12961 // Special-case bitfields of width 1: booleans are naturally 0/1, and 12962 // therefore don't strictly fit into a signed bitfield of width 1. 12963 if (FieldWidth == 1 && Value == 1) 12964 return false; 12965 12966 std::string PrettyValue = toString(Value, 10); 12967 std::string PrettyTrunc = toString(TruncatedValue, 10); 12968 12969 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 12970 << PrettyValue << PrettyTrunc << OriginalInit->getType() 12971 << Init->getSourceRange(); 12972 12973 return true; 12974 } 12975 12976 /// Analyze the given simple or compound assignment for warning-worthy 12977 /// operations. 12978 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 12979 // Just recurse on the LHS. 
12980 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12981 12982 // We want to recurse on the RHS as normal unless we're assigning to 12983 // a bitfield. 12984 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 12985 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 12986 E->getOperatorLoc())) { 12987 // Recurse, ignoring any implicit conversions on the RHS. 12988 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 12989 E->getOperatorLoc()); 12990 } 12991 } 12992 12993 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12994 12995 // Diagnose implicitly sequentially-consistent atomic assignment. 12996 if (E->getLHS()->getType()->isAtomicType()) 12997 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12998 } 12999 13000 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 13001 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 13002 SourceLocation CContext, unsigned diag, 13003 bool pruneControlFlow = false) { 13004 if (pruneControlFlow) { 13005 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13006 S.PDiag(diag) 13007 << SourceType << T << E->getSourceRange() 13008 << SourceRange(CContext)); 13009 return; 13010 } 13011 S.Diag(E->getExprLoc(), diag) 13012 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 13013 } 13014 13015 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 13016 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 13017 SourceLocation CContext, 13018 unsigned diag, bool pruneControlFlow = false) { 13019 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 13020 } 13021 13022 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 13023 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 13024 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 13025 } 13026 13027 static void adornObjCBoolConversionDiagWithTernaryFixit( 13028 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 13029 Expr *Ignored = SourceExpr->IgnoreImplicit(); 13030 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 13031 Ignored = OVE->getSourceExpr(); 13032 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 13033 isa<BinaryOperator>(Ignored) || 13034 isa<CXXOperatorCallExpr>(Ignored); 13035 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 13036 if (NeedsParens) 13037 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 13038 << FixItHint::CreateInsertion(EndLoc, ")"); 13039 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 13040 } 13041 13042 /// Diagnose an implicit cast from a floating point value to an integer value. 
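/// For example (illustrative): 'int i = 2.7;' is diagnosed because the value
/// changes from 2.7 to 2, while a constant that converts exactly, such as
/// 'int i = 2.0;', is not.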
13043 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 13044 SourceLocation CContext) { 13045 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 13046 const bool PruneWarnings = S.inTemplateInstantiation(); 13047 13048 Expr *InnerE = E->IgnoreParenImpCasts(); 13049 // We also want to warn on, e.g., "int i = -1.234" 13050 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 13051 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 13052 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 13053 13054 const bool IsLiteral = 13055 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 13056 13057 llvm::APFloat Value(0.0); 13058 bool IsConstant = 13059 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 13060 if (!IsConstant) { 13061 if (isObjCSignedCharBool(S, T)) { 13062 return adornObjCBoolConversionDiagWithTernaryFixit( 13063 S, E, 13064 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 13065 << E->getType()); 13066 } 13067 13068 return DiagnoseImpCast(S, E, T, CContext, 13069 diag::warn_impcast_float_integer, PruneWarnings); 13070 } 13071 13072 bool isExact = false; 13073 13074 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 13075 T->hasUnsignedIntegerRepresentation()); 13076 llvm::APFloat::opStatus Result = Value.convertToInteger( 13077 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 13078 13079 // FIXME: Force the precision of the source value down so we don't print 13080 // digits which are usually useless (we don't really care here if we 13081 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 13082 // would automatically print the shortest representation, but it's a bit 13083 // tricky to implement. 13084 SmallString<16> PrettySourceValue; 13085 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 13086 precision = (precision * 59 + 195) / 196; 13087 Value.toString(PrettySourceValue, precision); 13088 13089 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 13090 return adornObjCBoolConversionDiagWithTernaryFixit( 13091 S, E, 13092 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 13093 << PrettySourceValue); 13094 } 13095 13096 if (Result == llvm::APFloat::opOK && isExact) { 13097 if (IsLiteral) return; 13098 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 13099 PruneWarnings); 13100 } 13101 13102 // Conversion of a floating-point value to a non-bool integer where the 13103 // integral part cannot be represented by the integer type is undefined. 13104 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 13105 return DiagnoseImpCast( 13106 S, E, T, CContext, 13107 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 13108 : diag::warn_impcast_float_to_integer_out_of_range, 13109 PruneWarnings); 13110 13111 unsigned DiagID = 0; 13112 if (IsLiteral) { 13113 // Warn on floating point literal to integer. 13114 DiagID = diag::warn_impcast_literal_float_to_integer; 13115 } else if (IntegerValue == 0) { 13116 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 13117 return DiagnoseImpCast(S, E, T, CContext, 13118 diag::warn_impcast_float_integer, PruneWarnings); 13119 } 13120 // Warn on non-zero to zero conversion. 
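    // E.g. (illustrative): 'int i = 1.0 / 3.0;' folds to a non-zero constant
    // that truncates to 0, which is reported separately from the generic
    // float-to-integer warning.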
13121 DiagID = diag::warn_impcast_float_to_integer_zero; 13122 } else { 13123 if (IntegerValue.isUnsigned()) { 13124 if (!IntegerValue.isMaxValue()) { 13125 return DiagnoseImpCast(S, E, T, CContext, 13126 diag::warn_impcast_float_integer, PruneWarnings); 13127 } 13128 } else { // IntegerValue.isSigned() 13129 if (!IntegerValue.isMaxSignedValue() && 13130 !IntegerValue.isMinSignedValue()) { 13131 return DiagnoseImpCast(S, E, T, CContext, 13132 diag::warn_impcast_float_integer, PruneWarnings); 13133 } 13134 } 13135 // Warn on evaluatable floating point expression to integer conversion. 13136 DiagID = diag::warn_impcast_float_to_integer; 13137 } 13138 13139 SmallString<16> PrettyTargetValue; 13140 if (IsBool) 13141 PrettyTargetValue = Value.isZero() ? "false" : "true"; 13142 else 13143 IntegerValue.toString(PrettyTargetValue); 13144 13145 if (PruneWarnings) { 13146 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13147 S.PDiag(DiagID) 13148 << E->getType() << T.getUnqualifiedType() 13149 << PrettySourceValue << PrettyTargetValue 13150 << E->getSourceRange() << SourceRange(CContext)); 13151 } else { 13152 S.Diag(E->getExprLoc(), DiagID) 13153 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 13154 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 13155 } 13156 } 13157 13158 /// Analyze the given compound assignment for the possible losing of 13159 /// floating-point precision. 13160 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 13161 assert(isa<CompoundAssignOperator>(E) && 13162 "Must be compound assignment operation"); 13163 // Recurse on the LHS and RHS in here 13164 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 13165 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 13166 13167 if (E->getLHS()->getType()->isAtomicType()) 13168 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 13169 13170 // Now check the outermost expression 13171 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 13172 const auto *RBT = cast<CompoundAssignOperator>(E) 13173 ->getComputationResultType() 13174 ->getAs<BuiltinType>(); 13175 13176 // The below checks assume source is floating point. 13177 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 13178 13179 // If source is floating point but target is an integer. 13180 if (ResultBT->isInteger()) 13181 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 13182 E->getExprLoc(), diag::warn_impcast_float_integer); 13183 13184 if (!ResultBT->isFloatingPoint()) 13185 return; 13186 13187 // If both source and target are floating points, warn about losing precision. 13188 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13189 QualType(ResultBT, 0), QualType(RBT, 0)); 13190 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 13191 // warn about dropping FP rank. 
13192 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 13193 diag::warn_impcast_float_result_precision); 13194 } 13195 13196 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 13197 IntRange Range) { 13198 if (!Range.Width) return "0"; 13199 13200 llvm::APSInt ValueInRange = Value; 13201 ValueInRange.setIsSigned(!Range.NonNegative); 13202 ValueInRange = ValueInRange.trunc(Range.Width); 13203 return toString(ValueInRange, 10); 13204 } 13205 13206 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 13207 if (!isa<ImplicitCastExpr>(Ex)) 13208 return false; 13209 13210 Expr *InnerE = Ex->IgnoreParenImpCasts(); 13211 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 13212 const Type *Source = 13213 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 13214 if (Target->isDependentType()) 13215 return false; 13216 13217 const BuiltinType *FloatCandidateBT = 13218 dyn_cast<BuiltinType>(ToBool ? Source : Target); 13219 const Type *BoolCandidateType = ToBool ? Target : Source; 13220 13221 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 13222 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 13223 } 13224 13225 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 13226 SourceLocation CC) { 13227 unsigned NumArgs = TheCall->getNumArgs(); 13228 for (unsigned i = 0; i < NumArgs; ++i) { 13229 Expr *CurrA = TheCall->getArg(i); 13230 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 13231 continue; 13232 13233 bool IsSwapped = ((i > 0) && 13234 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 13235 IsSwapped |= ((i < (NumArgs - 1)) && 13236 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 13237 if (IsSwapped) { 13238 // Warn on this floating-point to bool conversion. 13239 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 13240 CurrA->getType(), CC, 13241 diag::warn_impcast_floating_point_to_bool); 13242 } 13243 } 13244 } 13245 13246 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 13247 SourceLocation CC) { 13248 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 13249 E->getExprLoc())) 13250 return; 13251 13252 // Don't warn on functions which have return type nullptr_t. 13253 if (isa<CallExpr>(E)) 13254 return; 13255 13256 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 13257 const Expr::NullPointerConstantKind NullKind = 13258 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 13259 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 13260 return; 13261 13262 // Return if target type is a safe conversion. 13263 if (T->isAnyPointerType() || T->isBlockPointerType() || 13264 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 13265 return; 13266 13267 SourceLocation Loc = E->getSourceRange().getBegin(); 13268 13269 // Venture through the macro stacks to get to the source of macro arguments. 13270 // The new location is a better location than the complete location that was 13271 // passed in. 13272 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 13273 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 13274 13275 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
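  // E.g. (illustrative): 'int fd = NULL;' (with NULL expanding to __null in
  // C++) is diagnosed as an implicit conversion of a null constant to an
  // integer, with a fix-it suggesting '0'.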
13276 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 13277 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 13278 Loc, S.SourceMgr, S.getLangOpts()); 13279 if (MacroName == "NULL") 13280 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 13281 } 13282 13283 // Only warn if the null and context location are in the same macro expansion. 13284 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 13285 return; 13286 13287 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 13288 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 13289 << FixItHint::CreateReplacement(Loc, 13290 S.getFixItZeroLiteralForType(T, Loc)); 13291 } 13292 13293 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13294 ObjCArrayLiteral *ArrayLiteral); 13295 13296 static void 13297 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 13298 ObjCDictionaryLiteral *DictionaryLiteral); 13299 13300 /// Check a single element within a collection literal against the 13301 /// target element type. 13302 static void checkObjCCollectionLiteralElement(Sema &S, 13303 QualType TargetElementType, 13304 Expr *Element, 13305 unsigned ElementKind) { 13306 // Skip a bitcast to 'id' or qualified 'id'. 13307 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 13308 if (ICE->getCastKind() == CK_BitCast && 13309 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 13310 Element = ICE->getSubExpr(); 13311 } 13312 13313 QualType ElementType = Element->getType(); 13314 ExprResult ElementResult(Element); 13315 if (ElementType->getAs<ObjCObjectPointerType>() && 13316 S.CheckSingleAssignmentConstraints(TargetElementType, 13317 ElementResult, 13318 false, false) 13319 != Sema::Compatible) { 13320 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 13321 << ElementType << ElementKind << TargetElementType 13322 << Element->getSourceRange(); 13323 } 13324 13325 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 13326 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 13327 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 13328 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 13329 } 13330 13331 /// Check an Objective-C array literal being converted to the given 13332 /// target type. 13333 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13334 ObjCArrayLiteral *ArrayLiteral) { 13335 if (!S.NSArrayDecl) 13336 return; 13337 13338 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13339 if (!TargetObjCPtr) 13340 return; 13341 13342 if (TargetObjCPtr->isUnspecialized() || 13343 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13344 != S.NSArrayDecl->getCanonicalDecl()) 13345 return; 13346 13347 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13348 if (TypeArgs.size() != 1) 13349 return; 13350 13351 QualType TargetElementType = TypeArgs[0]; 13352 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 13353 checkObjCCollectionLiteralElement(S, TargetElementType, 13354 ArrayLiteral->getElement(I), 13355 0); 13356 } 13357 } 13358 13359 /// Check an Objective-C dictionary literal being converted to the given 13360 /// target type. 
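/// For example (illustrative): initializing an
/// 'NSDictionary<NSString *, NSNumber *> *' from a literal whose values are
/// string objects reports each mismatched key or value element.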
static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral) {
  if (!S.NSDictionaryDecl)
    return;

  const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
  if (!TargetObjCPtr)
    return;

  if (TargetObjCPtr->isUnspecialized() ||
      TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
        != S.NSDictionaryDecl->getCanonicalDecl())
    return;

  auto TypeArgs = TargetObjCPtr->getTypeArgs();
  if (TypeArgs.size() != 2)
    return;

  QualType TargetKeyType = TypeArgs[0];
  QualType TargetObjectType = TypeArgs[1];
  for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
    auto Element = DictionaryLiteral->getKeyValueElement(I);
    checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
    checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
  }
}

// Helper function to filter out cases for constant width constant conversion.
// Don't warn on char array initialization or for non-decimal values.
static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
                                          SourceLocation CC) {
  // If initializing from a constant, and the constant starts with '0',
  // then it is a binary, octal, or hexadecimal. Allow these constants
  // to fill all the bits, even if there is a sign change.
  if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
    const char FirstLiteralCharacter =
        S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
    if (FirstLiteralCharacter == '0')
      return false;
  }

  // If the CC location points to a '{', and the type is char, then assume
  // it is an array initialization.
  if (CC.isValid() && T->isCharType()) {
    const char FirstContextCharacter =
        S.getSourceManager().getCharacterData(CC)[0];
    if (FirstContextCharacter == '{')
      return false;
  }

  return true;
}

static const IntegerLiteral *getIntegerLiteral(Expr *E) {
  const auto *IL = dyn_cast<IntegerLiteral>(E);
  if (!IL) {
    if (auto *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Minus)
        return dyn_cast<IntegerLiteral>(UO->getSubExpr());
    }
  }

  return IL;
}

static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
  E = E->IgnoreParenImpCasts();
  SourceLocation ExprLoc = E->getExprLoc();

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    BinaryOperator::Opcode Opc = BO->getOpcode();
    Expr::EvalResult Result;
    // Do not diagnose unsigned shifts.
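    // E.g. (illustrative): 'if (1 << 4)' is always true and 'if (0 << n)' is
    // always false, while a signed shift that cannot be folded is flagged as
    // a suspicious '<<' in a boolean context.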
    if (Opc == BO_Shl) {
      const auto *LHS = getIntegerLiteral(BO->getLHS());
      const auto *RHS = getIntegerLiteral(BO->getRHS());
      if (LHS && LHS->getValue() == 0)
        S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
      else if (!E->isValueDependent() && LHS && RHS &&
               RHS->getValue().isNonNegative() &&
               E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
        S.Diag(ExprLoc, diag::warn_left_shift_always)
            << (Result.Val.getInt() != 0);
      else if (E->getType()->isSignedIntegerType())
        S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
    }
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
    const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
    if (!LHS || !RHS)
      return;
    if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
        (RHS->getValue() == 0 || RHS->getValue() == 1))
      // Do not diagnose common idioms.
      return;
    if (LHS->getValue() != 0 && RHS->getValue() != 0)
      S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
  }
}

static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
                                    SourceLocation CC,
                                    bool *ICContext = nullptr,
                                    bool IsListInit = false) {
  if (E->isTypeDependent() || E->isValueDependent()) return;

  const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
  const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
  if (Source == Target) return;
  if (Target->isDependentType()) return;

  // If the conversion context location is invalid, don't complain. We also
  // don't want to emit a warning if the issue occurs from the expansion of
  // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
  // delay this check as long as possible. Once we detect we are in that
  // scenario, we just return.
  if (CC.isInvalid())
    return;

  if (Source->isAtomicType())
    S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);

  // Diagnose implicit casts to bool.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
    if (isa<StringLiteral>(E))
      // Warn on string literal to bool. Checks for string literals in
      // logical-AND expressions, for instance, assert(0 && "error here"),
      // are prevented by a check in AnalyzeImplicitConversions().
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_string_literal_to_bool);
    if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
        isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
      // This covers the literal expressions that evaluate to Objective-C
      // objects.
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_objective_c_literal_to_bool);
    }
    if (Source->isPointerType() || Source->canDecayToPointerType()) {
      // Warn on pointer to bool conversion that is always true.
      S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
                                     SourceRange(CC));
    }
  }

  // If we're converting a constant to an ObjC BOOL on a platform where BOOL
  // is a typedef for signed char (macOS), then that constant value has to be 1
  // or 0.
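  // E.g. (illustrative): on such a target, 'BOOL b = 2;' is diagnosed, with a
  // fix-it suggesting a '? YES : NO' rewrite of the source expression.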
13511 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13512 Expr::EvalResult Result; 13513 if (E->EvaluateAsInt(Result, S.getASTContext(), 13514 Expr::SE_AllowSideEffects)) { 13515 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13516 adornObjCBoolConversionDiagWithTernaryFixit( 13517 S, E, 13518 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13519 << toString(Result.Val.getInt(), 10)); 13520 } 13521 return; 13522 } 13523 } 13524 13525 // Check implicit casts from Objective-C collection literals to specialized 13526 // collection types, e.g., NSArray<NSString *> *. 13527 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13528 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13529 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13530 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13531 13532 // Strip vector types. 13533 if (isa<VectorType>(Source)) { 13534 if (Target->isVLSTBuiltinType() && 13535 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13536 QualType(Source, 0)) || 13537 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13538 QualType(Source, 0)))) 13539 return; 13540 13541 if (!isa<VectorType>(Target)) { 13542 if (S.SourceMgr.isInSystemMacro(CC)) 13543 return; 13544 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13545 } 13546 13547 // If the vector cast is cast between two vectors of the same size, it is 13548 // a bitcast, not a conversion. 13549 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13550 return; 13551 13552 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13553 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13554 } 13555 if (auto VecTy = dyn_cast<VectorType>(Target)) 13556 Target = VecTy->getElementType().getTypePtr(); 13557 13558 // Strip complex types. 13559 if (isa<ComplexType>(Source)) { 13560 if (!isa<ComplexType>(Target)) { 13561 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13562 return; 13563 13564 return DiagnoseImpCast(S, E, T, CC, 13565 S.getLangOpts().CPlusPlus 13566 ? diag::err_impcast_complex_scalar 13567 : diag::warn_impcast_complex_scalar); 13568 } 13569 13570 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13571 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13572 } 13573 13574 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13575 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13576 13577 // Strip SVE vector types 13578 if (SourceBT && SourceBT->isVLSTBuiltinType()) { 13579 // Need the original target type for vector type checks 13580 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr(); 13581 // Handle conversion from scalable to fixed when msve-vector-bits is 13582 // specified 13583 if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0), 13584 QualType(Source, 0)) || 13585 S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0), 13586 QualType(Source, 0))) 13587 return; 13588 13589 // If the vector cast is cast between two vectors of the same size, it is 13590 // a bitcast, not a conversion. 13591 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13592 return; 13593 13594 Source = SourceBT->getSveEltType(S.Context).getTypePtr(); 13595 } 13596 13597 if (TargetBT && TargetBT->isVLSTBuiltinType()) 13598 Target = TargetBT->getSveEltType(S.Context).getTypePtr(); 13599 13600 // If the source is floating point... 
13601 if (SourceBT && SourceBT->isFloatingPoint()) { 13602 // ...and the target is floating point... 13603 if (TargetBT && TargetBT->isFloatingPoint()) { 13604 // ...then warn if we're dropping FP rank. 13605 13606 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13607 QualType(SourceBT, 0), QualType(TargetBT, 0)); 13608 if (Order > 0) { 13609 // Don't warn about float constants that are precisely 13610 // representable in the target type. 13611 Expr::EvalResult result; 13612 if (E->EvaluateAsRValue(result, S.Context)) { 13613 // Value might be a float, a float vector, or a float complex. 13614 if (IsSameFloatAfterCast(result.Val, 13615 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 13616 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 13617 return; 13618 } 13619 13620 if (S.SourceMgr.isInSystemMacro(CC)) 13621 return; 13622 13623 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 13624 } 13625 // ... or possibly if we're increasing rank, too 13626 else if (Order < 0) { 13627 if (S.SourceMgr.isInSystemMacro(CC)) 13628 return; 13629 13630 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 13631 } 13632 return; 13633 } 13634 13635 // If the target is integral, always warn. 13636 if (TargetBT && TargetBT->isInteger()) { 13637 if (S.SourceMgr.isInSystemMacro(CC)) 13638 return; 13639 13640 DiagnoseFloatingImpCast(S, E, T, CC); 13641 } 13642 13643 // Detect the case where a call result is converted from floating-point to 13644 // to bool, and the final argument to the call is converted from bool, to 13645 // discover this typo: 13646 // 13647 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 13648 // 13649 // FIXME: This is an incredibly special case; is there some more general 13650 // way to detect this class of misplaced-parentheses bug? 13651 if (Target->isBooleanType() && isa<CallExpr>(E)) { 13652 // Check last argument of function call to see if it is an 13653 // implicit cast from a type matching the type the result 13654 // is being cast to. 13655 CallExpr *CEx = cast<CallExpr>(E); 13656 if (unsigned NumArgs = CEx->getNumArgs()) { 13657 Expr *LastA = CEx->getArg(NumArgs - 1); 13658 Expr *InnerE = LastA->IgnoreParenImpCasts(); 13659 if (isa<ImplicitCastExpr>(LastA) && 13660 InnerE->getType()->isBooleanType()) { 13661 // Warn on this floating-point to bool conversion 13662 DiagnoseImpCast(S, E, T, CC, 13663 diag::warn_impcast_floating_point_to_bool); 13664 } 13665 } 13666 } 13667 return; 13668 } 13669 13670 // Valid casts involving fixed point types should be accounted for here. 
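// For example, initializing a short _Accum from an integer constant that
// does not fit in its representable range can be diagnosed below with
// warn_impcast_fixed_point_range.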
13671 if (Source->isFixedPointType()) { 13672 if (Target->isUnsaturatedFixedPointType()) { 13673 Expr::EvalResult Result; 13674 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 13675 S.isConstantEvaluated())) { 13676 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 13677 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 13678 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 13679 if (Value > MaxVal || Value < MinVal) { 13680 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13681 S.PDiag(diag::warn_impcast_fixed_point_range) 13682 << Value.toString() << T 13683 << E->getSourceRange() 13684 << clang::SourceRange(CC)); 13685 return; 13686 } 13687 } 13688 } else if (Target->isIntegerType()) { 13689 Expr::EvalResult Result; 13690 if (!S.isConstantEvaluated() && 13691 E->EvaluateAsFixedPoint(Result, S.Context, 13692 Expr::SE_AllowSideEffects)) { 13693 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 13694 13695 bool Overflowed; 13696 llvm::APSInt IntResult = FXResult.convertToInt( 13697 S.Context.getIntWidth(T), 13698 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 13699 13700 if (Overflowed) { 13701 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13702 S.PDiag(diag::warn_impcast_fixed_point_range) 13703 << FXResult.toString() << T 13704 << E->getSourceRange() 13705 << clang::SourceRange(CC)); 13706 return; 13707 } 13708 } 13709 } 13710 } else if (Target->isUnsaturatedFixedPointType()) { 13711 if (Source->isIntegerType()) { 13712 Expr::EvalResult Result; 13713 if (!S.isConstantEvaluated() && 13714 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13715 llvm::APSInt Value = Result.Val.getInt(); 13716 13717 bool Overflowed; 13718 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13719 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13720 13721 if (Overflowed) { 13722 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13723 S.PDiag(diag::warn_impcast_fixed_point_range) 13724 << toString(Value, /*Radix=*/10) << T 13725 << E->getSourceRange() 13726 << clang::SourceRange(CC)); 13727 return; 13728 } 13729 } 13730 } 13731 } 13732 13733 // If we are casting an integer type to a floating point type without 13734 // initialization-list syntax, we might lose accuracy if the floating 13735 // point type has a narrower significand than the integer type. 13736 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13737 TargetBT->isFloatingType() && !IsListInit) { 13738 // Determine the number of precision bits in the source integer type. 13739 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13740 /*Approximate*/ true); 13741 unsigned int SourcePrecision = SourceRange.Width; 13742 13743 // Determine the number of precision bits in the 13744 // target floating point type. 13745 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13746 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13747 13748 if (SourcePrecision > 0 && TargetPrecision > 0 && 13749 SourcePrecision > TargetPrecision) { 13750 13751 if (Optional<llvm::APSInt> SourceInt = 13752 E->getIntegerConstantExpr(S.Context)) { 13753 // If the source integer is a constant, convert it to the target 13754 // floating point type. Issue a warning if the value changes 13755 // during the whole conversion. 
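// For example, "float f = 16777217;" cannot be represented exactly in a
// float's 24-bit significand, so the constant form of the diagnostic below
// reports both the original and the converted value.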
13756 llvm::APFloat TargetFloatValue( 13757 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13758 llvm::APFloat::opStatus ConversionStatus = 13759 TargetFloatValue.convertFromAPInt( 13760 *SourceInt, SourceBT->isSignedInteger(), 13761 llvm::APFloat::rmNearestTiesToEven); 13762 13763 if (ConversionStatus != llvm::APFloat::opOK) { 13764 SmallString<32> PrettySourceValue; 13765 SourceInt->toString(PrettySourceValue, 10); 13766 SmallString<32> PrettyTargetValue; 13767 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13768 13769 S.DiagRuntimeBehavior( 13770 E->getExprLoc(), E, 13771 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13772 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13773 << E->getSourceRange() << clang::SourceRange(CC)); 13774 } 13775 } else { 13776 // Otherwise, the implicit conversion may lose precision. 13777 DiagnoseImpCast(S, E, T, CC, 13778 diag::warn_impcast_integer_float_precision); 13779 } 13780 } 13781 } 13782 13783 DiagnoseNullConversion(S, E, T, CC); 13784 13785 S.DiscardMisalignedMemberAddress(Target, E); 13786 13787 if (Target->isBooleanType()) 13788 DiagnoseIntInBoolContext(S, E); 13789 13790 if (!Source->isIntegerType() || !Target->isIntegerType()) 13791 return; 13792 13793 // TODO: remove this early return once the false positives for constant->bool 13794 // in templates, macros, etc, are reduced or removed. 13795 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13796 return; 13797 13798 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13799 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13800 return adornObjCBoolConversionDiagWithTernaryFixit( 13801 S, E, 13802 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13803 << E->getType()); 13804 } 13805 13806 IntRange SourceTypeRange = 13807 IntRange::forTargetOfCanonicalType(S.Context, Source); 13808 IntRange LikelySourceRange = 13809 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13810 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13811 13812 if (LikelySourceRange.Width > TargetRange.Width) { 13813 // If the source is a constant, use a default-on diagnostic. 13814 // TODO: this should happen for bitfield stores, too. 13815 Expr::EvalResult Result; 13816 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13817 S.isConstantEvaluated())) { 13818 llvm::APSInt Value(32); 13819 Value = Result.Val.getInt(); 13820 13821 if (S.SourceMgr.isInSystemMacro(CC)) 13822 return; 13823 13824 std::string PrettySourceValue = toString(Value, 10); 13825 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13826 13827 S.DiagRuntimeBehavior( 13828 E->getExprLoc(), E, 13829 S.PDiag(diag::warn_impcast_integer_precision_constant) 13830 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13831 << E->getSourceRange() << SourceRange(CC)); 13832 return; 13833 } 13834 13835 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
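// For example, on typical 64-bit targets, assigning a 64-bit integer value
// to a 32-bit int is reported with the dedicated 64-to-32 diagnostic below
// so it can be controlled separately from the rest of -Wconversion.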
13836 if (S.SourceMgr.isInSystemMacro(CC)) 13837 return; 13838 13839 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 13840 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 13841 /* pruneControlFlow */ true); 13842 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 13843 } 13844 13845 if (TargetRange.Width > SourceTypeRange.Width) { 13846 if (auto *UO = dyn_cast<UnaryOperator>(E)) 13847 if (UO->getOpcode() == UO_Minus) 13848 if (Source->isUnsignedIntegerType()) { 13849 if (Target->isUnsignedIntegerType()) 13850 return DiagnoseImpCast(S, E, T, CC, 13851 diag::warn_impcast_high_order_zero_bits); 13852 if (Target->isSignedIntegerType()) 13853 return DiagnoseImpCast(S, E, T, CC, 13854 diag::warn_impcast_nonnegative_result); 13855 } 13856 } 13857 13858 if (TargetRange.Width == LikelySourceRange.Width && 13859 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 13860 Source->isSignedIntegerType()) { 13861 // Warn when doing a signed to signed conversion, warn if the positive 13862 // source value is exactly the width of the target type, which will 13863 // cause a negative value to be stored. 13864 13865 Expr::EvalResult Result; 13866 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 13867 !S.SourceMgr.isInSystemMacro(CC)) { 13868 llvm::APSInt Value = Result.Val.getInt(); 13869 if (isSameWidthConstantConversion(S, E, T, CC)) { 13870 std::string PrettySourceValue = toString(Value, 10); 13871 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13872 13873 S.DiagRuntimeBehavior( 13874 E->getExprLoc(), E, 13875 S.PDiag(diag::warn_impcast_integer_precision_constant) 13876 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13877 << E->getSourceRange() << SourceRange(CC)); 13878 return; 13879 } 13880 } 13881 13882 // Fall through for non-constants to give a sign conversion warning. 13883 } 13884 13885 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) && 13886 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 13887 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 13888 LikelySourceRange.Width == TargetRange.Width))) { 13889 if (S.SourceMgr.isInSystemMacro(CC)) 13890 return; 13891 13892 unsigned DiagID = diag::warn_impcast_integer_sign; 13893 13894 // Traditionally, gcc has warned about this under -Wsign-compare. 13895 // We also want to warn about it in -Wconversion. 13896 // So if -Wconversion is off, use a completely identical diagnostic 13897 // in the sign-compare group. 13898 // The conditional-checking code will 13899 if (ICContext) { 13900 DiagID = diag::warn_impcast_integer_sign_conditional; 13901 *ICContext = true; 13902 } 13903 13904 return DiagnoseImpCast(S, E, T, CC, DiagID); 13905 } 13906 13907 // Diagnose conversions between different enumeration types. 13908 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 13909 // type, to give us better diagnostics. 
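// For example, in C, assigning an enumerator or variable of one named
// enumeration to a variable of a different named enumeration is diagnosed
// below with warn_impcast_different_enum_types.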
13910 QualType SourceType = E->getType(); 13911 if (!S.getLangOpts().CPlusPlus) { 13912 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13913 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 13914 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 13915 SourceType = S.Context.getTypeDeclType(Enum); 13916 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 13917 } 13918 } 13919 13920 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 13921 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 13922 if (SourceEnum->getDecl()->hasNameForLinkage() && 13923 TargetEnum->getDecl()->hasNameForLinkage() && 13924 SourceEnum != TargetEnum) { 13925 if (S.SourceMgr.isInSystemMacro(CC)) 13926 return; 13927 13928 return DiagnoseImpCast(S, E, SourceType, T, CC, 13929 diag::warn_impcast_different_enum_types); 13930 } 13931 } 13932 13933 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13934 SourceLocation CC, QualType T); 13935 13936 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13937 SourceLocation CC, bool &ICContext) { 13938 E = E->IgnoreParenImpCasts(); 13939 13940 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13941 return CheckConditionalOperator(S, CO, CC, T); 13942 13943 AnalyzeImplicitConversions(S, E, CC); 13944 if (E->getType() != T) 13945 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13946 } 13947 13948 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13949 SourceLocation CC, QualType T) { 13950 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13951 13952 Expr *TrueExpr = E->getTrueExpr(); 13953 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13954 TrueExpr = BCO->getCommon(); 13955 13956 bool Suspicious = false; 13957 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13958 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13959 13960 if (T->isBooleanType()) 13961 DiagnoseIntInBoolContext(S, E); 13962 13963 // If -Wconversion would have warned about either of the candidates 13964 // for a signedness conversion to the context type... 13965 if (!Suspicious) return; 13966 13967 // ...but it's currently ignored... 13968 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13969 return; 13970 13971 // ...then check whether it would have warned about either of the 13972 // candidates for a signedness conversion to the condition type. 13973 if (E->getType() == T) return; 13974 13975 Suspicious = false; 13976 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13977 E->getType(), CC, &Suspicious); 13978 if (!Suspicious) 13979 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13980 E->getType(), CC, &Suspicious); 13981 } 13982 13983 /// Check conversion of given expression to boolean. 13984 /// Input argument E is a logical expression. 13985 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 13986 if (S.getLangOpts().Bool) 13987 return; 13988 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 13989 return; 13990 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 13991 } 13992 13993 namespace { 13994 struct AnalyzeImplicitConversionsWorkItem { 13995 Expr *E; 13996 SourceLocation CC; 13997 bool IsListInit; 13998 }; 13999 } 14000 14001 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 14002 /// that should be visited are added to WorkList. 
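/// Using an explicit work list instead of direct recursion keeps stack usage
/// bounded on deeply nested expressions.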
14003 static void AnalyzeImplicitConversions( 14004 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 14005 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 14006 Expr *OrigE = Item.E; 14007 SourceLocation CC = Item.CC; 14008 14009 QualType T = OrigE->getType(); 14010 Expr *E = OrigE->IgnoreParenImpCasts(); 14011 14012 // Propagate whether we are in a C++ list initialization expression. 14013 // If so, we do not issue warnings for implicit int-float conversion 14014 // precision loss, because C++11 narrowing already handles it. 14015 bool IsListInit = Item.IsListInit || 14016 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 14017 14018 if (E->isTypeDependent() || E->isValueDependent()) 14019 return; 14020 14021 Expr *SourceExpr = E; 14022 // Examine, but don't traverse into the source expression of an 14023 // OpaqueValueExpr, since it may have multiple parents and we don't want to 14024 // emit duplicate diagnostics. Its fine to examine the form or attempt to 14025 // evaluate it in the context of checking the specific conversion to T though. 14026 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 14027 if (auto *Src = OVE->getSourceExpr()) 14028 SourceExpr = Src; 14029 14030 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 14031 if (UO->getOpcode() == UO_Not && 14032 UO->getSubExpr()->isKnownToHaveBooleanValue()) 14033 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 14034 << OrigE->getSourceRange() << T->isBooleanType() 14035 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 14036 14037 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr)) 14038 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) && 14039 BO->getLHS()->isKnownToHaveBooleanValue() && 14040 BO->getRHS()->isKnownToHaveBooleanValue() && 14041 BO->getLHS()->HasSideEffects(S.Context) && 14042 BO->getRHS()->HasSideEffects(S.Context)) { 14043 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical) 14044 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange() 14045 << FixItHint::CreateReplacement( 14046 BO->getOperatorLoc(), 14047 (BO->getOpcode() == BO_And ? "&&" : "||")); 14048 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int); 14049 } 14050 14051 // For conditional operators, we analyze the arguments as if they 14052 // were being fed directly into the output. 14053 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 14054 CheckConditionalOperator(S, CO, CC, T); 14055 return; 14056 } 14057 14058 // Check implicit argument conversions for function calls. 14059 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 14060 CheckImplicitArgumentConversions(S, Call, CC); 14061 14062 // Go ahead and check any implicit conversions we might have skipped. 14063 // The non-canonical typecheck is just an optimization; 14064 // CheckImplicitConversion will filter out dead implicit conversions. 14065 if (SourceExpr->getType() != T) 14066 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 14067 14068 // Now continue drilling into this expression. 14069 14070 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 14071 // The bound subexpressions in a PseudoObjectExpr are not reachable 14072 // as transitive children. 14073 // FIXME: Use a more uniform representation for this. 14074 for (auto *SE : POE->semantics()) 14075 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 14076 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 14077 } 14078 14079 // Skip past explicit casts. 
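// Note that an explicit cast of an atomic value to any non-void type, e.g.
// "(int)a" for an _Atomic(int) "a", is still flagged below with the implicit
// seq_cst atomic diagnostic; a cast to void is exempt.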
14080 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 14081 E = CE->getSubExpr()->IgnoreParenImpCasts(); 14082 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 14083 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 14084 WorkList.push_back({E, CC, IsListInit}); 14085 return; 14086 } 14087 14088 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14089 // Do a somewhat different check with comparison operators. 14090 if (BO->isComparisonOp()) 14091 return AnalyzeComparison(S, BO); 14092 14093 // And with simple assignments. 14094 if (BO->getOpcode() == BO_Assign) 14095 return AnalyzeAssignment(S, BO); 14096 // And with compound assignments. 14097 if (BO->isAssignmentOp()) 14098 return AnalyzeCompoundAssignment(S, BO); 14099 } 14100 14101 // These break the otherwise-useful invariant below. Fortunately, 14102 // we don't really need to recurse into them, because any internal 14103 // expressions should have been analyzed already when they were 14104 // built into statements. 14105 if (isa<StmtExpr>(E)) return; 14106 14107 // Don't descend into unevaluated contexts. 14108 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 14109 14110 // Now just recurse over the expression's children. 14111 CC = E->getExprLoc(); 14112 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 14113 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 14114 for (Stmt *SubStmt : E->children()) { 14115 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 14116 if (!ChildExpr) 14117 continue; 14118 14119 if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E)) 14120 if (ChildExpr == CSE->getOperand()) 14121 // Do not recurse over a CoroutineSuspendExpr's operand. 14122 // The operand is also a subexpression of getCommonExpr(), and 14123 // recursing into it directly would produce duplicate diagnostics. 14124 continue; 14125 14126 if (IsLogicalAndOperator && 14127 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 14128 // Ignore checking string literals that are in logical and operators. 14129 // This is a common pattern for asserts. 14130 continue; 14131 WorkList.push_back({ChildExpr, CC, IsListInit}); 14132 } 14133 14134 if (BO && BO->isLogicalOp()) { 14135 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 14136 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14137 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14138 14139 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 14140 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14141 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14142 } 14143 14144 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 14145 if (U->getOpcode() == UO_LNot) { 14146 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 14147 } else if (U->getOpcode() != UO_AddrOf) { 14148 if (U->getSubExpr()->getType()->isAtomicType()) 14149 S.Diag(U->getSubExpr()->getBeginLoc(), 14150 diag::warn_atomic_implicit_seq_cst); 14151 } 14152 } 14153 } 14154 14155 /// AnalyzeImplicitConversions - Find and report any interesting 14156 /// implicit conversions in the given expression. There are a couple 14157 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
14158 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
14159 bool IsListInit/*= false*/) {
14160 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
14161 WorkList.push_back({OrigE, CC, IsListInit});
14162 while (!WorkList.empty())
14163 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
14164 }
14165
14166 /// Diagnose a non-integer type and check any valid implicit conversion to it.
14167 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
14168 // Taking into account implicit conversions,
14169 // allow any integer.
14170 if (!E->getType()->isIntegerType()) {
14171 S.Diag(E->getBeginLoc(),
14172 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
14173 return true;
14174 }
14175 // Potentially emit standard warnings for implicit conversions if enabled
14176 // using -Wconversion.
14177 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
14178 return false;
14179 }
14180
14181 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
14182 // Returns true when emitting a warning about taking the address of a reference.
14183 static bool CheckForReference(Sema &SemaRef, const Expr *E,
14184 const PartialDiagnostic &PD) {
14185 E = E->IgnoreParenImpCasts();
14186
14187 const FunctionDecl *FD = nullptr;
14188
14189 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
14190 if (!DRE->getDecl()->getType()->isReferenceType())
14191 return false;
14192 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
14193 if (!M->getMemberDecl()->getType()->isReferenceType())
14194 return false;
14195 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
14196 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
14197 return false;
14198 FD = Call->getDirectCallee();
14199 } else {
14200 return false;
14201 }
14202
14203 SemaRef.Diag(E->getExprLoc(), PD);
14204
14205 // If possible, point to location of function.
14206 if (FD) {
14207 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
14208 }
14209
14210 return true;
14211 }
14212
14213 // Returns true if the SourceLocation is expanded from any macro body.
14214 // Returns false if the SourceLocation is invalid, is not in a macro
14215 // expansion, or is expanded from a top-level macro argument.
14216 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
14217 if (Loc.isInvalid())
14218 return false;
14219
14220 while (Loc.isMacroID()) {
14221 if (SM.isMacroBodyExpansion(Loc))
14222 return true;
14223 Loc = SM.getImmediateMacroCallerLoc(Loc);
14224 }
14225
14226 return false;
14227 }
14228
14229 /// Diagnose pointers that are always non-null.
14230 /// \param E the expression containing the pointer
14231 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
14232 /// compared to a null pointer
14233 /// \param IsEqual True when the comparison is equal to a null pointer
14234 /// \param Range Extra SourceRange to highlight in the diagnostic
14235 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
14236 Expr::NullPointerConstantKind NullKind,
14237 bool IsEqual, SourceRange Range) {
14238 if (!E)
14239 return;
14240
14241 // Don't warn inside macros.
14242 if (E->getExprLoc().isMacroID()) { 14243 const SourceManager &SM = getSourceManager(); 14244 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 14245 IsInAnyMacroBody(SM, Range.getBegin())) 14246 return; 14247 } 14248 E = E->IgnoreImpCasts(); 14249 14250 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 14251 14252 if (isa<CXXThisExpr>(E)) { 14253 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 14254 : diag::warn_this_bool_conversion; 14255 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 14256 return; 14257 } 14258 14259 bool IsAddressOf = false; 14260 14261 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14262 if (UO->getOpcode() != UO_AddrOf) 14263 return; 14264 IsAddressOf = true; 14265 E = UO->getSubExpr(); 14266 } 14267 14268 if (IsAddressOf) { 14269 unsigned DiagID = IsCompare 14270 ? diag::warn_address_of_reference_null_compare 14271 : diag::warn_address_of_reference_bool_conversion; 14272 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 14273 << IsEqual; 14274 if (CheckForReference(*this, E, PD)) { 14275 return; 14276 } 14277 } 14278 14279 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 14280 bool IsParam = isa<NonNullAttr>(NonnullAttr); 14281 std::string Str; 14282 llvm::raw_string_ostream S(Str); 14283 E->printPretty(S, nullptr, getPrintingPolicy()); 14284 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 14285 : diag::warn_cast_nonnull_to_bool; 14286 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 14287 << E->getSourceRange() << Range << IsEqual; 14288 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 14289 }; 14290 14291 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 14292 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 14293 if (auto *Callee = Call->getDirectCallee()) { 14294 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 14295 ComplainAboutNonnullParamOrCall(A); 14296 return; 14297 } 14298 } 14299 } 14300 14301 // Expect to find a single Decl. Skip anything more complicated. 14302 ValueDecl *D = nullptr; 14303 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 14304 D = R->getDecl(); 14305 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 14306 D = M->getMemberDecl(); 14307 } 14308 14309 // Weak Decls can be null. 14310 if (!D || D->isWeak()) 14311 return; 14312 14313 // Check for parameter decl with nonnull attribute 14314 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 14315 if (getCurFunction() && 14316 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 14317 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 14318 ComplainAboutNonnullParamOrCall(A); 14319 return; 14320 } 14321 14322 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 14323 // Skip function template not specialized yet. 
14324 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 14325 return; 14326 auto ParamIter = llvm::find(FD->parameters(), PV); 14327 assert(ParamIter != FD->param_end()); 14328 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 14329 14330 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 14331 if (!NonNull->args_size()) { 14332 ComplainAboutNonnullParamOrCall(NonNull); 14333 return; 14334 } 14335 14336 for (const ParamIdx &ArgNo : NonNull->args()) { 14337 if (ArgNo.getASTIndex() == ParamNo) { 14338 ComplainAboutNonnullParamOrCall(NonNull); 14339 return; 14340 } 14341 } 14342 } 14343 } 14344 } 14345 } 14346 14347 QualType T = D->getType(); 14348 const bool IsArray = T->isArrayType(); 14349 const bool IsFunction = T->isFunctionType(); 14350 14351 // Address of function is used to silence the function warning. 14352 if (IsAddressOf && IsFunction) { 14353 return; 14354 } 14355 14356 // Found nothing. 14357 if (!IsAddressOf && !IsFunction && !IsArray) 14358 return; 14359 14360 // Pretty print the expression for the diagnostic. 14361 std::string Str; 14362 llvm::raw_string_ostream S(Str); 14363 E->printPretty(S, nullptr, getPrintingPolicy()); 14364 14365 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 14366 : diag::warn_impcast_pointer_to_bool; 14367 enum { 14368 AddressOf, 14369 FunctionPointer, 14370 ArrayPointer 14371 } DiagType; 14372 if (IsAddressOf) 14373 DiagType = AddressOf; 14374 else if (IsFunction) 14375 DiagType = FunctionPointer; 14376 else if (IsArray) 14377 DiagType = ArrayPointer; 14378 else 14379 llvm_unreachable("Could not determine diagnostic."); 14380 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 14381 << Range << IsEqual; 14382 14383 if (!IsFunction) 14384 return; 14385 14386 // Suggest '&' to silence the function warning. 14387 Diag(E->getExprLoc(), diag::note_function_warning_silence) 14388 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 14389 14390 // Check to see if '()' fixit should be emitted. 14391 QualType ReturnType; 14392 UnresolvedSet<4> NonTemplateOverloads; 14393 tryExprAsCall(*E, ReturnType, NonTemplateOverloads); 14394 if (ReturnType.isNull()) 14395 return; 14396 14397 if (IsCompare) { 14398 // There are two cases here. If there is null constant, the only suggest 14399 // for a pointer return type. If the null is 0, then suggest if the return 14400 // type is a pointer or an integer type. 14401 if (!ReturnType->isPointerType()) { 14402 if (NullKind == Expr::NPCK_ZeroExpression || 14403 NullKind == Expr::NPCK_ZeroLiteral) { 14404 if (!ReturnType->isIntegerType()) 14405 return; 14406 } else { 14407 return; 14408 } 14409 } 14410 } else { // !IsCompare 14411 // For function to bool, only suggest if the function pointer has bool 14412 // return type. 14413 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool)) 14414 return; 14415 } 14416 Diag(E->getExprLoc(), diag::note_function_to_function_call) 14417 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()"); 14418 } 14419 14420 /// Diagnoses "dangerous" implicit conversions within the given 14421 /// expression (which is a full expression). Implements -Wconversion 14422 /// and -Wsign-compare. 14423 /// 14424 /// \param CC the "context" location of the implicit conversion, i.e. 14425 /// the most location of the syntactic entity requiring the implicit 14426 /// conversion 14427 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) { 14428 // Don't diagnose in unevaluated contexts. 
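// (For example, no conversion warnings are issued for the unevaluated
// operand of sizeof or decltype.)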
14429 if (isUnevaluatedContext()) 14430 return; 14431 14432 // Don't diagnose for value- or type-dependent expressions. 14433 if (E->isTypeDependent() || E->isValueDependent()) 14434 return; 14435 14436 // Check for array bounds violations in cases where the check isn't triggered 14437 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 14438 // ArraySubscriptExpr is on the RHS of a variable initialization. 14439 CheckArrayAccess(E); 14440 14441 // This is not the right CC for (e.g.) a variable initialization. 14442 AnalyzeImplicitConversions(*this, E, CC); 14443 } 14444 14445 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 14446 /// Input argument E is a logical expression. 14447 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 14448 ::CheckBoolLikeConversion(*this, E, CC); 14449 } 14450 14451 /// Diagnose when expression is an integer constant expression and its evaluation 14452 /// results in integer overflow 14453 void Sema::CheckForIntOverflow (Expr *E) { 14454 // Use a work list to deal with nested struct initializers. 14455 SmallVector<Expr *, 2> Exprs(1, E); 14456 14457 do { 14458 Expr *OriginalE = Exprs.pop_back_val(); 14459 Expr *E = OriginalE->IgnoreParenCasts(); 14460 14461 if (isa<BinaryOperator>(E)) { 14462 E->EvaluateForOverflow(Context); 14463 continue; 14464 } 14465 14466 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 14467 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 14468 else if (isa<ObjCBoxedExpr>(OriginalE)) 14469 E->EvaluateForOverflow(Context); 14470 else if (auto Call = dyn_cast<CallExpr>(E)) 14471 Exprs.append(Call->arg_begin(), Call->arg_end()); 14472 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 14473 Exprs.append(Message->arg_begin(), Message->arg_end()); 14474 } while (!Exprs.empty()); 14475 } 14476 14477 namespace { 14478 14479 /// Visitor for expressions which looks for unsequenced operations on the 14480 /// same object. 14481 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 14482 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 14483 14484 /// A tree of sequenced regions within an expression. Two regions are 14485 /// unsequenced if one is an ancestor or a descendent of the other. When we 14486 /// finish processing an expression with sequencing, such as a comma 14487 /// expression, we fold its tree nodes into its parent, since they are 14488 /// unsequenced with respect to nodes we will visit later. 14489 class SequenceTree { 14490 struct Value { 14491 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 14492 unsigned Parent : 31; 14493 unsigned Merged : 1; 14494 }; 14495 SmallVector<Value, 8> Values; 14496 14497 public: 14498 /// A region within an expression which may be sequenced with respect 14499 /// to some other region. 14500 class Seq { 14501 friend class SequenceTree; 14502 14503 unsigned Index; 14504 14505 explicit Seq(unsigned N) : Index(N) {} 14506 14507 public: 14508 Seq() : Index(0) {} 14509 }; 14510 14511 SequenceTree() { Values.push_back(Value(0)); } 14512 Seq root() const { return Seq(0); } 14513 14514 /// Create a new sequence of operations, which is an unsequenced 14515 /// subset of \p Parent. This sequence of operations is sequenced with 14516 /// respect to other children of \p Parent. 14517 Seq allocate(Seq Parent) { 14518 Values.push_back(Value(Parent.Index)); 14519 return Seq(Values.size() - 1); 14520 } 14521 14522 /// Merge a sequence of operations into its parent. 
14523 void merge(Seq S) { 14524 Values[S.Index].Merged = true; 14525 } 14526 14527 /// Determine whether two operations are unsequenced. This operation 14528 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 14529 /// should have been merged into its parent as appropriate. 14530 bool isUnsequenced(Seq Cur, Seq Old) { 14531 unsigned C = representative(Cur.Index); 14532 unsigned Target = representative(Old.Index); 14533 while (C >= Target) { 14534 if (C == Target) 14535 return true; 14536 C = Values[C].Parent; 14537 } 14538 return false; 14539 } 14540 14541 private: 14542 /// Pick a representative for a sequence. 14543 unsigned representative(unsigned K) { 14544 if (Values[K].Merged) 14545 // Perform path compression as we go. 14546 return Values[K].Parent = representative(Values[K].Parent); 14547 return K; 14548 } 14549 }; 14550 14551 /// An object for which we can track unsequenced uses. 14552 using Object = const NamedDecl *; 14553 14554 /// Different flavors of object usage which we track. We only track the 14555 /// least-sequenced usage of each kind. 14556 enum UsageKind { 14557 /// A read of an object. Multiple unsequenced reads are OK. 14558 UK_Use, 14559 14560 /// A modification of an object which is sequenced before the value 14561 /// computation of the expression, such as ++n in C++. 14562 UK_ModAsValue, 14563 14564 /// A modification of an object which is not sequenced before the value 14565 /// computation of the expression, such as n++. 14566 UK_ModAsSideEffect, 14567 14568 UK_Count = UK_ModAsSideEffect + 1 14569 }; 14570 14571 /// Bundle together a sequencing region and the expression corresponding 14572 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 14573 struct Usage { 14574 const Expr *UsageExpr; 14575 SequenceTree::Seq Seq; 14576 14577 Usage() : UsageExpr(nullptr) {} 14578 }; 14579 14580 struct UsageInfo { 14581 Usage Uses[UK_Count]; 14582 14583 /// Have we issued a diagnostic for this object already? 14584 bool Diagnosed; 14585 14586 UsageInfo() : Diagnosed(false) {} 14587 }; 14588 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 14589 14590 Sema &SemaRef; 14591 14592 /// Sequenced regions within the expression. 14593 SequenceTree Tree; 14594 14595 /// Declaration modifications and references which we have seen. 14596 UsageInfoMap UsageMap; 14597 14598 /// The region we are currently within. 14599 SequenceTree::Seq Region; 14600 14601 /// Filled in with declarations which were modified as a side-effect 14602 /// (that is, post-increment operations). 14603 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 14604 14605 /// Expressions to check later. We defer checking these to reduce 14606 /// stack usage. 14607 SmallVectorImpl<const Expr *> &WorkList; 14608 14609 /// RAII object wrapping the visitation of a sequenced subexpression of an 14610 /// expression. At the end of this process, the side-effects of the evaluation 14611 /// become sequenced with respect to the value computation of the result, so 14612 /// we downgrade any UK_ModAsSideEffect within the evaluation to 14613 /// UK_ModAsValue. 
14614 struct SequencedSubexpression { 14615 SequencedSubexpression(SequenceChecker &Self) 14616 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 14617 Self.ModAsSideEffect = &ModAsSideEffect; 14618 } 14619 14620 ~SequencedSubexpression() { 14621 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 14622 // Add a new usage with usage kind UK_ModAsValue, and then restore 14623 // the previous usage with UK_ModAsSideEffect (thus clearing it if 14624 // the previous one was empty). 14625 UsageInfo &UI = Self.UsageMap[M.first]; 14626 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 14627 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 14628 SideEffectUsage = M.second; 14629 } 14630 Self.ModAsSideEffect = OldModAsSideEffect; 14631 } 14632 14633 SequenceChecker &Self; 14634 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 14635 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 14636 }; 14637 14638 /// RAII object wrapping the visitation of a subexpression which we might 14639 /// choose to evaluate as a constant. If any subexpression is evaluated and 14640 /// found to be non-constant, this allows us to suppress the evaluation of 14641 /// the outer expression. 14642 class EvaluationTracker { 14643 public: 14644 EvaluationTracker(SequenceChecker &Self) 14645 : Self(Self), Prev(Self.EvalTracker) { 14646 Self.EvalTracker = this; 14647 } 14648 14649 ~EvaluationTracker() { 14650 Self.EvalTracker = Prev; 14651 if (Prev) 14652 Prev->EvalOK &= EvalOK; 14653 } 14654 14655 bool evaluate(const Expr *E, bool &Result) { 14656 if (!EvalOK || E->isValueDependent()) 14657 return false; 14658 EvalOK = E->EvaluateAsBooleanCondition( 14659 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 14660 return EvalOK; 14661 } 14662 14663 private: 14664 SequenceChecker &Self; 14665 EvaluationTracker *Prev; 14666 bool EvalOK = true; 14667 } *EvalTracker = nullptr; 14668 14669 /// Find the object which is produced by the specified expression, 14670 /// if any. 14671 Object getObject(const Expr *E, bool Mod) const { 14672 E = E->IgnoreParenCasts(); 14673 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14674 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 14675 return getObject(UO->getSubExpr(), Mod); 14676 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14677 if (BO->getOpcode() == BO_Comma) 14678 return getObject(BO->getRHS(), Mod); 14679 if (Mod && BO->isAssignmentOp()) 14680 return getObject(BO->getLHS(), Mod); 14681 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 14682 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 14683 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 14684 return ME->getMemberDecl(); 14685 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 14686 // FIXME: If this is a reference, map through to its value. 14687 return DRE->getDecl(); 14688 return nullptr; 14689 } 14690 14691 /// Note that an object \p O was modified or used by an expression 14692 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 14693 /// the object \p O as obtained via the \p UsageMap. 14694 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 14695 // Get the old usage for the given object and usage kind. 
14696 Usage &U = UI.Uses[UK];
14697 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
14698 // If we have a modification as side effect and are in a sequenced
14699 // subexpression, save the old Usage so that we can restore it later
14700 // in SequencedSubexpression::~SequencedSubexpression.
14701 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
14702 ModAsSideEffect->push_back(std::make_pair(O, U));
14703 // Then record the new usage with the current sequencing region.
14704 U.UsageExpr = UsageExpr;
14705 U.Seq = Region;
14706 }
14707 }
14708
14709 /// Check whether a modification or use of an object \p O in an expression
14710 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
14711 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
14712 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
14713 /// usage, and false when we are checking for a mod-use unsequenced usage.
14714 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
14715 UsageKind OtherKind, bool IsModMod) {
14716 if (UI.Diagnosed)
14717 return;
14718
14719 const Usage &U = UI.Uses[OtherKind];
14720 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
14721 return;
14722
14723 const Expr *Mod = U.UsageExpr;
14724 const Expr *ModOrUse = UsageExpr;
14725 if (OtherKind == UK_Use)
14726 std::swap(Mod, ModOrUse);
14727
14728 SemaRef.DiagRuntimeBehavior(
14729 Mod->getExprLoc(), {Mod, ModOrUse},
14730 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
14731 : diag::warn_unsequenced_mod_use)
14732 << O << SourceRange(ModOrUse->getExprLoc()));
14733 UI.Diagnosed = true;
14734 }
14735
14736 // A note on note{Pre, Post}{Use, Mod}:
14737 //
14738 // (It helps to follow the algorithm with an expression such as
14739 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
14740 // operations before C++17 and both are well-defined in C++17).
14741 //
14742 // When visiting a node which uses/modifies an object we first call notePreUse
14743 // or notePreMod before visiting its sub-expression(s). At this point the
14744 // children of the current node have not yet been visited and so the eventual
14745 // uses/modifications resulting from the children of the current node have not
14746 // been recorded yet.
14747 //
14748 // We then visit the children of the current node. After that notePostUse or
14749 // notePostMod is called. These will 1) detect an unsequenced modification
14750 // as side effect (as in "k++ + k") and 2) add a new usage with the
14751 // appropriate usage kind.
14752 //
14753 // We also have to be careful because some operations sequence modifications as
14754 // side effects as well (for example: || or ,). To account for this we wrap
14755 // the visitation of such a sub-expression (for example: the LHS of || or ,)
14756 // with SequencedSubexpression. SequencedSubexpression is an RAII object
14757 // which records usages that are modifications as side effects, and then
14758 // downgrades them (or, more accurately, restores the previous usage which was a
14759 // modification as side effect) when exiting the scope of the sequenced
14760 // subexpression.
14761
14762 void notePreUse(Object O, const Expr *UseExpr) {
14763 UsageInfo &UI = UsageMap[O];
14764 // Uses conflict with other modifications.
14765 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 14766 } 14767 14768 void notePostUse(Object O, const Expr *UseExpr) { 14769 UsageInfo &UI = UsageMap[O]; 14770 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 14771 /*IsModMod=*/false); 14772 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 14773 } 14774 14775 void notePreMod(Object O, const Expr *ModExpr) { 14776 UsageInfo &UI = UsageMap[O]; 14777 // Modifications conflict with other modifications and with uses. 14778 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 14779 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 14780 } 14781 14782 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 14783 UsageInfo &UI = UsageMap[O]; 14784 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 14785 /*IsModMod=*/true); 14786 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 14787 } 14788 14789 public: 14790 SequenceChecker(Sema &S, const Expr *E, 14791 SmallVectorImpl<const Expr *> &WorkList) 14792 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 14793 Visit(E); 14794 // Silence a -Wunused-private-field since WorkList is now unused. 14795 // TODO: Evaluate if it can be used, and if not remove it. 14796 (void)this->WorkList; 14797 } 14798 14799 void VisitStmt(const Stmt *S) { 14800 // Skip all statements which aren't expressions for now. 14801 } 14802 14803 void VisitExpr(const Expr *E) { 14804 // By default, just recurse to evaluated subexpressions. 14805 Base::VisitStmt(E); 14806 } 14807 14808 void VisitCastExpr(const CastExpr *E) { 14809 Object O = Object(); 14810 if (E->getCastKind() == CK_LValueToRValue) 14811 O = getObject(E->getSubExpr(), false); 14812 14813 if (O) 14814 notePreUse(O, E); 14815 VisitExpr(E); 14816 if (O) 14817 notePostUse(O, E); 14818 } 14819 14820 void VisitSequencedExpressions(const Expr *SequencedBefore, 14821 const Expr *SequencedAfter) { 14822 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 14823 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 14824 SequenceTree::Seq OldRegion = Region; 14825 14826 { 14827 SequencedSubexpression SeqBefore(*this); 14828 Region = BeforeRegion; 14829 Visit(SequencedBefore); 14830 } 14831 14832 Region = AfterRegion; 14833 Visit(SequencedAfter); 14834 14835 Region = OldRegion; 14836 14837 Tree.merge(BeforeRegion); 14838 Tree.merge(AfterRegion); 14839 } 14840 14841 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 14842 // C++17 [expr.sub]p1: 14843 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 14844 // expression E1 is sequenced before the expression E2. 14845 if (SemaRef.getLangOpts().CPlusPlus17) 14846 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 14847 else { 14848 Visit(ASE->getLHS()); 14849 Visit(ASE->getRHS()); 14850 } 14851 } 14852 14853 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14854 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14855 void VisitBinPtrMem(const BinaryOperator *BO) { 14856 // C++17 [expr.mptr.oper]p4: 14857 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 14858 // the expression E1 is sequenced before the expression E2. 
14859 if (SemaRef.getLangOpts().CPlusPlus17) 14860 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14861 else { 14862 Visit(BO->getLHS()); 14863 Visit(BO->getRHS()); 14864 } 14865 } 14866 14867 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14868 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14869 void VisitBinShlShr(const BinaryOperator *BO) { 14870 // C++17 [expr.shift]p4: 14871 // The expression E1 is sequenced before the expression E2. 14872 if (SemaRef.getLangOpts().CPlusPlus17) 14873 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14874 else { 14875 Visit(BO->getLHS()); 14876 Visit(BO->getRHS()); 14877 } 14878 } 14879 14880 void VisitBinComma(const BinaryOperator *BO) { 14881 // C++11 [expr.comma]p1: 14882 // Every value computation and side effect associated with the left 14883 // expression is sequenced before every value computation and side 14884 // effect associated with the right expression. 14885 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14886 } 14887 14888 void VisitBinAssign(const BinaryOperator *BO) { 14889 SequenceTree::Seq RHSRegion; 14890 SequenceTree::Seq LHSRegion; 14891 if (SemaRef.getLangOpts().CPlusPlus17) { 14892 RHSRegion = Tree.allocate(Region); 14893 LHSRegion = Tree.allocate(Region); 14894 } else { 14895 RHSRegion = Region; 14896 LHSRegion = Region; 14897 } 14898 SequenceTree::Seq OldRegion = Region; 14899 14900 // C++11 [expr.ass]p1: 14901 // [...] the assignment is sequenced after the value computation 14902 // of the right and left operands, [...] 14903 // 14904 // so check it before inspecting the operands and update the 14905 // map afterwards. 14906 Object O = getObject(BO->getLHS(), /*Mod=*/true); 14907 if (O) 14908 notePreMod(O, BO); 14909 14910 if (SemaRef.getLangOpts().CPlusPlus17) { 14911 // C++17 [expr.ass]p1: 14912 // [...] The right operand is sequenced before the left operand. [...] 14913 { 14914 SequencedSubexpression SeqBefore(*this); 14915 Region = RHSRegion; 14916 Visit(BO->getRHS()); 14917 } 14918 14919 Region = LHSRegion; 14920 Visit(BO->getLHS()); 14921 14922 if (O && isa<CompoundAssignOperator>(BO)) 14923 notePostUse(O, BO); 14924 14925 } else { 14926 // C++11 does not specify any sequencing between the LHS and RHS. 14927 Region = LHSRegion; 14928 Visit(BO->getLHS()); 14929 14930 if (O && isa<CompoundAssignOperator>(BO)) 14931 notePostUse(O, BO); 14932 14933 Region = RHSRegion; 14934 Visit(BO->getRHS()); 14935 } 14936 14937 // C++11 [expr.ass]p1: 14938 // the assignment is sequenced [...] before the value computation of the 14939 // assignment expression. 14940 // C11 6.5.16/3 has no such rule. 14941 Region = OldRegion; 14942 if (O) 14943 notePostMod(O, BO, 14944 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 14945 : UK_ModAsSideEffect); 14946 if (SemaRef.getLangOpts().CPlusPlus17) { 14947 Tree.merge(RHSRegion); 14948 Tree.merge(LHSRegion); 14949 } 14950 } 14951 14952 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 14953 VisitBinAssign(CAO); 14954 } 14955 14956 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14957 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14958 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 14959 Object O = getObject(UO->getSubExpr(), true); 14960 if (!O) 14961 return VisitExpr(UO); 14962 14963 notePreMod(O, UO); 14964 Visit(UO->getSubExpr()); 14965 // C++11 [expr.pre.incr]p1: 14966 // the expression ++x is equivalent to x+=1 14967 notePostMod(O, UO, 14968 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 14969 : UK_ModAsSideEffect); 14970 } 14971 14972 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14973 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14974 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 14975 Object O = getObject(UO->getSubExpr(), true); 14976 if (!O) 14977 return VisitExpr(UO); 14978 14979 notePreMod(O, UO); 14980 Visit(UO->getSubExpr()); 14981 notePostMod(O, UO, UK_ModAsSideEffect); 14982 } 14983 14984 void VisitBinLOr(const BinaryOperator *BO) { 14985 // C++11 [expr.log.or]p2: 14986 // If the second expression is evaluated, every value computation and 14987 // side effect associated with the first expression is sequenced before 14988 // every value computation and side effect associated with the 14989 // second expression. 14990 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14991 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14992 SequenceTree::Seq OldRegion = Region; 14993 14994 EvaluationTracker Eval(*this); 14995 { 14996 SequencedSubexpression Sequenced(*this); 14997 Region = LHSRegion; 14998 Visit(BO->getLHS()); 14999 } 15000 15001 // C++11 [expr.log.or]p1: 15002 // [...] the second operand is not evaluated if the first operand 15003 // evaluates to true. 15004 bool EvalResult = false; 15005 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 15006 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 15007 if (ShouldVisitRHS) { 15008 Region = RHSRegion; 15009 Visit(BO->getRHS()); 15010 } 15011 15012 Region = OldRegion; 15013 Tree.merge(LHSRegion); 15014 Tree.merge(RHSRegion); 15015 } 15016 15017 void VisitBinLAnd(const BinaryOperator *BO) { 15018 // C++11 [expr.log.and]p2: 15019 // If the second expression is evaluated, every value computation and 15020 // side effect associated with the first expression is sequenced before 15021 // every value computation and side effect associated with the 15022 // second expression. 15023 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 15024 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 15025 SequenceTree::Seq OldRegion = Region; 15026 15027 EvaluationTracker Eval(*this); 15028 { 15029 SequencedSubexpression Sequenced(*this); 15030 Region = LHSRegion; 15031 Visit(BO->getLHS()); 15032 } 15033 15034 // C++11 [expr.log.and]p1: 15035 // [...] the second operand is not evaluated if the first operand is false. 
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
    // C++11 [expr.cond]p1:
    //   [...] Every value computation and side effect associated with the first
    //   expression is sequenced before every value computation and side effect
    //   associated with the second or third expression.
    SequenceTree::Seq ConditionRegion = Tree.allocate(Region);

    // No sequencing is specified between the true and false expression.
    // However, since exactly one of the two is going to be evaluated we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y += 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expression with
    // SequencedSubexpression because we don't want to downgrade modifications
    // as side effects in the true and false expressions after the visitation
    // is done (for example in the expression "(x ? y++ : y++) + y" we should
    // not warn between the two "y++", but we should warn between the "y++"
    // and the "y").
    SequenceTree::Seq TrueRegion = Tree.allocate(Region);
    SequenceTree::Seq FalseRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = ConditionRegion;
      Visit(CO->getCond());
    }

    // C++11 [expr.cond]p1:
    //   [...] The first expression is contextually converted to bool (Clause 4).
    //   It is evaluated and if it is true, the result of the conditional
    //   expression is the value of the second expression, otherwise that of the
    //   third expression. Only one of the second and third expressions is
    //   evaluated. [...]
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
    bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
    bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitTrueExpr) {
      Region = TrueRegion;
      Visit(CO->getTrueExpr());
    }
    if (ShouldVisitFalseExpr) {
      Region = FalseRegion;
      Visit(CO->getFalseExpr());
    }

    Region = OldRegion;
    Tree.merge(ConditionRegion);
    Tree.merge(TrueRegion);
    Tree.merge(FalseRegion);
  }

  void VisitCallExpr(const CallExpr *CE) {
    // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.

    if (CE->isUnevaluatedBuiltinCall(Context))
      return;

    // C++11 [intro.execution]p15:
    //   When calling a function [...], every value computation and side effect
    //   associated with any argument expression, or with the postfix expression
    //   designating the called function, is sequenced before execution of every
    //   expression or statement in the body of the function [and thus before
    //   the value computation of its result].
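    // For example, in "f(i++)" the increment is sequenced before the body of
    // "f" runs, and its side effect is sequenced before the value computation
    // of the call itself, which is what the SequencedSubexpression below
    // models.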
15118 SequencedSubexpression Sequenced(*this); 15119 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 15120 // C++17 [expr.call]p5 15121 // The postfix-expression is sequenced before each expression in the 15122 // expression-list and any default argument. [...] 15123 SequenceTree::Seq CalleeRegion; 15124 SequenceTree::Seq OtherRegion; 15125 if (SemaRef.getLangOpts().CPlusPlus17) { 15126 CalleeRegion = Tree.allocate(Region); 15127 OtherRegion = Tree.allocate(Region); 15128 } else { 15129 CalleeRegion = Region; 15130 OtherRegion = Region; 15131 } 15132 SequenceTree::Seq OldRegion = Region; 15133 15134 // Visit the callee expression first. 15135 Region = CalleeRegion; 15136 if (SemaRef.getLangOpts().CPlusPlus17) { 15137 SequencedSubexpression Sequenced(*this); 15138 Visit(CE->getCallee()); 15139 } else { 15140 Visit(CE->getCallee()); 15141 } 15142 15143 // Then visit the argument expressions. 15144 Region = OtherRegion; 15145 for (const Expr *Argument : CE->arguments()) 15146 Visit(Argument); 15147 15148 Region = OldRegion; 15149 if (SemaRef.getLangOpts().CPlusPlus17) { 15150 Tree.merge(CalleeRegion); 15151 Tree.merge(OtherRegion); 15152 } 15153 }); 15154 } 15155 15156 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 15157 // C++17 [over.match.oper]p2: 15158 // [...] the operator notation is first transformed to the equivalent 15159 // function-call notation as summarized in Table 12 (where @ denotes one 15160 // of the operators covered in the specified subclause). However, the 15161 // operands are sequenced in the order prescribed for the built-in 15162 // operator (Clause 8). 15163 // 15164 // From the above only overloaded binary operators and overloaded call 15165 // operators have sequencing rules in C++17 that we need to handle 15166 // separately. 15167 if (!SemaRef.getLangOpts().CPlusPlus17 || 15168 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 15169 return VisitCallExpr(CXXOCE); 15170 15171 enum { 15172 NoSequencing, 15173 LHSBeforeRHS, 15174 RHSBeforeLHS, 15175 LHSBeforeRest 15176 } SequencingKind; 15177 switch (CXXOCE->getOperator()) { 15178 case OO_Equal: 15179 case OO_PlusEqual: 15180 case OO_MinusEqual: 15181 case OO_StarEqual: 15182 case OO_SlashEqual: 15183 case OO_PercentEqual: 15184 case OO_CaretEqual: 15185 case OO_AmpEqual: 15186 case OO_PipeEqual: 15187 case OO_LessLessEqual: 15188 case OO_GreaterGreaterEqual: 15189 SequencingKind = RHSBeforeLHS; 15190 break; 15191 15192 case OO_LessLess: 15193 case OO_GreaterGreater: 15194 case OO_AmpAmp: 15195 case OO_PipePipe: 15196 case OO_Comma: 15197 case OO_ArrowStar: 15198 case OO_Subscript: 15199 SequencingKind = LHSBeforeRHS; 15200 break; 15201 15202 case OO_Call: 15203 SequencingKind = LHSBeforeRest; 15204 break; 15205 15206 default: 15207 SequencingKind = NoSequencing; 15208 break; 15209 } 15210 15211 if (SequencingKind == NoSequencing) 15212 return VisitCallExpr(CXXOCE); 15213 15214 // This is a call, so all subexpressions are sequenced before the result. 
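    // For example, with an overloaded operator<<, "out << i++ << i" evaluates
    // "i++" before the trailing read of "i" in C++17, because the operands
    // follow the sequencing of the corresponding built-in operator.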
15215 SequencedSubexpression Sequenced(*this); 15216 15217 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 15218 assert(SemaRef.getLangOpts().CPlusPlus17 && 15219 "Should only get there with C++17 and above!"); 15220 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 15221 "Should only get there with an overloaded binary operator" 15222 " or an overloaded call operator!"); 15223 15224 if (SequencingKind == LHSBeforeRest) { 15225 assert(CXXOCE->getOperator() == OO_Call && 15226 "We should only have an overloaded call operator here!"); 15227 15228 // This is very similar to VisitCallExpr, except that we only have the 15229 // C++17 case. The postfix-expression is the first argument of the 15230 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 15231 // are in the following arguments. 15232 // 15233 // Note that we intentionally do not visit the callee expression since 15234 // it is just a decayed reference to a function. 15235 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 15236 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 15237 SequenceTree::Seq OldRegion = Region; 15238 15239 assert(CXXOCE->getNumArgs() >= 1 && 15240 "An overloaded call operator must have at least one argument" 15241 " for the postfix-expression!"); 15242 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 15243 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 15244 CXXOCE->getNumArgs() - 1); 15245 15246 // Visit the postfix-expression first. 15247 { 15248 Region = PostfixExprRegion; 15249 SequencedSubexpression Sequenced(*this); 15250 Visit(PostfixExpr); 15251 } 15252 15253 // Then visit the argument expressions. 15254 Region = ArgsRegion; 15255 for (const Expr *Arg : Args) 15256 Visit(Arg); 15257 15258 Region = OldRegion; 15259 Tree.merge(PostfixExprRegion); 15260 Tree.merge(ArgsRegion); 15261 } else { 15262 assert(CXXOCE->getNumArgs() == 2 && 15263 "Should only have two arguments here!"); 15264 assert((SequencingKind == LHSBeforeRHS || 15265 SequencingKind == RHSBeforeLHS) && 15266 "Unexpected sequencing kind!"); 15267 15268 // We do not visit the callee expression since it is just a decayed 15269 // reference to a function. 15270 const Expr *E1 = CXXOCE->getArg(0); 15271 const Expr *E2 = CXXOCE->getArg(1); 15272 if (SequencingKind == RHSBeforeLHS) 15273 std::swap(E1, E2); 15274 15275 return VisitSequencedExpressions(E1, E2); 15276 } 15277 }); 15278 } 15279 15280 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 15281 // This is a call, so all subexpressions are sequenced before the result. 15282 SequencedSubexpression Sequenced(*this); 15283 15284 if (!CCE->isListInitialization()) 15285 return VisitExpr(CCE); 15286 15287 // In C++11, list initializations are sequenced. 15288 SmallVector<SequenceTree::Seq, 32> Elts; 15289 SequenceTree::Seq Parent = Region; 15290 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 15291 E = CCE->arg_end(); 15292 I != E; ++I) { 15293 Region = Tree.allocate(Parent); 15294 Elts.push_back(Region); 15295 Visit(*I); 15296 } 15297 15298 // Forget that the initializers are sequenced. 15299 Region = Parent; 15300 for (unsigned I = 0; I < Elts.size(); ++I) 15301 Tree.merge(Elts[I]); 15302 } 15303 15304 void VisitInitListExpr(const InitListExpr *ILE) { 15305 if (!SemaRef.getLangOpts().CPlusPlus11) 15306 return VisitExpr(ILE); 15307 15308 // In C++11, list initializations are sequenced. 
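    // C++11 [dcl.init.list]p4:
    //   Within the initializer-list of a braced-init-list, the
    //   initializer-clauses [...] are evaluated in the order in which they
    //   appear.
    // So, for example, "int a[] = {i++, i++};" is well-defined.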
15309 SmallVector<SequenceTree::Seq, 32> Elts; 15310 SequenceTree::Seq Parent = Region; 15311 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 15312 const Expr *E = ILE->getInit(I); 15313 if (!E) 15314 continue; 15315 Region = Tree.allocate(Parent); 15316 Elts.push_back(Region); 15317 Visit(E); 15318 } 15319 15320 // Forget that the initializers are sequenced. 15321 Region = Parent; 15322 for (unsigned I = 0; I < Elts.size(); ++I) 15323 Tree.merge(Elts[I]); 15324 } 15325 }; 15326 15327 } // namespace 15328 15329 void Sema::CheckUnsequencedOperations(const Expr *E) { 15330 SmallVector<const Expr *, 8> WorkList; 15331 WorkList.push_back(E); 15332 while (!WorkList.empty()) { 15333 const Expr *Item = WorkList.pop_back_val(); 15334 SequenceChecker(*this, Item, WorkList); 15335 } 15336 } 15337 15338 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 15339 bool IsConstexpr) { 15340 llvm::SaveAndRestore<bool> ConstantContext( 15341 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 15342 CheckImplicitConversions(E, CheckLoc); 15343 if (!E->isInstantiationDependent()) 15344 CheckUnsequencedOperations(E); 15345 if (!IsConstexpr && !E->isValueDependent()) 15346 CheckForIntOverflow(E); 15347 DiagnoseMisalignedMembers(); 15348 } 15349 15350 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 15351 FieldDecl *BitField, 15352 Expr *Init) { 15353 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 15354 } 15355 15356 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 15357 SourceLocation Loc) { 15358 if (!PType->isVariablyModifiedType()) 15359 return; 15360 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 15361 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 15362 return; 15363 } 15364 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 15365 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 15366 return; 15367 } 15368 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 15369 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 15370 return; 15371 } 15372 15373 const ArrayType *AT = S.Context.getAsArrayType(PType); 15374 if (!AT) 15375 return; 15376 15377 if (AT->getSizeModifier() != ArrayType::Star) { 15378 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 15379 return; 15380 } 15381 15382 S.Diag(Loc, diag::err_array_star_in_function_definition); 15383 } 15384 15385 /// CheckParmsForFunctionDef - Check that the parameters of the given 15386 /// function are appropriate for the definition of a function. This 15387 /// takes care of any checks that cannot be performed on the 15388 /// declaration itself, e.g., that the types of each of the function 15389 /// parameters are complete. 15390 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 15391 bool CheckParameterNames) { 15392 bool HasInvalidParm = false; 15393 for (ParmVarDecl *Param : Parameters) { 15394 // C99 6.7.5.3p4: the parameters in a parameter type list in a 15395 // function declarator that is part of a function definition of 15396 // that function shall not have incomplete type. 15397 // 15398 // This is also C++ [dcl.fct]p6. 
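    // For example, "void f(struct S s) {}" is ill-formed when only a forward
    // declaration of "struct S" is visible, even though the same declarator
    // would be acceptable in a non-defining declaration.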
15399 if (!Param->isInvalidDecl() && 15400 RequireCompleteType(Param->getLocation(), Param->getType(), 15401 diag::err_typecheck_decl_incomplete_type)) { 15402 Param->setInvalidDecl(); 15403 HasInvalidParm = true; 15404 } 15405 15406 // C99 6.9.1p5: If the declarator includes a parameter type list, the 15407 // declaration of each parameter shall include an identifier. 15408 if (CheckParameterNames && Param->getIdentifier() == nullptr && 15409 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 15410 // Diagnose this as an extension in C17 and earlier. 15411 if (!getLangOpts().C2x) 15412 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 15413 } 15414 15415 // C99 6.7.5.3p12: 15416 // If the function declarator is not part of a definition of that 15417 // function, parameters may have incomplete type and may use the [*] 15418 // notation in their sequences of declarator specifiers to specify 15419 // variable length array types. 15420 QualType PType = Param->getOriginalType(); 15421 // FIXME: This diagnostic should point the '[*]' if source-location 15422 // information is added for it. 15423 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 15424 15425 // If the parameter is a c++ class type and it has to be destructed in the 15426 // callee function, declare the destructor so that it can be called by the 15427 // callee function. Do not perform any direct access check on the dtor here. 15428 if (!Param->isInvalidDecl()) { 15429 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 15430 if (!ClassDecl->isInvalidDecl() && 15431 !ClassDecl->hasIrrelevantDestructor() && 15432 !ClassDecl->isDependentContext() && 15433 ClassDecl->isParamDestroyedInCallee()) { 15434 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 15435 MarkFunctionReferenced(Param->getLocation(), Destructor); 15436 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 15437 } 15438 } 15439 } 15440 15441 // Parameters with the pass_object_size attribute only need to be marked 15442 // constant at function definitions. Because we lack information about 15443 // whether we're on a declaration or definition when we're instantiating the 15444 // attribute, we need to check for constness here. 15445 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 15446 if (!Param->getType().isConstQualified()) 15447 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 15448 << Attr->getSpelling() << 1; 15449 15450 // Check for parameter names shadowing fields from the class. 15451 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 15452 // The owning context for the parameter should be the function, but we 15453 // want to see if this function's declaration context is a record. 15454 DeclContext *DC = Param->getDeclContext(); 15455 if (DC && DC->isFunctionOrMethod()) { 15456 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 15457 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 15458 RD, /*DeclIsField*/ false); 15459 } 15460 } 15461 } 15462 15463 return HasInvalidParm; 15464 } 15465 15466 Optional<std::pair<CharUnits, CharUnits>> 15467 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 15468 15469 /// Compute the alignment and offset of the base class object given the 15470 /// derived-to-base cast expression and the alignment and offset of the derived 15471 /// class object. 
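/// For example, converting a pointer to a derived class into a pointer to a
/// non-virtual base at a non-zero offset simply adds that base-class offset to
/// the running offset, while for virtual bases only a conservative lower bound
/// on the alignment is kept.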
15472 static std::pair<CharUnits, CharUnits> 15473 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 15474 CharUnits BaseAlignment, CharUnits Offset, 15475 ASTContext &Ctx) { 15476 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 15477 ++PathI) { 15478 const CXXBaseSpecifier *Base = *PathI; 15479 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 15480 if (Base->isVirtual()) { 15481 // The complete object may have a lower alignment than the non-virtual 15482 // alignment of the base, in which case the base may be misaligned. Choose 15483 // the smaller of the non-virtual alignment and BaseAlignment, which is a 15484 // conservative lower bound of the complete object alignment. 15485 CharUnits NonVirtualAlignment = 15486 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 15487 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 15488 Offset = CharUnits::Zero(); 15489 } else { 15490 const ASTRecordLayout &RL = 15491 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 15492 Offset += RL.getBaseClassOffset(BaseDecl); 15493 } 15494 DerivedType = Base->getType(); 15495 } 15496 15497 return std::make_pair(BaseAlignment, Offset); 15498 } 15499 15500 /// Compute the alignment and offset of a binary additive operator. 15501 static Optional<std::pair<CharUnits, CharUnits>> 15502 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 15503 bool IsSub, ASTContext &Ctx) { 15504 QualType PointeeType = PtrE->getType()->getPointeeType(); 15505 15506 if (!PointeeType->isConstantSizeType()) 15507 return llvm::None; 15508 15509 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 15510 15511 if (!P) 15512 return llvm::None; 15513 15514 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 15515 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 15516 CharUnits Offset = EltSize * IdxRes->getExtValue(); 15517 if (IsSub) 15518 Offset = -Offset; 15519 return std::make_pair(P->first, P->second + Offset); 15520 } 15521 15522 // If the integer expression isn't a constant expression, compute the lower 15523 // bound of the alignment using the alignment and offset of the pointer 15524 // expression and the element size. 15525 return std::make_pair( 15526 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 15527 CharUnits::Zero()); 15528 } 15529 15530 /// This helper function takes an lvalue expression and returns the alignment of 15531 /// a VarDecl and a constant offset from the VarDecl. 
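/// For example, for "S s; ... s.a[2].b" it returns the alignment of "s"
/// together with the constant byte offset of "a[2].b" within "S" (provided the
/// array index is a constant expression).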
15532 Optional<std::pair<CharUnits, CharUnits>> 15533 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 15534 E = E->IgnoreParens(); 15535 switch (E->getStmtClass()) { 15536 default: 15537 break; 15538 case Stmt::CStyleCastExprClass: 15539 case Stmt::CXXStaticCastExprClass: 15540 case Stmt::ImplicitCastExprClass: { 15541 auto *CE = cast<CastExpr>(E); 15542 const Expr *From = CE->getSubExpr(); 15543 switch (CE->getCastKind()) { 15544 default: 15545 break; 15546 case CK_NoOp: 15547 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15548 case CK_UncheckedDerivedToBase: 15549 case CK_DerivedToBase: { 15550 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15551 if (!P) 15552 break; 15553 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15554 P->second, Ctx); 15555 } 15556 } 15557 break; 15558 } 15559 case Stmt::ArraySubscriptExprClass: { 15560 auto *ASE = cast<ArraySubscriptExpr>(E); 15561 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15562 false, Ctx); 15563 } 15564 case Stmt::DeclRefExprClass: { 15565 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15566 // FIXME: If VD is captured by copy or is an escaping __block variable, 15567 // use the alignment of VD's type. 15568 if (!VD->getType()->isReferenceType()) 15569 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15570 if (VD->hasInit()) 15571 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15572 } 15573 break; 15574 } 15575 case Stmt::MemberExprClass: { 15576 auto *ME = cast<MemberExpr>(E); 15577 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15578 if (!FD || FD->getType()->isReferenceType() || 15579 FD->getParent()->isInvalidDecl()) 15580 break; 15581 Optional<std::pair<CharUnits, CharUnits>> P; 15582 if (ME->isArrow()) 15583 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15584 else 15585 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15586 if (!P) 15587 break; 15588 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15589 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 15590 return std::make_pair(P->first, 15591 P->second + CharUnits::fromQuantity(Offset)); 15592 } 15593 case Stmt::UnaryOperatorClass: { 15594 auto *UO = cast<UnaryOperator>(E); 15595 switch (UO->getOpcode()) { 15596 default: 15597 break; 15598 case UO_Deref: 15599 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 15600 } 15601 break; 15602 } 15603 case Stmt::BinaryOperatorClass: { 15604 auto *BO = cast<BinaryOperator>(E); 15605 auto Opcode = BO->getOpcode(); 15606 switch (Opcode) { 15607 default: 15608 break; 15609 case BO_Comma: 15610 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 15611 } 15612 break; 15613 } 15614 } 15615 return llvm::None; 15616 } 15617 15618 /// This helper function takes a pointer expression and returns the alignment of 15619 /// a VarDecl and a constant offset from the VarDecl. 
15620 Optional<std::pair<CharUnits, CharUnits>> 15621 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 15622 E = E->IgnoreParens(); 15623 switch (E->getStmtClass()) { 15624 default: 15625 break; 15626 case Stmt::CStyleCastExprClass: 15627 case Stmt::CXXStaticCastExprClass: 15628 case Stmt::ImplicitCastExprClass: { 15629 auto *CE = cast<CastExpr>(E); 15630 const Expr *From = CE->getSubExpr(); 15631 switch (CE->getCastKind()) { 15632 default: 15633 break; 15634 case CK_NoOp: 15635 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15636 case CK_ArrayToPointerDecay: 15637 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15638 case CK_UncheckedDerivedToBase: 15639 case CK_DerivedToBase: { 15640 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15641 if (!P) 15642 break; 15643 return getDerivedToBaseAlignmentAndOffset( 15644 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 15645 } 15646 } 15647 break; 15648 } 15649 case Stmt::CXXThisExprClass: { 15650 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 15651 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 15652 return std::make_pair(Alignment, CharUnits::Zero()); 15653 } 15654 case Stmt::UnaryOperatorClass: { 15655 auto *UO = cast<UnaryOperator>(E); 15656 if (UO->getOpcode() == UO_AddrOf) 15657 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 15658 break; 15659 } 15660 case Stmt::BinaryOperatorClass: { 15661 auto *BO = cast<BinaryOperator>(E); 15662 auto Opcode = BO->getOpcode(); 15663 switch (Opcode) { 15664 default: 15665 break; 15666 case BO_Add: 15667 case BO_Sub: { 15668 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 15669 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 15670 std::swap(LHS, RHS); 15671 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 15672 Ctx); 15673 } 15674 case BO_Comma: 15675 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 15676 } 15677 break; 15678 } 15679 } 15680 return llvm::None; 15681 } 15682 15683 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 15684 // See if we can compute the alignment of a VarDecl and an offset from it. 15685 Optional<std::pair<CharUnits, CharUnits>> P = 15686 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 15687 15688 if (P) 15689 return P->first.alignmentAtOffset(P->second); 15690 15691 // If that failed, return the type's alignment. 15692 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 15693 } 15694 15695 /// CheckCastAlign - Implements -Wcast-align, which warns when a 15696 /// pointer cast increases the alignment requirements. 15697 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 15698 // This is actually a lot of work to potentially be doing on every 15699 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 15700 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 15701 return; 15702 15703 // Ignore dependent types. 15704 if (T->isDependentType() || Op->getType()->isDependentType()) 15705 return; 15706 15707 // Require that the destination be a pointer type. 15708 const PointerType *DestPtr = T->getAs<PointerType>(); 15709 if (!DestPtr) return; 15710 15711 // If the destination has alignment 1, we're done. 
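  // For example, on a target where "int" requires 4-byte alignment, casting a
  // plain "char *" to "int *" raises the alignment requirement from 1 to 4 and
  // is diagnosed below, while the reverse cast is always fine.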
15712 QualType DestPointee = DestPtr->getPointeeType(); 15713 if (DestPointee->isIncompleteType()) return; 15714 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 15715 if (DestAlign.isOne()) return; 15716 15717 // Require that the source be a pointer type. 15718 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 15719 if (!SrcPtr) return; 15720 QualType SrcPointee = SrcPtr->getPointeeType(); 15721 15722 // Explicitly allow casts from cv void*. We already implicitly 15723 // allowed casts to cv void*, since they have alignment 1. 15724 // Also allow casts involving incomplete types, which implicitly 15725 // includes 'void'. 15726 if (SrcPointee->isIncompleteType()) return; 15727 15728 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 15729 15730 if (SrcAlign >= DestAlign) return; 15731 15732 Diag(TRange.getBegin(), diag::warn_cast_align) 15733 << Op->getType() << T 15734 << static_cast<unsigned>(SrcAlign.getQuantity()) 15735 << static_cast<unsigned>(DestAlign.getQuantity()) 15736 << TRange << Op->getSourceRange(); 15737 } 15738 15739 /// Check whether this array fits the idiom of a size-one tail padded 15740 /// array member of a struct. 15741 /// 15742 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 15743 /// commonly used to emulate flexible arrays in C89 code. 15744 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 15745 const NamedDecl *ND) { 15746 if (Size != 1 || !ND) return false; 15747 15748 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 15749 if (!FD) return false; 15750 15751 // Don't consider sizes resulting from macro expansions or template argument 15752 // substitution to form C89 tail-padded arrays. 15753 15754 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 15755 while (TInfo) { 15756 TypeLoc TL = TInfo->getTypeLoc(); 15757 // Look through typedefs. 15758 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 15759 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 15760 TInfo = TDL->getTypeSourceInfo(); 15761 continue; 15762 } 15763 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 15764 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 15765 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 15766 return false; 15767 } 15768 break; 15769 } 15770 15771 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 15772 if (!RD) return false; 15773 if (RD->isUnion()) return false; 15774 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 15775 if (!CRD->isStandardLayout()) return false; 15776 } 15777 15778 // See if this is the last field decl in the record. 15779 const Decl *D = FD; 15780 while ((D = D->getNextDeclInContext())) 15781 if (isa<FieldDecl>(D)) 15782 return false; 15783 return true; 15784 } 15785 15786 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 15787 const ArraySubscriptExpr *ASE, 15788 bool AllowOnePastEnd, bool IndexNegated) { 15789 // Already diagnosed by the constant evaluator. 15790 if (isConstantEvaluated()) 15791 return; 15792 15793 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 15794 if (IndexExpr->isValueDependent()) 15795 return; 15796 15797 const Type *EffectiveType = 15798 BaseExpr->getType()->getPointeeOrArrayElementType(); 15799 BaseExpr = BaseExpr->IgnoreParenCasts(); 15800 const ConstantArrayType *ArrayTy = 15801 Context.getAsConstantArrayType(BaseExpr->getType()); 15802 15803 const Type *BaseType = 15804 ArrayTy == nullptr ? 
nullptr : ArrayTy->getElementType().getTypePtr(); 15805 bool IsUnboundedArray = (BaseType == nullptr); 15806 if (EffectiveType->isDependentType() || 15807 (!IsUnboundedArray && BaseType->isDependentType())) 15808 return; 15809 15810 Expr::EvalResult Result; 15811 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 15812 return; 15813 15814 llvm::APSInt index = Result.Val.getInt(); 15815 if (IndexNegated) { 15816 index.setIsUnsigned(false); 15817 index = -index; 15818 } 15819 15820 const NamedDecl *ND = nullptr; 15821 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15822 ND = DRE->getDecl(); 15823 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 15824 ND = ME->getMemberDecl(); 15825 15826 if (IsUnboundedArray) { 15827 if (EffectiveType->isFunctionType()) 15828 return; 15829 if (index.isUnsigned() || !index.isNegative()) { 15830 const auto &ASTC = getASTContext(); 15831 unsigned AddrBits = 15832 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 15833 EffectiveType->getCanonicalTypeInternal())); 15834 if (index.getBitWidth() < AddrBits) 15835 index = index.zext(AddrBits); 15836 Optional<CharUnits> ElemCharUnits = 15837 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 15838 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 15839 // pointer) bounds-checking isn't meaningful. 15840 if (!ElemCharUnits) 15841 return; 15842 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 15843 // If index has more active bits than address space, we already know 15844 // we have a bounds violation to warn about. Otherwise, compute 15845 // address of (index + 1)th element, and warn about bounds violation 15846 // only if that address exceeds address space. 15847 if (index.getActiveBits() <= AddrBits) { 15848 bool Overflow; 15849 llvm::APInt Product(index); 15850 Product += 1; 15851 Product = Product.umul_ov(ElemBytes, Overflow); 15852 if (!Overflow && Product.getActiveBits() <= AddrBits) 15853 return; 15854 } 15855 15856 // Need to compute max possible elements in address space, since that 15857 // is included in diag message. 15858 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 15859 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 15860 MaxElems += 1; 15861 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 15862 MaxElems = MaxElems.udiv(ElemBytes); 15863 15864 unsigned DiagID = 15865 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 15866 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 15867 15868 // Diag message shows element size in bits and in "bytes" (platform- 15869 // dependent CharUnits) 15870 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15871 PDiag(DiagID) 15872 << toString(index, 10, true) << AddrBits 15873 << (unsigned)ASTC.toBits(*ElemCharUnits) 15874 << toString(ElemBytes, 10, false) 15875 << toString(MaxElems, 10, false) 15876 << (unsigned)MaxElems.getLimitedValue(~0U) 15877 << IndexExpr->getSourceRange()); 15878 15879 if (!ND) { 15880 // Try harder to find a NamedDecl to point at in the note. 
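        // For example, for "p[1][2]" only the innermost base "p" is a
        // DeclRefExpr, so strip the intervening ArraySubscriptExprs first.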
15881 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15882 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15883 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15884 ND = DRE->getDecl(); 15885 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15886 ND = ME->getMemberDecl(); 15887 } 15888 15889 if (ND) 15890 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15891 PDiag(diag::note_array_declared_here) << ND); 15892 } 15893 return; 15894 } 15895 15896 if (index.isUnsigned() || !index.isNegative()) { 15897 // It is possible that the type of the base expression after 15898 // IgnoreParenCasts is incomplete, even though the type of the base 15899 // expression before IgnoreParenCasts is complete (see PR39746 for an 15900 // example). In this case we have no information about whether the array 15901 // access exceeds the array bounds. However we can still diagnose an array 15902 // access which precedes the array bounds. 15903 if (BaseType->isIncompleteType()) 15904 return; 15905 15906 llvm::APInt size = ArrayTy->getSize(); 15907 if (!size.isStrictlyPositive()) 15908 return; 15909 15910 if (BaseType != EffectiveType) { 15911 // Make sure we're comparing apples to apples when comparing index to size 15912 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 15913 uint64_t array_typesize = Context.getTypeSize(BaseType); 15914 // Handle ptrarith_typesize being zero, such as when casting to void* 15915 if (!ptrarith_typesize) ptrarith_typesize = 1; 15916 if (ptrarith_typesize != array_typesize) { 15917 // There's a cast to a different size type involved 15918 uint64_t ratio = array_typesize / ptrarith_typesize; 15919 // TODO: Be smarter about handling cases where array_typesize is not a 15920 // multiple of ptrarith_typesize 15921 if (ptrarith_typesize * ratio == array_typesize) 15922 size *= llvm::APInt(size.getBitWidth(), ratio); 15923 } 15924 } 15925 15926 if (size.getBitWidth() > index.getBitWidth()) 15927 index = index.zext(size.getBitWidth()); 15928 else if (size.getBitWidth() < index.getBitWidth()) 15929 size = size.zext(index.getBitWidth()); 15930 15931 // For array subscripting the index must be less than size, but for pointer 15932 // arithmetic also allow the index (offset) to be equal to size since 15933 // computing the next address after the end of the array is legal and 15934 // commonly done e.g. in C++ iterators and range-based for loops. 15935 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 15936 return; 15937 15938 // Also don't warn for arrays of size 1 which are members of some 15939 // structure. These are often used to approximate flexible arrays in C89 15940 // code. 15941 if (IsTailPaddedMemberArray(*this, size, ND)) 15942 return; 15943 15944 // Suppress the warning if the subscript expression (as identified by the 15945 // ']' location) and the index expression are both from macro expansions 15946 // within a system header. 15947 if (ASE) { 15948 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 15949 ASE->getRBracketLoc()); 15950 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 15951 SourceLocation IndexLoc = 15952 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 15953 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 15954 return; 15955 } 15956 } 15957 15958 unsigned DiagID = ASE ? 
diag::warn_array_index_exceeds_bounds 15959 : diag::warn_ptr_arith_exceeds_bounds; 15960 15961 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15962 PDiag(DiagID) << toString(index, 10, true) 15963 << toString(size, 10, true) 15964 << (unsigned)size.getLimitedValue(~0U) 15965 << IndexExpr->getSourceRange()); 15966 } else { 15967 unsigned DiagID = diag::warn_array_index_precedes_bounds; 15968 if (!ASE) { 15969 DiagID = diag::warn_ptr_arith_precedes_bounds; 15970 if (index.isNegative()) index = -index; 15971 } 15972 15973 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15974 PDiag(DiagID) << toString(index, 10, true) 15975 << IndexExpr->getSourceRange()); 15976 } 15977 15978 if (!ND) { 15979 // Try harder to find a NamedDecl to point at in the note. 15980 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15981 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15982 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15983 ND = DRE->getDecl(); 15984 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15985 ND = ME->getMemberDecl(); 15986 } 15987 15988 if (ND) 15989 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15990 PDiag(diag::note_array_declared_here) << ND); 15991 } 15992 15993 void Sema::CheckArrayAccess(const Expr *expr) { 15994 int AllowOnePastEnd = 0; 15995 while (expr) { 15996 expr = expr->IgnoreParenImpCasts(); 15997 switch (expr->getStmtClass()) { 15998 case Stmt::ArraySubscriptExprClass: { 15999 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 16000 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 16001 AllowOnePastEnd > 0); 16002 expr = ASE->getBase(); 16003 break; 16004 } 16005 case Stmt::MemberExprClass: { 16006 expr = cast<MemberExpr>(expr)->getBase(); 16007 break; 16008 } 16009 case Stmt::OMPArraySectionExprClass: { 16010 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 16011 if (ASE->getLowerBound()) 16012 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 16013 /*ASE=*/nullptr, AllowOnePastEnd > 0); 16014 return; 16015 } 16016 case Stmt::UnaryOperatorClass: { 16017 // Only unwrap the * and & unary operators 16018 const UnaryOperator *UO = cast<UnaryOperator>(expr); 16019 expr = UO->getSubExpr(); 16020 switch (UO->getOpcode()) { 16021 case UO_AddrOf: 16022 AllowOnePastEnd++; 16023 break; 16024 case UO_Deref: 16025 AllowOnePastEnd--; 16026 break; 16027 default: 16028 return; 16029 } 16030 break; 16031 } 16032 case Stmt::ConditionalOperatorClass: { 16033 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 16034 if (const Expr *lhs = cond->getLHS()) 16035 CheckArrayAccess(lhs); 16036 if (const Expr *rhs = cond->getRHS()) 16037 CheckArrayAccess(rhs); 16038 return; 16039 } 16040 case Stmt::CXXOperatorCallExprClass: { 16041 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 16042 for (const auto *Arg : OCE->arguments()) 16043 CheckArrayAccess(Arg); 16044 return; 16045 } 16046 default: 16047 return; 16048 } 16049 } 16050 } 16051 16052 //===--- CHECK: Objective-C retain cycles ----------------------------------// 16053 16054 namespace { 16055 16056 struct RetainCycleOwner { 16057 VarDecl *Variable = nullptr; 16058 SourceRange Range; 16059 SourceLocation Loc; 16060 bool Indirect = false; 16061 16062 RetainCycleOwner() = default; 16063 16064 void setLocsFrom(Expr *e) { 16065 Loc = e->getExprLoc(); 16066 Range = e->getSourceRange(); 16067 } 16068 }; 16069 16070 } // namespace 16071 16072 /// Consider whether capturing the given variable can possibly lead to 16073 /// a retain cycle. 
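/// A typical example under ARC is a __strong local such as "Foo *x" that is
/// captured by a block which is then stored into a property of the object "x"
/// points to.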
16074 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 16075 // In ARC, it's captured strongly iff the variable has __strong 16076 // lifetime. In MRR, it's captured strongly if the variable is 16077 // __block and has an appropriate type. 16078 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 16079 return false; 16080 16081 owner.Variable = var; 16082 if (ref) 16083 owner.setLocsFrom(ref); 16084 return true; 16085 } 16086 16087 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 16088 while (true) { 16089 e = e->IgnoreParens(); 16090 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 16091 switch (cast->getCastKind()) { 16092 case CK_BitCast: 16093 case CK_LValueBitCast: 16094 case CK_LValueToRValue: 16095 case CK_ARCReclaimReturnedObject: 16096 e = cast->getSubExpr(); 16097 continue; 16098 16099 default: 16100 return false; 16101 } 16102 } 16103 16104 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 16105 ObjCIvarDecl *ivar = ref->getDecl(); 16106 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 16107 return false; 16108 16109 // Try to find a retain cycle in the base. 16110 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 16111 return false; 16112 16113 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 16114 owner.Indirect = true; 16115 return true; 16116 } 16117 16118 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 16119 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 16120 if (!var) return false; 16121 return considerVariable(var, ref, owner); 16122 } 16123 16124 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 16125 if (member->isArrow()) return false; 16126 16127 // Don't count this as an indirect ownership. 16128 e = member->getBase(); 16129 continue; 16130 } 16131 16132 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 16133 // Only pay attention to pseudo-objects on property references. 16134 ObjCPropertyRefExpr *pre 16135 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 16136 ->IgnoreParens()); 16137 if (!pre) return false; 16138 if (pre->isImplicitProperty()) return false; 16139 ObjCPropertyDecl *property = pre->getExplicitProperty(); 16140 if (!property->isRetaining() && 16141 !(property->getPropertyIvarDecl() && 16142 property->getPropertyIvarDecl()->getType() 16143 .getObjCLifetime() == Qualifiers::OCL_Strong)) 16144 return false; 16145 16146 owner.Indirect = true; 16147 if (pre->isSuperReceiver()) { 16148 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 16149 if (!owner.Variable) 16150 return false; 16151 owner.Loc = pre->getLocation(); 16152 owner.Range = pre->getSourceRange(); 16153 return true; 16154 } 16155 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 16156 ->getSourceExpr()); 16157 continue; 16158 } 16159 16160 // Array ivars? 
16161 16162 return false; 16163 } 16164 } 16165 16166 namespace { 16167 16168 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 16169 ASTContext &Context; 16170 VarDecl *Variable; 16171 Expr *Capturer = nullptr; 16172 bool VarWillBeReased = false; 16173 16174 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 16175 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 16176 Context(Context), Variable(variable) {} 16177 16178 void VisitDeclRefExpr(DeclRefExpr *ref) { 16179 if (ref->getDecl() == Variable && !Capturer) 16180 Capturer = ref; 16181 } 16182 16183 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 16184 if (Capturer) return; 16185 Visit(ref->getBase()); 16186 if (Capturer && ref->isFreeIvar()) 16187 Capturer = ref; 16188 } 16189 16190 void VisitBlockExpr(BlockExpr *block) { 16191 // Look inside nested blocks 16192 if (block->getBlockDecl()->capturesVariable(Variable)) 16193 Visit(block->getBlockDecl()->getBody()); 16194 } 16195 16196 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 16197 if (Capturer) return; 16198 if (OVE->getSourceExpr()) 16199 Visit(OVE->getSourceExpr()); 16200 } 16201 16202 void VisitBinaryOperator(BinaryOperator *BinOp) { 16203 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 16204 return; 16205 Expr *LHS = BinOp->getLHS(); 16206 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 16207 if (DRE->getDecl() != Variable) 16208 return; 16209 if (Expr *RHS = BinOp->getRHS()) { 16210 RHS = RHS->IgnoreParenCasts(); 16211 Optional<llvm::APSInt> Value; 16212 VarWillBeReased = 16213 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 16214 *Value == 0); 16215 } 16216 } 16217 } 16218 }; 16219 16220 } // namespace 16221 16222 /// Check whether the given argument is a block which captures a 16223 /// variable. 16224 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 16225 assert(owner.Variable && owner.Loc.isValid()); 16226 16227 e = e->IgnoreParenCasts(); 16228 16229 // Look through [^{...} copy] and Block_copy(^{...}). 16230 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 16231 Selector Cmd = ME->getSelector(); 16232 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 16233 e = ME->getInstanceReceiver(); 16234 if (!e) 16235 return nullptr; 16236 e = e->IgnoreParenCasts(); 16237 } 16238 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 16239 if (CE->getNumArgs() == 1) { 16240 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 16241 if (Fn) { 16242 const IdentifierInfo *FnI = Fn->getIdentifier(); 16243 if (FnI && FnI->isStr("_Block_copy")) { 16244 e = CE->getArg(0)->IgnoreParenCasts(); 16245 } 16246 } 16247 } 16248 } 16249 16250 BlockExpr *block = dyn_cast<BlockExpr>(e); 16251 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 16252 return nullptr; 16253 16254 FindCaptureVisitor visitor(S.Context, owner.Variable); 16255 visitor.Visit(block->getBlockDecl()->getBody()); 16256 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 16257 } 16258 16259 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 16260 RetainCycleOwner &owner) { 16261 assert(capturer); 16262 assert(owner.Variable && owner.Loc.isValid()); 16263 16264 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 16265 << owner.Variable << capturer->getSourceRange(); 16266 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 16267 << owner.Indirect << owner.Range; 16268 } 16269 16270 /// Check for a keyword selector that starts with the word 'add' or 16271 /// 'set'. 16272 static bool isSetterLikeSelector(Selector sel) { 16273 if (sel.isUnarySelector()) return false; 16274 16275 StringRef str = sel.getNameForSlot(0); 16276 while (!str.empty() && str.front() == '_') str = str.substr(1); 16277 if (str.startswith("set")) 16278 str = str.substr(3); 16279 else if (str.startswith("add")) { 16280 // Specially allow 'addOperationWithBlock:'. 16281 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 16282 return false; 16283 str = str.substr(3); 16284 } 16285 else 16286 return false; 16287 16288 if (str.empty()) return true; 16289 return !isLowercase(str.front()); 16290 } 16291 16292 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 16293 ObjCMessageExpr *Message) { 16294 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 16295 Message->getReceiverInterface(), 16296 NSAPI::ClassId_NSMutableArray); 16297 if (!IsMutableArray) { 16298 return None; 16299 } 16300 16301 Selector Sel = Message->getSelector(); 16302 16303 Optional<NSAPI::NSArrayMethodKind> MKOpt = 16304 S.NSAPIObj->getNSArrayMethodKind(Sel); 16305 if (!MKOpt) { 16306 return None; 16307 } 16308 16309 NSAPI::NSArrayMethodKind MK = *MKOpt; 16310 16311 switch (MK) { 16312 case NSAPI::NSMutableArr_addObject: 16313 case NSAPI::NSMutableArr_insertObjectAtIndex: 16314 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 16315 return 0; 16316 case NSAPI::NSMutableArr_replaceObjectAtIndex: 16317 return 1; 16318 16319 default: 16320 return None; 16321 } 16322 16323 return None; 16324 } 16325 16326 static 16327 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 16328 ObjCMessageExpr *Message) { 16329 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 16330 Message->getReceiverInterface(), 16331 NSAPI::ClassId_NSMutableDictionary); 16332 if (!IsMutableDictionary) { 16333 return None; 16334 } 16335 16336 Selector Sel = Message->getSelector(); 16337 16338 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 16339 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 16340 if (!MKOpt) { 16341 return None; 16342 } 16343 16344 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 16345 16346 switch (MK) { 16347 case NSAPI::NSMutableDict_setObjectForKey: 16348 case NSAPI::NSMutableDict_setValueForKey: 16349 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 16350 return 0; 16351 16352 default: 16353 return None; 16354 } 16355 16356 return None; 16357 } 16358 16359 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 16360 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 16361 Message->getReceiverInterface(), 16362 NSAPI::ClassId_NSMutableSet); 16363 16364 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 16365 Message->getReceiverInterface(), 16366 NSAPI::ClassId_NSMutableOrderedSet); 16367 if (!IsMutableSet && !IsMutableOrderedSet) { 16368 return None; 16369 } 16370 16371 Selector Sel = Message->getSelector(); 16372 16373 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 16374 if (!MKOpt) 
{ 16375 return None; 16376 } 16377 16378 NSAPI::NSSetMethodKind MK = *MKOpt; 16379 16380 switch (MK) { 16381 case NSAPI::NSMutableSet_addObject: 16382 case NSAPI::NSOrderedSet_setObjectAtIndex: 16383 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 16384 case NSAPI::NSOrderedSet_insertObjectAtIndex: 16385 return 0; 16386 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 16387 return 1; 16388 } 16389 16390 return None; 16391 } 16392 16393 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 16394 if (!Message->isInstanceMessage()) { 16395 return; 16396 } 16397 16398 Optional<int> ArgOpt; 16399 16400 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 16401 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 16402 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 16403 return; 16404 } 16405 16406 int ArgIndex = *ArgOpt; 16407 16408 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 16409 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 16410 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 16411 } 16412 16413 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 16414 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 16415 if (ArgRE->isObjCSelfExpr()) { 16416 Diag(Message->getSourceRange().getBegin(), 16417 diag::warn_objc_circular_container) 16418 << ArgRE->getDecl() << StringRef("'super'"); 16419 } 16420 } 16421 } else { 16422 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 16423 16424 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 16425 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 16426 } 16427 16428 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 16429 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 16430 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 16431 ValueDecl *Decl = ReceiverRE->getDecl(); 16432 Diag(Message->getSourceRange().getBegin(), 16433 diag::warn_objc_circular_container) 16434 << Decl << Decl; 16435 if (!ArgRE->isObjCSelfExpr()) { 16436 Diag(Decl->getLocation(), 16437 diag::note_objc_circular_container_declared_here) 16438 << Decl; 16439 } 16440 } 16441 } 16442 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 16443 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 16444 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 16445 ObjCIvarDecl *Decl = IvarRE->getDecl(); 16446 Diag(Message->getSourceRange().getBegin(), 16447 diag::warn_objc_circular_container) 16448 << Decl << Decl; 16449 Diag(Decl->getLocation(), 16450 diag::note_objc_circular_container_declared_here) 16451 << Decl; 16452 } 16453 } 16454 } 16455 } 16456 } 16457 16458 /// Check a message send to see if it's likely to cause a retain cycle. 16459 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 16460 // Only check instance methods whose selector looks like a setter. 16461 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 16462 return; 16463 16464 // Try to find a variable that the receiver is strongly owned by. 
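  // For example, in "[x setCompletionHandler:^{ [x finish]; }]" with a
  // __strong "x", the receiver is strongly owned by "x" and the block argument
  // captures "x", so the likely cycle is diagnosed below.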
16465 RetainCycleOwner owner; 16466 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 16467 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 16468 return; 16469 } else { 16470 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 16471 owner.Variable = getCurMethodDecl()->getSelfDecl(); 16472 owner.Loc = msg->getSuperLoc(); 16473 owner.Range = msg->getSuperLoc(); 16474 } 16475 16476 // Check whether the receiver is captured by any of the arguments. 16477 const ObjCMethodDecl *MD = msg->getMethodDecl(); 16478 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 16479 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 16480 // noescape blocks should not be retained by the method. 16481 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 16482 continue; 16483 return diagnoseRetainCycle(*this, capturer, owner); 16484 } 16485 } 16486 } 16487 16488 /// Check a property assign to see if it's likely to cause a retain cycle. 16489 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 16490 RetainCycleOwner owner; 16491 if (!findRetainCycleOwner(*this, receiver, owner)) 16492 return; 16493 16494 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 16495 diagnoseRetainCycle(*this, capturer, owner); 16496 } 16497 16498 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 16499 RetainCycleOwner Owner; 16500 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 16501 return; 16502 16503 // Because we don't have an expression for the variable, we have to set the 16504 // location explicitly here. 16505 Owner.Loc = Var->getLocation(); 16506 Owner.Range = Var->getSourceRange(); 16507 16508 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16509 diagnoseRetainCycle(*this, Capturer, Owner); 16510 } 16511 16512 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16513 Expr *RHS, bool isProperty) { 16514 // Check if RHS is an Objective-C object literal, which also can get 16515 // immediately zapped in a weak reference. Note that we explicitly 16516 // allow ObjCStringLiterals, since those are designed to never really die. 16517 RHS = RHS->IgnoreParenImpCasts(); 16518 16519 // This enum needs to match with the 'select' in 16520 // warn_objc_arc_literal_assign (off-by-1). 16521 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16522 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16523 return false; 16524 16525 S.Diag(Loc, diag::warn_arc_literal_assign) 16526 << (unsigned) Kind 16527 << (isProperty ? 0 : 1) 16528 << RHS->getSourceRange(); 16529 16530 return true; 16531 } 16532 16533 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16534 Qualifiers::ObjCLifetime LT, 16535 Expr *RHS, bool isProperty) { 16536 // Strip off any implicit cast added to get to the one ARC-specific. 16537 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16538 if (cast->getCastKind() == CK_ARCConsumeObject) { 16539 S.Diag(Loc, diag::warn_arc_retained_assign) 16540 << (LT == Qualifiers::OCL_ExplicitNone) 16541 << (isProperty ? 
0 : 1) 16542 << RHS->getSourceRange(); 16543 return true; 16544 } 16545 RHS = cast->getSubExpr(); 16546 } 16547 16548 if (LT == Qualifiers::OCL_Weak && 16549 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16550 return true; 16551 16552 return false; 16553 } 16554 16555 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16556 QualType LHS, Expr *RHS) { 16557 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16558 16559 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16560 return false; 16561 16562 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16563 return true; 16564 16565 return false; 16566 } 16567 16568 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16569 Expr *LHS, Expr *RHS) { 16570 QualType LHSType; 16571 // PropertyRef on LHS type need be directly obtained from 16572 // its declaration as it has a PseudoType. 16573 ObjCPropertyRefExpr *PRE 16574 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16575 if (PRE && !PRE->isImplicitProperty()) { 16576 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16577 if (PD) 16578 LHSType = PD->getType(); 16579 } 16580 16581 if (LHSType.isNull()) 16582 LHSType = LHS->getType(); 16583 16584 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16585 16586 if (LT == Qualifiers::OCL_Weak) { 16587 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16588 getCurFunction()->markSafeWeakUse(LHS); 16589 } 16590 16591 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16592 return; 16593 16594 // FIXME. Check for other life times. 16595 if (LT != Qualifiers::OCL_None) 16596 return; 16597 16598 if (PRE) { 16599 if (PRE->isImplicitProperty()) 16600 return; 16601 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16602 if (!PD) 16603 return; 16604 16605 unsigned Attributes = PD->getPropertyAttributes(); 16606 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16607 // when 'assign' attribute was not explicitly specified 16608 // by user, ignore it and rely on property type itself 16609 // for lifetime info. 16610 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16611 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16612 LHSType->isObjCRetainableType()) 16613 return; 16614 16615 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16616 if (cast->getCastKind() == CK_ARCConsumeObject) { 16617 Diag(Loc, diag::warn_arc_retained_property_assign) 16618 << RHS->getSourceRange(); 16619 return; 16620 } 16621 RHS = cast->getSubExpr(); 16622 } 16623 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16624 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16625 return; 16626 } 16627 } 16628 } 16629 16630 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16631 16632 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16633 SourceLocation StmtLoc, 16634 const NullStmt *Body) { 16635 // Do not warn if the body is a macro that expands to nothing, e.g: 16636 // 16637 // #define CALL(x) 16638 // if (condition) 16639 // CALL(0); 16640 if (Body->hasLeadingEmptyMacro()) 16641 return false; 16642 16643 // Get line numbers of statement and body. 
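  // Only warn when the null statement and the controlling statement end up on
  // the same line, e.g. "if (cond) ;", since a ';' placed alone on the next
  // line is usually intentional.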
  bool StmtLineInvalid;
  unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc,
                                                      &StmtLineInvalid);
  if (StmtLineInvalid)
    return false;

  bool BodyLineInvalid;
  unsigned BodyLine = SourceMgr.getPresumedLineNumber(Body->getSemiLoc(),
                                                      &BodyLineInvalid);
  if (BodyLineInvalid)
    return false;

  // Warn if null statement and body are on the same line.
  if (StmtLine != BodyLine)
    return false;

  return true;
}

void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
                                 const Stmt *Body,
                                 unsigned DiagID) {
  // Since this is a syntactic check, don't emit diagnostics for template
  // instantiations; that just adds noise.
  if (CurrentInstantiationScope)
    return;

  // The body should be a null statement.
  const NullStmt *NBody = dyn_cast<NullStmt>(Body);
  if (!NBody)
    return;

  // Do the usual checks.
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  Diag(NBody->getSemiLoc(), DiagID);
  Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
}

void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
                                 const Stmt *PossibleBody) {
  assert(!CurrentInstantiationScope); // Ensured by caller

  SourceLocation StmtLoc;
  const Stmt *Body;
  unsigned DiagID;
  if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
    StmtLoc = FS->getRParenLoc();
    Body = FS->getBody();
    DiagID = diag::warn_empty_for_body;
  } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
    StmtLoc = WS->getCond()->getSourceRange().getEnd();
    Body = WS->getBody();
    DiagID = diag::warn_empty_while_body;
  } else
    return; // Neither `for' nor `while'.

  // The body should be a null statement.
  const NullStmt *NBody = dyn_cast<NullStmt>(Body);
  if (!NBody)
    return;

  // Skip expensive checks if diagnostic is disabled.
  if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
    return;

  // Do the usual checks.
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  // `for(...);' and `while(...);' are popular idioms, so in order to keep
  // noise level low, emit diagnostics only if for/while is followed by a
  // CompoundStmt, e.g.:
  //    for (int i = 0; i < n; i++);
  //    {
  //      a(i);
  //    }
  // or if for/while is followed by a statement with more indentation
  // than for/while itself:
  //    for (int i = 0; i < n; i++);
  //      a(i);
  bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
  if (!ProbableTypo) {
    bool BodyColInvalid;
    unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
        PossibleBody->getBeginLoc(), &BodyColInvalid);
    if (BodyColInvalid)
      return;

    bool StmtColInvalid;
    unsigned StmtCol =
        SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
    if (StmtColInvalid)
      return;

    if (BodyCol > StmtCol)
      ProbableTypo = true;
  }

  if (ProbableTypo) {
    Diag(NBody->getSemiLoc(), DiagID);
    Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
  }
}

//===--- CHECK: Warn on self move with std::move. -------------------------===//

/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
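/// A minimal illustration (not from the original source) of the code this
/// warning targets:
/// \code
///   std::vector<int> v = get();
///   v = std::move(v);             // warn_self_move
///   s.field = std::move(s.field); // also diagnosed via the MemberExpr walk
/// \endcode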
void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                            SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
    return;

  if (inTemplateInstantiation())
    return;

  // Strip parens and casts away.
  LHSExpr = LHSExpr->IgnoreParenImpCasts();
  RHSExpr = RHSExpr->IgnoreParenImpCasts();

  // Check for a call expression.
  const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
  if (!CE || CE->getNumArgs() != 1)
    return;

  // Check for a call to std::move.
  if (!CE->isCallToStdMove())
    return;

  // Get the argument from std::move.
  RHSExpr = CE->getArg(0);

  const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
  const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);

  // Two DeclRefExpr's, check that the decls are the same.
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  // Member variables require a different approach to check for self moves.
  // MemberExpr's are the same if every nested MemberExpr refers to the same
  // Decl and the base Expr's are either DeclRefExpr's with the same Decl or
  // CXXThisExpr's.
  const Expr *LHSBase = LHSExpr;
  const Expr *RHSBase = RHSExpr;
  const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
  const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
  if (!LHSME || !RHSME)
    return;

  while (LHSME && RHSME) {
    if (LHSME->getMemberDecl()->getCanonicalDecl() !=
        RHSME->getMemberDecl()->getCanonicalDecl())
      return;

    LHSBase = LHSME->getBase();
    RHSBase = RHSME->getBase();
    LHSME = dyn_cast<MemberExpr>(LHSBase);
    RHSME = dyn_cast<MemberExpr>(RHSBase);
  }

  LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
  RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
}

//===--- Layout compatibility ----------------------------------------------//

static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);

/// Check if two enumeration types are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
  // C++11 [dcl.enum] p8:
  //   Two enumeration types are layout-compatible if they have the same
  //   underlying type.
  return ED1->isComplete() && ED2->isComplete() &&
         C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
}

/// Check if two fields are layout-compatible.
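/// For example (illustrative, not from the original source), 'int a : 3;' and
/// 'int b : 3;' can be layout-compatible, while 'int a : 3;' and 'int b : 4;'
/// differ in bit-field width and are not.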
16851 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16852 FieldDecl *Field2) { 16853 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16854 return false; 16855 16856 if (Field1->isBitField() != Field2->isBitField()) 16857 return false; 16858 16859 if (Field1->isBitField()) { 16860 // Make sure that the bit-fields are the same length. 16861 unsigned Bits1 = Field1->getBitWidthValue(C); 16862 unsigned Bits2 = Field2->getBitWidthValue(C); 16863 16864 if (Bits1 != Bits2) 16865 return false; 16866 } 16867 16868 return true; 16869 } 16870 16871 /// Check if two standard-layout structs are layout-compatible. 16872 /// (C++11 [class.mem] p17) 16873 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16874 RecordDecl *RD2) { 16875 // If both records are C++ classes, check that base classes match. 16876 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16877 // If one of records is a CXXRecordDecl we are in C++ mode, 16878 // thus the other one is a CXXRecordDecl, too. 16879 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16880 // Check number of base classes. 16881 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16882 return false; 16883 16884 // Check the base classes. 16885 for (CXXRecordDecl::base_class_const_iterator 16886 Base1 = D1CXX->bases_begin(), 16887 BaseEnd1 = D1CXX->bases_end(), 16888 Base2 = D2CXX->bases_begin(); 16889 Base1 != BaseEnd1; 16890 ++Base1, ++Base2) { 16891 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 16892 return false; 16893 } 16894 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 16895 // If only RD2 is a C++ class, it should have zero base classes. 16896 if (D2CXX->getNumBases() > 0) 16897 return false; 16898 } 16899 16900 // Check the fields. 16901 RecordDecl::field_iterator Field2 = RD2->field_begin(), 16902 Field2End = RD2->field_end(), 16903 Field1 = RD1->field_begin(), 16904 Field1End = RD1->field_end(); 16905 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 16906 if (!isLayoutCompatible(C, *Field1, *Field2)) 16907 return false; 16908 } 16909 if (Field1 != Field1End || Field2 != Field2End) 16910 return false; 16911 16912 return true; 16913 } 16914 16915 /// Check if two standard-layout unions are layout-compatible. 16916 /// (C++11 [class.mem] p18) 16917 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 16918 RecordDecl *RD2) { 16919 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 16920 for (auto *Field2 : RD2->fields()) 16921 UnmatchedFields.insert(Field2); 16922 16923 for (auto *Field1 : RD1->fields()) { 16924 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 16925 I = UnmatchedFields.begin(), 16926 E = UnmatchedFields.end(); 16927 16928 for ( ; I != E; ++I) { 16929 if (isLayoutCompatible(C, Field1, *I)) { 16930 bool Result = UnmatchedFields.erase(*I); 16931 (void) Result; 16932 assert(Result); 16933 break; 16934 } 16935 } 16936 if (I == E) 16937 return false; 16938 } 16939 16940 return UnmatchedFields.empty(); 16941 } 16942 16943 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 16944 RecordDecl *RD2) { 16945 if (RD1->isUnion() != RD2->isUnion()) 16946 return false; 16947 16948 if (RD1->isUnion()) 16949 return isLayoutCompatibleUnion(C, RD1, RD2); 16950 else 16951 return isLayoutCompatibleStruct(C, RD1, RD2); 16952 } 16953 16954 /// Check if two types are layout-compatible in C++11 sense. 
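/// For example (illustrative), 'struct A { int x; char y; };' and
/// 'struct B { int m; char n; };' are layout-compatible standard-layout
/// structs; two enums are layout-compatible only if both are complete and
/// share the same underlying type.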
16955 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 16956 if (T1.isNull() || T2.isNull()) 16957 return false; 16958 16959 // C++11 [basic.types] p11: 16960 // If two types T1 and T2 are the same type, then T1 and T2 are 16961 // layout-compatible types. 16962 if (C.hasSameType(T1, T2)) 16963 return true; 16964 16965 T1 = T1.getCanonicalType().getUnqualifiedType(); 16966 T2 = T2.getCanonicalType().getUnqualifiedType(); 16967 16968 const Type::TypeClass TC1 = T1->getTypeClass(); 16969 const Type::TypeClass TC2 = T2->getTypeClass(); 16970 16971 if (TC1 != TC2) 16972 return false; 16973 16974 if (TC1 == Type::Enum) { 16975 return isLayoutCompatible(C, 16976 cast<EnumType>(T1)->getDecl(), 16977 cast<EnumType>(T2)->getDecl()); 16978 } else if (TC1 == Type::Record) { 16979 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 16980 return false; 16981 16982 return isLayoutCompatible(C, 16983 cast<RecordType>(T1)->getDecl(), 16984 cast<RecordType>(T2)->getDecl()); 16985 } 16986 16987 return false; 16988 } 16989 16990 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 16991 16992 /// Given a type tag expression find the type tag itself. 16993 /// 16994 /// \param TypeExpr Type tag expression, as it appears in user's code. 16995 /// 16996 /// \param VD Declaration of an identifier that appears in a type tag. 16997 /// 16998 /// \param MagicValue Type tag magic value. 16999 /// 17000 /// \param isConstantEvaluated whether the evalaution should be performed in 17001 17002 /// constant context. 17003 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 17004 const ValueDecl **VD, uint64_t *MagicValue, 17005 bool isConstantEvaluated) { 17006 while(true) { 17007 if (!TypeExpr) 17008 return false; 17009 17010 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 17011 17012 switch (TypeExpr->getStmtClass()) { 17013 case Stmt::UnaryOperatorClass: { 17014 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 17015 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 17016 TypeExpr = UO->getSubExpr(); 17017 continue; 17018 } 17019 return false; 17020 } 17021 17022 case Stmt::DeclRefExprClass: { 17023 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 17024 *VD = DRE->getDecl(); 17025 return true; 17026 } 17027 17028 case Stmt::IntegerLiteralClass: { 17029 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 17030 llvm::APInt MagicValueAPInt = IL->getValue(); 17031 if (MagicValueAPInt.getActiveBits() <= 64) { 17032 *MagicValue = MagicValueAPInt.getZExtValue(); 17033 return true; 17034 } else 17035 return false; 17036 } 17037 17038 case Stmt::BinaryConditionalOperatorClass: 17039 case Stmt::ConditionalOperatorClass: { 17040 const AbstractConditionalOperator *ACO = 17041 cast<AbstractConditionalOperator>(TypeExpr); 17042 bool Result; 17043 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 17044 isConstantEvaluated)) { 17045 if (Result) 17046 TypeExpr = ACO->getTrueExpr(); 17047 else 17048 TypeExpr = ACO->getFalseExpr(); 17049 continue; 17050 } 17051 return false; 17052 } 17053 17054 case Stmt::BinaryOperatorClass: { 17055 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 17056 if (BO->getOpcode() == BO_Comma) { 17057 TypeExpr = BO->getRHS(); 17058 continue; 17059 } 17060 return false; 17061 } 17062 17063 default: 17064 return false; 17065 } 17066 } 17067 } 17068 17069 /// Retrieve the C type corresponding to type tag TypeExpr. 
17070 /// 17071 /// \param TypeExpr Expression that specifies a type tag. 17072 /// 17073 /// \param MagicValues Registered magic values. 17074 /// 17075 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 17076 /// kind. 17077 /// 17078 /// \param TypeInfo Information about the corresponding C type. 17079 /// 17080 /// \param isConstantEvaluated whether the evalaution should be performed in 17081 /// constant context. 17082 /// 17083 /// \returns true if the corresponding C type was found. 17084 static bool GetMatchingCType( 17085 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 17086 const ASTContext &Ctx, 17087 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 17088 *MagicValues, 17089 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 17090 bool isConstantEvaluated) { 17091 FoundWrongKind = false; 17092 17093 // Variable declaration that has type_tag_for_datatype attribute. 17094 const ValueDecl *VD = nullptr; 17095 17096 uint64_t MagicValue; 17097 17098 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 17099 return false; 17100 17101 if (VD) { 17102 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 17103 if (I->getArgumentKind() != ArgumentKind) { 17104 FoundWrongKind = true; 17105 return false; 17106 } 17107 TypeInfo.Type = I->getMatchingCType(); 17108 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 17109 TypeInfo.MustBeNull = I->getMustBeNull(); 17110 return true; 17111 } 17112 return false; 17113 } 17114 17115 if (!MagicValues) 17116 return false; 17117 17118 llvm::DenseMap<Sema::TypeTagMagicValue, 17119 Sema::TypeTagData>::const_iterator I = 17120 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 17121 if (I == MagicValues->end()) 17122 return false; 17123 17124 TypeInfo = I->second; 17125 return true; 17126 } 17127 17128 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 17129 uint64_t MagicValue, QualType Type, 17130 bool LayoutCompatible, 17131 bool MustBeNull) { 17132 if (!TypeTagForDatatypeMagicValues) 17133 TypeTagForDatatypeMagicValues.reset( 17134 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 17135 17136 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 17137 (*TypeTagForDatatypeMagicValues)[Magic] = 17138 TypeTagData(Type, LayoutCompatible, MustBeNull); 17139 } 17140 17141 static bool IsSameCharType(QualType T1, QualType T2) { 17142 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 17143 if (!BT1) 17144 return false; 17145 17146 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 17147 if (!BT2) 17148 return false; 17149 17150 BuiltinType::Kind T1Kind = BT1->getKind(); 17151 BuiltinType::Kind T2Kind = BT2->getKind(); 17152 17153 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 17154 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 17155 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 17156 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 17157 } 17158 17159 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 17160 const ArrayRef<const Expr *> ExprArgs, 17161 SourceLocation CallSiteLoc) { 17162 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 17163 bool IsPointerAttr = Attr->getIsPointer(); 17164 17165 // Retrieve the argument representing the 'type_tag'. 
17166 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 17167 if (TypeTagIdxAST >= ExprArgs.size()) { 17168 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17169 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 17170 return; 17171 } 17172 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 17173 bool FoundWrongKind; 17174 TypeTagData TypeInfo; 17175 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 17176 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 17177 TypeInfo, isConstantEvaluated())) { 17178 if (FoundWrongKind) 17179 Diag(TypeTagExpr->getExprLoc(), 17180 diag::warn_type_tag_for_datatype_wrong_kind) 17181 << TypeTagExpr->getSourceRange(); 17182 return; 17183 } 17184 17185 // Retrieve the argument representing the 'arg_idx'. 17186 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 17187 if (ArgumentIdxAST >= ExprArgs.size()) { 17188 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17189 << 1 << Attr->getArgumentIdx().getSourceIndex(); 17190 return; 17191 } 17192 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 17193 if (IsPointerAttr) { 17194 // Skip implicit cast of pointer to `void *' (as a function argument). 17195 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 17196 if (ICE->getType()->isVoidPointerType() && 17197 ICE->getCastKind() == CK_BitCast) 17198 ArgumentExpr = ICE->getSubExpr(); 17199 } 17200 QualType ArgumentType = ArgumentExpr->getType(); 17201 17202 // Passing a `void*' pointer shouldn't trigger a warning. 17203 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 17204 return; 17205 17206 if (TypeInfo.MustBeNull) { 17207 // Type tag with matching void type requires a null pointer. 17208 if (!ArgumentExpr->isNullPointerConstant(Context, 17209 Expr::NPC_ValueDependentIsNotNull)) { 17210 Diag(ArgumentExpr->getExprLoc(), 17211 diag::warn_type_safety_null_pointer_required) 17212 << ArgumentKind->getName() 17213 << ArgumentExpr->getSourceRange() 17214 << TypeTagExpr->getSourceRange(); 17215 } 17216 return; 17217 } 17218 17219 QualType RequiredType = TypeInfo.Type; 17220 if (IsPointerAttr) 17221 RequiredType = Context.getPointerType(RequiredType); 17222 17223 bool mismatch = false; 17224 if (!TypeInfo.LayoutCompatible) { 17225 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 17226 17227 // C++11 [basic.fundamental] p1: 17228 // Plain char, signed char, and unsigned char are three distinct types. 17229 // 17230 // But we treat plain `char' as equivalent to `signed char' or `unsigned 17231 // char' depending on the current char signedness mode. 
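    // For example (illustrative): if a type tag is registered for 'char *',
    // passing a 'signed char *' argument is accepted on targets where plain
    // 'char' is signed, and an 'unsigned char *' argument is accepted where
    // it is unsigned; see IsSameCharType above.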
17232 if (mismatch) 17233 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 17234 RequiredType->getPointeeType())) || 17235 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 17236 mismatch = false; 17237 } else 17238 if (IsPointerAttr) 17239 mismatch = !isLayoutCompatible(Context, 17240 ArgumentType->getPointeeType(), 17241 RequiredType->getPointeeType()); 17242 else 17243 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 17244 17245 if (mismatch) 17246 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 17247 << ArgumentType << ArgumentKind 17248 << TypeInfo.LayoutCompatible << RequiredType 17249 << ArgumentExpr->getSourceRange() 17250 << TypeTagExpr->getSourceRange(); 17251 } 17252 17253 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 17254 CharUnits Alignment) { 17255 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 17256 } 17257 17258 void Sema::DiagnoseMisalignedMembers() { 17259 for (MisalignedMember &m : MisalignedMembers) { 17260 const NamedDecl *ND = m.RD; 17261 if (ND->getName().empty()) { 17262 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 17263 ND = TD; 17264 } 17265 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 17266 << m.MD << ND << m.E->getSourceRange(); 17267 } 17268 MisalignedMembers.clear(); 17269 } 17270 17271 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 17272 E = E->IgnoreParens(); 17273 if (!T->isPointerType() && !T->isIntegerType()) 17274 return; 17275 if (isa<UnaryOperator>(E) && 17276 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 17277 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 17278 if (isa<MemberExpr>(Op)) { 17279 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 17280 if (MA != MisalignedMembers.end() && 17281 (T->isIntegerType() || 17282 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 17283 Context.getTypeAlignInChars( 17284 T->getPointeeType()) <= MA->Alignment)))) 17285 MisalignedMembers.erase(MA); 17286 } 17287 } 17288 } 17289 17290 void Sema::RefersToMemberWithReducedAlignment( 17291 Expr *E, 17292 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 17293 Action) { 17294 const auto *ME = dyn_cast<MemberExpr>(E); 17295 if (!ME) 17296 return; 17297 17298 // No need to check expressions with an __unaligned-qualified type. 17299 if (E->getType().getQualifiers().hasUnaligned()) 17300 return; 17301 17302 // For a chain of MemberExpr like "a.b.c.d" this list 17303 // will keep FieldDecl's like [d, c, b]. 17304 SmallVector<FieldDecl *, 4> ReverseMemberChain; 17305 const MemberExpr *TopME = nullptr; 17306 bool AnyIsPacked = false; 17307 do { 17308 QualType BaseType = ME->getBase()->getType(); 17309 if (BaseType->isDependentType()) 17310 return; 17311 if (ME->isArrow()) 17312 BaseType = BaseType->getPointeeType(); 17313 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 17314 if (RD->isInvalidDecl()) 17315 return; 17316 17317 ValueDecl *MD = ME->getMemberDecl(); 17318 auto *FD = dyn_cast<FieldDecl>(MD); 17319 // We do not care about non-data members. 
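    // (e.g., static data members and member functions reached through a
    // member access are skipped here).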
17320 if (!FD || FD->isInvalidDecl()) 17321 return; 17322 17323 AnyIsPacked = 17324 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 17325 ReverseMemberChain.push_back(FD); 17326 17327 TopME = ME; 17328 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 17329 } while (ME); 17330 assert(TopME && "We did not compute a topmost MemberExpr!"); 17331 17332 // Not the scope of this diagnostic. 17333 if (!AnyIsPacked) 17334 return; 17335 17336 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 17337 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 17338 // TODO: The innermost base of the member expression may be too complicated. 17339 // For now, just disregard these cases. This is left for future 17340 // improvement. 17341 if (!DRE && !isa<CXXThisExpr>(TopBase)) 17342 return; 17343 17344 // Alignment expected by the whole expression. 17345 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 17346 17347 // No need to do anything else with this case. 17348 if (ExpectedAlignment.isOne()) 17349 return; 17350 17351 // Synthesize offset of the whole access. 17352 CharUnits Offset; 17353 for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain)) 17354 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD)); 17355 17356 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 17357 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 17358 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 17359 17360 // The base expression of the innermost MemberExpr may give 17361 // stronger guarantees than the class containing the member. 17362 if (DRE && !TopME->isArrow()) { 17363 const ValueDecl *VD = DRE->getDecl(); 17364 if (!VD->getType()->isReferenceType()) 17365 CompleteObjectAlignment = 17366 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 17367 } 17368 17369 // Check if the synthesized offset fulfills the alignment. 17370 if (Offset % ExpectedAlignment != 0 || 17371 // It may fulfill the offset it but the effective alignment may still be 17372 // lower than the expected expression alignment. 17373 CompleteObjectAlignment < ExpectedAlignment) { 17374 // If this happens, we want to determine a sensible culprit of this. 17375 // Intuitively, watching the chain of member expressions from right to 17376 // left, we start with the required alignment (as required by the field 17377 // type) but some packed attribute in that chain has reduced the alignment. 17378 // It may happen that another packed structure increases it again. But if 17379 // we are here such increase has not been enough. So pointing the first 17380 // FieldDecl that either is packed or else its RecordDecl is, 17381 // seems reasonable. 
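    // For example (illustrative):
    //   struct __attribute__((packed)) P { char c; int i; };
    //   struct Q { P p; } q;
    //   int *x = &q.p.i;  // 'i' is reported: its parent 'P' is packed.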
17382 FieldDecl *FD = nullptr; 17383 CharUnits Alignment; 17384 for (FieldDecl *FDI : ReverseMemberChain) { 17385 if (FDI->hasAttr<PackedAttr>() || 17386 FDI->getParent()->hasAttr<PackedAttr>()) { 17387 FD = FDI; 17388 Alignment = std::min( 17389 Context.getTypeAlignInChars(FD->getType()), 17390 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 17391 break; 17392 } 17393 } 17394 assert(FD && "We did not find a packed FieldDecl!"); 17395 Action(E, FD->getParent(), FD, Alignment); 17396 } 17397 } 17398 17399 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 17400 using namespace std::placeholders; 17401 17402 RefersToMemberWithReducedAlignment( 17403 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 17404 _2, _3, _4)); 17405 } 17406 17407 // Check if \p Ty is a valid type for the elementwise math builtins. If it is 17408 // not a valid type, emit an error message and return true. Otherwise return 17409 // false. 17410 static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc, 17411 QualType Ty) { 17412 if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) { 17413 S.Diag(Loc, diag::err_builtin_invalid_arg_type) 17414 << 1 << /* vector, integer or float ty*/ 0 << Ty; 17415 return true; 17416 } 17417 return false; 17418 } 17419 17420 bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) { 17421 if (checkArgCount(*this, TheCall, 1)) 17422 return true; 17423 17424 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17425 if (A.isInvalid()) 17426 return true; 17427 17428 TheCall->setArg(0, A.get()); 17429 QualType TyA = A.get()->getType(); 17430 17431 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17432 return true; 17433 17434 TheCall->setType(TyA); 17435 return false; 17436 } 17437 17438 bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) { 17439 if (checkArgCount(*this, TheCall, 2)) 17440 return true; 17441 17442 ExprResult A = TheCall->getArg(0); 17443 ExprResult B = TheCall->getArg(1); 17444 // Do standard promotions between the two arguments, returning their common 17445 // type. 
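  // For example (illustrative), assuming
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  // a call '__builtin_elementwise_max(a, b)' with float4 operands is typed
  // float4 here; operands whose converted types still differ are rejected
  // below with err_typecheck_call_different_arg_types.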
17446 QualType Res = 17447 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison); 17448 if (A.isInvalid() || B.isInvalid()) 17449 return true; 17450 17451 QualType TyA = A.get()->getType(); 17452 QualType TyB = B.get()->getType(); 17453 17454 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType()) 17455 return Diag(A.get()->getBeginLoc(), 17456 diag::err_typecheck_call_different_arg_types) 17457 << TyA << TyB; 17458 17459 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17460 return true; 17461 17462 TheCall->setArg(0, A.get()); 17463 TheCall->setArg(1, B.get()); 17464 TheCall->setType(Res); 17465 return false; 17466 } 17467 17468 bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) { 17469 if (checkArgCount(*this, TheCall, 1)) 17470 return true; 17471 17472 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17473 if (A.isInvalid()) 17474 return true; 17475 17476 TheCall->setArg(0, A.get()); 17477 return false; 17478 } 17479 17480 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 17481 ExprResult CallResult) { 17482 if (checkArgCount(*this, TheCall, 1)) 17483 return ExprError(); 17484 17485 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 17486 if (MatrixArg.isInvalid()) 17487 return MatrixArg; 17488 Expr *Matrix = MatrixArg.get(); 17489 17490 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 17491 if (!MType) { 17492 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17493 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 17494 return ExprError(); 17495 } 17496 17497 // Create returned matrix type by swapping rows and columns of the argument 17498 // matrix type. 17499 QualType ResultType = Context.getConstantMatrixType( 17500 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 17501 17502 // Change the return type to the type of the returned matrix. 17503 TheCall->setType(ResultType); 17504 17505 // Update call argument to use the possibly converted matrix argument. 17506 TheCall->setArg(0, Matrix); 17507 return CallResult; 17508 } 17509 17510 // Get and verify the matrix dimensions. 17511 static llvm::Optional<unsigned> 17512 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 17513 SourceLocation ErrorPos; 17514 Optional<llvm::APSInt> Value = 17515 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 17516 if (!Value) { 17517 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 17518 << Name; 17519 return {}; 17520 } 17521 uint64_t Dim = Value->getZExtValue(); 17522 if (!ConstantMatrixType::isDimensionValid(Dim)) { 17523 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 17524 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 17525 return {}; 17526 } 17527 return Dim; 17528 } 17529 17530 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 17531 ExprResult CallResult) { 17532 if (!getLangOpts().MatrixTypes) { 17533 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 17534 return ExprError(); 17535 } 17536 17537 if (checkArgCount(*this, TheCall, 4)) 17538 return ExprError(); 17539 17540 unsigned PtrArgIdx = 0; 17541 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17542 Expr *RowsExpr = TheCall->getArg(1); 17543 Expr *ColumnsExpr = TheCall->getArg(2); 17544 Expr *StrideExpr = TheCall->getArg(3); 17545 17546 bool ArgError = false; 17547 17548 // Check pointer argument. 
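  // For example (illustrative), in
  //   __builtin_matrix_column_major_load(p, 3, 2, stride)
  // with 'float *p', the element type is 'float' and the eventual result type
  // is a 3x2 matrix of float (requires -fenable-matrix).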
17549 { 17550 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17551 if (PtrConv.isInvalid()) 17552 return PtrConv; 17553 PtrExpr = PtrConv.get(); 17554 TheCall->setArg(0, PtrExpr); 17555 if (PtrExpr->isTypeDependent()) { 17556 TheCall->setType(Context.DependentTy); 17557 return TheCall; 17558 } 17559 } 17560 17561 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17562 QualType ElementTy; 17563 if (!PtrTy) { 17564 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17565 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17566 ArgError = true; 17567 } else { 17568 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 17569 17570 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 17571 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17572 << PtrArgIdx + 1 << /* pointer to element ty*/ 2 17573 << PtrExpr->getType(); 17574 ArgError = true; 17575 } 17576 } 17577 17578 // Apply default Lvalue conversions and convert the expression to size_t. 17579 auto ApplyArgumentConversions = [this](Expr *E) { 17580 ExprResult Conv = DefaultLvalueConversion(E); 17581 if (Conv.isInvalid()) 17582 return Conv; 17583 17584 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 17585 }; 17586 17587 // Apply conversion to row and column expressions. 17588 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 17589 if (!RowsConv.isInvalid()) { 17590 RowsExpr = RowsConv.get(); 17591 TheCall->setArg(1, RowsExpr); 17592 } else 17593 RowsExpr = nullptr; 17594 17595 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 17596 if (!ColumnsConv.isInvalid()) { 17597 ColumnsExpr = ColumnsConv.get(); 17598 TheCall->setArg(2, ColumnsExpr); 17599 } else 17600 ColumnsExpr = nullptr; 17601 17602 // If any any part of the result matrix type is still pending, just use 17603 // Context.DependentTy, until all parts are resolved. 17604 if ((RowsExpr && RowsExpr->isTypeDependent()) || 17605 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 17606 TheCall->setType(Context.DependentTy); 17607 return CallResult; 17608 } 17609 17610 // Check row and column dimensions. 17611 llvm::Optional<unsigned> MaybeRows; 17612 if (RowsExpr) 17613 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this); 17614 17615 llvm::Optional<unsigned> MaybeColumns; 17616 if (ColumnsExpr) 17617 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this); 17618 17619 // Check stride argument. 
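  // For example (illustrative), loading a matrix with 3 rows requires a
  // stride of at least 3:
  //   __builtin_matrix_column_major_load(p, 3, 2, /*Stride=*/2); // too small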
17620 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 17621 if (StrideConv.isInvalid()) 17622 return ExprError(); 17623 StrideExpr = StrideConv.get(); 17624 TheCall->setArg(3, StrideExpr); 17625 17626 if (MaybeRows) { 17627 if (Optional<llvm::APSInt> Value = 17628 StrideExpr->getIntegerConstantExpr(Context)) { 17629 uint64_t Stride = Value->getZExtValue(); 17630 if (Stride < *MaybeRows) { 17631 Diag(StrideExpr->getBeginLoc(), 17632 diag::err_builtin_matrix_stride_too_small); 17633 ArgError = true; 17634 } 17635 } 17636 } 17637 17638 if (ArgError || !MaybeRows || !MaybeColumns) 17639 return ExprError(); 17640 17641 TheCall->setType( 17642 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 17643 return CallResult; 17644 } 17645 17646 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 17647 ExprResult CallResult) { 17648 if (checkArgCount(*this, TheCall, 3)) 17649 return ExprError(); 17650 17651 unsigned PtrArgIdx = 1; 17652 Expr *MatrixExpr = TheCall->getArg(0); 17653 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17654 Expr *StrideExpr = TheCall->getArg(2); 17655 17656 bool ArgError = false; 17657 17658 { 17659 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 17660 if (MatrixConv.isInvalid()) 17661 return MatrixConv; 17662 MatrixExpr = MatrixConv.get(); 17663 TheCall->setArg(0, MatrixExpr); 17664 } 17665 if (MatrixExpr->isTypeDependent()) { 17666 TheCall->setType(Context.DependentTy); 17667 return TheCall; 17668 } 17669 17670 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 17671 if (!MatrixTy) { 17672 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17673 << 1 << /*matrix ty */ 1 << MatrixExpr->getType(); 17674 ArgError = true; 17675 } 17676 17677 { 17678 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17679 if (PtrConv.isInvalid()) 17680 return PtrConv; 17681 PtrExpr = PtrConv.get(); 17682 TheCall->setArg(1, PtrExpr); 17683 if (PtrExpr->isTypeDependent()) { 17684 TheCall->setType(Context.DependentTy); 17685 return TheCall; 17686 } 17687 } 17688 17689 // Check pointer argument. 17690 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17691 if (!PtrTy) { 17692 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17693 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17694 ArgError = true; 17695 } else { 17696 QualType ElementTy = PtrTy->getPointeeType(); 17697 if (ElementTy.isConstQualified()) { 17698 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 17699 ArgError = true; 17700 } 17701 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 17702 if (MatrixTy && 17703 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 17704 Diag(PtrExpr->getBeginLoc(), 17705 diag::err_builtin_matrix_pointer_arg_mismatch) 17706 << ElementTy << MatrixTy->getElementType(); 17707 ArgError = true; 17708 } 17709 } 17710 17711 // Apply default Lvalue conversions and convert the stride expression to 17712 // size_t. 17713 { 17714 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 17715 if (StrideConv.isInvalid()) 17716 return StrideConv; 17717 17718 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 17719 if (StrideConv.isInvalid()) 17720 return StrideConv; 17721 StrideExpr = StrideConv.get(); 17722 TheCall->setArg(2, StrideExpr); 17723 } 17724 17725 // Check stride argument. 
17726 if (MatrixTy) { 17727 if (Optional<llvm::APSInt> Value = 17728 StrideExpr->getIntegerConstantExpr(Context)) { 17729 uint64_t Stride = Value->getZExtValue(); 17730 if (Stride < MatrixTy->getNumRows()) { 17731 Diag(StrideExpr->getBeginLoc(), 17732 diag::err_builtin_matrix_stride_too_small); 17733 ArgError = true; 17734 } 17735 } 17736 } 17737 17738 if (ArgError) 17739 return ExprError(); 17740 17741 return CallResult; 17742 } 17743 17744 /// \brief Enforce the bounds of a TCB 17745 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 17746 /// directly calls other functions in the same TCB as marked by the enforce_tcb 17747 /// and enforce_tcb_leaf attributes. 17748 void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc, 17749 const NamedDecl *Callee) { 17750 const NamedDecl *Caller = getCurFunctionOrMethodDecl(); 17751 17752 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>()) 17753 return; 17754 17755 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 17756 // all TCBs the callee is a part of. 17757 llvm::StringSet<> CalleeTCBs; 17758 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>()) 17759 CalleeTCBs.insert(A->getTCBName()); 17760 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>()) 17761 CalleeTCBs.insert(A->getTCBName()); 17762 17763 // Go through the TCBs the caller is a part of and emit warnings if Caller 17764 // is in a TCB that the Callee is not. 17765 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) { 17766 StringRef CallerTCB = A->getTCBName(); 17767 if (CalleeTCBs.count(CallerTCB) == 0) { 17768 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation) 17769 << Callee << CallerTCB; 17770 } 17771 } 17772 } 17773
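// Illustrative example for the TCB check above (not part of the original
// source):
//   __attribute__((enforce_tcb("net"))) void parse_packet();
//   void log_stuff();
//   __attribute__((enforce_tcb("net"))) void handler() {
//     parse_packet(); // OK: callee is in TCB "net".
//     log_stuff();    // warn_tcb_enforcement_violation: not in TCB "net".
//   }
// A callee marked enforce_tcb_leaf("net") also satisfies the check, but its
// own calls are not constrained to the TCB.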